Sync up with android12-5.10 for the following commits:

2a2327c4e8ac FROMLIST: power_supply: Use of-thermal cdev registration API
b90fe5ef8f96 FROMLIST: power_supply: Register cooling device outside of probe
fc64efcd068c Revert "UPSTREAM: tracefs: Have tracefs directories not set OTH permission bits by default"
1eb3049da0eb FROMGIT: usb: dwc3: gadget: Prevent core from processing stale TRBs
ee1e2de73cc5 UPSTREAM: cgroup-v1: Require capabilities to set release_agent
7e6f112bebea FROMGIT: f2fs: move f2fs to use reader-unfair rwsems
23686f5ee870 UPSTREAM: f2fs: do not bother checkpoint by f2fs_get_node_info
fa055ddfd585 BACKPORT: f2fs: avoid down_write on nat_tree_lock during checkpoint
c8701aa0a76b ANDROID: GKI: enable RCU_BOOST
c34fa06f4bbb FROMLIST: rcu: Don't deboost before reporting expedited quiescent state
3a49d3b67726 FROMGIT: usb: f_fs: Fix use-after-free for epfile
250abe08bbb4 UPSTREAM: usb: gadget: f_fs: Clear ffs_eventfd in ffs_data_clear.
d449d91bc954 ANDROID: update new gki symbol
b2fcb7b63b30 ANDROID: abi: qcom: Add dma_{alloc,free}_noncoherent
5d79e49205a6 UPSTREAM: binder: fix async_free_space accounting for empty parcels
6aa9e78d6e98 FROMGIT: rcu: Allow expedited RCU grace periods on incoming CPUs
2f61ec09b01c ANDROID: abi_gki_aarch64_qcom: Add iommu_setup_dma_ops restricted vh
6a9ff8fa2669 ANDROID: iommu: Add restricted vendor hook
2aba795b317c FROMLIST: arm64: cpufeature: List early Cortex-A510 parts as having broken dbm
2861bbc5b5a4 FROMLIST: arm64: Add Cortex-A510 CPU part definition
b0d13db791b8 FROMGIT: printk: ringbuffer: Improve prb_next_seq() performance
4b1862e4fc69 ANDROID: incremental-fs: fix GPF in pending_reads_dispatch_ioctl
445019bbca5d UPSTREAM: bpf: Fix integer overflow in argument calculation for bpf_map_area_alloc
032a6762957f UPSTREAM: tee: handle lookup of shm with reference count 0
d461f54be339 ANDROID: Incremental-fs: Doc: correct a sysfs path in incfs.rst
1bfc9c16ae45 ANDROID: selftests: fix incfs_test
fd4c6594f5ce ANDROID: incremental-fs: fix mount_fs issue
a512242e66ce BACKPORT: arm64: errata: Add workaround for TSB flush failures
e48051244a03 UPSTREAM: arm64: Add Neoverse-N2, Cortex-A710 CPU part definition
dd3256d4392c UPSTREAM: coresight: trbe: Defer the probe on offline CPUs
71aebf8793c5 UPSTREAM: coresight: etm4x: Use Trace Filtering controls dynamically
2bb8b3c90759 BACKPORT: coresight: etm4x: Save restore TRFCR_EL1
79b64fa78085 UPSTREAM: coresight: tmc-etr: Speed up for bounce buffer in flat mode
aee6af704646 UPSTREAM: coresight: tmc-etr: Add barrier after updating AUX ring buffer
a0009ade3873 Revert half of "ANDROID: cpu/hotplug: create vendor hook for cpu_up/cpu_down"
a863cef344a0 Revert half of "ANDROID: arm64: add vendor hooks for bti and pauth fault"
9f58bcd61459 Revert half of "ANDROID: vendor_hooks: Add param for android_vh_cpu_up/down"
4b3396046c76 Revert "ANDROID: vendor_hooks: Add a hook for task tagging"
d8fe0b1fc2f5 Revert "ANDROID: GKI: net: add vendor hooks for 'struct nf_conn' lifecycle"
92ab2aeca5ae Revert "ANDROID: GKI: net: add vendor hooks for 'struct sock' lifecycle"
b3e6d6eec69f Revert "ANDROID: vendor_hooks: add hook and OEM data for slab shrink"
e09000ee1915 Revert half of "ANDROID: vendor_hooks: Add hooks for memory when debug"
3f305a910171 Revert half of "ANDROID: gic-v3: Add vendor hook to GIC v3"
3b4ca92614fd Merge tag 'android12-5.10.81_r00' into android12-5.10
bdc732d112f3 UPSTREAM: tracefs: Set all files to the same group ownership as the mount option
8455746a4564 UPSTREAM: tracefs: Have new files inherit the ownership of their parent
9c63be2adac9 UPSTREAM: tracefs: Have tracefs directories not set OTH permission bits by default
64095600fd53 Revert "ANDROID: vendor_hooks: Add hooks to recognize special worker thread."
7887091009b2 Revert "ANDROID: sysrq: add vendor hook for sysrq crash information"
63e7148b2729 Revert "ANDROID: user: Add vendor hook to user for GKI purpose"
18975040b944 Revert portions of "ANDROID: sched: Add vendor hooks for sched."
96c08d921060 Revert portions of "ANDROID: vendor_hooks: Add hooks for scheduler"
a32e89883a53 UPSTREAM: vfs: fs_context: fix up param length parsing in legacy_parse_param

New functions/variables are now being tracked as well, that came from the android12-5.10 branch:

Leaf changes summary: 5 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 4 Added functions
Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 1 Added variable

4 Added functions:

  [A] 'function int __traceiter_android_rvh_iommu_setup_dma_ops(void*, device*, u64, u64)'
  [A] 'function void* dma_alloc_noncoherent(device*, size_t, dma_addr_t*, dma_data_direction, gfp_t)'
  [A] 'function void dma_free_noncoherent(device*, size_t, void*, dma_addr_t, dma_data_direction)'
  [A] 'function void static_key_enable_cpuslocked(static_key*)'

1 Added variable:

  [A] 'tracepoint __tracepoint_android_rvh_iommu_setup_dma_ops'

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I7a5a82681cc94f6b3dcd17e159da8976be0bcb78
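For reference, a minimal sketch (not taken from any of the commits above) of how a driver might call the newly tracked dma_{alloc,free}_noncoherent symbols; the device pointer, size and direction are illustrative only:

	#include <linux/dma-mapping.h>

	/* Illustrative only: allocate and free a non-coherent DMA buffer. */
	static int example_noncoherent_alloc(struct device *dev)
	{
		size_t size = 4096;
		dma_addr_t dma_handle;
		void *cpu_addr;

		cpu_addr = dma_alloc_noncoherent(dev, size, &dma_handle,
						 DMA_BIDIRECTIONAL, GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/*
		 * CPU accesses must be bracketed with dma_sync_single_for_cpu()
		 * and dma_sync_single_for_device(), since the mapping is
		 * non-coherent.
		 */

		dma_free_noncoherent(dev, size, cpu_addr, dma_handle,
				     DMA_BIDIRECTIONAL);
		return 0;
	}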
fs/f2fs/xattr.c (824 lines, 20 KiB, C)
// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/xattr.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * Portions of this code from linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 * Red Hat Inc.
 */
#include <linux/rwsem.h>
#include <linux/f2fs_fs.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include "f2fs.h"
#include "xattr.h"
#include "segment.h"

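/*
 * Allocate a buffer for assembling xattrs.  When the requested size matches
 * the per-sb inline xattr slab size, take the allocation from that slab;
 * otherwise fall back to f2fs_kzalloc().  *is_inline records which path was
 * used so that xattr_free() can release the buffer accordingly.
 */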
static void *xattr_alloc(struct f2fs_sb_info *sbi, int size, bool *is_inline)
{
	if (likely(size == sbi->inline_xattr_slab_size)) {
		*is_inline = true;
		return kmem_cache_zalloc(sbi->inline_xattr_slab, GFP_NOFS);
	}
	*is_inline = false;
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void xattr_free(struct f2fs_sb_info *sbi, void *xattr_addr,
							bool is_inline)
{
	if (is_inline)
		kmem_cache_free(sbi->inline_xattr_slab, xattr_addr);
	else
		kfree(xattr_addr);
}

static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
		struct dentry *unused, struct inode *inode,
		const char *name, void *buffer, size_t size, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	switch (handler->flags) {
	case F2FS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case F2FS_XATTR_INDEX_TRUSTED:
	case F2FS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}
	return f2fs_getxattr(inode, handler->flags, name,
			buffer, size, NULL);
}

static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
		struct dentry *unused, struct inode *inode,
		const char *name, const void *value,
		size_t size, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	switch (handler->flags) {
	case F2FS_XATTR_INDEX_USER:
		if (!test_opt(sbi, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case F2FS_XATTR_INDEX_TRUSTED:
	case F2FS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}
	return f2fs_setxattr(inode, handler->flags, name,
					value, size, NULL, flags);
}

static bool f2fs_xattr_user_list(struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);

	return test_opt(sbi, XATTR_USER);
}

static bool f2fs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
		struct dentry *unused, struct inode *inode,
		const char *name, void *buffer, size_t size, int flags)
{
	if (buffer)
		*((char *)buffer) = F2FS_I(inode)->i_advise;
	return sizeof(char);
}

static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
		struct dentry *unused, struct inode *inode,
		const char *name, const void *value,
		size_t size, int flags)
{
	unsigned char old_advise = F2FS_I(inode)->i_advise;
	unsigned char new_advise;

	if (!inode_owner_or_capable(inode))
		return -EPERM;
	if (value == NULL)
		return -EINVAL;

	new_advise = *(char *)value;
	if (new_advise & ~FADVISE_MODIFIABLE_BITS)
		return -EINVAL;

	new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
	new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;

	F2FS_I(inode)->i_advise = new_advise;
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

#ifdef CONFIG_F2FS_FS_SECURITY
static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
		void *page)
{
	const struct xattr *xattr;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
				xattr->name, xattr->value,
				xattr->value_len, (struct page *)page, 0);
		if (err < 0)
			break;
	}
	return err;
}

int f2fs_init_security(struct inode *inode, struct inode *dir,
				const struct qstr *qstr, struct page *ipage)
{
	return security_inode_init_security(inode, dir, qstr,
				&f2fs_initxattrs, ipage);
}
#endif

const struct xattr_handler f2fs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= F2FS_XATTR_INDEX_USER,
	.list	= f2fs_xattr_user_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};

const struct xattr_handler f2fs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= F2FS_XATTR_INDEX_TRUSTED,
	.list	= f2fs_xattr_trusted_list,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};

const struct xattr_handler f2fs_xattr_advise_handler = {
	.name	= F2FS_SYSTEM_ADVISE_NAME,
	.flags	= F2FS_XATTR_INDEX_ADVISE,
	.get	= f2fs_xattr_advise_get,
	.set	= f2fs_xattr_advise_set,
};

const struct xattr_handler f2fs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= F2FS_XATTR_INDEX_SECURITY,
	.get	= f2fs_xattr_generic_get,
	.set	= f2fs_xattr_generic_set,
};

static const struct xattr_handler *f2fs_xattr_handler_map[] = {
	[F2FS_XATTR_INDEX_USER] = &f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	[F2FS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
	[F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[F2FS_XATTR_INDEX_TRUSTED] = &f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
	[F2FS_XATTR_INDEX_SECURITY] = &f2fs_xattr_security_handler,
#endif
	[F2FS_XATTR_INDEX_ADVISE] = &f2fs_xattr_advise_handler,
};

const struct xattr_handler *f2fs_xattr_handlers[] = {
	&f2fs_xattr_user_handler,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&f2fs_xattr_trusted_handler,
#ifdef CONFIG_F2FS_FS_SECURITY
	&f2fs_xattr_security_handler,
#endif
	&f2fs_xattr_advise_handler,
	NULL,
};

static inline const struct xattr_handler *f2fs_xattr_handler(int index)
{
	const struct xattr_handler *handler = NULL;

	if (index > 0 && index < ARRAY_SIZE(f2fs_xattr_handler_map))
		handler = f2fs_xattr_handler_map[index];
	return handler;
}

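/*
 * Walk the packed xattr entries starting at @base_addr and return the entry
 * matching @index, @len and @name.  Each step is bounds-checked against
 * @last_base_addr; NULL is returned if an entry would run past it, which
 * callers treat as on-disk corruption.  If no match is found, the terminating
 * (last) entry is returned instead.
 */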
static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
				void *last_base_addr, int index,
				size_t len, const char *name)
{
	struct f2fs_xattr_entry *entry;

	list_for_each_xattr(entry, base_addr) {
		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
			return NULL;

		if (entry->e_name_index != index)
			continue;
		if (entry->e_name_len != len)
			continue;
		if (!memcmp(entry->e_name, name, len))
			break;
	}
	return entry;
}

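/*
 * Same lookup as __find_xattr(), but restricted to the inode's inline xattr
 * area.  If the search runs off the end of that area, *last_addr is set to
 * the overflowing entry and NULL is returned so that the caller can continue
 * the search in the external xattr node block.
 */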
static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
				void *base_addr, void **last_addr, int index,
				size_t len, const char *name)
{
	struct f2fs_xattr_entry *entry;
	unsigned int inline_size = inline_xattr_size(inode);
	void *max_addr = base_addr + inline_size;

	list_for_each_xattr(entry, base_addr) {
		if ((void *)entry + sizeof(__u32) > max_addr ||
			(void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
			*last_addr = entry;
			return NULL;
		}
		if (entry->e_name_index != index)
			continue;
		if (entry->e_name_len != len)
			continue;
		if (!memcmp(entry->e_name, name, len))
			break;
	}

	/* inline xattr header or entry across max inline xattr size */
	if (IS_XATTR_LAST_ENTRY(entry) &&
		(void *)entry + sizeof(__u32) > max_addr) {
		*last_addr = entry;
		return NULL;
	}
	return entry;
}

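/*
 * Copy the inode's inline xattr area into @txattr_addr.  Uses @ipage when the
 * caller already holds the inode node page, otherwise grabs it here.
 */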
static int read_inline_xattr(struct inode *inode, struct page *ipage,
							void *txattr_addr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int inline_size = inline_xattr_size(inode);
	struct page *page = NULL;
	void *inline_addr;

	if (ipage) {
		inline_addr = inline_xattr_addr(inode, ipage);
	} else {
		page = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(page))
			return PTR_ERR(page);

		inline_addr = inline_xattr_addr(inode, page);
	}
	memcpy(txattr_addr, inline_addr, inline_size);
	f2fs_put_page(page, 1);

	return 0;
}

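/*
 * Copy the contents of the inode's external xattr node block into
 * @txattr_addr, immediately after the inline area.
 */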
static int read_xattr_block(struct inode *inode, void *txattr_addr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	unsigned int inline_size = inline_xattr_size(inode);
	struct page *xpage;
	void *xattr_addr;

	/* The inode already has an extended attribute block. */
	xpage = f2fs_get_node_page(sbi, xnid);
	if (IS_ERR(xpage))
		return PTR_ERR(xpage);

	xattr_addr = page_address(xpage);
	memcpy(txattr_addr + inline_size, xattr_addr, VALID_XATTR_BLOCK_SIZE);
	f2fs_put_page(xpage, 1);

	return 0;
}

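/*
 * Look up a single xattr by (index, name).  The inline area and, if needed,
 * the xattr node block are copied into one temporary buffer and searched with
 * __find_inline_xattr()/__find_xattr().  On success the buffer is handed back
 * to the caller through @base_addr along with the matching entry in @xe; on
 * failure it is freed here.
 */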
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
				unsigned int index, unsigned int len,
				const char *name, struct f2fs_xattr_entry **xe,
				void **base_addr, int *base_size,
				bool *is_inline)
{
	void *cur_addr, *txattr_addr, *last_txattr_addr;
	void *last_addr = NULL;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	unsigned int inline_size = inline_xattr_size(inode);
	int err;

	if (!xnid && !inline_size)
		return -ENODATA;

	*base_size = XATTR_SIZE(inode) + XATTR_PADDING_SIZE;
	txattr_addr = xattr_alloc(F2FS_I_SB(inode), *base_size, is_inline);
	if (!txattr_addr)
		return -ENOMEM;

	last_txattr_addr = (void *)txattr_addr + XATTR_SIZE(inode);

	/* read from inline xattr */
	if (inline_size) {
		err = read_inline_xattr(inode, ipage, txattr_addr);
		if (err)
			goto out;

		*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
						index, len, name);
		if (*xe) {
			*base_size = inline_size;
			goto check;
		}
	}

	/* read from xattr node block */
	if (xnid) {
		err = read_xattr_block(inode, txattr_addr);
		if (err)
			goto out;
	}

	if (last_addr)
		cur_addr = XATTR_HDR(last_addr) - 1;
	else
		cur_addr = txattr_addr;

	*xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
	if (!*xe) {
		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
								inode->i_ino);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
		err = -EFSCORRUPTED;
		goto out;
	}
check:
	if (IS_XATTR_LAST_ENTRY(*xe)) {
		err = -ENODATA;
		goto out;
	}

	*base_addr = txattr_addr;
	return 0;
out:
	xattr_free(F2FS_I_SB(inode), txattr_addr, *is_inline);
	return err;
}

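/*
 * Read both the inline xattr area and the xattr node block into one newly
 * allocated buffer.  If the inode never had xattrs, a fresh header with
 * F2FS_XATTR_MAGIC is initialized in place.
 */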
static int read_all_xattrs(struct inode *inode, struct page *ipage,
				void **base_addr)
{
	struct f2fs_xattr_header *header;
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	unsigned int size = VALID_XATTR_BLOCK_SIZE;
	unsigned int inline_size = inline_xattr_size(inode);
	void *txattr_addr;
	int err;

	txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
			inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
	if (!txattr_addr)
		return -ENOMEM;

	/* read from inline xattr */
	if (inline_size) {
		err = read_inline_xattr(inode, ipage, txattr_addr);
		if (err)
			goto fail;
	}

	/* read from xattr node block */
	if (xnid) {
		err = read_xattr_block(inode, txattr_addr);
		if (err)
			goto fail;
	}

	header = XATTR_HDR(txattr_addr);

	/* never been allocated xattrs */
	if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
		header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
		header->h_refcount = cpu_to_le32(1);
	}
	*base_addr = txattr_addr;
	return 0;
fail:
	kfree(txattr_addr);
	return err;
}

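/*
 * Write the assembled xattr image in @txattr_addr back to disk: the first
 * inline_size bytes go into the inode's inline area, the remainder into the
 * xattr node block.  A new xattr node is allocated when the data no longer
 * fits inline, and the existing one is truncated when it does.
 */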
static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
				void *txattr_addr, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	size_t inline_size = inline_xattr_size(inode);
	struct page *in_page = NULL;
	void *xattr_addr;
	void *inline_addr = NULL;
	struct page *xpage;
	nid_t new_nid = 0;
	int err = 0;

	if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
		if (!f2fs_alloc_nid(sbi, &new_nid))
			return -ENOSPC;

	/* write to inline xattr */
	if (inline_size) {
		if (ipage) {
			inline_addr = inline_xattr_addr(inode, ipage);
		} else {
			in_page = f2fs_get_node_page(sbi, inode->i_ino);
			if (IS_ERR(in_page)) {
				f2fs_alloc_nid_failed(sbi, new_nid);
				return PTR_ERR(in_page);
			}
			inline_addr = inline_xattr_addr(inode, in_page);
		}

		f2fs_wait_on_page_writeback(ipage ? ipage : in_page,
							NODE, true, true);
		/* no need to use xattr node block */
		if (hsize <= inline_size) {
			err = f2fs_truncate_xattr_node(inode);
			f2fs_alloc_nid_failed(sbi, new_nid);
			if (err) {
				f2fs_put_page(in_page, 1);
				return err;
			}
			memcpy(inline_addr, txattr_addr, inline_size);
			set_page_dirty(ipage ? ipage : in_page);
			goto in_page_out;
		}
	}

	/* write to xattr node block */
	if (F2FS_I(inode)->i_xattr_nid) {
		xpage = f2fs_get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			f2fs_alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		f2fs_bug_on(sbi, new_nid);
		f2fs_wait_on_page_writeback(xpage, NODE, true, true);
	} else {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, new_nid);
		xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
		if (IS_ERR(xpage)) {
			err = PTR_ERR(xpage);
			f2fs_alloc_nid_failed(sbi, new_nid);
			goto in_page_out;
		}
		f2fs_alloc_nid_done(sbi, new_nid);
	}
	xattr_addr = page_address(xpage);

	if (inline_size)
		memcpy(inline_addr, txattr_addr, inline_size);
	memcpy(xattr_addr, txattr_addr + inline_size, VALID_XATTR_BLOCK_SIZE);

	if (inline_size)
		set_page_dirty(ipage ? ipage : in_page);
	set_page_dirty(xpage);

	f2fs_put_page(xpage, 1);
in_page_out:
	f2fs_put_page(in_page, 1);
	return err;
}

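/*
 * Get one extended attribute value.  Returns the value size on success,
 * -ENODATA if the attribute does not exist, or -ERANGE if @buffer is too
 * small.  The lookup runs under f2fs_down_read(&i_xattr_sem).
 */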
int f2fs_getxattr(struct inode *inode, int index, const char *name,
		void *buffer, size_t buffer_size, struct page *ipage)
{
	struct f2fs_xattr_entry *entry = NULL;
	int error;
	unsigned int size, len;
	void *base_addr = NULL;
	int base_size;
	bool is_inline;

	if (name == NULL)
		return -EINVAL;

	len = strlen(name);
	if (len > F2FS_NAME_LEN)
		return -ERANGE;

	f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
	error = lookup_all_xattrs(inode, ipage, index, len, name,
				&entry, &base_addr, &base_size, &is_inline);
	f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
	if (error)
		return error;

	size = le16_to_cpu(entry->e_value_size);

	if (buffer && size > buffer_size) {
		error = -ERANGE;
		goto out;
	}

	if (buffer) {
		char *pval = entry->e_name + entry->e_name_len;

		if (base_size - (pval - (char *)base_addr) < size) {
			error = -ERANGE;
			goto out;
		}
		memcpy(buffer, pval, size);
	}
	error = size;
out:
	xattr_free(F2FS_I_SB(inode), base_addr, is_inline);
	return error;
}

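/*
 * List every attribute name visible through a registered handler.  When
 * @buffer is non-NULL the names are copied as NUL-terminated "prefix" plus
 * name strings; either way the return value is the total length needed.
 */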
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_xattr_entry *entry;
	void *base_addr, *last_base_addr;
	int error;
	size_t rest = buffer_size;

	f2fs_down_read(&F2FS_I(inode)->i_xattr_sem);
	error = read_all_xattrs(inode, NULL, &base_addr);
	f2fs_up_read(&F2FS_I(inode)->i_xattr_sem);
	if (error)
		return error;

	last_base_addr = (void *)base_addr + XATTR_SIZE(inode);

	list_for_each_xattr(entry, base_addr) {
		const struct xattr_handler *handler =
			f2fs_xattr_handler(entry->e_name_index);
		const char *prefix;
		size_t prefix_len;
		size_t size;

		if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
						inode->i_ino);
			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
			error = -EFSCORRUPTED;
			goto cleanup;
		}

		if (!handler || (handler->list && !handler->list(dentry)))
			continue;

		prefix = xattr_prefix(handler);
		prefix_len = strlen(prefix);
		size = prefix_len + entry->e_name_len + 1;
		if (buffer) {
			if (size > rest) {
				error = -ERANGE;
				goto cleanup;
			}
			memcpy(buffer, prefix, prefix_len);
			buffer += prefix_len;
			memcpy(buffer, entry->e_name, entry->e_name_len);
			buffer += entry->e_name_len;
			*buffer++ = 0;
		}
		rest -= size;
	}
	error = buffer_size - rest;
cleanup:
	kfree(base_addr);
	return error;
}

static bool f2fs_xattr_value_same(struct f2fs_xattr_entry *entry,
					const void *value, size_t size)
{
	void *pval = entry->e_name + entry->e_name_len;

	return (le16_to_cpu(entry->e_value_size) == size) &&
					!memcmp(pval, value, size);
}

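/*
 * Core setxattr: read the current xattr image, find the entry for @name,
 * enforce XATTR_CREATE/XATTR_REPLACE semantics, drop the old entry, append
 * the new one when @value is non-NULL, and write everything back with
 * write_all_xattrs().  A NULL @value removes the attribute.
 */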
static int __f2fs_setxattr(struct inode *inode, int index,
			const char *name, const void *value, size_t size,
			struct page *ipage, int flags)
{
	struct f2fs_xattr_entry *here, *last;
	void *base_addr, *last_base_addr;
	int found, newsize;
	size_t len;
	__u32 new_hsize;
	int error;

	if (name == NULL)
		return -EINVAL;

	if (value == NULL)
		size = 0;

	len = strlen(name);

	if (len > F2FS_NAME_LEN)
		return -ERANGE;

	if (size > MAX_VALUE_LEN(inode))
		return -E2BIG;

	error = read_all_xattrs(inode, ipage, &base_addr);
	if (error)
		return error;

	last_base_addr = (void *)base_addr + XATTR_SIZE(inode);

	/* find entry with wanted name. */
	here = __find_xattr(base_addr, last_base_addr, index, len, name);
	if (!here) {
		f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
								inode->i_ino);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
		error = -EFSCORRUPTED;
		goto exit;
	}

	found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;

	if (found) {
		if ((flags & XATTR_CREATE)) {
			error = -EEXIST;
			goto exit;
		}

		if (value && f2fs_xattr_value_same(here, value, size))
			goto same;
	} else if ((flags & XATTR_REPLACE)) {
		error = -ENODATA;
		goto exit;
	}

	last = here;
	while (!IS_XATTR_LAST_ENTRY(last)) {
		if ((void *)(last) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
			f2fs_err(F2FS_I_SB(inode), "inode (%lu) has invalid last xattr entry, entry_size: %zu",
					inode->i_ino, ENTRY_SIZE(last));
			set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
			error = -EFSCORRUPTED;
			goto exit;
		}
		last = XATTR_NEXT_ENTRY(last);
	}

	newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);

	/* 1. Check space */
	if (value) {
		int free;
		/*
		 * If value is NULL, it is remove operation.
		 * In case of update operation, we calculate free.
		 */
		free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
		if (found)
			free = free + ENTRY_SIZE(here);

		if (unlikely(free < newsize)) {
			error = -E2BIG;
			goto exit;
		}
	}

	/* 2. Remove old entry */
	if (found) {
		/*
		 * If entry is found, remove old entry.
		 * If not found, remove operation is not needed.
		 */
		struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
		int oldsize = ENTRY_SIZE(here);

		memmove(here, next, (char *)last - (char *)next);
		last = (struct f2fs_xattr_entry *)((char *)last - oldsize);
		memset(last, 0, oldsize);
	}

	new_hsize = (char *)last - (char *)base_addr;

	/* 3. Write new entry */
	if (value) {
		char *pval;
		/*
		 * Before we come here, old entry is removed.
		 * We just write new entry.
		 */
		last->e_name_index = index;
		last->e_name_len = len;
		memcpy(last->e_name, name, len);
		pval = last->e_name + len;
		memcpy(pval, value, size);
		last->e_value_size = cpu_to_le16(size);
		new_hsize += newsize;
	}

	error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
	if (error)
		goto exit;

	if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
			!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
		f2fs_set_encrypted_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
	if (!error && S_ISDIR(inode->i_mode))
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);

same:
	if (is_inode_flag_set(inode, FI_ACL_MODE)) {
		inode->i_mode = F2FS_I(inode)->i_acl_mode;
		inode->i_ctime = current_time(inode);
		clear_inode_flag(inode, FI_ACL_MODE);
	}

exit:
	kfree(base_addr);
	return error;
}

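/*
 * Locked wrapper around __f2fs_setxattr().  Takes f2fs_lock_op() and the
 * inode's i_xattr_sem for write; when @ipage is passed in (only from
 * f2fs_init_inode_metadata()), the update is done directly without them.
 */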
int f2fs_setxattr(struct inode *inode, int index, const char *name,
				const void *value, size_t size,
				struct page *ipage, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	err = dquot_initialize(inode);
	if (err)
		return err;

	/* this case is only from f2fs_init_inode_metadata */
	if (ipage)
		return __f2fs_setxattr(inode, index, name, value,
						size, ipage, flags);
	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	f2fs_down_write(&F2FS_I(inode)->i_xattr_sem);
	err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags);
	f2fs_up_write(&F2FS_I(inode)->i_xattr_sem);
	f2fs_unlock_op(sbi);

	f2fs_update_time(sbi, REQ_TIME);
	return err;
}

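/*
 * Create the per-superblock slab used by xattr_alloc() for inline-sized xattr
 * buffers.  The cache name embeds the backing device's major:minor numbers
 * and the object size covers the inline xattr area plus padding.
 */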
int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	sprintf(slab_name, "f2fs_xattr_entry-%u:%u", MAJOR(dev), MINOR(dev));

	sbi->inline_xattr_slab_size = F2FS_OPTION(sbi).inline_xattr_size *
					sizeof(__le32) + XATTR_PADDING_SIZE;

	sbi->inline_xattr_slab = f2fs_kmem_cache_create(slab_name,
					sbi->inline_xattr_slab_size);
	if (!sbi->inline_xattr_slab)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->inline_xattr_slab);
}
|