io_uring: rename file related variables to rsrc
This is a prep rename patch for subsequent patches to generalize file
registration.

[io_uring_rsrc_update:: rename fds -> data]

Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Bijan Mottahedeh <bijan.mottahedeh@oracle.com>
[leave io_uring_files_update as struct]
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2b358604aa
commit 269bbe5fd4

fs/io_uring.c | 228
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -195,24 +195,29 @@ struct io_mapped_ubuf {
 	unsigned long acct_pages;
 };
 
-struct fixed_file_table {
+struct io_rsrc_put {
+	struct list_head list;
+	struct file *file;
+};
+
+struct fixed_rsrc_table {
 	struct file **files;
 };
 
-struct fixed_file_ref_node {
+struct fixed_rsrc_ref_node {
 	struct percpu_ref refs;
 	struct list_head node;
-	struct list_head file_list;
-	struct fixed_file_data *file_data;
+	struct list_head rsrc_list;
+	struct fixed_rsrc_data *rsrc_data;
 	struct llist_node llist;
 	bool done;
 };
 
-struct fixed_file_data {
-	struct fixed_file_table *table;
+struct fixed_rsrc_data {
+	struct fixed_rsrc_table *table;
 	struct io_ring_ctx *ctx;
 
-	struct fixed_file_ref_node *node;
+	struct fixed_rsrc_ref_node *node;
 	struct percpu_ref refs;
 	struct completion done;
 	struct list_head ref_list;
@@ -319,7 +324,7 @@ struct io_ring_ctx {
 	 * readers must ensure that ->refs is alive as long as the file* is
 	 * used. Only updated through io_uring_register(2).
 	 */
-	struct fixed_file_data *file_data;
+	struct fixed_rsrc_data *file_data;
 	unsigned nr_user_files;
 
 	/* if used, fixed mapped user buffers */
@@ -384,8 +389,8 @@ struct io_ring_ctx {
 		struct list_head inflight_list;
 	} ____cacheline_aligned_in_smp;
 
-	struct delayed_work file_put_work;
-	struct llist_head file_put_llist;
+	struct delayed_work rsrc_put_work;
+	struct llist_head rsrc_put_llist;
 
 	struct work_struct exit_work;
 	struct io_restriction restrictions;
@@ -494,7 +499,7 @@ struct io_open {
 	unsigned long nofile;
 };
 
-struct io_files_update {
+struct io_rsrc_update {
 	struct file *file;
 	u64 arg;
 	u32 nr_args;
@@ -688,7 +693,7 @@ struct io_kiocb {
 		struct io_sr_msg sr_msg;
 		struct io_open open;
 		struct io_close close;
-		struct io_files_update files_update;
+		struct io_rsrc_update rsrc_update;
 		struct io_fadvise fadvise;
 		struct io_madvise madvise;
 		struct io_epoll epoll;
@@ -718,7 +723,7 @@ struct io_kiocb {
 	u64 user_data;
 
 	struct io_kiocb *link;
-	struct percpu_ref *fixed_file_refs;
+	struct percpu_ref *fixed_rsrc_refs;
 
 	/*
 	 * 1. used with ctx->iopoll_list with reads/writes
@@ -996,8 +1001,8 @@ enum io_mem_account {
 static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 					    struct task_struct *task);
 
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node);
+static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
 			struct io_ring_ctx *ctx);
 
 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
@@ -1010,13 +1015,13 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
 static void __io_queue_linked_timeout(struct io_kiocb *req);
 static void io_queue_linked_timeout(struct io_kiocb *req);
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
-				 struct io_uring_files_update *ip,
+				 struct io_uring_rsrc_update *ip,
 				 unsigned nr_args);
 static void __io_clean_op(struct io_kiocb *req);
 static struct file *io_file_get(struct io_submit_state *state,
 				struct io_kiocb *req, int fd, bool fixed);
 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
-static void io_file_put_work(struct work_struct *work);
+static void io_rsrc_put_work(struct work_struct *work);
 
 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 			       struct iovec **iovec, struct iov_iter *iter,
@@ -1057,9 +1062,9 @@ static inline void io_set_resource_node(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
-	if (!req->fixed_file_refs) {
-		req->fixed_file_refs = &ctx->file_data->node->refs;
-		percpu_ref_get(req->fixed_file_refs);
+	if (!req->fixed_rsrc_refs) {
+		req->fixed_rsrc_refs = &ctx->file_data->node->refs;
+		percpu_ref_get(req->fixed_rsrc_refs);
 	}
 }
 
@@ -1330,8 +1335,8 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	INIT_LIST_HEAD(&ctx->timeout_list);
 	spin_lock_init(&ctx->inflight_lock);
 	INIT_LIST_HEAD(&ctx->inflight_list);
-	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
-	init_llist_head(&ctx->file_put_llist);
+	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
+	init_llist_head(&ctx->rsrc_put_llist);
 	return ctx;
 err:
 	if (ctx->fallback_req)
@@ -2011,8 +2016,8 @@ static void io_dismantle_req(struct io_kiocb *req)
 		kfree(req->async_data);
 	if (req->file)
 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-	if (req->fixed_file_refs)
-		percpu_ref_put(req->fixed_file_refs);
+	if (req->fixed_rsrc_refs)
+		percpu_ref_put(req->fixed_rsrc_refs);
 	io_req_clean_work(req);
 }
 
@@ -5988,7 +5993,7 @@ static int io_async_cancel(struct io_kiocb *req)
 	return 0;
 }
 
-static int io_files_update_prep(struct io_kiocb *req,
+static int io_rsrc_update_prep(struct io_kiocb *req,
 				const struct io_uring_sqe *sqe)
 {
 	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
@@ -5998,11 +6003,11 @@ static int io_files_update_prep(struct io_kiocb *req,
 	if (sqe->ioprio || sqe->rw_flags)
 		return -EINVAL;
 
-	req->files_update.offset = READ_ONCE(sqe->off);
-	req->files_update.nr_args = READ_ONCE(sqe->len);
-	if (!req->files_update.nr_args)
+	req->rsrc_update.offset = READ_ONCE(sqe->off);
+	req->rsrc_update.nr_args = READ_ONCE(sqe->len);
+	if (!req->rsrc_update.nr_args)
 		return -EINVAL;
-	req->files_update.arg = READ_ONCE(sqe->addr);
+	req->rsrc_update.arg = READ_ONCE(sqe->addr);
 	return 0;
 }
 
@@ -6010,17 +6015,17 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock,
 			   struct io_comp_state *cs)
 {
 	struct io_ring_ctx *ctx = req->ctx;
-	struct io_uring_files_update up;
+	struct io_uring_rsrc_update up;
 	int ret;
 
 	if (force_nonblock)
 		return -EAGAIN;
 
-	up.offset = req->files_update.offset;
-	up.fds = req->files_update.arg;
+	up.offset = req->rsrc_update.offset;
+	up.data = req->rsrc_update.arg;
 
 	mutex_lock(&ctx->uring_lock);
-	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
+	ret = __io_sqe_files_update(ctx, &up, req->rsrc_update.nr_args);
 	mutex_unlock(&ctx->uring_lock);
 
 	if (ret < 0)
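For context on the opcode path renamed above: IORING_OP_FILES_UPDATE is the asynchronous counterpart of the files-update registration call, with the per-request parameters now held in req->rsrc_update and the user pointer carried as up.data. Below is a minimal userspace sketch of driving that opcode. It assumes liburing is available; the liburing helper names, the sparse -1 registration, and the /dev/null placeholder are illustrative conventions, not anything introduced by this patch.

/* Sketch: replace fixed-file slot 0 via IORING_OP_FILES_UPDATE (liburing). */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2], new_fd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* Register two placeholder slots (-1 == sparse/empty slot). */
	fds[0] = fds[1] = -1;
	if (io_uring_register_files(&ring, fds, 2))
		return 1;

	new_fd = open("/dev/null", O_RDONLY);
	if (new_fd < 0)
		return 1;

	/* Async update: on the kernel side this lands in io_rsrc_update_prep()
	 * and io_files_update() from the hunks above. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_files_update(sqe, &new_fd, 1, 0 /* offset: slot 0 */);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("files_update res=%d\n", cqe->res); /* 1 == one slot updated */
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	return 0;
}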
@@ -6075,7 +6080,7 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	case IORING_OP_CLOSE:
 		return io_close_prep(req, sqe);
 	case IORING_OP_FILES_UPDATE:
-		return io_files_update_prep(req, sqe);
+		return io_rsrc_update_prep(req, sqe);
 	case IORING_OP_STATX:
 		return io_statx_prep(req, sqe);
 	case IORING_OP_FADVISE:
@@ -6444,7 +6449,7 @@ static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
 					      int index)
 {
-	struct fixed_file_table *table;
+	struct fixed_rsrc_table *table;
 
 	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
 	return table->files[index & IORING_FILE_TABLE_MASK];
@@ -6840,7 +6845,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	req->ctx = ctx;
 	req->flags = 0;
 	req->link = NULL;
-	req->fixed_file_refs = NULL;
+	req->fixed_rsrc_refs = NULL;
 	/* one is dropped after submission, the other at completion */
 	refcount_set(&req->refs, 2);
 	req->task = current;
@@ -7328,28 +7333,28 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
 #endif
 }
 
-static void io_file_ref_kill(struct percpu_ref *ref)
+static void io_rsrc_ref_kill(struct percpu_ref *ref)
 {
-	struct fixed_file_data *data;
+	struct fixed_rsrc_data *data;
 
-	data = container_of(ref, struct fixed_file_data, refs);
+	data = container_of(ref, struct fixed_rsrc_data, refs);
 	complete(&data->done);
 }
 
-static void io_sqe_files_set_node(struct fixed_file_data *file_data,
-				  struct fixed_file_ref_node *ref_node)
+static void io_sqe_rsrc_set_node(struct fixed_rsrc_data *rsrc_data,
+				 struct fixed_rsrc_ref_node *ref_node)
 {
-	spin_lock_bh(&file_data->lock);
-	file_data->node = ref_node;
-	list_add_tail(&ref_node->node, &file_data->ref_list);
-	spin_unlock_bh(&file_data->lock);
-	percpu_ref_get(&file_data->refs);
+	spin_lock_bh(&rsrc_data->lock);
+	rsrc_data->node = ref_node;
+	list_add_tail(&ref_node->node, &rsrc_data->ref_list);
+	spin_unlock_bh(&rsrc_data->lock);
+	percpu_ref_get(&rsrc_data->refs);
 }
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 {
-	struct fixed_file_data *data = ctx->file_data;
-	struct fixed_file_ref_node *backup_node, *ref_node = NULL;
+	struct fixed_rsrc_data *data = ctx->file_data;
+	struct fixed_rsrc_ref_node *backup_node, *ref_node = NULL;
 	unsigned nr_tables, i;
 	int ret;
 
@@ -7368,7 +7373,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	percpu_ref_kill(&data->refs);
 
 	/* wait for all refs nodes to complete */
-	flush_delayed_work(&ctx->file_put_work);
+	flush_delayed_work(&ctx->rsrc_put_work);
 	do {
 		ret = wait_for_completion_interruptible(&data->done);
 		if (!ret)
@@ -7377,7 +7382,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 		if (ret < 0) {
 			percpu_ref_resurrect(&data->refs);
 			reinit_completion(&data->done);
-			io_sqe_files_set_node(data, backup_node);
+			io_sqe_rsrc_set_node(data, backup_node);
 			return ret;
 		}
 	} while (1);
@@ -7391,7 +7396,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
 	kfree(data);
 	ctx->file_data = NULL;
 	ctx->nr_user_files = 0;
-	destroy_fixed_file_ref_node(backup_node);
+	destroy_fixed_rsrc_ref_node(backup_node);
 	return 0;
 }
 
@@ -7614,13 +7619,13 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
 }
 #endif
 
-static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
+static int io_sqe_alloc_file_tables(struct fixed_rsrc_data *file_data,
 				    unsigned nr_tables, unsigned nr_files)
 {
 	int i;
 
 	for (i = 0; i < nr_tables; i++) {
-		struct fixed_file_table *table = &file_data->table[i];
+		struct fixed_rsrc_table *table = &file_data->table[i];
 		unsigned this_files;
 
 		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
@@ -7635,7 +7640,7 @@ static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
 		return 0;
 
 	for (i = 0; i < nr_tables; i++) {
-		struct fixed_file_table *table = &file_data->table[i];
+		struct fixed_rsrc_table *table = &file_data->table[i];
 		kfree(table->files);
 	}
 	return 1;
@@ -7703,56 +7708,51 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
 #endif
 }
 
-struct io_file_put {
-	struct list_head list;
-	struct file *file;
-};
-
-static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
+static void __io_rsrc_put_work(struct fixed_rsrc_ref_node *ref_node)
 {
-	struct fixed_file_data *file_data = ref_node->file_data;
-	struct io_ring_ctx *ctx = file_data->ctx;
-	struct io_file_put *pfile, *tmp;
+	struct fixed_rsrc_data *rsrc_data = ref_node->rsrc_data;
+	struct io_ring_ctx *ctx = rsrc_data->ctx;
+	struct io_rsrc_put *prsrc, *tmp;
 
-	list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
-		list_del(&pfile->list);
-		io_ring_file_put(ctx, pfile->file);
-		kfree(pfile);
+	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
+		list_del(&prsrc->list);
+		io_ring_file_put(ctx, prsrc->file);
+		kfree(prsrc);
 	}
 
 	percpu_ref_exit(&ref_node->refs);
 	kfree(ref_node);
-	percpu_ref_put(&file_data->refs);
+	percpu_ref_put(&rsrc_data->refs);
 }
 
-static void io_file_put_work(struct work_struct *work)
+static void io_rsrc_put_work(struct work_struct *work)
 {
 	struct io_ring_ctx *ctx;
 	struct llist_node *node;
 
-	ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
-	node = llist_del_all(&ctx->file_put_llist);
+	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
+	node = llist_del_all(&ctx->rsrc_put_llist);
 
 	while (node) {
-		struct fixed_file_ref_node *ref_node;
+		struct fixed_rsrc_ref_node *ref_node;
 		struct llist_node *next = node->next;
 
-		ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
-		__io_file_put_work(ref_node);
+		ref_node = llist_entry(node, struct fixed_rsrc_ref_node, llist);
+		__io_rsrc_put_work(ref_node);
 		node = next;
 	}
 }
 
-static void io_file_data_ref_zero(struct percpu_ref *ref)
+static void io_rsrc_data_ref_zero(struct percpu_ref *ref)
 {
-	struct fixed_file_ref_node *ref_node;
-	struct fixed_file_data *data;
+	struct fixed_rsrc_ref_node *ref_node;
+	struct fixed_rsrc_data *data;
 	struct io_ring_ctx *ctx;
 	bool first_add = false;
 	int delay = HZ;
 
-	ref_node = container_of(ref, struct fixed_file_ref_node, refs);
-	data = ref_node->file_data;
+	ref_node = container_of(ref, struct fixed_rsrc_ref_node, refs);
+	data = ref_node->rsrc_data;
 	ctx = data->ctx;
 
 	spin_lock_bh(&data->lock);
@@ -7760,12 +7760,12 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 
 	while (!list_empty(&data->ref_list)) {
 		ref_node = list_first_entry(&data->ref_list,
-					    struct fixed_file_ref_node, node);
+					    struct fixed_rsrc_ref_node, node);
 		/* recycle ref nodes in order */
 		if (!ref_node->done)
 			break;
 		list_del(&ref_node->node);
-		first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
+		first_add |= llist_add(&ref_node->llist, &ctx->rsrc_put_llist);
 	}
 	spin_unlock_bh(&data->lock);
 
@@ -7773,33 +7773,33 @@ static void io_file_data_ref_zero(struct percpu_ref *ref)
 		delay = 0;
 
 	if (!delay)
-		mod_delayed_work(system_wq, &ctx->file_put_work, 0);
+		mod_delayed_work(system_wq, &ctx->rsrc_put_work, 0);
 	else if (first_add)
-		queue_delayed_work(system_wq, &ctx->file_put_work, delay);
+		queue_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
 }
 
-static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
+static struct fixed_rsrc_ref_node *alloc_fixed_file_ref_node(
 			struct io_ring_ctx *ctx)
 {
-	struct fixed_file_ref_node *ref_node;
+	struct fixed_rsrc_ref_node *ref_node;
 
 	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
 	if (!ref_node)
 		return NULL;
 
-	if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
+	if (percpu_ref_init(&ref_node->refs, io_rsrc_data_ref_zero,
 			    0, GFP_KERNEL)) {
 		kfree(ref_node);
 		return NULL;
 	}
 	INIT_LIST_HEAD(&ref_node->node);
-	INIT_LIST_HEAD(&ref_node->file_list);
-	ref_node->file_data = ctx->file_data;
+	INIT_LIST_HEAD(&ref_node->rsrc_list);
+	ref_node->rsrc_data = ctx->file_data;
 	ref_node->done = false;
 	return ref_node;
 }
 
-static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
+static void destroy_fixed_rsrc_ref_node(struct fixed_rsrc_ref_node *ref_node)
 {
 	percpu_ref_exit(&ref_node->refs);
 	kfree(ref_node);
@@ -7812,8 +7812,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	unsigned nr_tables, i;
 	struct file *file;
 	int fd, ret = -ENOMEM;
-	struct fixed_file_ref_node *ref_node;
-	struct fixed_file_data *file_data;
+	struct fixed_rsrc_ref_node *ref_node;
+	struct fixed_rsrc_data *file_data;
 
 	if (ctx->file_data)
 		return -EBUSY;
@@ -7836,7 +7836,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	if (!file_data->table)
 		goto out_free;
 
-	if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
+	if (percpu_ref_init(&file_data->refs, io_rsrc_ref_kill,
 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
 		goto out_free;
 
@@ -7845,7 +7845,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 	ctx->file_data = file_data;
 
 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
-		struct fixed_file_table *table;
+		struct fixed_rsrc_table *table;
 		unsigned index;
 
 		if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
@@ -7889,7 +7889,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
 		return -ENOMEM;
 	}
 
-	io_sqe_files_set_node(file_data, ref_node);
+	io_sqe_rsrc_set_node(file_data, ref_node);
 	return ret;
 out_fput:
 	for (i = 0; i < ctx->nr_user_files; i++) {
@@ -7952,28 +7952,34 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 #endif
 }
 
-static int io_queue_file_removal(struct fixed_file_data *data,
-				 struct file *file)
+static int io_queue_rsrc_removal(struct fixed_rsrc_data *data,
+				 struct file *rsrc)
 {
-	struct io_file_put *pfile;
-	struct fixed_file_ref_node *ref_node = data->node;
+	struct io_rsrc_put *prsrc;
+	struct fixed_rsrc_ref_node *ref_node = data->node;
 
-	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
-	if (!pfile)
+	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
+	if (!prsrc)
 		return -ENOMEM;
 
-	pfile->file = file;
-	list_add(&pfile->list, &ref_node->file_list);
+	prsrc->file = rsrc;
+	list_add(&prsrc->list, &ref_node->rsrc_list);
 
 	return 0;
 }
 
+static inline int io_queue_file_removal(struct fixed_rsrc_data *data,
+					struct file *file)
+{
+	return io_queue_rsrc_removal(data, file);
+}
+
 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
-				 struct io_uring_files_update *up,
+				 struct io_uring_rsrc_update *up,
 				 unsigned nr_args)
 {
-	struct fixed_file_data *data = ctx->file_data;
-	struct fixed_file_ref_node *ref_node;
+	struct fixed_rsrc_data *data = ctx->file_data;
+	struct fixed_rsrc_ref_node *ref_node;
 	struct file *file;
 	__s32 __user *fds;
 	int fd, i, err;
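The hunk above makes io_queue_file_removal() a thin inline wrapper around the new, generically named io_queue_rsrc_removal(), so existing call sites are untouched by this patch. For orientation, the main caller sits in __io_sqe_files_update() (only partially visible in the hunks below) and uses it roughly as sketched here; this is a condensed, approximate view with the surrounding declarations omitted, not a verbatim quote of the kernel source.

		/* Inside the per-slot loop of __io_sqe_files_update():
		 * dropping the currently installed file queues an io_rsrc_put
		 * on the current ref node via the inline wrapper. */
		if (table->files[index]) {
			file = table->files[index];
			err = io_queue_file_removal(data, file); /* -> io_queue_rsrc_removal() */
			if (err)
				break;
			table->files[index] = NULL;
			needs_switch = true;
		}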
@@ -7990,9 +7996,9 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 		return -ENOMEM;
 
 	done = 0;
-	fds = u64_to_user_ptr(up->fds);
+	fds = u64_to_user_ptr(up->data);
 	while (nr_args) {
-		struct fixed_file_table *table;
+		struct fixed_rsrc_table *table;
 		unsigned index;
 
 		err = 0;
@@ -8045,9 +8051,9 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
 	if (needs_switch) {
 		percpu_ref_kill(&data->node->refs);
-		io_sqe_files_set_node(data, ref_node);
+		io_sqe_rsrc_set_node(data, ref_node);
 	} else
-		destroy_fixed_file_ref_node(ref_node);
+		destroy_fixed_rsrc_ref_node(ref_node);
 
 	return done ? done : err;
 }
@@ -8055,7 +8061,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
 			       unsigned nr_args)
 {
-	struct io_uring_files_update up;
+	struct io_uring_rsrc_update up;
 
 	if (!ctx->file_data)
 		return -ENXIO;
@@ -9482,7 +9488,7 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
 	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
-		struct fixed_file_table *table;
+		struct fixed_rsrc_table *table;
 		struct file *f;
 
 		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -285,12 +285,19 @@ enum {
 	IORING_REGISTER_LAST
 };
 
+/* deprecated, see struct io_uring_rsrc_update */
 struct io_uring_files_update {
 	__u32 offset;
 	__u32 resv;
 	__aligned_u64 /* __s32 * */ fds;
 };
 
+struct io_uring_rsrc_update {
+	__u32 offset;
+	__u32 resv;
+	__aligned_u64 data;
+};
+
 #define IO_URING_OP_SUPPORTED (1U << 0)
 
 struct io_uring_probe_op {
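The struct io_uring_rsrc_update added above is field-for-field identical to the now-deprecated struct io_uring_files_update, only renaming fds to the type-neutral data, so the synchronous registration path is unchanged on the wire. A minimal userspace sketch of that path using the new struct follows; it assumes uapi headers that already contain this patch, a libc that exposes __NR_io_uring_register, and a ring that has a file table registered beforehand.

/* Sketch: synchronous fixed-file update through io_uring_register(2),
 * using the renamed uapi struct from the hunk above. */
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int update_fixed_file(int ring_fd, unsigned slot, int new_fd)
{
	__s32 fds[1] = { new_fd };		/* -1 would clear the slot */
	struct io_uring_rsrc_update up = {
		.offset = slot,			/* first slot to update */
		.data = (__u64)(unsigned long)fds, /* pointer to the fd array */
	};

	/* Kernel-side this is io_sqe_files_update() -> __io_sqe_files_update()
	 * from the fs/io_uring.c hunks above; on success it returns the
	 * number of slots updated. */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_FILES_UPDATE, &up, 1 /* nr entries */);
}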