io_uring: use separate list entry for iopoll requests

A previous commit ended up enabling file tracking for iopoll requests,
which conflicts with iopoll because both now use the same list entry for
tracking. Add a separate list entry just for iopoll requests to avoid
this issue.

No upstream commit exists for this issue.

Reported-by: Greg Thelen <gthelen@google.com>
Fixes: df3f3bb505 ("io_uring: add missing item types for various requests")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
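
Before the diff, a minimal userspace sketch of the underlying problem: a single
struct list_head member can only sit on one list at a time, so once file
tracking and iopoll both queue a request through inflight_entry, whichever list
links the node last silently rewrites its pointers out from under the other
list. The struct request type, the simplified list helpers, and main() below
are illustrative stand-ins, not kernel code; they only mimic the basic
<linux/list.h> linking behaviour to show why a dedicated iopoll_entry member
is needed.

#include <stdio.h>

/* Simplified stand-ins for the kernel's struct list_head helpers. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

/* Hypothetical request: one entry shared by two lists (the bug) and a
 * dedicated iopoll_entry (the fix). */
struct request {
	int id;
	struct list_head inflight_entry;	/* file/inflight tracking */
	struct list_head iopoll_entry;		/* iopoll completion list */
};

static int count(struct list_head *head)
{
	int n = 0;
	for (struct list_head *p = head->next; p != head; p = p->next)
		n++;
	return n;
}

int main(void)
{
	struct list_head inflight_list, iopoll_list;
	struct request req = { .id = 1 };

	INIT_LIST_HEAD(&inflight_list);
	INIT_LIST_HEAD(&iopoll_list);

	/* Buggy pattern: reuse inflight_entry for both lists. The second
	 * list_add_tail() rewrites req.inflight_entry's links to point into
	 * iopoll_list, so the inflight list's view of the node is broken. */
	list_add_tail(&req.inflight_entry, &inflight_list);
	list_add_tail(&req.inflight_entry, &iopoll_list);
	printf("inflight list still consistent? %s\n",
	       inflight_list.next == &req.inflight_entry &&
	       req.inflight_entry.next == &inflight_list ? "yes" : "no");

	/* Fixed pattern: each list links through its own member. */
	INIT_LIST_HEAD(&inflight_list);
	INIT_LIST_HEAD(&iopoll_list);
	list_add_tail(&req.inflight_entry, &inflight_list);
	list_add_tail(&req.iopoll_entry, &iopoll_list);
	printf("inflight=%d iopoll=%d\n",
	       count(&inflight_list), count(&iopoll_list));
	return 0;
}

The first printf reports "no": the node has been relinked into the second
list, which is the kind of conflict the extra iopoll_entry member removes.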
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -696,6 +696,8 @@ struct io_kiocb {
 	 */
 	struct list_head		inflight_entry;
 
+	struct list_head		iopoll_entry;
+
 	struct percpu_ref		*fixed_file_refs;
 	struct callback_head		task_work;
 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
@@ -2350,8 +2352,8 @@ static void io_iopoll_queue(struct list_head *again)
 	struct io_kiocb *req;
 
 	do {
-		req = list_first_entry(again, struct io_kiocb, inflight_entry);
-		list_del(&req->inflight_entry);
+		req = list_first_entry(again, struct io_kiocb, iopoll_entry);
+		list_del(&req->iopoll_entry);
 		__io_complete_rw(req, -EAGAIN, 0, NULL);
 	} while (!list_empty(again));
 }
@@ -2373,14 +2375,14 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	while (!list_empty(done)) {
 		int cflags = 0;
 
-		req = list_first_entry(done, struct io_kiocb, inflight_entry);
+		req = list_first_entry(done, struct io_kiocb, iopoll_entry);
 		if (READ_ONCE(req->result) == -EAGAIN) {
 			req->result = 0;
 			req->iopoll_completed = 0;
-			list_move_tail(&req->inflight_entry, &again);
+			list_move_tail(&req->iopoll_entry, &again);
 			continue;
 		}
-		list_del(&req->inflight_entry);
+		list_del(&req->iopoll_entry);
 
 		if (req->flags & REQ_F_BUFFER_SELECTED)
 			cflags = io_put_rw_kbuf(req);
@@ -2416,7 +2418,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	spin = !ctx->poll_multi_file && *nr_events < min;
 
 	ret = 0;
-	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
+	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) {
 		struct kiocb *kiocb = &req->rw.kiocb;
 
 		/*
@@ -2425,7 +2427,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		 * and complete those lists first, if we have entries there.
 		 */
 		if (READ_ONCE(req->iopoll_completed)) {
-			list_move_tail(&req->inflight_entry, &done);
+			list_move_tail(&req->iopoll_entry, &done);
 			continue;
 		}
 		if (!list_empty(&done))
@@ -2437,7 +2439,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 
 		/* iopoll may have completed current req */
 		if (READ_ONCE(req->iopoll_completed))
-			list_move_tail(&req->inflight_entry, &done);
+			list_move_tail(&req->iopoll_entry, &done);
 
 		if (ret && spin)
 			spin = false;
@@ -2670,7 +2672,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 		struct io_kiocb *list_req;
 
 		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
-						inflight_entry);
+						iopoll_entry);
 		if (list_req->file != req->file)
 			ctx->poll_multi_file = true;
 	}
@@ -2680,9 +2682,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 	 * it to the front so we find it first.
 	 */
 	if (READ_ONCE(req->iopoll_completed))
-		list_add(&req->inflight_entry, &ctx->iopoll_list);
+		list_add(&req->iopoll_entry, &ctx->iopoll_list);
 	else
-		list_add_tail(&req->inflight_entry, &ctx->iopoll_list);
+		list_add_tail(&req->iopoll_entry, &ctx->iopoll_list);
 
 	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
 	    wq_has_sleeper(&ctx->sq_data->wait))