io_uring: clean up io_poll_check_events return values
The values returned are a bit confusing, where 0 and 1 have implied
meaning, so add some definitions for them.

Signed-off-by: Dylan Yudaken <dylany@fb.com>
Link: https://lore.kernel.org/r/20220630091231.1456789-6-dylany@fb.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2ba69707d9
parent d4e097dae2
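For context, a minimal standalone C sketch (not io_uring code) of the pattern this commit applies: bare 0/1 return values become named enum constants, while negative values still carry an errno-style error. All identifiers below are simplified stand-ins for illustration, not the actual io_uring symbols.

/*
 * Standalone sketch, assuming simplified stand-in names: the commit's
 * pattern of replacing implied 0/1 return values with named enum
 * constants, while negative values still mean an error.
 */
#include <errno.h>
#include <stdio.h>

enum {
        POLL_DONE = 0,          /* request finished, result is ready */
        POLL_NO_ACTION = 1,     /* spurious wakeup, nothing for the caller to do */
};

/* hypothetical stand-in for io_poll_check_events() */
static int check_events(int events, int cancelled)
{
        if (cancelled)
                return -ECANCELED;      /* negative: error */
        if (!events)
                return POLL_NO_ACTION;  /* keep waiting */
        return POLL_DONE;               /* caller completes the request */
}

int main(void)
{
        int ret = check_events(1, 0);

        /* callers compare against named values instead of bare 0 and 1 */
        if (ret == POLL_NO_ACTION)
                printf("no action\n");
        else if (ret == POLL_DONE)
                printf("done\n");
        else
                printf("error %d\n", ret);
        return 0;
}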
@@ -192,13 +192,18 @@ static void io_poll_remove_entries(struct io_kiocb *req)
         rcu_read_unlock();
 }
 
+enum {
+        IOU_POLL_DONE = 0,
+        IOU_POLL_NO_ACTION = 1,
+};
+
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
  *
- * Returns a negative error on failure. >0 when no action require, which is
- * either spurious wakeup or multishot CQE is served. 0 when it's done with
- * the request, then the mask is stored in req->cqe.res.
+ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
+ * which is either spurious wakeup or multishot CQE is served.
+ * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
  */
 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
@@ -214,10 +219,11 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 
                 /* tw handler should be the owner, and so have some references */
                 if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
-                        return 0;
+                        return IOU_POLL_DONE;
                 if (v & IO_POLL_CANCEL_FLAG)
                         return -ECANCELED;
 
+                /* the mask was stashed in __io_poll_execute */
                 if (!req->cqe.res) {
                         struct poll_table_struct pt = { ._key = req->apoll_events };
                         req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
@@ -226,7 +232,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                 if ((unlikely(!req->cqe.res)))
                         continue;
                 if (req->apoll_events & EPOLLONESHOT)
-                        return 0;
+                        return IOU_POLL_DONE;
 
                 /* multishot, just fill a CQE and proceed */
                 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
@@ -238,7 +244,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                         return -ECANCELED;
                 } else {
                         ret = io_poll_issue(req, locked);
-                        if (ret)
+                        if (ret < 0)
                                 return ret;
                 }
 
@@ -248,7 +254,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                  */
         } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
 
-        return 1;
+        return IOU_POLL_NO_ACTION;
 }
 
 static void io_poll_task_func(struct io_kiocb *req, bool *locked)
@@ -256,12 +262,11 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
         int ret;
 
         ret = io_poll_check_events(req, locked);
-        if (ret > 0)
+        if (ret == IOU_POLL_NO_ACTION)
                 return;
 
-        if (!ret) {
+        if (ret == IOU_POLL_DONE) {
                 struct io_poll *poll = io_kiocb_to_cmd(req);
-
                 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
         } else {
                 req->cqe.res = ret;
@@ -280,7 +285,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
         int ret;
 
         ret = io_poll_check_events(req, locked);
-        if (ret > 0)
+        if (ret == IOU_POLL_NO_ACTION)
                 return;
 
         io_poll_remove_entries(req);