io_uring-5.12-2021-04-09

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmBwXtgQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgprCVEAC8ZqiwYFIaCODC4NouBtMjZsInyGwetwBa
 sezn2KUIsP3umjbgZMO5KrI/6zGWO5j+7O7j1eW05FoN/yE1gM477EfPVgYeupgV
 tefAOskEAwPtMrTkQJt7kTxnv2IHNhTV1LEvv5Co3ueFoIA4Fbqso1oTsn87D3JK
 x5oZ+c5tDC7VendkZ/5SX4ioysFwabZv1qUR8GxRHCx4Kk5prrdUmejb8QeUOglG
 aJ6hd0UIFNmQAW+3ujOD6wlhn3MyKVfdZpGbhQfFQz3GV88maxAxNm/5dcEuEDan
 W8khgdJHZ5mr7/oDkjmJyjp9MNvamuPw52UzrQwmMOf3RvaKanM74/mJWXcVz+J9
 tf9UsqiWwMmCpXwszA2KYBy+NIFZ3QqAy0Ed1pj0WLeYOzw0zPb34S/O9SnWy597
 /T0I2jIBsOl478Wo6U+kmvTubJoRpGz9kgXbpHLve0rLb1BEWj1hQsZZrMJf/29b
 b5zULnIQqhztFoaYdg08LFrhePIp1oVTuY8XO8/k84C4VpOFQjBKfGMtK+HalRTA
 bX2h/K3xCQPwK4VWIaM1ucvhrMTTReajfKXesQwuBmwETbs2p4tRIjL8KFw7d1rC
 2jB4UvKVnWuxE64LZ+VK86btC9eAjYmDTTEPRUVFE4ZFroXcr+MK2Qke5o2FNReN
 UOVy2Xlelw==
 =EbJ/
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-5.12-2021-04-09' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "Two minor fixups for the reissue logic, and one for making sure that
  unbounded work is canceled on io-wq exit"

* tag 'io_uring-5.12-2021-04-09' of git://git.kernel.dk/linux-block:
  io-wq: cancel unbounded works on io-wq destroy
  io_uring: fix rw req completion
  io_uring: clear F_REISSUE right after getting it
Linus Torvalds 2021-04-09 15:06:52 -07:00
commit 3b9784350f
2 changed files with 21 additions and 2 deletions
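Two of the hunks below make up the io-wq fix: once IO_WQ_BIT_EXIT is set on the wq, any unbounded work item the worker picks up is flagged IO_WQ_WORK_CANCEL instead of being run to completion. The stand-alone C sketch that follows models only that check; the work struct, flag values and worker loop are invented for illustration and are not the kernel's types.

/* Toy model of the cancel-on-exit check added to io_worker_handle_work().
 * All names here are illustrative, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define WORK_UNBOUND 0x1	/* work with no bounded execution time */
#define WORK_CANCEL  0x2	/* handler should complete it as canceled */

struct work {
	unsigned flags;
	const char *name;
};

static void do_work(struct work *w)
{
	if (w->flags & WORK_CANCEL)
		printf("%s: canceled\n", w->name);
	else
		printf("%s: executed\n", w->name);
}

static void handle_work(struct work *queue, int nr, bool wq_exiting)
{
	for (int i = 0; i < nr; i++) {
		struct work *w = &queue[i];

		/* mirrors: if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND)) */
		if (wq_exiting && (w->flags & WORK_UNBOUND))
			w->flags |= WORK_CANCEL;
		do_work(w);
	}
}

int main(void)
{
	struct work q[] = {
		{ 0,            "bounded"   },
		{ WORK_UNBOUND, "unbounded" },
	};

	handle_work(q, 2, true);	/* the wq is being torn down */
	return 0;
}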

fs/io-wq.c

@@ -415,6 +415,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 {
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
+	bool do_kill = test_bit(IO_WQ_BIT_EXIT, &wq->state);
 
 	do {
 		struct io_wq_work *work;
@@ -444,6 +445,9 @@ static void io_worker_handle_work(struct io_worker *worker)
 			unsigned int hash = io_get_work_hash(work);
 
 			next_hashed = wq_next_work(work);
+
+			if (unlikely(do_kill) && (work->flags & IO_WQ_WORK_UNBOUND))
+				work->flags |= IO_WQ_WORK_CANCEL;
 			wq->do_work(work);
 			io_assign_current_work(worker, NULL);

fs/io_uring.c

@@ -2762,6 +2762,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 {
 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
 	struct io_async_rw *io = req->async_data;
+	bool check_reissue = kiocb->ki_complete == io_complete_rw;
 
 	/* add previously done IO, if any */
 	if (io && io->bytes_done > 0) {
@@ -2777,6 +2778,18 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
 		__io_complete_rw(req, ret, 0, issue_flags);
 	else
 		io_rw_done(kiocb, ret);
+
+	if (check_reissue && req->flags & REQ_F_REISSUE) {
+		req->flags &= ~REQ_F_REISSUE;
+		if (!io_rw_reissue(req)) {
+			int cflags = 0;
+
+			req_set_fail_links(req);
+			if (req->flags & REQ_F_BUFFER_SELECTED)
+				cflags = io_put_rw_kbuf(req);
+			__io_req_complete(req, issue_flags, ret, cflags);
+		}
+	}
 }
 
 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
@@ -3294,6 +3307,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_iter_do_read(req, iter);
 
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
+		req->flags &= ~REQ_F_REISSUE;
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
@@ -3417,8 +3431,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	else
 		ret2 = -EINVAL;
 
-	if (req->flags & REQ_F_REISSUE)
+	if (req->flags & REQ_F_REISSUE) {
+		req->flags &= ~REQ_F_REISSUE;
 		ret2 = -EAGAIN;
+	}
 
 	/*
 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
@@ -6173,7 +6189,6 @@ static void io_wq_submit_work(struct io_wq_work *work)
 		ret = -ECANCELED;
 
 	if (!ret) {
-		req->flags &= ~REQ_F_REISSUE;
 		do {
 			ret = io_issue_sqe(req, 0);
 			/*
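The fs/io_uring.c hunks share one discipline: REQ_F_REISSUE is cleared at the single point where it is consumed, and only the path whose completion handler is io_complete_rw (the check_reissue test in kiocb_done()) attempts the reissue, failing the request if io_rw_reissue() cannot requeue it. The stand-alone C sketch below shows that consume-and-clear pattern in isolation; the request struct, flag and helpers are illustrative only, not the kernel's API.

/* Toy consume-and-clear handling of a "reissue" flag; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define F_REISSUE 0x1

struct request {
	unsigned flags;
};

/* Pretend attempt to requeue the request; may fail (no resources, etc.). */
static bool try_reissue(struct request *req)
{
	(void)req;
	return false;		/* force the failure path for the demo */
}

static void complete_req(struct request *req, int res)
{
	(void)req;
	printf("completed with %d\n", res);
}

static void finish(struct request *req, int res)
{
	if (req->flags & F_REISSUE) {
		/* clear the flag the moment it is acted on, so no later
		 * completion or submission path can observe it again */
		req->flags &= ~F_REISSUE;
		if (try_reissue(req))
			return;		/* requeued; completes later */
		res = -1;		/* reissue failed: fail the request */
	}
	complete_req(req, res);
}

int main(void)
{
	struct request req = { .flags = F_REISSUE };

	finish(&req, 42);
	return 0;
}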