io_uring: ensure IOPOLL locks around deferred work
No direct upstream commit exists for this issue. It was fixed in 5.18
as part of a larger rework of the completion side.

io_commit_cqring() writes the CQ ring tail to make it visible, but it
also kicks off any deferred work we have. A ring setup with IOPOLL does
not need any locking around the CQ ring updates, as we're always under
the ctx uring_lock. But if we have deferred work that needs processing,
then io_queue_deferred() assumes that the completion_lock is held, as
it is for !IOPOLL.

Add a lockdep assertion to check and document this fact, and have
io_iopoll_complete() check if we have deferred work and run that
separately with the appropriate lock grabbed.

Cc: stable@vger.kernel.org # 5.10, 5.15
Reported-by: dghost david <daviduniverse18@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
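To make the locking contract concrete, below is a minimal userspace model of the pattern the patch enforces. It is illustrative only, not kernel code: the pthread mutex and the owner-tracking assert stand in for ctx->completion_lock and lockdep_assert_held(), and the function names merely mirror the patch.

/*
 * Illustrative userspace model only -- NOT kernel code. The pthread
 * mutex and the owner-tracking assert stand in for ctx->completion_lock
 * and lockdep_assert_held(); the function names mirror the patch.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_owner;
static bool lock_held;

static void lock_completion(void)
{
	pthread_mutex_lock(&completion_lock);
	lock_owner = pthread_self();
	lock_held = true;
}

static void unlock_completion(void)
{
	lock_held = false;
	pthread_mutex_unlock(&completion_lock);
}

/* Stand-in for lockdep_assert_held(&ctx->completion_lock). */
static void assert_completion_lock_held(void)
{
	assert(lock_held && pthread_equal(lock_owner, pthread_self()));
}

/* Models a non-empty ctx->defer_list. */
static bool have_deferred_work = true;

static void queue_deferred(void)
{
	/* Deferred work may only run under the lock, as for !IOPOLL. */
	assert_completion_lock_held();
	have_deferred_work = false;
	printf("deferred work flushed under completion_lock\n");
}

static void iopoll_complete(void)
{
	/*
	 * IOPOLL never holds completion_lock for CQ ring updates (it
	 * runs under uring_lock), so it must take the lock just for the
	 * deferred-work flush -- what the patch does in
	 * io_iopoll_complete().
	 */
	if (have_deferred_work) {
		lock_completion();
		queue_deferred();
		unlock_completion();
	}
	/* ...then publish the CQ tail without completion_lock. */
}

int main(void)
{
	iopoll_complete();
	return 0;
}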
commit 810e401b34
parent cd5837564f
@@ -1521,6 +1521,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
 
 static void io_queue_deferred(struct io_ring_ctx *ctx)
 {
+	lockdep_assert_held(&ctx->completion_lock);
+
 	while (!list_empty(&ctx->defer_list)) {
 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
 						struct io_defer_entry, list);
@@ -1572,14 +1574,24 @@ static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
 		io_queue_deferred(ctx);
 }
 
-static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+static inline bool io_commit_needs_flush(struct io_ring_ctx *ctx)
+{
+	return ctx->off_timeout_used || ctx->drain_active;
+}
+
+static inline void __io_commit_cqring(struct io_ring_ctx *ctx)
 {
-	if (unlikely(ctx->off_timeout_used || ctx->drain_active))
-		__io_commit_cqring_flush(ctx);
 	/* order cqe stores with ring update */
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
+static inline void io_commit_cqring(struct io_ring_ctx *ctx)
+{
+	if (unlikely(io_commit_needs_flush(ctx)))
+		__io_commit_cqring_flush(ctx);
+	__io_commit_cqring(ctx);
+}
+
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
@@ -2518,7 +2530,12 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		io_req_free_batch(&rb, req, &ctx->submit_state);
 	}
 
-	io_commit_cqring(ctx);
+	if (io_commit_needs_flush(ctx)) {
+		spin_lock(&ctx->completion_lock);
+		__io_commit_cqring_flush(ctx);
+		spin_unlock(&ctx->completion_lock);
+	}
+	__io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	io_req_free_batch_finish(ctx, &rb);
 }
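For reference, the commit paths as they read with this patch applied, reassembled from the hunks above (surrounding kernel context is omitted, so this is a reading aid rather than a standalone unit):

static inline bool io_commit_needs_flush(struct io_ring_ctx *ctx)
{
	return ctx->off_timeout_used || ctx->drain_active;
}

static inline void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* !IOPOLL path: callers already hold completion_lock. */
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	if (unlikely(io_commit_needs_flush(ctx)))
		__io_commit_cqring_flush(ctx);
	__io_commit_cqring(ctx);
}

io_iopoll_complete() deliberately bypasses io_commit_cqring() and open-codes the two steps, so only the rare deferred-work flush pays for taking completion_lock while the common tail update stays lock-free under uring_lock.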