io_uring: always grab lock in io_cancel_async_work()
No upstream commit exists for this patch. It's not necessarily safe to check the task_list locklessly; remove this micro-optimization and always grab task_lock before deeming it empty.

Reported-and-tested-by: Lee Jones <lee@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 00395fd7f9
commit b0bfceaa8c
@@ -3738,9 +3738,6 @@ static void io_cancel_async_work(struct io_ring_ctx *ctx,
 {
 	struct io_kiocb *req;
 
-	if (list_empty(&ctx->task_list))
-		return;
-
 	spin_lock_irq(&ctx->task_lock);
 
 	list_for_each_entry(req, &ctx->task_list, task_list) {
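The effect of the patch is that the emptiness of the list is only ever judged while the protecting lock is held, instead of via a lockless fast-path check. Below is a minimal userspace sketch of that pattern, assuming a pthread mutex and illustrative names (work_item, work_list, work_lock, cancel_all) that are not taken from the io_uring code itself:

/*
 * Sketch only: shows "always take the lock before inspecting the shared
 * list", the pattern the patch enforces. Names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	int id;
};

static struct work_item *work_list;                       /* shared list head */
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

static void cancel_all(void)
{
	struct work_item *req;

	/*
	 * No lockless "if (!work_list) return;" shortcut: another thread may
	 * be inserting concurrently, so an emptiness check is only meaningful
	 * while work_lock is held.
	 */
	pthread_mutex_lock(&work_lock);
	for (req = work_list; req; req = req->next)
		printf("cancelling work %d\n", req->id);
	work_list = NULL;
	pthread_mutex_unlock(&work_lock);
}

int main(void)
{
	struct work_item a = { .next = NULL, .id = 1 };

	pthread_mutex_lock(&work_lock);
	work_list = &a;
	pthread_mutex_unlock(&work_lock);

	cancel_all();
	return 0;
}

Walking an empty list under the lock costs almost nothing, which is why dropping the unlocked fast path is a safe trade against the race it could otherwise hide.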