BACKPORT: blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter
Grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter(); this
prevents the request from being re-used while ->fn is running. The
approach is the same as the one we use when handling timeouts.

This fixes request use-after-free (UAF) caused by the completion race or
by queue release:

- If a request is referenced before rq->q is frozen, the queue won't be
  frozen before the request is released during iteration.

- If a request is referenced after rq->q is frozen, refcount_inc_not_zero()
  returns false and we won't iterate over this request.

One request UAF is still not covered: refcount_inc_not_zero() may read an
already-freed request. That case will be handled in the next patch.
Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Bug: 197804811
Change-Id: I0e431a8361d1412aaca3f7c0310780d9a9ad0db8
[Upstream: cherry picked from commit 2e315dc07df009c3e29d6926871f62a30cfae394]
[Pradeep: Resolved conflicts in block/blk-mq-tag.c]
Git-commit: a5d38e7c26
Git-repo: https://android.googlesource.com/kernel/common/
Signed-off-by: Pradeep P V K <pragalla@codeaurora.org>
commit 7fe0d300ab
parent 21c7ca2b0a
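For illustration only, here is a minimal, hypothetical driver-side sketch (the function names and the counting logic are not part of this patch) of the semantics the change guarantees to busy_tag_iter_fn callbacks: the core now holds a reference on the request across each ->fn call, so the callback can safely inspect it.

/*
 * Hypothetical callback: count started requests in a tag set.  With this
 * patch the iterator holds a reference on @rq for the duration of the
 * call, so the request cannot be freed or re-used while we look at it.
 */
static bool count_inflight_rq(struct request *rq, void *priv, bool reserved)
{
	unsigned int *inflight = priv;

	if (blk_mq_request_started(rq))
		(*inflight)++;

	return true;	/* keep iterating */
}

static unsigned int count_inflight(struct blk_mq_tag_set *set)
{
	unsigned int inflight = 0;

	blk_mq_tagset_busy_iter(set, count_inflight_rq, &inflight);
	return inflight;
}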
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -212,6 +212,16 @@ struct bt_iter_data {
 	bool reserved;
 };
 
+static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+		unsigned int bitnr)
+{
+	struct request *rq = tags->rqs[bitnr];
+
+	if (!rq || !refcount_inc_not_zero(&rq->ref))
+		return NULL;
+	return rq;
+}
+
 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
 	struct bt_iter_data *iter_data = data;
@@ -219,18 +229,23 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = hctx->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
-	rq = tags->rqs[bitnr];
 
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assigning ->rqs[].
 	 */
-	if (rq && rq->q == hctx->queue)
-		return iter_data->fn(hctx, rq, iter_data->data, reserved);
-	return true;
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+
+	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
+		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 /**
@@ -273,6 +288,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = iter_data->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
@@ -281,11 +297,13 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assining ->rqs[].
 	 */
-	rq = tags->rqs[bitnr];
-	if (rq && blk_mq_request_started(rq))
-		return iter_data->fn(rq, iter_data->data, reserved);
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
 
-	return true;
+	if (blk_mq_request_started(rq))
+		ret = iter_data->fn(rq, iter_data->data, reserved);
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 /**
@@ -342,6 +360,9 @@ static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
  *		indicates whether or not @rq is a reserved request. Return
  *		true to continue iterating tags, false to stop.
  * @priv:	Will be passed as second argument to @fn.
+ *
+ * We grab one request reference before calling @fn and release it after
+ * @fn returns.
  */
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -886,6 +886,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 	return false;
 }
 
+void blk_mq_put_rq_ref(struct request *rq)
+{
+	if (is_flush_rq(rq, rq->mq_hctx))
+		rq->end_io(rq, 0);
+	else if (refcount_dec_and_test(&rq->ref))
+		__blk_mq_free_request(rq);
+}
+
 static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, void *priv, bool reserved)
 {
@@ -919,11 +927,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 	if (blk_mq_req_expired(rq, next))
 		blk_mq_rq_timed_out(rq, reserved);
 
-	if (is_flush_rq(rq, hctx))
-		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
-		__blk_mq_free_request(rq);
-
+	blk_mq_put_rq_ref(rq);
 	return true;
 }
 
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -47,6 +47,7 @@ void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
 					struct blk_mq_ctx *start);
+void blk_mq_put_rq_ref(struct request *rq);
 
 /*
  * Internal helpers for allocating/freeing the request map
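As a rough userspace analogy (C11 atomics, not kernel code; the toy obj type and helpers below are invented for illustration), this is why the get-not-zero/put pairing added above prevents use-after-free: a reference can only be taken while the object still has at least one, so an object whose last reference is being dropped is never handed to the "iterator".

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a request: freed when its count drops to zero. */
struct obj {
	atomic_uint ref;
	int tag;
};

/*
 * Rough analogue of refcount_inc_not_zero(): take a reference only if the
 * object still holds at least one, i.e. fail if a concurrent put already
 * dropped it to zero and the object is being (or has been) freed.
 */
static bool obj_get_not_zero(struct obj *o)
{
	unsigned int old = atomic_load(&o->ref);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&o->ref, &old, old + 1));

	return true;
}

/* Rough analogue of blk_mq_put_rq_ref() for the common (non-flush) case. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->ref, 1);	/* owner's reference */
	o->tag = 42;

	/* "Iterator" side: only touch the object if a reference was obtained. */
	if (obj_get_not_zero(o)) {
		printf("tag %d is still live\n", o->tag);
		obj_put(o);		/* drop the iterator's reference */
	}

	obj_put(o);			/* owner releases; the object is freed here */
	return 0;
}

In the kernel the same roles are played by refcount_inc_not_zero() on rq->ref and by the new blk_mq_put_rq_ref() helper; the remaining window, reading an already-freed request before the increment, is what the commit message defers to the next patch.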