blk-mq: remove the BLK_MQ_REQ_INTERNAL flag
Just check for a non-NULL elevator directly to make the code more clear.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 42fdc5e49c
parent 36a3df5a45
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -90,9 +90,9 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 			    struct sbitmap_queue *bt)
 {
-	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
-	    !hctx_may_queue(data->hctx, bt))
+	if (!data->q->elevator && !hctx_may_queue(data->hctx, bt))
 		return BLK_MQ_NO_TAG;
+
 	if (data->shallow_depth)
 		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
 	else
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -279,7 +279,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	struct request *rq = tags->static_rqs[tag];
 	req_flags_t rq_flags = 0;
 
-	if (data->flags & BLK_MQ_REQ_INTERNAL) {
+	if (data->q->elevator) {
 		rq->tag = BLK_MQ_NO_TAG;
 		rq->internal_tag = tag;
 	} else {
@@ -364,8 +364,6 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 		data->flags |= BLK_MQ_REQ_NOWAIT;
 
 	if (e) {
-		data->flags |= BLK_MQ_REQ_INTERNAL;
-
 		/*
 		 * Flush requests are special and go directly to the
 		 * dispatch list. Don't include reserved tags in the
@@ -380,7 +378,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
 retry:
 	data->ctx = blk_mq_get_ctx(q);
 	data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
-	if (!(data->flags & BLK_MQ_REQ_INTERNAL))
+	if (!e)
 		blk_mq_tag_busy(data->hctx);
 
 	/*
@@ -476,9 +474,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	cpu = cpumask_first_and(data.hctx->cpumask, cpu_online_mask);
 	data.ctx = __blk_mq_get_ctx(q, cpu);
 
-	if (q->elevator)
-		data.flags |= BLK_MQ_REQ_INTERNAL;
-	else
+	if (!q->elevator)
 		blk_mq_tag_busy(data.hctx);
 
 	ret = -EWOULDBLOCK;
diff --git a/block/blk-mq.h b/block/blk-mq.h
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -159,7 +159,7 @@ struct blk_mq_alloc_data {
 
 static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
 {
-	if (data->flags & BLK_MQ_REQ_INTERNAL)
+	if (data->q->elevator)
 		return data->hctx->sched_tags;
 
 	return data->hctx->tags;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -447,8 +447,6 @@ enum {
 	BLK_MQ_REQ_NOWAIT	= (__force blk_mq_req_flags_t)(1 << 0),
 	/* allocate from reserved pool */
 	BLK_MQ_REQ_RESERVED	= (__force blk_mq_req_flags_t)(1 << 1),
-	/* allocate internal/sched tag */
-	BLK_MQ_REQ_INTERNAL	= (__force blk_mq_req_flags_t)(1 << 2),
 	/* set RQF_PREEMPT */
 	BLK_MQ_REQ_PREEMPT	= (__force blk_mq_req_flags_t)(1 << 3),
 };
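Why the flag was removable, in miniature: BLK_MQ_REQ_INTERNAL only mirrored whether the request queue had an elevator (I/O scheduler) attached, a fact already recorded on the queue itself, so every check can test q->elevator directly. The sketch below is illustrative userspace C, not kernel code; struct request_queue, struct alloc_data, and use_sched_tags are simplified stand-ins for the kernel structures touched by this diff.

/*
 * Illustrative sketch only, not kernel code: the per-allocation flag
 * the commit removes merely cached a fact that is already visible on
 * the queue, so readers derive it on demand instead.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct request_queue {
	void *elevator;		/* non-NULL while an I/O scheduler is attached */
};

struct alloc_data {
	struct request_queue *q;
};

/* After the commit: derive the answer from queue state at the point of use. */
static bool use_sched_tags(const struct alloc_data *data)
{
	return data->q->elevator != NULL;
}

int main(void)
{
	struct request_queue q = { .elevator = NULL };
	struct alloc_data data = { .q = &q };
	int dummy_sched;

	printf("sched tags: %s\n", use_sched_tags(&data) ? "yes" : "no");

	q.elevator = &dummy_sched;	/* pretend a scheduler was attached */
	printf("sched tags: %s\n", use_sched_tags(&data) ? "yes" : "no");
	return 0;
}

The design point matches the diff: both allocation paths previously had to remember to set the flag, while a direct q->elevator check cannot drift out of sync with the queue.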