ANDROID: add hooks into blk-mq.c for customized I/O scheduler.
Our scheduler uses the ANDROID_OEM_DATA field in struct request_queue to
attach a customized set of ops to the request queues used by the UFS/eMMC
driver, and adopts fine-grained I/O scheduling strategies based on the
request type and the user scenario.

android_rvh_internal_blk_mq_alloc_request: obtain a new tag. The task may
be scheduled out when a tag cannot be allocated quickly, so a restricted
hook is required.

android_vh_internal_blk_mq_free_request: release individually allocated
tags.

android_vh_blk_mq_complete_request: record the time at which the request
completed.

android_vh_blk_mq_add_to_requeue_list: add requests to a separate dispatch
queue.

android_rvh_blk_mq_delay_run_hw_queue: calling queue_rq to handle a request
may schedule, so a restricted hook is required.

android_vh_blk_mq_run_hw_queue: set need_run to true.

android_vh_blk_mq_insert_request: insert the request into a customized
queue.

android_rvh_blk_mq_alloc_rq_map: allocating customized tags may schedule,
so a restricted hook is required.

android_rvh_blk_mq_init_allocated_queue: the customized scheduler allocates
resources at initialization, which may schedule, so a restricted hook is
required.

android_vh_blk_mq_exit_queue: release the resources allocated by the
customized scheduler.

android_vh_blk_mq_alloc_tag_set: get tag set information.

Bug: 319582497
Change-Id: I2b16d69a1e7085a4f5f82660b75188c517b01894
Signed-off-by: hao lv <hao.lv5@transsion.com>
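For orientation before the diff: hooks declared with DECLARE_HOOK are
ordinary vendor-hook tracepoints whose probes run in atomic context, while
DECLARE_RESTRICTED_HOOK attaches a single permanent handler that is allowed
to sleep, which is why the paths above that may schedule use android_rvh_*
hooks. A minimal sketch of how a vendor module might attach to the
restricted allocation hook; the oem_* names and the probe body are
illustrative assumptions, not part of this patch:

#include <linux/module.h>
#include <trace/hooks/block.h>

/*
 * Restricted-hook probe: it is allowed to sleep, e.g. while waiting for a
 * free tag. Setting *skip = true tells __blk_mq_alloc_request() not to
 * call blk_mq_get_tag() itself; *tag then carries the OEM-chosen tag.
 */
static void oem_alloc_request_probe(void *unused, bool *skip, int *tag,
				    struct blk_mq_alloc_data *data)
{
	/* Illustrative only: allocate from an OEM-managed tag pool here. */
}

static int __init oem_iosched_init(void)
{
	/* Restricted hooks accept one handler and cannot be unregistered. */
	return register_trace_android_rvh_internal_blk_mq_alloc_request(
					oem_alloc_request_probe, NULL);
}
module_init(oem_iosched_init);
MODULE_LICENSE("GPL");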
@@ -353,6 +353,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
	struct elevator_queue *e = q->elevator;
	u64 alloc_time_ns = 0;
	unsigned int tag;
	bool skip = false;

	/* alloc_time includes depth and tag waits */
	if (blk_queue_rq_alloc_time(q))
@@ -384,6 +385,8 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
	 * case just retry the hctx assignment and tag allocation as CPU hotplug
	 * should have migrated us to an online CPU by now.
	 */
	trace_android_rvh_internal_blk_mq_alloc_request(&skip, &tag, data);
	if (!skip)
		tag = blk_mq_get_tag(data);
	if (tag == BLK_MQ_NO_TAG) {
		if (data->flags & BLK_MQ_REQ_NOWAIT)
@@ -496,12 +499,17 @@ static void __blk_mq_free_request(struct request *rq)
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	const int sched_tag = rq->internal_tag;
	bool skip = false;

	blk_crypto_free_request(rq);
	blk_pm_mark_last_busy(rq);
	rq->mq_hctx = NULL;

	trace_android_vh_internal_blk_mq_free_request(&skip, rq, hctx);
	if (!skip) {
		if (rq->tag != BLK_MQ_NO_TAG)
			blk_mq_put_tag(hctx->tags, ctx, rq->tag);
	}
	if (sched_tag != BLK_MQ_NO_TAG)
		blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
	blk_mq_sched_restart(hctx);
@@ -701,6 +709,11 @@ EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
 **/
void blk_mq_complete_request(struct request *rq)
{
	bool skip = false;

	trace_android_vh_blk_mq_complete_request(&skip, rq);
	if (skip)
		return;
	if (!blk_mq_complete_request_remote(rq))
		rq->q->mq_ops->complete(rq);
}
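The hunk above wires in the completion hook, which the commit message says
records when a request completed. A minimal probe in that spirit,
registered via register_trace_android_vh_blk_mq_complete_request() as in
the earlier sketch (where the timestamp is stored is up to the OEM
scheduler; pr_debug is just an illustrative sink):

static void oem_complete_probe(void *unused, bool *skip, struct request *rq)
{
	/* Leave *skip false so the normal completion path still runs. */
	pr_debug("rq %p completed at %llu ns\n", rq, ktime_get_ns());
}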
@@ -827,7 +840,12 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
{
	struct request_queue *q = rq->q;
	unsigned long flags;
	bool skip = false;

	trace_android_vh_blk_mq_add_to_requeue_list(&skip, rq,
			kick_requeue_list);
	if (skip)
		return;
	/*
	 * We abuse this flag that is otherwise used by the I/O scheduler to
	 * request head insertion from the workqueue.
@@ -1593,9 +1611,15 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
					unsigned long msecs)
{
	bool skip = false;

	if (unlikely(blk_mq_hctx_stopped(hctx)))
		return;

	trace_android_rvh_blk_mq_delay_run_hw_queue(&skip, hctx, async);
	if (skip)
		return;

	if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
		int cpu = get_cpu();
		if (cpumask_test_cpu(cpu, hctx->cpumask)) {
@@ -1651,6 +1675,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
		blk_mq_hctx_has_pending(hctx);
	hctx_unlock(hctx, srcu_idx);

	trace_android_vh_blk_mq_run_hw_queue(&need_run, hctx);
	if (need_run)
		__blk_mq_delay_run_hw_queue(hctx, async, 0);
}
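This hunk matches the "set need_run to true" item in the commit message:
the hook lets the OEM scheduler force a hardware-queue run even when the
native software-queue lists look idle. A sketch, where
oem_queue_has_pending() is a hypothetical helper over the OEM dispatch
queue:

static void oem_run_hw_queue_probe(void *unused, bool *need_run,
				   struct blk_mq_hw_ctx *hctx)
{
	/* Only ever widen need_run; never clear a run the core decided on. */
	if (oem_queue_has_pending(hctx))
		*need_run = true;
}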
@@ -1877,9 +1902,14 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			     bool at_head)
{
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	bool skip = false;

	lockdep_assert_held(&ctx->lock);

	trace_android_vh_blk_mq_insert_request(&skip, hctx, rq);
	if (skip)
		return;

	__blk_mq_insert_req_list(hctx, rq, at_head);
	blk_mq_hctx_mark_pending(hctx, ctx);
}
@@ -2419,11 +2449,14 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
{
	struct blk_mq_tags *tags;
	int node;
	bool skip = false;

	node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
	if (node == NUMA_NO_NODE)
		node = set->numa_node;

	trace_android_rvh_blk_mq_alloc_rq_map(&skip, &tags, set, node, flags);
	if (!skip)
		tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
	if (!tags)
		return NULL;
@@ -3362,6 +3395,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
	blk_mq_add_queue_tag_set(set, q);
	blk_mq_map_swqueue(q);

	trace_android_rvh_blk_mq_init_allocated_queue(q);

	if (elevator_init)
		elevator_init_mq(q);

@@ -3385,6 +3420,7 @@ void blk_mq_exit_queue(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;

	trace_android_vh_blk_mq_exit_queue(q);
	/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
	blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
	/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
@@ -3575,6 +3611,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
	if (ret)
		goto out_free_mq_map;

	trace_android_vh_blk_mq_alloc_tag_set(set);

	ret = blk_mq_alloc_map_and_requests(set);
	if (ret)
		goto out_free_mq_map;
@@ -518,6 +518,17 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bio_free);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_internal_blk_mq_alloc_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_internal_blk_mq_free_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_complete_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_add_to_requeue_list);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_delay_run_hw_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_run_hw_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_insert_request);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_alloc_rq_map);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_init_allocated_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_exit_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_alloc_tag_set);
/*
 * For type visibility
 */
@@ -14,15 +14,18 @@
struct blk_mq_tags;
struct blk_mq_alloc_data;
struct blk_mq_tag_set;
struct blk_mq_hw_ctx;
#else
/* struct blk_mq_tags */
#include <../block/blk-mq-tag.h>
/* struct blk_mq_alloc_data */
#include <../block/blk-mq.h>
/* struct blk_mq_tag_set */
/* struct blk_mq_tag_set struct blk_mq_hw_ctx*/
#include <linux/blk-mq.h>
#endif /* __GENKSYMS__ */
struct bio;
struct request_queue;
struct request;

DECLARE_HOOK(android_vh_blk_alloc_rqs,
	TP_PROTO(size_t *rq_size, struct blk_mq_tag_set *set,
@@ -38,6 +41,63 @@ DECLARE_HOOK(android_vh_bio_free,
	TP_PROTO(struct bio *bio),
	TP_ARGS(bio));

DECLARE_RESTRICTED_HOOK(android_rvh_internal_blk_mq_alloc_request,
	TP_PROTO(bool *skip, int *tag, struct blk_mq_alloc_data *data),
	TP_ARGS(skip, tag, data), 1);

DECLARE_HOOK(android_vh_internal_blk_mq_free_request,
	TP_PROTO(bool *skip, struct request *rq, struct blk_mq_hw_ctx *hctx),
	TP_ARGS(skip, rq, hctx));

DECLARE_HOOK(android_vh_blk_mq_complete_request,
	TP_PROTO(bool *skip, struct request *rq),
	TP_ARGS(skip, rq));

DECLARE_HOOK(android_vh_blk_mq_add_to_requeue_list,
	TP_PROTO(bool *skip, struct request *rq, bool kick_requeue_list),
	TP_ARGS(skip, rq, kick_requeue_list));

DECLARE_HOOK(android_vh_blk_mq_get_driver_tag,
	TP_PROTO(struct request *rq),
	TP_ARGS(rq));

DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_delay_run_hw_queue,
	TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, bool async),
	TP_ARGS(skip, hctx, async), 1);

DECLARE_HOOK(android_vh_blk_mq_run_hw_queue,
	TP_PROTO(bool *need_run, struct blk_mq_hw_ctx *hctx),
	TP_ARGS(need_run, hctx));

DECLARE_HOOK(android_vh_blk_mq_insert_request,
	TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, struct request *rq),
	TP_ARGS(skip, hctx, rq));

DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_alloc_rq_map,
	TP_PROTO(bool *skip, struct blk_mq_tags **tags,
		struct blk_mq_tag_set *set, int node, unsigned int flags),
	TP_ARGS(skip, tags, set, node, flags), 1);

DECLARE_HOOK(android_vh_blk_mq_hctx_notify_dead,
	TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx),
	TP_ARGS(skip, hctx));

DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_init_allocated_queue,
	TP_PROTO(struct request_queue *q),
	TP_ARGS(q), 1);

DECLARE_HOOK(android_vh_blk_mq_exit_queue,
	TP_PROTO(struct request_queue *q),
	TP_ARGS(q));

DECLARE_HOOK(android_vh_blk_mq_alloc_tag_set,
	TP_PROTO(struct blk_mq_tag_set *set),
	TP_ARGS(set));

DECLARE_HOOK(android_vh_blk_mq_update_nr_requests,
	TP_PROTO(bool *skip, struct request_queue *q),
	TP_ARGS(skip, q));

#endif /* _TRACE_HOOK_BLOCK_H */

/* This part must be outside protection */
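Each DECLARE_HOOK/DECLARE_RESTRICTED_HOOK above generates a matching
register_trace_android_[r]vh_<name>() helper whose probe receives a void *
context pointer followed by the TP_PROTO arguments; bool *skip is the
recurring opt-out parameter. As one hedged example, a probe for
android_vh_blk_mq_insert_request that claims a request for a hypothetical
OEM dispatch queue:

static void oem_insert_request_probe(void *unused, bool *skip,
				     struct blk_mq_hw_ctx *hctx,
				     struct request *rq)
{
	/*
	 * oem_steal_request() is a hypothetical helper that moves rq onto
	 * the OEM dispatch list; setting *skip = true then stops
	 * __blk_mq_insert_request() from touching the native ctx lists.
	 */
	if (oem_steal_request(hctx, rq))
		*skip = true;
}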