Merge "sched: walt: add support for throttling MVP tasks"

qctecmdr 2024-05-27 20:39:28 -07:00 committed by Gerrit - the friendly Code Review server
commit 4dd33a8302
3 changed files with 49 additions and 7 deletions

View File

@@ -4822,6 +4822,9 @@ static void walt_sched_init_rq(struct rq *rq)
	wrq->num_mvp_tasks = 0;
	INIT_LIST_HEAD(&wrq->mvp_tasks);
	wrq->mvp_arrival_time = 0;
	wrq->mvp_throttle_time = 0;
	wrq->skip_mvp = false;
}

void sched_window_nr_ticks_change(void)

View File

@@ -149,8 +149,13 @@ struct walt_rq {
	u64 last_cc_update;
	u64 cycles;
	u64 util;

	/* MVP */
	struct list_head mvp_tasks;
	int num_mvp_tasks;
	u64 mvp_arrival_time; /* ts when the first MVP task was selected on this cpu */
	u64 mvp_throttle_time; /* ts when MVP tasks were throttled */
	bool skip_mvp;

	u64 latest_clock;
	u32 enqueue_counter;
};
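Read together, the three new fields form a small per-CPU state machine: mvp_arrival_time marks the start of a running window for MVP tasks, mvp_throttle_time marks the start of a throttle window, and skip_mvp tells the pick and preemption paths to ignore MVP ordering while throttled. A minimal userspace model of the invariants the rest of this diff appears to maintain (the struct and helper names are illustrative, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the three walt_rq fields added above. */
struct mvp_fields {
	uint64_t mvp_arrival_time;	/* running-window start, or 0 */
	uint64_t mvp_throttle_time;	/* throttle-window start, or 0 */
	bool skip_mvp;			/* suppress MVP picks while set */
};

/* Invariants implied by the transitions later in this diff. */
static void mvp_fields_check(const struct mvp_fields *f)
{
	/* The running window and the throttle window never overlap. */
	assert(!(f->mvp_arrival_time && f->mvp_throttle_time));

	/* skip_mvp is set and cleared together with the throttle window. */
	assert(f->skip_mvp == (f->mvp_throttle_time != 0));
}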

View File

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/seq_file.h>
@@ -1181,6 +1181,8 @@ void walt_cfs_deactivate_mvp_task(struct rq *rq, struct task_struct *p)
 * slice expired: MVP slice is expired and another MVP task can preempt.
 * slice not expired: This MVP task can continue to run.
 */
#define MAX_MVP_TIME_NS 500000000ULL
#define MVP_THROTTLE_TIME_NS 100000000ULL
static void walt_cfs_account_mvp_runtime(struct rq *rq, struct task_struct *curr)
{
	struct walt_rq *wrq = &per_cpu(walt_rq, cpu_of(rq));
@@ -1200,6 +1202,23 @@ static void walt_cfs_account_mvp_runtime(struct rq *rq, struct task_struct *curr)
	if (!(rq->clock_update_flags & RQCF_UPDATED))
		update_rq_clock(rq);

	if (wrq->mvp_throttle_time) {
		if ((rq->clock - wrq->mvp_throttle_time) > MVP_THROTTLE_TIME_NS) {
			wrq->skip_mvp = false;
			wrq->mvp_throttle_time = 0;
		}
	} else if (wrq->mvp_arrival_time) {
		if ((rq->clock - wrq->mvp_arrival_time) > MAX_MVP_TIME_NS) {
			wrq->skip_mvp = true;
			wrq->mvp_arrival_time = 0;
			wrq->mvp_throttle_time = rq->clock;
		}
	}
	/*
	 * Continue accounting even in the skip_mvp state if an MVP task is
	 * selected by the scheduler core to run on this CPU.
	 */
	if (curr->se.sum_exec_runtime > wts->sum_exec_snapshot_for_total)
		wts->total_exec = curr->se.sum_exec_runtime - wts->sum_exec_snapshot_for_total;
	else
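The two constants give MVP tasks a 500 ms budget followed by a 100 ms throttle, so in the worst case MVP tasks can hold a CPU for roughly 500/600 ≈ 83% of the time. The throttle bookkeeping above reads as a two-state machine driven by rq->clock; a self-contained sketch of just those transitions (the harness and names are illustrative, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_MVP_TIME_NS		500000000ULL	/* 500 ms MVP budget   */
#define MVP_THROTTLE_TIME_NS	100000000ULL	/* 100 ms throttle gap */

struct mvp_state {
	uint64_t arrival_time;	/* models wrq->mvp_arrival_time  */
	uint64_t throttle_time;	/* models wrq->mvp_throttle_time */
	bool skip_mvp;		/* models wrq->skip_mvp          */
};

/* Mirrors the throttle transitions in walt_cfs_account_mvp_runtime(). */
static void mvp_update(struct mvp_state *s, uint64_t now)
{
	if (s->throttle_time) {
		/* Throttled: lift the throttle after 100 ms. */
		if (now - s->throttle_time > MVP_THROTTLE_TIME_NS) {
			s->skip_mvp = false;
			s->throttle_time = 0;
		}
	} else if (s->arrival_time) {
		/* MVP window open: throttle once 500 ms have elapsed. */
		if (now - s->arrival_time > MAX_MVP_TIME_NS) {
			s->skip_mvp = true;
			s->arrival_time = 0;
			s->throttle_time = now;
		}
	}
}

int main(void)
{
	struct mvp_state s = { .arrival_time = 1 };	/* first MVP picked at t = 1 ns */

	mvp_update(&s, 600000000ULL);		/* t = 600 ms: budget spent */
	printf("skip_mvp=%d\n", s.skip_mvp);	/* prints 1: throttled */

	mvp_update(&s, 701000000ULL);		/* ~101 ms later */
	printf("skip_mvp=%d\n", s.skip_mvp);	/* prints 0: throttle lifted */
	return 0;
}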
@@ -1289,6 +1308,7 @@ void walt_cfs_tick(struct rq *rq)
{
	struct walt_rq *wrq = &per_cpu(walt_rq, cpu_of(rq));
	struct walt_task_struct *wts = (struct walt_task_struct *) rq->curr->android_vendor_data1;
	bool skip_mvp;

	if (unlikely(walt_disabled))
		return;
@@ -1298,12 +1318,15 @@ void walt_cfs_tick(struct rq *rq)
	if (list_empty(&wts->mvp_list) || (wts->mvp_list.next == NULL))
		goto out;

	/* Reschedule if RQ's skip_mvp state changes */
	skip_mvp = wrq->skip_mvp;
	walt_cfs_account_mvp_runtime(rq, rq->curr);

	/*
	 * If current is no longer the first MVP task, we have to
	 * reschedule to see if we can run any other task, including
	 * MVP tasks.
	 */
	if ((wrq->mvp_tasks.next != &wts->mvp_list) && rq->cfs.h_nr_running > 1)
	if (((skip_mvp != wrq->skip_mvp) ||
	     (wrq->mvp_tasks.next != &wts->mvp_list)) && rq->cfs.h_nr_running > 1)
		resched_curr(rq);
out:
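walt_cfs_tick() now snapshots skip_mvp before calling walt_cfs_account_mvp_runtime() and forces a reschedule when the value flips; without that, the task running when a CPU enters (or leaves) throttle would keep its slot until some later event triggered a re-pick. A compact model of the pattern (names and the harness are illustrative):

#include <stdbool.h>

struct rq_model {
	bool skip_mvp;		/* throttle state, owned by accounting */
	bool need_resched;	/* stands in for resched_curr()        */
};

/* Placeholder: runs the arrival/throttle state machine shown above
 * and may toggle rq->skip_mvp as a side effect. */
static void account_mvp_runtime(struct rq_model *rq) { (void)rq; }

static void tick(struct rq_model *rq, bool curr_is_first_mvp, int h_nr_running)
{
	bool skip_mvp = rq->skip_mvp;	/* snapshot before accounting */

	account_mvp_runtime(rq);	/* may flip skip_mvp */

	/* Re-pick if the throttle state changed or current lost its
	 * place at the head of the MVP queue. */
	if ((skip_mvp != rq->skip_mvp || !curr_is_first_mvp) &&
	    h_nr_running > 1)
		rq->need_resched = true;
}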
@@ -1323,7 +1346,7 @@ static void walt_cfs_check_preempt_wakeup(void *unused, struct rq *rq, struct ta
	struct walt_task_struct *wts_p = (struct walt_task_struct *) p->android_vendor_data1;
	struct task_struct *c = rq->curr;
	struct walt_task_struct *wts_c = (struct walt_task_struct *) rq->curr->android_vendor_data1;
	bool resched = false;
	bool resched = false, skip_mvp;
	bool p_is_mvp, curr_is_mvp;

	if (unlikely(walt_disabled))
@@ -1337,7 +1360,7 @@ static void walt_cfs_check_preempt_wakeup(void *unused, struct rq *rq, struct ta
	 * is simple.
	 */
	if (!curr_is_mvp) {
		if (p_is_mvp)
		if (p_is_mvp && !wrq->skip_mvp)
			goto preempt;
		return; /* CFS decides preemption */
	}
@@ -1346,8 +1369,9 @@ static void walt_cfs_check_preempt_wakeup(void *unused, struct rq *rq, struct ta
	 * Current is an MVP task; update its runtime before deciding the
	 * preemption.
	 */
	skip_mvp = wrq->skip_mvp;
	walt_cfs_account_mvp_runtime(rq, c);
	resched = (wrq->mvp_tasks.next != &wts_c->mvp_list);
	resched = (skip_mvp != wrq->skip_mvp) || (wrq->mvp_tasks.next != &wts_c->mvp_list);
/*
* current is no longer eligible to run. It must have been
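The wakeup path above gets the same snapshot-and-compare treatment, plus one new rule: a waking MVP task no longer preempts a non-MVP current task while the rq is throttled. A small restatement of the fast-path decision (the enum and helper are illustrative, not the kernel code):

#include <stdbool.h>

enum wakeup_action { CFS_DECIDES, PREEMPT, ACCOUNT_THEN_RECHECK };

/* Restates the fast path of walt_cfs_check_preempt_wakeup() as
 * changed by this commit, for a waking task p. */
static enum wakeup_action
mvp_wakeup_action(bool curr_is_mvp, bool p_is_mvp, bool skip_mvp)
{
	if (!curr_is_mvp)
		return (p_is_mvp && !skip_mvp) ? PREEMPT : CFS_DECIDES;

	/* Current is MVP: account its runtime first, then reschedule
	 * if it lost the queue head or the throttle state flipped. */
	return ACCOUNT_THEN_RECHECK;
}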
@@ -1399,10 +1423,15 @@ static void walt_cfs_replace_next_task_fair(void *unused, struct rq *rq, struct
		 (*p)->on_rq, task_thread_info(*p)->cpu,
		 cpu_of(rq), ((*p)->flags & PF_KTHREAD));
	/* We don't have MVP tasks queued */
	if (list_empty(&wrq->mvp_tasks))
	/* RQ is in MVP throttled state */
	if (wrq->skip_mvp)
		return;

	if (list_empty(&wrq->mvp_tasks)) {
		wrq->mvp_arrival_time = 0;
		return;
	}
	/* Return the first task from the MVP queue */
	wts = list_first_entry(&wrq->mvp_tasks, struct walt_task_struct, mvp_list);
	mvp = wts_to_ts(wts);
@@ -1411,6 +1440,11 @@ static void walt_cfs_replace_next_task_fair(void *unused, struct rq *rq, struct
	*se = &mvp->se;
	*repick = true;

	/* TODO: check with team whether this is fine when the clock is not updated */
	/* Mark arrival of MVP task */
	if (!wrq->mvp_arrival_time)
		wrq->mvp_arrival_time = rq->clock;

	if (simple) {
		for_each_sched_entity((*se)) {
			/*
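The pick-time hook ties the pieces together: a throttled rq falls back to the plain CFS pick, an empty MVP queue closes the running window, and the first MVP pick afterwards opens it. A simplified model of that flow (the struct and names are illustrative; the real hook also repicks the sched_entity):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct task;				/* stand-in for task_struct */

/* Minimal model of the per-rq MVP state touched by the pick path. */
struct rq_model {
	struct task *mvp_head;		/* NULL when no MVP is queued */
	uint64_t mvp_arrival_time;
	bool skip_mvp;
};

/* Simplified shape of walt_cfs_replace_next_task_fair() as changed
 * here: returns the MVP task to run, or NULL to let plain CFS pick. */
static struct task *mvp_pick(struct rq_model *rq, uint64_t now)
{
	if (rq->skip_mvp)		/* throttled: no MVP picks */
		return NULL;

	if (!rq->mvp_head) {		/* queue drained: close the window */
		rq->mvp_arrival_time = 0;
		return NULL;
	}

	if (!rq->mvp_arrival_time)	/* first pick: open the window */
		rq->mvp_arrival_time = now;

	return rq->mvp_head;		/* head of the MVP queue */
}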