sched/walt: Create WALT-specific datatype

Consolidate the WALT variables present in task_struct into a
separate WALT-specific datatype (struct walt_task_struct), which is
embedded in task_struct as 'wts'.

Change-Id: I6efd71d9f8cfeeb4d1417a6cdcac6cbda2175999
Signed-off-by: Sai Harshini Nimmala <snimmala@codeaurora.org>
Author: Sai Harshini Nimmala <snimmala@codeaurora.org>
Date:   2020-03-25 22:24:54 -07:00
Commit: 3804c2a592
Parent: d318d39f9c
10 changed files with 94 additions and 97 deletions
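
For orientation, a minimal sketch of the pattern the hunks below apply, with the field list abridged and a hypothetical task_boost() accessor added only to illustrate the call-site change: the per-task WALT state moves out of task_struct into one embedded type, and every user switches from p->field to p->wts.field.

/* Sketch only: fields abridged from the struct walt_task_struct hunk below. */
struct walt_task_struct {
	u64		mark_start;
	u32		sum, demand;
	u16		demand_scaled;
	int		boost;
	u64		boost_period;
	u64		boost_expires;
	bool		wake_up_idle;
	bool		misfit;
	cpumask_t	cpus_requested;
	/* ... remaining WALT fields consolidated here ... */
};

struct task_struct {
	/* ... */
#ifdef CONFIG_SCHED_WALT
	struct walt_task_struct wts;	/* replaces the loose WALT members */
#endif
	/* ... */
};

/* Hypothetical accessor, shown only to illustrate the call-site change: */
static inline int task_boost(struct task_struct *p)
{
	return p->wts.boost;		/* was: p->boost */
}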

View File

@@ -2913,7 +2913,7 @@ static ssize_t proc_sched_task_boost_read(struct file *file,
if (!task)
return -ESRCH;
sched_boost = task->boost;
sched_boost = task->wts.boost;
put_task_struct(task);
len = scnprintf(buffer, sizeof(buffer), "%d\n", sched_boost);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2945,9 +2945,9 @@ static ssize_t proc_sched_task_boost_write(struct file *file,
goto out;
}
task->boost = sched_boost;
task->wts.boost = sched_boost;
if (sched_boost == 0)
task->boost_period = 0;
task->wts.boost_period = 0;
out:
put_task_struct(task);
return err < 0 ? err : count;
@@ -2963,7 +2963,7 @@ static ssize_t proc_sched_task_boost_period_read(struct file *file,
if (!task)
return -ESRCH;
sched_boost_period_ms = div64_ul(task->boost_period, 1000000UL);
sched_boost_period_ms = div64_ul(task->wts.boost_period, 1000000UL);
put_task_struct(task);
len = snprintf(buffer, sizeof(buffer), "%llu\n", sched_boost_period_ms);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2991,14 +2991,14 @@ static ssize_t proc_sched_task_boost_period_write(struct file *file,
err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
if (err)
goto out;
if (task->boost == 0 && sched_boost_period) {
if (task->wts.boost == 0 && sched_boost_period) {
/* setting boost period without boost is invalid */
err = -EINVAL;
goto out;
}
task->boost_period = (u64)sched_boost_period * 1000 * 1000;
task->boost_expires = sched_clock() + task->boost_period;
task->wts.boost_period = (u64)sched_boost_period * 1000 * 1000;
task->wts.boost_expires = sched_clock() + task->wts.boost_period;
out:
put_task_struct(task);
return err < 0 ? err : count;

View File

@@ -563,8 +563,7 @@ extern void walt_update_cluster_topology(void);
#define RAVG_HIST_SIZE_MAX 5
#define NUM_BUSY_BUCKETS 10
/* ravg represents frequency scaled cpu-demand of tasks */
struct ravg {
struct walt_task_struct {
/*
* 'mark_start' marks the beginning of an event (task waking up, task
* starting to execute, task being preempted) within a window
@@ -598,19 +597,34 @@ struct ravg {
*
* 'demand_scaled' represents task's demand scaled to 1024
*/
u64 mark_start;
u32 sum, demand;
u32 coloc_demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
u16 demand_scaled;
u16 pred_demand_scaled;
u64 active_time;
u64 last_win_size;
u64 mark_start;
u32 sum, demand;
u32 coloc_demand;
u32 sum_history[RAVG_HIST_SIZE_MAX];
u32 *curr_window_cpu, *prev_window_cpu;
u32 curr_window, prev_window;
u32 pred_demand;
u8 busy_buckets[NUM_BUSY_BUCKETS];
u16 demand_scaled;
u16 pred_demand_scaled;
u64 active_time;
u64 last_win_size;
int boost;
bool wake_up_idle;
bool misfit;
u64 boost_period;
u64 boost_expires;
u64 last_sleep_ts;
u32 init_load_pct;
u32 unfilter;
u64 last_wake_ts;
u64 last_enqueued_ts;
struct related_thread_group __rcu *grp;
struct list_head grp_list;
u64 cpu_cycles;
cpumask_t cpus_requested;
};
#else
static inline void sched_exit(struct task_struct *p) { }
@@ -830,20 +844,7 @@ struct task_struct {
struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_WALT
int boost;
u64 boost_period;
u64 boost_expires;
u64 last_sleep_ts;
bool wake_up_idle;
struct ravg ravg;
u32 init_load_pct;
u64 last_wake_ts;
u64 last_enqueued_ts;
struct related_thread_group *grp;
struct list_head grp_list;
u64 cpu_cycles;
bool misfit;
u32 unfilter;
struct walt_task_struct wts;
#endif
#ifdef CONFIG_CGROUP_SCHED
@@ -871,9 +872,6 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
#ifdef CONFIG_SCHED_WALT
cpumask_t cpus_requested;
#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
@@ -2194,19 +2192,19 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
#define PF_WAKE_UP_IDLE 1
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
{
return p->wake_up_idle;
return p->wts.wake_up_idle;
}
static inline int sched_set_wake_up_idle(struct task_struct *p,
int wake_up_idle)
{
p->wake_up_idle = !!wake_up_idle;
p->wts.wake_up_idle = !!wake_up_idle;
return 0;
}
static inline void set_wake_up_idle(bool enabled)
{
current->wake_up_idle = enabled;
current->wts.wake_up_idle = enabled;
}
#else
static inline u32 sched_get_wake_up_idle(struct task_struct *p)
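
Side note, with a hypothetical caller for illustration only: users of the wake-up-idle helpers above need no changes from this commit, because the move to p->wts.wake_up_idle is hidden behind the accessors.

/* Hypothetical caller; the helper names are the ones shown in the hunk above. */
static void mark_latency_sensitive(struct task_struct *p)
{
	sched_set_wake_up_idle(p, 1);	/* now stores into p->wts.wake_up_idle */
	WARN_ON(!sched_get_wake_up_idle(p));
}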

View File

@@ -1044,7 +1044,7 @@ TRACE_EVENT(sched_task_util,
__entry->rtg_skip_min = rtg_skip_min;
__entry->start_cpu = start_cpu;
#ifdef CONFIG_SCHED_WALT
__entry->unfilter = p->unfilter;
__entry->unfilter = p->wts.unfilter;
#else
__entry->unfilter = 0;
#endif

View File

@@ -75,7 +75,10 @@ struct task_struct init_task
.cpus_mask = CPU_MASK_ALL,
.nr_cpus_allowed= NR_CPUS,
#ifdef CONFIG_SCHED_WALT
.cpus_requested = CPU_MASK_ALL,
.wts = {
.cpus_requested = CPU_MASK_ALL,
.wake_up_idle = false,
},
#endif
.mm = NULL,
.active_mm = &init_mm,
@@ -95,9 +98,6 @@ struct task_struct init_task
#endif
#ifdef CONFIG_CGROUP_SCHED
.sched_task_group = &root_task_group,
#endif
#ifdef CONFIG_SCHED_WALT
.wake_up_idle = false,
#endif
.ptraced = LIST_HEAD_INIT(init_task.ptraced),
.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),

View File

@@ -1028,8 +1028,8 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
#ifdef CONFIG_SCHED_WALT
int ret;
if (cpumask_subset(&p->cpus_requested, cs->cpus_allowed)) {
ret = set_cpus_allowed_ptr(p, &p->cpus_requested);
if (cpumask_subset(&p->wts.cpus_requested, cs->cpus_allowed)) {
ret = set_cpus_allowed_ptr(p, &p->wts.cpus_requested);
if (!ret)
return ret;
}

View File

@@ -2776,11 +2776,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
p->se.nr_migrations = 0;
p->se.vruntime = 0;
#ifdef CONFIG_SCHED_WALT
p->last_sleep_ts = 0;
p->wake_up_idle = false;
p->boost = 0;
p->boost_expires = 0;
p->boost_period = 0;
p->wts.last_sleep_ts = 0;
p->wts.wake_up_idle = false;
p->wts.boost = 0;
p->wts.boost_expires = 0;
p->wts.boost_period = 0;
#endif
INIT_LIST_HEAD(&p->se.group_node);
@@ -4186,7 +4186,7 @@ static void __sched notrace __schedule(bool preempt)
if (likely(prev != next)) {
#ifdef CONFIG_SCHED_WALT
if (!prev->on_rq)
prev->last_sleep_ts = wallclock;
prev->wts.last_sleep_ts = wallclock;
#endif
walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
@@ -5614,7 +5614,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
#ifdef CONFIG_SCHED_WALT
if (!retval && !(p->flags & PF_KTHREAD))
cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask);
cpumask_and(&p->wts.cpus_requested,
in_mask, cpu_possible_mask);
#endif
out_free_new_mask:
@@ -6770,7 +6771,7 @@ void __init sched_init_smp(void)
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
BUG();
#ifdef CONFIG_SCHED_WALT
cpumask_copy(&current->cpus_requested, cpu_possible_mask);
cpumask_copy(&current->wts.cpus_requested, cpu_possible_mask);
#endif
sched_init_granularity();
@@ -8379,13 +8380,14 @@ int set_task_boost(int boost, u64 period)
if (boost < TASK_BOOST_NONE || boost >= TASK_BOOST_END)
return -EINVAL;
if (boost) {
current->boost = boost;
current->boost_period = (u64)period * 1000 * 1000;
current->boost_expires = sched_clock() + current->boost_period;
current->wts.boost = boost;
current->wts.boost_period = (u64)period * 1000 * 1000;
current->wts.boost_expires = sched_clock() +
current->wts.boost_period;
} else {
current->boost = 0;
current->boost_expires = 0;
current->boost_period = 0;
current->wts.boost = 0;
current->wts.boost_expires = 0;
current->wts.boost_period = 0;
}
return 0;
}

View File

@@ -929,9 +929,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
#ifdef CONFIG_SCHED_WALT
P(ravg.demand);
#endif
avg_atom = p->se.sum_exec_runtime;
if (nr_switches)
avg_atom = div64_ul(avg_atom, nr_switches);

View File

@@ -3718,7 +3718,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
static inline unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
return p->ravg.demand_scaled;
return p->wts.demand_scaled;
#endif
return max(task_util(p), _task_util_est(p));
}
@@ -5436,7 +5436,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (!se) {
add_nr_running(rq, 1);
#ifdef CONFIG_SCHED_WALT
p->misfit = !task_fits_max(p, rq->cpu);
p->wts.misfit = !task_fits_max(p, rq->cpu);
#endif
inc_rq_walt_stats(rq, p);
/*
@@ -6434,7 +6434,7 @@ static inline bool walt_get_rtg_status(struct task_struct *p)
static inline bool walt_task_skip_min_cpu(struct task_struct *p)
{
return sched_boost() != CONSERVATIVE_BOOST &&
walt_get_rtg_status(p) && p->unfilter;
walt_get_rtg_status(p) && p->wts.unfilter;
}
static inline bool walt_is_many_wakeup(int sibling_count_hint)
@@ -6849,7 +6849,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
#ifdef CONFIG_SCHED_WALT
static inline int wake_to_idle(struct task_struct *p)
{
return (current->wake_up_idle || p->wake_up_idle);
return (current->wts.wake_up_idle || p->wts.wake_up_idle);
}
#else
static inline int wake_to_idle(struct task_struct *p)
@@ -11411,7 +11411,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &curr->se;
#ifdef CONFIG_SCHED_WALT
bool old_misfit = curr->misfit;
bool old_misfit = curr->wts.misfit;
bool misfit;
#endif
@@ -11430,7 +11430,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
if (old_misfit != misfit) {
walt_adjust_nr_big_tasks(rq, 1, misfit);
curr->misfit = misfit;
curr->wts.misfit = misfit;
}
#endif

View File

@@ -2177,14 +2177,14 @@ unsigned long capacity_curr_of(int cpu);
#ifdef CONFIG_SCHED_WALT
static inline int per_task_boost(struct task_struct *p)
{
if (p->boost_period) {
if (sched_clock() > p->boost_expires) {
p->boost_period = 0;
p->boost_expires = 0;
p->boost = 0;
if (p->wts.boost_period) {
if (sched_clock() > p->wts.boost_expires) {
p->wts.boost_period = 0;
p->wts.boost_expires = 0;
p->wts.boost = 0;
}
}
return p->boost;
return p->wts.boost;
}
#else
static inline int per_task_boost(struct task_struct *p)
@@ -2207,7 +2207,7 @@ static inline unsigned long capacity_orig_of(int cpu)
static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
return p->ravg.demand_scaled;
return p->wts.demand_scaled;
#endif
return READ_ONCE(p->se.avg.util_avg);
}
@@ -2947,23 +2947,23 @@ static inline bool is_min_capacity_cpu(int cpu)
static inline unsigned int task_load(struct task_struct *p)
{
return p->ravg.demand;
return p->wts.demand;
}
static inline unsigned int task_pl(struct task_struct *p)
{
return p->ravg.pred_demand;
return p->wts.pred_demand;
}
static inline bool task_in_related_thread_group(struct task_struct *p)
{
return !!(rcu_access_pointer(p->grp) != NULL);
return (rcu_access_pointer(p->wts.grp) != NULL);
}
static inline
struct related_thread_group *task_related_thread_group(struct task_struct *p)
{
return rcu_dereference(p->grp);
return rcu_dereference(p->wts.grp);
}
/* Is frequency of two cpus synchronized with each other? */
@@ -3066,8 +3066,8 @@ static inline void clear_reserved(int cpu)
static inline bool
task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
{
return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
rq->window_start);
return cpu_of(rq) == task_cpu(p) && (p->on_rq ||
p->wts.last_sleep_ts >= rq->window_start);
}
static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)

View File

@@ -35,8 +35,8 @@ fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
static inline void
walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled,
p->ravg.pred_demand_scaled);
fixup_cumulative_runnable_avg(&rq->walt_stats, p->wts.demand_scaled,
p->wts.pred_demand_scaled);
/*
* Add a task's contribution to the cumulative window demand when
@@ -45,16 +45,16 @@ walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
* prio/cgroup/class change.
* (2) task is waking for the first time in this window.
*/
if (p->on_rq || (p->last_sleep_ts < rq->window_start))
walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled);
if (p->on_rq || (p->wts.last_sleep_ts < rq->window_start))
walt_fixup_cum_window_demand(rq, p->wts.demand_scaled);
}
static inline void
walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
{
fixup_cumulative_runnable_avg(&rq->walt_stats,
-(s64)p->ravg.demand_scaled,
-(s64)p->ravg.pred_demand_scaled);
-(s64)p->wts.demand_scaled,
-(s64)p->wts.pred_demand_scaled);
/*
* on_rq will be 1 for sleeping tasks. So check if the task
@@ -62,7 +62,7 @@ walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
* prio/cgroup/class.
*/
if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled);
walt_fixup_cum_window_demand(rq, -(s64)p->wts.demand_scaled);
}
static inline void walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
@@ -75,7 +75,7 @@ static inline void walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
static inline void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
if (p->misfit)
if (p->wts.misfit)
rq->walt_stats.nr_big_tasks++;
walt_inc_cumulative_runnable_avg(rq, p);
@@ -83,7 +83,7 @@ static inline int inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
static inline void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
{
if (p->misfit)
if (p->wts.misfit)
rq->walt_stats.nr_big_tasks--;
BUG_ON(rq->walt_stats.nr_big_tasks < 0);
@@ -128,7 +128,7 @@ static inline int sched_cpu_high_irqload(int cpu)
static inline int exiting_task(struct task_struct *p)
{
return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
return (p->wts.sum_history[0] == EXITING_TASK_MARKER);
}
static inline u64
@ -159,7 +159,7 @@ void walt_sched_init_rq(struct rq *rq);
static inline void walt_update_last_enqueue(struct task_struct *p)
{
p->last_enqueued_ts = sched_ktime_clock();
p->wts.last_enqueued_ts = sched_ktime_clock();
}
static inline bool is_suh_max(void)
@@ -170,10 +170,10 @@ static inline bool is_suh_max(void)
#define DEFAULT_CGROUP_COLOC_ID 1
static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
{
struct related_thread_group *rtg = p->grp;
struct related_thread_group *rtg = p->wts.grp;
if (is_suh_max() && rtg && rtg->id == DEFAULT_CGROUP_COLOC_ID &&
rtg->skip_min && p->unfilter)
rtg->skip_min && p->wts.unfilter)
return is_min_capacity_cpu(cpu);
return false;