Merge "sched: Improve the scheduler"

commit 1d46207553
@@ -2913,7 +2913,7 @@ static ssize_t proc_sched_task_boost_read(struct file *file,

 	if (!task)
 		return -ESRCH;
-	sched_boost = task->boost;
+	sched_boost = task->wts.boost;
 	put_task_struct(task);
 	len = scnprintf(buffer, sizeof(buffer), "%d\n", sched_boost);
 	return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2945,9 +2945,9 @@ static ssize_t proc_sched_task_boost_write(struct file *file,
 		goto out;
 	}

-	task->boost = sched_boost;
+	task->wts.boost = sched_boost;
 	if (sched_boost == 0)
-		task->boost_period = 0;
+		task->wts.boost_period = 0;
 out:
 	put_task_struct(task);
 	return err < 0 ? err : count;
@@ -2963,7 +2963,7 @@ static ssize_t proc_sched_task_boost_period_read(struct file *file,

 	if (!task)
 		return -ESRCH;
-	sched_boost_period_ms = div64_ul(task->boost_period, 1000000UL);
+	sched_boost_period_ms = div64_ul(task->wts.boost_period, 1000000UL);
 	put_task_struct(task);
 	len = snprintf(buffer, sizeof(buffer), "%llu\n", sched_boost_period_ms);
 	return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -2991,14 +2991,14 @@ static ssize_t proc_sched_task_boost_period_write(struct file *file,
 	err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
 	if (err)
 		goto out;
-	if (task->boost == 0 && sched_boost_period) {
+	if (task->wts.boost == 0 && sched_boost_period) {
 		/* setting boost period without boost is invalid */
 		err = -EINVAL;
 		goto out;
 	}

-	task->boost_period = (u64)sched_boost_period * 1000 * 1000;
-	task->boost_expires = sched_clock() + task->boost_period;
+	task->wts.boost_period = (u64)sched_boost_period * 1000 * 1000;
+	task->wts.boost_expires = sched_clock() + task->wts.boost_period;
 out:
 	put_task_struct(task);
 	return err < 0 ? err : count;
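The pattern behind the hunks above, and behind most of this commit, is a relocation: per-task WALT fields such as boost, boost_period and boost_expires move out of struct task_struct into an embedded struct walt_task_struct reached through the new wts member, so task->boost becomes task->wts.boost. Below is a stand-alone, user-space sketch of that layout; field names mirror the diff, but everything else is simplified and hypothetical, not the kernel's real definitions.

#include <stdint.h>

/* Illustrative only -- not the kernel's actual structures. */
struct walt_task_struct {
	int boost;               /* per-task boost level */
	uint64_t boost_period;   /* boost duration, nanoseconds */
	uint64_t boost_expires;  /* absolute expiry timestamp */
};

struct task_struct_sketch {
	/* ...the many generic scheduler fields are elided... */
	struct walt_task_struct wts;   /* all WALT per-task state, grouped */
};

static int read_boost(const struct task_struct_sketch *task)
{
	return task->wts.boost;   /* was: task->boost */
}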
@@ -556,7 +556,7 @@ static inline int hh_vcpu_populate_affinity_info(u32 cpu_index, u64 cap_id)
 #endif /* CONFIG_QCOM_HYP_CORE_CTL */

 #ifdef CONFIG_SCHED_WALT
-extern void sched_exit(struct task_struct *p);
+extern void walt_task_dead(struct task_struct *p);
 extern int
 register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
 extern void
@@ -569,8 +569,7 @@ extern void walt_update_cluster_topology(void);
 #define RAVG_HIST_SIZE_MAX 5
 #define NUM_BUSY_BUCKETS 10

-/* ravg represents frequency scaled cpu-demand of tasks */
-struct ravg {
+struct walt_task_struct {
 	/*
 	 * 'mark_start' marks the beginning of an event (task waking up, task
 	 * starting to execute, task being preempted) within a window
@@ -604,21 +603,36 @@ struct ravg {
 	 *
 	 * 'demand_scaled' represents task's demand scaled to 1024
 	 */
-	u64 mark_start;
-	u32 sum, demand;
-	u32 coloc_demand;
-	u32 sum_history[RAVG_HIST_SIZE_MAX];
-	u32 *curr_window_cpu, *prev_window_cpu;
-	u32 curr_window, prev_window;
-	u32 pred_demand;
-	u8 busy_buckets[NUM_BUSY_BUCKETS];
-	u16 demand_scaled;
-	u16 pred_demand_scaled;
-	u64 active_time;
-	u64 last_win_size;
+	u64 mark_start;
+	u32 sum, demand;
+	u32 coloc_demand;
+	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 *curr_window_cpu, *prev_window_cpu;
+	u32 curr_window, prev_window;
+	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
+	u16 demand_scaled;
+	u16 pred_demand_scaled;
+	u64 active_time;
+	u64 last_win_size;
+	int boost;
+	bool wake_up_idle;
+	bool misfit;
+	u64 boost_period;
+	u64 boost_expires;
+	u64 last_sleep_ts;
+	u32 init_load_pct;
+	u32 unfilter;
+	u64 last_wake_ts;
+	u64 last_enqueued_ts;
+	struct walt_related_thread_group __rcu *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
+	cpumask_t cpus_requested;
 };

 #else
-static inline void sched_exit(struct task_struct *p) { }
+static inline void walt_task_dead(struct task_struct *p) { }

 static inline int
 register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
@@ -841,20 +855,7 @@ struct task_struct {
 	struct sched_rt_entity rt;

 #ifdef CONFIG_SCHED_WALT
-	int boost;
-	u64 boost_period;
-	u64 boost_expires;
-	u64 last_sleep_ts;
-	bool wake_up_idle;
-	struct ravg ravg;
-	u32 init_load_pct;
-	u64 last_wake_ts;
-	u64 last_enqueued_ts;
-	struct related_thread_group *grp;
-	struct list_head grp_list;
-	u64 cpu_cycles;
-	bool misfit;
-	u32 unfilter;
+	struct walt_task_struct wts;
 #endif

 #ifdef CONFIG_CGROUP_SCHED
@@ -882,9 +883,6 @@ struct task_struct {
 	int nr_cpus_allowed;
 	const cpumask_t *cpus_ptr;
 	cpumask_t cpus_mask;
-#ifdef CONFIG_SCHED_WALT
-	cpumask_t cpus_requested;
-#endif

 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
@@ -2214,19 +2212,19 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 #define PF_WAKE_UP_IDLE 1
 static inline u32 sched_get_wake_up_idle(struct task_struct *p)
 {
-	return p->wake_up_idle;
+	return p->wts.wake_up_idle;
 }

 static inline int sched_set_wake_up_idle(struct task_struct *p,
 					 int wake_up_idle)
 {
-	p->wake_up_idle = !!wake_up_idle;
+	p->wts.wake_up_idle = !!wake_up_idle;
 	return 0;
 }

 static inline void set_wake_up_idle(bool enabled)
 {
-	current->wake_up_idle = enabled;
+	current->wts.wake_up_idle = enabled;
 }
 #else
 static inline u32 sched_get_wake_up_idle(struct task_struct *p)
@@ -475,11 +475,11 @@ TRACE_EVENT(sched_load_balance_skip_tasks,
 	TP_fast_assign(
 		__entry->scpu = scpu;
 		__entry->src_util_cum =
-				cpu_rq(scpu)->cum_window_demand_scaled;
+				cpu_rq(scpu)->wrq.cum_window_demand_scaled;
 		__entry->grp_type = grp_type;
 		__entry->dcpu = dcpu;
 		__entry->dst_util_cum =
-				cpu_rq(dcpu)->cum_window_demand_scaled;
+				cpu_rq(dcpu)->wrq.cum_window_demand_scaled;
 		__entry->pid = pid;
 		__entry->affinity = affinity;
 		__entry->task_util = task_util;
@@ -1044,7 +1044,7 @@ TRACE_EVENT(sched_task_util,
 		__entry->rtg_skip_min = rtg_skip_min;
 		__entry->start_cpu = start_cpu;
 #ifdef CONFIG_SCHED_WALT
-		__entry->unfilter = p->unfilter;
+		__entry->unfilter = p->wts.unfilter;
 #else
 		__entry->unfilter = 0;
 #endif
@@ -75,7 +75,10 @@ struct task_struct init_task
 	.cpus_mask = CPU_MASK_ALL,
 	.nr_cpus_allowed= NR_CPUS,
 #ifdef CONFIG_SCHED_WALT
-	.cpus_requested = CPU_MASK_ALL,
+	.wts = {
+		.cpus_requested = CPU_MASK_ALL,
+		.wake_up_idle = false,
+	},
 #endif
 	.mm = NULL,
 	.active_mm = &init_mm,
@@ -95,9 +98,6 @@ struct task_struct init_task
 #endif
 #ifdef CONFIG_CGROUP_SCHED
 	.sched_task_group = &root_task_group,
 #endif
-#ifdef CONFIG_SCHED_WALT
-	.wake_up_idle = false,
-#endif
 	.ptraced = LIST_HEAD_INIT(init_task.ptraced),
 	.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
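Worth noting in the init_task hunk above: once the WALT fields live inside the embedded wts member, their static initialization uses a nested designated initializer. A minimal, self-contained illustration of that C idiom follows; the structs and values here are invented for the example and are not the kernel's.

#include <stdbool.h>

struct walt_fields_sketch {
	unsigned long cpus_requested;
	bool wake_up_idle;
};

struct task_fields_sketch {
	int nr_cpus_allowed;
	struct walt_fields_sketch wts;
};

/* Nested designated initializer, mirroring ".wts = { .cpus_requested = ... }" */
static struct task_fields_sketch init_task_sketch = {
	.nr_cpus_allowed = 8,
	.wts = {
		.cpus_requested = ~0UL,   /* stand-in for CPU_MASK_ALL */
		.wake_up_idle = false,
	},
};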
@@ -1017,8 +1017,8 @@ static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
 #ifdef CONFIG_SCHED_WALT
 	int ret;

-	if (cpumask_subset(&p->cpus_requested, cs->cpus_allowed)) {
-		ret = set_cpus_allowed_ptr(p, &p->cpus_requested);
+	if (cpumask_subset(&p->wts.cpus_requested, cs->cpus_allowed)) {
+		ret = set_cpus_allowed_ptr(p, &p->wts.cpus_requested);
 		if (!ret)
 			return ret;
 	}
@@ -748,7 +748,6 @@ void __noreturn do_exit(long code)
 	}

 	exit_signals(tsk); /* sets PF_EXITING */
-	sched_exit(tsk);

 	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
@@ -1338,7 +1338,7 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	uclamp_rq_dec(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
 #ifdef CONFIG_SCHED_WALT
-	if (p == rq->ed_task)
+	if (p == rq->wrq.ed_task)
 		early_detection_notify(rq, sched_ktime_clock());
 #endif
 	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_mask)[0]);
@@ -2799,11 +2799,11 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
 #ifdef CONFIG_SCHED_WALT
-	p->last_sleep_ts = 0;
-	p->wake_up_idle = false;
-	p->boost = 0;
-	p->boost_expires = 0;
-	p->boost_period = 0;
+	p->wts.last_sleep_ts = 0;
+	p->wts.wake_up_idle = false;
+	p->wts.boost = 0;
+	p->wts.boost_expires = 0;
+	p->wts.boost_period = 0;
 #endif
 	INIT_LIST_HEAD(&p->se.group_node);

@@ -3371,6 +3371,7 @@ static struct rq *finish_task_switch(struct task_struct *prev)
 	 */
 	kprobe_flush_task(prev);

+	walt_task_dead(prev);
 	/* Task is done with its stack. */
 	put_task_stack(prev);

@@ -3720,7 +3721,7 @@ void scheduler_tick(void)
 	u64 wallclock;
 	bool early_notif;
 	u32 old_load;
-	struct related_thread_group *grp;
+	struct walt_related_thread_group *grp;
 	unsigned int flag = 0;

 	sched_clock_tick();
@@ -4209,7 +4210,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (likely(prev != next)) {
 #ifdef CONFIG_SCHED_WALT
 		if (!prev->on_rq)
-			prev->last_sleep_ts = wallclock;
+			prev->wts.last_sleep_ts = wallclock;
 #endif

 		walt_update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
@@ -5627,7 +5628,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}

 	if (!retval && !(p->flags & PF_KTHREAD))
-		cpumask_and(&p->cpus_requested, in_mask, cpu_possible_mask);
+		cpumask_and(&p->wts.cpus_requested,
+			    in_mask, cpu_possible_mask);
 #endif

 out_free_new_mask:
@@ -6789,7 +6791,7 @@ void __init sched_init_smp(void)
 	if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
 		BUG();
 #ifdef CONFIG_SCHED_WALT
-	cpumask_copy(&current->cpus_requested, cpu_possible_mask);
+	cpumask_copy(&current->wts.cpus_requested, cpu_possible_mask);
 #endif
 	sched_init_granularity();

@@ -7220,7 +7222,7 @@ static void walt_schedgp_attach(struct cgroup_taskset *tset)
 	cgroup_taskset_first(tset, &css);
 	tg = css_tg(css);

-	colocate = tg->colocate;
+	colocate = tg->wtg.colocate;

 	cgroup_taskset_for_each(task, css, tset)
 		sync_cgroup_colocation(task, colocate);
@@ -7232,7 +7234,7 @@ sched_boost_override_read(struct cgroup_subsys_state *css,
 {
 	struct task_group *tg = css_tg(css);

-	return (u64) tg->sched_boost_no_override;
+	return (u64) tg->wtg.sched_boost_no_override;
 }

 static int sched_boost_override_write(struct cgroup_subsys_state *css,
@@ -7240,7 +7242,7 @@ static int sched_boost_override_write(struct cgroup_subsys_state *css,
 {
 	struct task_group *tg = css_tg(css);

-	tg->sched_boost_no_override = !!override;
+	tg->wtg.sched_boost_no_override = !!override;
 	return 0;
 }

@@ -7249,7 +7251,7 @@ static u64 sched_colocate_read(struct cgroup_subsys_state *css,
 {
 	struct task_group *tg = css_tg(css);

-	return (u64) tg->colocate;
+	return (u64) tg->wtg.colocate;
 }

 static int sched_colocate_write(struct cgroup_subsys_state *css,
@@ -7257,11 +7259,11 @@ static int sched_colocate_write(struct cgroup_subsys_state *css,
 {
 	struct task_group *tg = css_tg(css);

-	if (tg->colocate_update_disabled)
+	if (tg->wtg.colocate_update_disabled)
 		return -EPERM;

-	tg->colocate = !!colocate;
-	tg->colocate_update_disabled = true;
+	tg->wtg.colocate = !!colocate;
+	tg->wtg.colocate_update_disabled = true;
 	return 0;
 }
 #else
@@ -8398,13 +8400,14 @@ int set_task_boost(int boost, u64 period)
 	if (boost < TASK_BOOST_NONE || boost >= TASK_BOOST_END)
 		return -EINVAL;
 	if (boost) {
-		current->boost = boost;
-		current->boost_period = (u64)period * 1000 * 1000;
-		current->boost_expires = sched_clock() + current->boost_period;
+		current->wts.boost = boost;
+		current->wts.boost_period = (u64)period * 1000 * 1000;
+		current->wts.boost_expires = sched_clock() +
+					     current->wts.boost_period;
 	} else {
-		current->boost = 0;
-		current->boost_expires = 0;
-		current->boost_period = 0;
+		current->wts.boost = 0;
+		current->wts.boost_expires = 0;
+		current->wts.boost_period = 0;
 	}
 	return 0;
 }
@@ -8429,25 +8432,25 @@ void sched_account_irqtime(int cpu, struct task_struct *curr,
 	walt_update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
 			      delta);

-	nr_ticks = cur_jiffies_ts - rq->irqload_ts;
+	nr_ticks = cur_jiffies_ts - rq->wrq.irqload_ts;

 	if (nr_ticks) {
 		if (nr_ticks < 10) {
 			/* Decay CPU's irqload by 3/4 for each window. */
-			rq->avg_irqload *= (3 * nr_ticks);
-			rq->avg_irqload = div64_u64(rq->avg_irqload,
+			rq->wrq.avg_irqload *= (3 * nr_ticks);
+			rq->wrq.avg_irqload = div64_u64(rq->wrq.avg_irqload,
						4 * nr_ticks);
 		} else {
-			rq->avg_irqload = 0;
+			rq->wrq.avg_irqload = 0;
 		}
-		rq->avg_irqload += rq->cur_irqload;
-		rq->high_irqload = (rq->avg_irqload >=
+		rq->wrq.avg_irqload += rq->wrq.cur_irqload;
+		rq->wrq.high_irqload = (rq->wrq.avg_irqload >=
					sysctl_sched_cpu_high_irqload);
-		rq->cur_irqload = 0;
+		rq->wrq.cur_irqload = 0;
 	}

-	rq->cur_irqload += delta;
-	rq->irqload_ts = cur_jiffies_ts;
+	rq->wrq.cur_irqload += delta;
+	rq->wrq.irqload_ts = cur_jiffies_ts;
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 #endif
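The set_task_boost() hunk above keeps the existing arithmetic while moving the fields under wts: the caller passes a period in milliseconds, it is stored in nanoseconds, and the expiry is recorded as an absolute sched_clock() deadline. A hedged user-space sketch of that bookkeeping follows; the now_ns argument stands in for sched_clock() and is not part of any kernel API.

#include <stdint.h>

struct boost_state_sketch {
	int boost;
	uint64_t boost_period;    /* nanoseconds */
	uint64_t boost_expires;   /* absolute deadline, nanoseconds */
};

/* Mirrors the diff: a non-zero boost arms a period and deadline,
 * a zero boost clears all three fields. */
static void set_boost_sketch(struct boost_state_sketch *s, int boost,
			     uint64_t period_ms, uint64_t now_ns)
{
	if (boost) {
		s->boost = boost;
		s->boost_period = period_ms * 1000 * 1000;
		s->boost_expires = now_ns + s->boost_period;
	} else {
		s->boost = 0;
		s->boost_expires = 0;
		s->boost_period = 0;
	}
}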
@@ -65,7 +65,7 @@ struct sugov_cpu {
 	unsigned int iowait_boost;
 	u64 last_update;

-	struct sched_walt_cpu_load walt_load;
+	struct walt_cpu_load walt_load;

 	unsigned long util;
 	unsigned int flags;
@@ -394,11 +394,11 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 	sg_cpu->bw_dl = cpu_bw_dl(rq);

 #ifdef CONFIG_SCHED_WALT
-	util = cpu_util_freq(sg_cpu->cpu, &sg_cpu->walt_load);
+	util = cpu_util_freq_walt(sg_cpu->cpu, &sg_cpu->walt_load);

 	return uclamp_rq_util_with(rq, util, NULL);
 #else
-	util = cpu_util_freq(sg_cpu->cpu, NULL) + cpu_util_rt(rq);
+	util = cpu_util_cfs(rq);

 	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
 #endif
@@ -650,10 +650,10 @@ do { \
 	P(cpu_capacity);
 #endif
 #ifdef CONFIG_SCHED_WALT
-	P(cluster->cur_freq);
-	P(walt_stats.nr_big_tasks);
+	P(wrq.cluster->cur_freq);
+	P(wrq.walt_stats.nr_big_tasks);
 	SEQ_printf(m, " .%-30s: %llu\n", "walt_stats.cumulative_runnable_avg",
-			rq->walt_stats.cumulative_runnable_avg_scaled);
+			rq->wrq.walt_stats.cumulative_runnable_avg_scaled);
 #endif
 #undef P
 #undef PN
@@ -929,9 +929,6 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
 	P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 	P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

-#ifdef CONFIG_SCHED_WALT
-	P(ravg.demand);
-#endif
 	avg_atom = p->se.sum_exec_runtime;
 	if (nr_switches)
 		avg_atom = div64_ul(avg_atom, nr_switches);
@@ -3718,7 +3718,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
 static inline unsigned long task_util_est(struct task_struct *p)
 {
 #ifdef CONFIG_SCHED_WALT
-	return p->ravg.demand_scaled;
+	return p->wts.demand_scaled;
 #endif
 	return max(task_util(p), _task_util_est(p));
 }
@@ -5436,7 +5436,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se) {
 		add_nr_running(rq, 1);
 #ifdef CONFIG_SCHED_WALT
-		p->misfit = !task_fits_max(p, rq->cpu);
+		p->wts.misfit = !task_fits_max(p, rq->cpu);
 #endif
 		inc_rq_walt_stats(rq, p);
 		/*
@@ -6415,7 +6415,7 @@ unsigned long capacity_curr_of(int cpu)
 #ifdef CONFIG_SCHED_WALT
 static inline bool walt_get_rtg_status(struct task_struct *p)
 {
-	struct related_thread_group *grp;
+	struct walt_related_thread_group *grp;
 	bool ret = false;

 	rcu_read_lock();
@@ -6432,7 +6432,7 @@ static inline bool walt_get_rtg_status(struct task_struct *p)
 static inline bool walt_task_skip_min_cpu(struct task_struct *p)
 {
 	return sched_boost() != CONSERVATIVE_BOOST &&
-		walt_get_rtg_status(p) && p->unfilter;
+		walt_get_rtg_status(p) && p->wts.unfilter;
 }

 static inline bool walt_is_many_wakeup(int sibling_count_hint)
@@ -6707,7 +6707,7 @@ static inline unsigned long
 cpu_util_next_walt(int cpu, struct task_struct *p, int dst_cpu)
 {
 	unsigned long util =
-		cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+		cpu_rq(cpu)->wrq.walt_stats.cumulative_runnable_avg_scaled;
 	bool queued = task_on_rq_queued(p);

 	/*
@@ -6848,7 +6848,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
 #ifdef CONFIG_SCHED_WALT
 static inline int wake_to_idle(struct task_struct *p)
 {
-	return (current->wake_up_idle || p->wake_up_idle);
+	return (current->wts.wake_up_idle || p->wts.wake_up_idle);
 }
 #else
 static inline int wake_to_idle(struct task_struct *p)
@@ -8105,7 +8105,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	}

 	if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
-			!preferred_cluster(cpu_rq(env->dst_cpu)->cluster, p))
+			!preferred_cluster(
+				cpu_rq(env->dst_cpu)->wrq.cluster, p))
 		return 0;

 	/* Don't detach task if it doesn't fit on the destination */
@@ -8114,7 +8115,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		return 0;

 	/* Don't detach task if it is under active migration */
-	if (env->src_rq->push_task == p)
+	if (env->src_rq->wrq.push_task == p)
 		return 0;
 #endif

@@ -10419,7 +10420,7 @@ int active_load_balance_cpu_stop(void *data)
 	BUG_ON(busiest_rq == target_rq);

 #ifdef CONFIG_SCHED_WALT
-	push_task = busiest_rq->push_task;
+	push_task = busiest_rq->wrq.push_task;
 	target_cpu = busiest_rq->push_cpu;
 	if (push_task) {
 		if (task_on_rq_queued(push_task) &&
@@ -10475,14 +10476,14 @@ int active_load_balance_cpu_stop(void *data)
 out_unlock:
 	busiest_rq->active_balance = 0;
 #ifdef CONFIG_SCHED_WALT
-	push_task = busiest_rq->push_task;
+	push_task = busiest_rq->wrq.push_task;
 #endif
 	target_cpu = busiest_rq->push_cpu;
 	clear_reserved(target_cpu);

 #ifdef CONFIG_SCHED_WALT
 	if (push_task)
-		busiest_rq->push_task = NULL;
+		busiest_rq->wrq.push_task = NULL;
 #endif

 	rq_unlock(busiest_rq, &rf);
@@ -11197,7 +11198,7 @@ static bool silver_has_big_tasks(void)
 	for_each_possible_cpu(cpu) {
 		if (!is_min_capacity_cpu(cpu))
 			break;
-		if (cpu_rq(cpu)->walt_stats.nr_big_tasks)
+		if (cpu_rq(cpu)->wrq.walt_stats.nr_big_tasks)
 			return true;
 	}

@@ -11428,7 +11429,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &curr->se;
 #ifdef CONFIG_SCHED_WALT
-	bool old_misfit = curr->misfit;
+	bool old_misfit = curr->wts.misfit;
 	bool misfit;
 #endif

@@ -11447,7 +11448,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)

 	if (old_misfit != misfit) {
 		walt_adjust_nr_big_tasks(rq, 1, misfit);
-		curr->misfit = misfit;
+		curr->wts.misfit = misfit;
 	}
 #endif

@@ -1809,7 +1809,7 @@ static int rt_energy_aware_wake_cpu(struct task_struct *task)

 	rcu_read_lock();

-	cpu = cpu_rq(smp_processor_id())->rd->min_cap_orig_cpu;
+	cpu = cpu_rq(smp_processor_id())->rd->wrd.min_cap_orig_cpu;
 	if (cpu < 0)
 		goto unlock;

@@ -88,7 +88,7 @@ extern __read_mostly bool sched_predl;
 extern unsigned int sched_capacity_margin_up[NR_CPUS];
 extern unsigned int sched_capacity_margin_down[NR_CPUS];

-struct sched_walt_cpu_load {
+struct walt_cpu_load {
 	unsigned long nl;
 	unsigned long pl;
 	bool rtgb_active;
@@ -107,33 +107,95 @@ struct walt_sched_stats {
 	u64 pred_demands_sum_scaled;
 };

-struct group_cpu_time {
-	u64 curr_runnable_sum;
-	u64 prev_runnable_sum;
-	u64 nt_curr_runnable_sum;
-	u64 nt_prev_runnable_sum;
+struct walt_task_group {
+	/* Toggle ability to override sched boost enabled */
+	bool sched_boost_no_override;
+	/*
+	 * Controls whether a cgroup is eligible for sched boost or not. This
+	 * can temporariliy be disabled by the kernel based on the no_override
+	 * flag above.
+	 */
+	bool sched_boost_enabled;
+	/*
+	 * Controls whether tasks of this cgroup should be colocated with each
+	 * other and tasks of other cgroups that have the same flag turned on.
+	 */
+	bool colocate;
+	/* Controls whether further updates are allowed to the colocate flag */
+	bool colocate_update_disabled;
 };

-struct load_subtractions {
-	u64 window_start;
-	u64 subs;
-	u64 new_subs;
+struct walt_root_domain {
+	/* First cpu with maximum and minimum original capacity */
+	int max_cap_orig_cpu, min_cap_orig_cpu;
+	/* First cpu with mid capacity */
+	int mid_cap_orig_cpu;
 };

+
 #define NUM_TRACKED_WINDOWS 2
 #define NUM_LOAD_INDICES 1000

-struct sched_cluster {
-	raw_spinlock_t load_lock;
-	struct list_head list;
-	struct cpumask cpus;
-	int id;
+struct group_cpu_time {
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+};
+
+struct load_subtractions {
+	u64 window_start;
+	u64 subs;
+	u64 new_subs;
+};
+
+struct walt_rq {
+	struct task_struct *push_task;
+	struct walt_sched_cluster *cluster;
+	struct cpumask freq_domain_cpumask;
+	struct walt_sched_stats walt_stats;
+
+	u64 window_start;
+	u32 prev_window_size;
+	unsigned long walt_flags;
+
+	u64 cur_irqload;
+	u64 avg_irqload;
+	u64 irqload_ts;
+	bool high_irqload;
+	struct task_struct *ed_task;
+	u64 task_exec_scale;
+	u64 old_busy_time;
+	u64 old_estimated_time;
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+	u64 cum_window_demand_scaled;
+	struct group_cpu_time grp_time;
+	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
+	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
+			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
+	u8 *top_tasks[NUM_TRACKED_WINDOWS];
+	u8 curr_table;
+	int prev_top;
+	int curr_top;
+	bool notif_pending;
+	u64 last_cc_update;
+	u64 cycles;
+};
+
+struct walt_sched_cluster {
+	raw_spinlock_t load_lock;
+	struct list_head list;
+	struct cpumask cpus;
+	int id;
 	/*
 	 * max_possible_freq = maximum supported by hardware
 	 */
-	unsigned int cur_freq;
-	unsigned int max_possible_freq;
-	u64 aggr_grp_load;
+	unsigned int cur_freq;
+	unsigned int max_possible_freq;
+	u64 aggr_grp_load;
 };

 extern __weak cpumask_t asym_cap_sibling_cpus;
@@ -461,21 +523,7 @@ struct task_group {
 	/* Latency-sensitive flag used for a task group */
 	unsigned int latency_sensitive;
 #ifdef CONFIG_SCHED_WALT
-	/* Toggle ability to override sched boost enabled */
-	bool sched_boost_no_override;
-	/*
-	 * Controls whether a cgroup is eligible for sched boost or not. This
-	 * can temporariliy be disabled by the kernel based on the no_override
-	 * flag above.
-	 */
-	bool sched_boost_enabled;
-	/*
-	 * Controls whether tasks of this cgroup should be colocated with each
-	 * other and tasks of other cgroups that have the same flag turned on.
-	 */
-	bool colocate;
-	/* Controls whether further updates are allowed to the colocate flag */
-	bool colocate_update_disabled;
+	struct walt_task_group wtg;
 #endif /* CONFIG_SCHED_WALT */
 #endif /* CONFIG_UCLAMP_TASK_GROUP */

@@ -873,10 +921,7 @@ struct root_domain {
 	struct perf_domain __rcu *pd;

 #ifdef CONFIG_SCHED_WALT
-	/* First cpu with maximum and minimum original capacity */
-	int max_cap_orig_cpu, min_cap_orig_cpu;
-	/* First cpu with mid capacity */
-	int mid_cap_orig_cpu;
+	struct walt_root_domain wrd;
 #endif
 };

@@ -1049,39 +1094,7 @@ struct rq {
 #endif

 #ifdef CONFIG_SCHED_WALT
-	struct task_struct *push_task;
-	struct sched_cluster *cluster;
-	struct cpumask freq_domain_cpumask;
-	struct walt_sched_stats walt_stats;
-
-	u64 window_start;
-	u32 prev_window_size;
-	unsigned long walt_flags;
-
-	u64 cur_irqload;
-	u64 avg_irqload;
-	u64 irqload_ts;
-	bool high_irqload;
-	struct task_struct *ed_task;
-	u64 task_exec_scale;
-	u64 old_busy_time, old_busy_time_group;
-	u64 old_estimated_time;
-	u64 curr_runnable_sum;
-	u64 prev_runnable_sum;
-	u64 nt_curr_runnable_sum;
-	u64 nt_prev_runnable_sum;
-	u64 cum_window_demand_scaled;
-	struct group_cpu_time grp_time;
-	struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
-	DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
-			NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
-	u8 *top_tasks[NUM_TRACKED_WINDOWS];
-	u8 curr_table;
-	int prev_top;
-	int curr_top;
-	bool notif_pending;
-	u64 last_cc_update;
-	u64 cycles;
+	struct walt_rq wrq;
 #endif /* CONFIG_SCHED_WALT */

 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -2146,7 +2159,7 @@ static inline int hrtick_enabled(struct rq *rq)
 #ifdef CONFIG_SCHED_WALT
 u64 sched_ktime_clock(void);
 unsigned long
-cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load);
+cpu_util_freq_walt(int cpu, struct walt_cpu_load *walt_load);
 #else
 #define sched_ravg_window TICK_NSEC
 static inline u64 sched_ktime_clock(void)
@@ -2177,14 +2190,14 @@ unsigned long capacity_curr_of(int cpu);
 #ifdef CONFIG_SCHED_WALT
 static inline int per_task_boost(struct task_struct *p)
 {
-	if (p->boost_period) {
-		if (sched_clock() > p->boost_expires) {
-			p->boost_period = 0;
-			p->boost_expires = 0;
-			p->boost = 0;
+	if (p->wts.boost_period) {
+		if (sched_clock() > p->wts.boost_expires) {
+			p->wts.boost_period = 0;
+			p->wts.boost_expires = 0;
+			p->wts.boost = 0;
 		}
 	}
-	return p->boost;
+	return p->wts.boost;
 }
 #else
 static inline int per_task_boost(struct task_struct *p)
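per_task_boost() above is the reader side of the same boost state: the boost is dropped lazily, i.e. the fields are cleared the first time the value is read after boost_expires has passed. A small, self-contained sketch of that lazy-expiry check follows, again with an explicit now_ns argument standing in for sched_clock(); the struct is invented for the example.

#include <stdint.h>

struct boost_read_sketch {
	int boost;
	uint64_t boost_period;
	uint64_t boost_expires;
};

/* Clear an expired boost on read and report whatever remains. */
static int per_task_boost_sketch(struct boost_read_sketch *b, uint64_t now_ns)
{
	if (b->boost_period && now_ns > b->boost_expires) {
		b->boost_period = 0;
		b->boost_expires = 0;
		b->boost = 0;
	}
	return b->boost;
}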
@@ -2207,7 +2220,7 @@ static inline unsigned long capacity_orig_of(int cpu)
 static inline unsigned long task_util(struct task_struct *p)
 {
 #ifdef CONFIG_SCHED_WALT
-	return p->ravg.demand_scaled;
+	return p->wts.demand_scaled;
 #endif
 	return READ_ONCE(p->se.avg.util_avg);
 }
@@ -2257,7 +2270,7 @@ static inline unsigned long cpu_util(int cpu)

 #ifdef CONFIG_SCHED_WALT
 	u64 walt_cpu_util =
-		cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+		cpu_rq(cpu)->wrq.walt_stats.cumulative_runnable_avg_scaled;

 	return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
 #endif
@@ -2277,7 +2290,7 @@ static inline unsigned long cpu_util_cum(int cpu, int delta)
 	unsigned long capacity = capacity_orig_of(cpu);

 #ifdef CONFIG_SCHED_WALT
-	util = cpu_rq(cpu)->cum_window_demand_scaled;
+	util = cpu_rq(cpu)->wrq.cum_window_demand_scaled;
 #endif
 	delta += util;
 	if (delta < 0)
@@ -2286,16 +2299,6 @@ static inline unsigned long cpu_util_cum(int cpu, int delta)
 	return (delta >= capacity) ? capacity : delta;
 }

-static inline unsigned long
-cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
-{
-#ifdef CONFIG_SCHED_WALT
-	return cpu_util_freq_walt(cpu, walt_load);
-#else
-	return cpu_util(cpu);
-#endif
-}
-
 extern unsigned int capacity_margin_freq;

 static inline unsigned long
@@ -2856,12 +2859,12 @@ enum sched_boost_policy {

 #ifdef CONFIG_SCHED_WALT

-static inline int cluster_first_cpu(struct sched_cluster *cluster)
+static inline int cluster_first_cpu(struct walt_sched_cluster *cluster)
 {
 	return cpumask_first(&cluster->cpus);
 }

-struct related_thread_group {
+struct walt_related_thread_group {
 	int id;
 	raw_spinlock_t lock;
 	struct list_head tasks;
@@ -2873,16 +2876,16 @@ struct related_thread_group {
 	u64 start_ts;
 };

-extern struct sched_cluster *sched_cluster[NR_CPUS];
+extern struct walt_sched_cluster *sched_cluster[NR_CPUS];

 extern unsigned int max_possible_capacity;
 extern unsigned int __weak min_max_possible_capacity;
 extern unsigned int __read_mostly __weak sched_init_task_load_windows;
 extern unsigned int __read_mostly __weak sched_load_granule;

-extern int update_preferred_cluster(struct related_thread_group *grp,
+extern int update_preferred_cluster(struct walt_related_thread_group *grp,
 			struct task_struct *p, u32 old_load, bool from_tick);
-extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void set_preferred_cluster(struct walt_related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);

 #define NO_BOOST 0
@@ -2931,7 +2934,7 @@ static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)

 static inline unsigned int cpu_max_possible_freq(int cpu)
 {
-	return cpu_rq(cpu)->cluster->max_possible_freq;
+	return cpu_rq(cpu)->wrq.cluster->max_possible_freq;
 }

 static inline unsigned int cpu_max_freq(int cpu)
@@ -2957,23 +2960,23 @@ static inline bool is_min_capacity_cpu(int cpu)

 static inline unsigned int task_load(struct task_struct *p)
 {
-	return p->ravg.demand;
+	return p->wts.demand;
 }

 static inline unsigned int task_pl(struct task_struct *p)
 {
-	return p->ravg.pred_demand;
+	return p->wts.pred_demand;
 }

 static inline bool task_in_related_thread_group(struct task_struct *p)
 {
-	return !!(rcu_access_pointer(p->grp) != NULL);
+	return (rcu_access_pointer(p->wts.grp) != NULL);
 }

-static inline
-struct related_thread_group *task_related_thread_group(struct task_struct *p)
+static inline struct walt_related_thread_group
+*task_related_thread_group(struct task_struct *p)
 {
-	return rcu_dereference(p->grp);
+	return rcu_dereference(p->wts.grp);
 }

 /* Is frequency of two cpus synchronized with each other? */
@@ -2987,7 +2990,7 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
 	if (asym_cap_siblings(src_cpu, dst_cpu))
 		return 1;

-	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
+	return cpumask_test_cpu(dst_cpu, &rq->wrq.freq_domain_cpumask);
 }

 #define CPU_RESERVED 1
@@ -3016,9 +3019,9 @@ static inline bool is_full_throttle_boost(void)
 	return sched_boost() == FULL_THROTTLE_BOOST;
 }

-extern int preferred_cluster(struct sched_cluster *cluster,
+extern int preferred_cluster(struct walt_sched_cluster *cluster,
 			struct task_struct *p);
-extern struct sched_cluster *rq_cluster(struct rq *rq);
+extern struct walt_sched_cluster *rq_cluster(struct rq *rq);

 #ifdef CONFIG_UCLAMP_TASK_GROUP
 static inline bool task_sched_boost(struct task_struct *p)
@@ -3030,7 +3033,7 @@ static inline bool task_sched_boost(struct task_struct *p)
 		return false;
 	tg = container_of(css, struct task_group, css);

-	return tg->sched_boost_enabled;
+	return tg->wtg.sched_boost_enabled;
 }

 extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
@@ -3056,35 +3059,35 @@ static inline int is_reserved(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);

-	return test_bit(CPU_RESERVED, &rq->walt_flags);
+	return test_bit(CPU_RESERVED, &rq->wrq.walt_flags);
 }

 static inline int mark_reserved(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);

-	return test_and_set_bit(CPU_RESERVED, &rq->walt_flags);
+	return test_and_set_bit(CPU_RESERVED, &rq->wrq.walt_flags);
 }

 static inline void clear_reserved(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);

-	clear_bit(CPU_RESERVED, &rq->walt_flags);
+	clear_bit(CPU_RESERVED, &rq->wrq.walt_flags);
 }

 static inline bool
 task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
 {
-	return cpu_of(rq) == task_cpu(p) && (p->on_rq || p->last_sleep_ts >=
-						rq->window_start);
+	return cpu_of(rq) == task_cpu(p) && (p->on_rq ||
+			p->wts.last_sleep_ts >= rq->wrq.window_start);
 }

 static inline void walt_fixup_cum_window_demand(struct rq *rq, s64 scaled_delta)
 {
-	rq->cum_window_demand_scaled += scaled_delta;
-	if (unlikely((s64)rq->cum_window_demand_scaled < 0))
-		rq->cum_window_demand_scaled = 0;
+	rq->wrq.cum_window_demand_scaled += scaled_delta;
+	if (unlikely((s64)rq->wrq.cum_window_demand_scaled < 0))
+		rq->wrq.cum_window_demand_scaled = 0;
 }

 extern unsigned long thermal_cap(int cpu);
@@ -3124,15 +3127,15 @@ static inline enum sched_boost_policy task_boost_policy(struct task_struct *p)
 	return policy;
 }

-static inline bool is_min_capacity_cluster(struct sched_cluster *cluster)
+static inline bool is_min_capacity_cluster(struct walt_sched_cluster *cluster)
 {
 	return is_min_capacity_cpu(cluster_first_cpu(cluster));
 }
 #else /* CONFIG_SCHED_WALT */

 struct walt_sched_stats;
-struct related_thread_group;
-struct sched_cluster;
+struct walt_related_thread_group;
+struct walt_sched_cluster;

 static inline bool task_sched_boost(struct task_struct *p)
 {
@@ -3177,12 +3180,12 @@ static inline bool is_max_capacity_cpu(int cpu) { return true; }
 static inline bool is_min_capacity_cpu(int cpu) { return true; }

 static inline int
-preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+preferred_cluster(struct walt_sched_cluster *cluster, struct task_struct *p)
 {
 	return -1;
 }

-static inline struct sched_cluster *rq_cluster(struct rq *rq)
+static inline struct walt_sched_cluster *rq_cluster(struct rq *rq)
 {
 	return NULL;
 }
@@ -3194,15 +3197,16 @@ static inline bool asym_cap_sibling_group_has_capacity(int dst_cpu, int margin)
 	return false;
 }

-static inline void set_preferred_cluster(struct related_thread_group *grp) { }
+static inline void
+set_preferred_cluster(struct walt_related_thread_group *grp) { }

 static inline bool task_in_related_thread_group(struct task_struct *p)
 {
 	return false;
 }

-static inline
-struct related_thread_group *task_related_thread_group(struct task_struct *p)
+static inline struct walt_related_thread_group *task_related_thread_group(
+						struct task_struct *p)
 {
 	return NULL;
 }
@@ -3210,8 +3214,9 @@ struct related_thread_group *task_related_thread_group(struct task_struct *p)
 static inline u32 task_load(struct task_struct *p) { return 0; }
 static inline u32 task_pl(struct task_struct *p) { return 0; }

-static inline int update_preferred_cluster(struct related_thread_group *grp,
-		struct task_struct *p, u32 old_load, bool from_tick)
+static inline int
+update_preferred_cluster(struct walt_related_thread_group *grp,
+		struct task_struct *p, u32 old_load, bool from_tick)
 {
 	return 0;
 }
@@ -495,8 +495,8 @@ static int init_rootdomain(struct root_domain *rd)
 		goto free_cpudl;

 #ifdef CONFIG_SCHED_WALT
-	rd->max_cap_orig_cpu = rd->min_cap_orig_cpu = -1;
-	rd->mid_cap_orig_cpu = -1;
+	rd->wrd.max_cap_orig_cpu = rd->wrd.min_cap_orig_cpu = -1;
+	rd->wrd.mid_cap_orig_cpu = -1;
 #endif

 	init_max_cpu_capacity(&rd->max_cpu_capacity);
@@ -2059,8 +2059,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
 #ifdef CONFIG_SCHED_WALT
-		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
-		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
+		int max_cpu = READ_ONCE(d.rd->wrd.max_cap_orig_cpu);
+		int min_cpu = READ_ONCE(d.rd->wrd.min_cap_orig_cpu);
 #endif

 		sd = *per_cpu_ptr(d.sd, i);
@@ -2068,11 +2068,11 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 #ifdef CONFIG_SCHED_WALT
 		if ((max_cpu < 0) || (arch_scale_cpu_capacity(i) >
 				arch_scale_cpu_capacity(max_cpu)))
-			WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
+			WRITE_ONCE(d.rd->wrd.max_cap_orig_cpu, i);

 		if ((min_cpu < 0) || (arch_scale_cpu_capacity(i) <
 				arch_scale_cpu_capacity(min_cpu)))
-			WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
+			WRITE_ONCE(d.rd->wrd.min_cap_orig_cpu, i);
 #endif

 		cpu_attach_domain(sd, d.rd, i);
@@ -2081,14 +2081,14 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 #ifdef CONFIG_SCHED_WALT
 	/* set the mid capacity cpu (assumes only 3 capacities) */
 	for_each_cpu(i, cpu_map) {
-		int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
-		int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
+		int max_cpu = READ_ONCE(d.rd->wrd.max_cap_orig_cpu);
+		int min_cpu = READ_ONCE(d.rd->wrd.min_cap_orig_cpu);

 		if ((arch_scale_cpu_capacity(i)
-				!= arch_scale_cpu_capacity(min_cpu)) &&
+			!= arch_scale_cpu_capacity(min_cpu)) &&
 		    (arch_scale_cpu_capacity(i)
-				!= arch_scale_cpu_capacity(max_cpu))) {
-			WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
+			!= arch_scale_cpu_capacity(max_cpu))) {
+			WRITE_ONCE(d.rd->wrd.mid_cap_orig_cpu, i);
 			break;
 		}
 	}
@@ -2098,10 +2098,10 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	 * change dynamically. So update the max cap CPU and its capacity
 	 * here.
 	 */
-	if (d.rd->max_cap_orig_cpu != -1) {
-		d.rd->max_cpu_capacity.cpu = d.rd->max_cap_orig_cpu;
+	if (d.rd->wrd.max_cap_orig_cpu != -1) {
+		d.rd->max_cpu_capacity.cpu = d.rd->wrd.max_cap_orig_cpu;
 		d.rd->max_cpu_capacity.val = arch_scale_cpu_capacity(
-						d.rd->max_cap_orig_cpu);
+						d.rd->wrd.max_cap_orig_cpu);
 	}
 #endif

@@ -61,8 +61,6 @@ int __weak sched_unisolate_cpu(int cpu) { return 0; }

 int __weak sched_unisolate_cpu_unlocked(int cpu) { return 0; }

-void __weak sched_exit(struct task_struct *p) { }
-
 int __weak register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
 {
 	return 0;
@@ -132,23 +130,23 @@ int __weak sched_busy_hyst_handler(struct ctl_table *table, int write,
 u64 __weak sched_ktime_clock(void) { return 0; }

 unsigned long __weak
-cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
+cpu_util_freq_walt(int cpu, struct walt_cpu_load *walt_load)
 {
 	return cpu_util(cpu);
 }

-int __weak update_preferred_cluster(struct related_thread_group *grp,
+int __weak update_preferred_cluster(struct walt_related_thread_group *grp,
 		struct task_struct *p, u32 old_load, bool from_tick)
 {
 	return 0;
 }

-void __weak set_preferred_cluster(struct related_thread_group *grp) { }
+void __weak set_preferred_cluster(struct walt_related_thread_group *grp) { }

 void __weak add_new_task_to_grp(struct task_struct *new) { }

 int __weak
-preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+preferred_cluster(struct walt_sched_cluster *cluster, struct task_struct *p)
 {
 	return -1;
 }
@@ -212,6 +210,8 @@ void __weak walt_sched_init_rq(struct rq *rq) { }

 void __weak walt_update_cluster_topology(void) { }

+void __weak walt_task_dead(struct task_struct *p) { }
+
 #if defined(CONFIG_UCLAMP_TASK_GROUP)
 void __weak walt_init_sched_boost(struct task_group *tg) { }
 #endif
@@ -35,8 +35,8 @@ fixup_cumulative_runnable_avg(struct walt_sched_stats *stats,
 static inline void
 walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
-	fixup_cumulative_runnable_avg(&rq->walt_stats, p->ravg.demand_scaled,
-				      p->ravg.pred_demand_scaled);
+	fixup_cumulative_runnable_avg(&rq->wrq.walt_stats, p->wts.demand_scaled,
+				      p->wts.pred_demand_scaled);

 	/*
 	 * Add a task's contribution to the cumulative window demand when
@@ -45,16 +45,16 @@ walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	 * prio/cgroup/class change.
 	 * (2) task is waking for the first time in this window.
 	 */
-	if (p->on_rq || (p->last_sleep_ts < rq->window_start))
-		walt_fixup_cum_window_demand(rq, p->ravg.demand_scaled);
+	if (p->on_rq || (p->wts.last_sleep_ts < rq->wrq.window_start))
+		walt_fixup_cum_window_demand(rq, p->wts.demand_scaled);
 }

 static inline void
 walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 {
-	fixup_cumulative_runnable_avg(&rq->walt_stats,
-				      -(s64)p->ravg.demand_scaled,
-				      -(s64)p->ravg.pred_demand_scaled);
+	fixup_cumulative_runnable_avg(&rq->wrq.walt_stats,
+				      -(s64)p->wts.demand_scaled,
+				      -(s64)p->wts.pred_demand_scaled);

 	/*
 	 * on_rq will be 1 for sleeping tasks. So check if the task
@@ -62,31 +62,31 @@ walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	 * prio/cgroup/class.
 	 */
 	if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
-		walt_fixup_cum_window_demand(rq, -(s64)p->ravg.demand_scaled);
+		walt_fixup_cum_window_demand(rq, -(s64)p->wts.demand_scaled);
 }

 static inline void walt_adjust_nr_big_tasks(struct rq *rq, int delta, bool inc)
 {
 	sched_update_nr_prod(cpu_of(rq), 0, true);
-	rq->walt_stats.nr_big_tasks += inc ? delta : -delta;
+	rq->wrq.walt_stats.nr_big_tasks += inc ? delta : -delta;

-	BUG_ON(rq->walt_stats.nr_big_tasks < 0);
+	BUG_ON(rq->wrq.walt_stats.nr_big_tasks < 0);
 }

 static inline void inc_rq_walt_stats(struct rq *rq, struct task_struct *p)
 {
-	if (p->misfit)
-		rq->walt_stats.nr_big_tasks++;
+	if (p->wts.misfit)
+		rq->wrq.walt_stats.nr_big_tasks++;

 	walt_inc_cumulative_runnable_avg(rq, p);
 }

 static inline void dec_rq_walt_stats(struct rq *rq, struct task_struct *p)
 {
-	if (p->misfit)
-		rq->walt_stats.nr_big_tasks--;
+	if (p->wts.misfit)
-		rq->wrq.walt_stats.nr_big_tasks--;
+		rq->wrq.walt_stats.nr_big_tasks--;

-	BUG_ON(rq->walt_stats.nr_big_tasks < 0);
+	BUG_ON(rq->wrq.walt_stats.nr_big_tasks < 0);

 	walt_dec_cumulative_runnable_avg(rq, p);
 }
@@ -107,28 +107,23 @@ static inline u64 sched_irqload(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	s64 delta;

-	delta = get_jiffies_64() - rq->irqload_ts;
+	delta = get_jiffies_64() - rq->wrq.irqload_ts;
 	/*
-	 * Current context can be preempted by irq and rq->irqload_ts can be
+	 * Current context can be preempted by irq and rq->wrq.irqload_ts can be
 	 * updated by irq context so that delta can be negative.
 	 * But this is okay and we can safely return as this means there
 	 * was recent irq occurrence.
 	 */

 	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
-		return rq->avg_irqload;
+		return rq->wrq.avg_irqload;
 	else
 		return 0;
 }

 static inline int sched_cpu_high_irqload(int cpu)
 {
-	return cpu_rq(cpu)->high_irqload;
-}
-
-static inline int exiting_task(struct task_struct *p)
-{
-	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+	return cpu_rq(cpu)->wrq.high_irqload;
 }

 static inline u64
@@ -152,14 +147,14 @@ extern void sched_account_irqtime(int cpu, struct task_struct *curr,

 static inline int same_cluster(int src_cpu, int dst_cpu)
 {
-	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+	return cpu_rq(src_cpu)->wrq.cluster == cpu_rq(dst_cpu)->wrq.cluster;
 }

 void walt_sched_init_rq(struct rq *rq);

 static inline void walt_update_last_enqueue(struct task_struct *p)
 {
-	p->last_enqueued_ts = sched_ktime_clock();
+	p->wts.last_enqueued_ts = sched_ktime_clock();
 }

 static inline bool is_suh_max(void)
@@ -170,10 +165,10 @@ static inline bool is_suh_max(void)
 #define DEFAULT_CGROUP_COLOC_ID 1
 static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
 {
-	struct related_thread_group *rtg = p->grp;
+	struct walt_related_thread_group *rtg = p->wts.grp;

 	if (is_suh_max() && rtg && rtg->id == DEFAULT_CGROUP_COLOC_ID &&
-			rtg->skip_min && p->unfilter)
+			rtg->skip_min && p->wts.unfilter)
 		return is_min_capacity_cpu(cpu);

 	return false;
@@ -189,7 +184,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
 	struct rq_flags rf;
 	u64 wallclock;
 	unsigned int old_load;
-	struct related_thread_group *grp = NULL;
+	struct walt_related_thread_group *grp = NULL;

 	rq_lock_irqsave(rq, &rf);
 	old_load = task_load(p);