sched: Add snapshot of task boost feature

This snapshot is taken from msm-4.19 as of commit 5debecbe7195
("trace: filter out spurious preemption and IRQs disable traces").

Change-Id: I3c9663da1fd89e9e942831fda00a47b4a29ea4e3
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>

fs/proc/base.c

@@ -2874,6 +2874,121 @@ static int proc_tgid_io_accounting(struct seq_file *m, struct pid_namespace *ns,
}
#endif /* CONFIG_TASK_IO_ACCOUNTING */
#ifdef CONFIG_SCHED_WALT
static ssize_t proc_sched_task_boost_read(struct file *file,
char __user *buf, size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
int sched_boost;
size_t len;
if (!task)
return -ESRCH;
sched_boost = task->boost;
put_task_struct(task);
len = scnprintf(buffer, sizeof(buffer), "%d\n", sched_boost);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
}
static ssize_t proc_sched_task_boost_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
int sched_boost;
int err;
if (!task)
return -ESRCH;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count)) {
err = -EFAULT;
goto out;
}
err = kstrtoint(strstrip(buffer), 0, &sched_boost);
if (err)
goto out;
if (sched_boost < TASK_BOOST_NONE || sched_boost >= TASK_BOOST_END) {
err = -EINVAL;
goto out;
}
task->boost = sched_boost;
if (sched_boost == 0)
task->boost_period = 0;
out:
put_task_struct(task);
return err < 0 ? err : count;
}
static ssize_t proc_sched_task_boost_period_read(struct file *file,
char __user *buf, size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
u64 sched_boost_period_ms = 0;
size_t len;
if (!task)
return -ESRCH;
sched_boost_period_ms = div64_ul(task->boost_period, 1000000UL);
put_task_struct(task);
len = snprintf(buffer, sizeof(buffer), "%llu\n", sched_boost_period_ms);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
}
static ssize_t proc_sched_task_boost_period_write(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
unsigned int sched_boost_period;
int err;
if (!task)
return -ESRCH;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count)) {
err = -EFAULT;
goto out;
}
err = kstrtouint(strstrip(buffer), 0, &sched_boost_period);
if (err)
goto out;
if (task->boost == 0 && sched_boost_period) {
/* setting boost period without boost is invalid */
err = -EINVAL;
goto out;
}
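/* Userspace supplies ms; store ns so it compares against sched_clock(). */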
task->boost_period = (u64)sched_boost_period * 1000 * 1000;
task->boost_expires = sched_clock() + task->boost_period;
out:
put_task_struct(task);
return err < 0 ? err : count;
}
static const struct file_operations proc_task_boost_enabled_operations = {
.read = proc_sched_task_boost_read,
.write = proc_sched_task_boost_write,
.llseek = generic_file_llseek,
};
static const struct file_operations proc_task_boost_period_operations = {
.read = proc_sched_task_boost_period_read,
.write = proc_sched_task_boost_period_write,
.llseek = generic_file_llseek,
};
#endif /* CONFIG_SCHED_WALT */
#ifdef CONFIG_USER_NS
static int proc_id_map_open(struct inode *inode, struct file *file,
const struct seq_operations *seq_ops)
@@ -3067,6 +3182,8 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("sched_init_task_load", 00644,
proc_pid_sched_init_task_load_operations),
REG("sched_group_id", 00666, proc_pid_sched_group_id_operations),
REG("sched_boost", 0666, proc_task_boost_enabled_operations),
REG("sched_boost_period_ms", 0666, proc_task_boost_period_operations),
#endif
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),

include/linux/sched.h

@@ -129,6 +129,14 @@ enum fps {
FPS120 = 120,
};
enum task_boost_type {
TASK_BOOST_NONE = 0,
TASK_BOOST_ON_MID,
TASK_BOOST_ON_MAX,
TASK_BOOST_STRICT_MAX,
TASK_BOOST_END,
};
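/*
 * As wired up in the kernel/sched/fair.c hunks below:
 * TASK_BOOST_ON_MID starts task placement from the mid-capacity
 * cluster, TASK_BOOST_ON_MAX from the max-capacity cluster, and
 * TASK_BOOST_STRICT_MAX additionally keeps a related-thread-group
 * task from being pulled to a lower-capacity CPU.
 */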
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
@@ -540,6 +548,7 @@ extern void __weak
sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax);
extern void __weak free_task_load_ptrs(struct task_struct *p);
extern void __weak sched_set_refresh_rate(enum fps fps);
extern int set_task_boost(int boost, u64 period);
#define RAVG_HIST_SIZE_MAX 5
#define NUM_BUSY_BUCKETS 10
@@ -607,6 +616,8 @@ static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
u32 fmin, u32 fmax) { }
static inline void sched_set_refresh_rate(enum fps fps) { }
static inline int set_task_boost(int boost, u64 period) { return 0; }
#endif /* CONFIG_SCHED_WALT */
struct sched_rt_entity {
@@ -806,7 +817,11 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_SCHED_WALT
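/* Per-task boost state: level, duration in ns, sched_clock() expiry. */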
int boost;
u64 boost_period;
u64 boost_expires;
u64 last_sleep_ts;
bool wake_up_idle;
struct ravg ravg;

include/trace/events/sched.h

@@ -1017,6 +1017,7 @@ TRACE_EVENT(sched_task_util,
__field(int, start_cpu)
__field(int, unfilter)
__field(unsigned long, cpus_allowed)
__field(int, task_boost)
),
TP_fast_assign(
@@ -1042,15 +1043,20 @@
#endif
__entry->cpus_allowed =
cpumask_bits(&p->cpus_mask)[0];
#ifdef CONFIG_SCHED_WALT
__entry->task_boost = per_task_boost(p);
#else
__entry->task_boost = 0;
#endif
),
TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d affinity=%lx",
TP_printk("pid=%d comm=%s util=%lu prev_cpu=%d candidates=%#lx best_energy_cpu=%d sync=%d need_idle=%d fastpath=%d placement_boost=%d latency=%llu stune_boosted=%d is_rtg=%d rtg_skip_min=%d start_cpu=%d unfilter=%d affinity=%lx task_boost=%d",
__entry->pid, __entry->comm, __entry->util, __entry->prev_cpu,
__entry->candidates, __entry->best_energy_cpu, __entry->sync,
__entry->need_idle, __entry->fastpath, __entry->placement_boost,
__entry->latency, __entry->stune_boosted,
__entry->is_rtg, __entry->rtg_skip_min, __entry->start_cpu,
-__entry->unfilter, __entry->cpus_allowed)
+__entry->unfilter, __entry->cpus_allowed, __entry->task_boost)
)
/*

kernel/sched/core.c

@@ -2751,6 +2751,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
#ifdef CONFIG_SCHED_WALT
p->last_sleep_ts = 0;
p->wake_up_idle = false;
p->boost = 0;
p->boost_expires = 0;
p->boost_period = 0;
#endif
INIT_LIST_HEAD(&p->se.group_node);
@@ -8392,6 +8395,26 @@ void dequeue_task_core(struct rq *rq, struct task_struct *p, int flags)
}
#ifdef CONFIG_SCHED_WALT
/*
 * @boost: boost level, TASK_BOOST_NONE (0) through
 * TASK_BOOST_STRICT_MAX (3).
 * @period: boost duration in milliseconds.
 */
int set_task_boost(int boost, u64 period)
{
if (boost < TASK_BOOST_NONE || boost >= TASK_BOOST_END)
return -EINVAL;
if (boost) {
current->boost = boost;
current->boost_period = (u64)period * 1000 * 1000;
current->boost_expires = sched_clock() + current->boost_period;
} else {
current->boost = 0;
current->boost_expires = 0;
current->boost_period = 0;
}
return 0;
}
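/*
 * A minimal in-kernel usage sketch, not part of this patch.
 * set_task_boost() always acts on current, so a caller boosts the
 * task it runs in; example_boost_current() is a hypothetical caller
 * that pins itself to the max-capacity cluster for 50 ms.
 */
static void example_boost_current(void)
{
	if (set_task_boost(TASK_BOOST_ON_MAX, 50))
		pr_warn("invalid task boost level\n");
}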
void sched_account_irqtime(int cpu, struct task_struct *curr,
u64 delta, u64 wallclock)
{

kernel/sched/fair.c

@@ -3848,7 +3848,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
{
unsigned long capacity = capacity_orig_of(cpu);
unsigned long max_capacity = cpu_rq(cpu)->rd->max_cpu_capacity.val;
-unsigned long task_boost = 0;
+unsigned long task_boost = per_task_boost(p);
if (capacity == max_capacity)
return true;
@@ -3858,7 +3858,7 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
task_boost > 0)
return false;
} else { /* mid cap cpu */
-if (task_boost > 1)
+if (task_boost > TASK_BOOST_ON_MID)
return false;
}
@@ -3883,6 +3883,7 @@ struct find_best_target_env {
bool boosted;
int fastpath;
int start_cpu;
bool strict_max;
};
static inline void adjust_cpus_for_packing(struct task_struct *p,
@@ -6341,8 +6342,9 @@ static int get_start_cpu(struct task_struct *p)
#ifdef CONFIG_SCHED_WALT
struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
int start_cpu = rd->min_cap_orig_cpu;
-int task_boost = 0;
-bool boosted = task_boost_policy(p) == SCHED_BOOST_ON_BIG;
+int task_boost = per_task_boost(p);
+bool boosted = task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
+task_boost == TASK_BOOST_ON_MID;
bool task_skip_min = task_skip_min_cpu(p);
/*
@@ -6350,12 +6352,12 @@ static int get_start_cpu(struct task_struct *p)
* or just mid will be -1; there will never be any other combination
* of -1s beyond these
*/
-if (task_skip_min || boosted || task_boost == 1) {
+if (task_skip_min || boosted) {
start_cpu = rd->mid_cap_orig_cpu == -1 ?
rd->max_cap_orig_cpu : rd->mid_cap_orig_cpu;
}
-if (task_boost == 2) {
+if (task_boost > TASK_BOOST_ON_MID) {
start_cpu = rd->max_cap_orig_cpu;
return start_cpu;
}
@@ -6420,6 +6422,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
if (boosted)
target_capacity = 0;
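/*
 * Presumably this seeds the spare-capacity search: lowering the
 * running maximum to LONG_MIN lets the first candidate CPU be
 * recorded as most_spare_cap_cpu even when it has no spare capacity,
 * so a strict-max task always has a fallback in the max cluster.
 */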
if (fbt_env->strict_max)
most_spare_wake_cap = LONG_MIN;
/* Find start CPU based on boost value */
start_cpu = fbt_env->start_cpu;
/* Find SD for the start CPU */
@@ -6482,6 +6487,9 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
most_spare_cap_cpu = i;
}
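/* Don't stack on a CPU whose running task is strict-max boosted. */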
if (per_task_boost(cpu_rq(i)->curr) ==
TASK_BOOST_STRICT_MAX)
continue;
/*
* Cumulative demand may already be accounting for the
* task. If so, add just the boost-utilization to
@@ -6627,7 +6635,8 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
* unless the task can't be accommodated in the higher
* capacity CPUs.
*/
-if (boosted && (best_idle_cpu != -1 || target_cpu != -1)) {
+if (boosted && (best_idle_cpu != -1 || target_cpu != -1 ||
+(fbt_env->strict_max && most_spare_cap_cpu != -1))) {
if (boosted) {
if (!next_group_higher_cap)
break;
@@ -6949,7 +6958,8 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
int placement_boost = task_boost_policy(p);
u64 start_t = 0;
int delta = 0;
-bool boosted = uclamp_boosted(p);
+int task_boost = per_task_boost(p);
+bool boosted = uclamp_boosted(p) || (task_boost > 0);
int start_cpu = get_start_cpu(p);
if (start_cpu < 0)
@@ -7000,6 +7010,8 @@ int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
fbt_env.need_idle = need_idle;
fbt_env.start_cpu = start_cpu;
fbt_env.boosted = boosted;
fbt_env.strict_max = is_rtg &&
(task_boost == TASK_BOOST_STRICT_MAX);
find_best_target(NULL, candidates, p, &fbt_env);
@@ -7962,6 +7974,16 @@ static inline int migrate_degrades_locality(struct task_struct *p,
}
#endif
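/*
 * Used by can_migrate_task() and the active-balance check in
 * load_balance() below: a TASK_BOOST_STRICT_MAX task in a related
 * thread group must not move to a CPU of lower original capacity.
 */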
static inline bool can_migrate_boosted_task(struct task_struct *p,
int src_cpu, int dst_cpu)
{
if (per_task_boost(p) == TASK_BOOST_STRICT_MAX &&
task_in_related_thread_group(p) &&
(capacity_orig_of(dst_cpu) < capacity_orig_of(src_cpu)))
return false;
return true;
}
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
@@ -7982,6 +8004,12 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
return 0;
/*
 * Don't allow pulling a boosted task to smaller cores.
 */
if (!can_migrate_boosted_task(p, env->src_cpu, env->dst_cpu))
return 0;
if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
int cpu;
@@ -10041,7 +10069,10 @@ static int load_balance(int this_cpu, struct rq *this_rq,
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
-if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
+if (!cpumask_test_cpu(this_cpu,
+busiest->curr->cpus_ptr) ||
+!can_migrate_boosted_task(busiest->curr,
+cpu_of(busiest), this_cpu)) {
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
env.flags |= LBF_ALL_PINNED;

kernel/sched/sched.h

@@ -2205,6 +2205,25 @@ static inline unsigned long capacity_orig_of(int cpu)
return cpu_rq(cpu)->cpu_capacity_orig;
}
#ifdef CONFIG_SCHED_WALT
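/*
 * Returns the task's boost level, lazily clearing an expired boost:
 * once sched_clock() passes boost_expires the boost drops back to
 * TASK_BOOST_NONE. A boost set with a zero period never expires here.
 */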
static inline int per_task_boost(struct task_struct *p)
{
if (p->boost_period) {
if (sched_clock() > p->boost_expires) {
p->boost_period = 0;
p->boost_expires = 0;
p->boost = 0;
}
}
return p->boost;
}
#else
static inline int per_task_boost(struct task_struct *p)
{
return 0;
}
#endif
static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT