commit e9630af190
Author: Android Build Coastguard Worker
Date:   2022-11-18 11:00:42 +00:00

    Snap for 9307365 from 6cc9edd259 to android-mainline-keystone-qcom-release

    Change-Id: Ie601bf6c1aec11f5f27046861913dd069898bb06

3 changed files with 48 additions and 15 deletions

View File

@@ -193,6 +193,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
 	TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
 	TP_ARGS(p, rq, need_update), 1);

+DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
+	TP_PROTO(struct task_struct *p, cpumask_var_t cpus_requested,
+		 const struct cpumask *new_mask, int *ret),
+	TP_ARGS(p, cpus_requested, new_mask, ret), 1);
+
 DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
 	TP_PROTO(struct task_struct *p),
 	TP_ARGS(p), 1);
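For context, restricted vendor hooks declared this way are attached like tracepoints from a vendor module. Below is a minimal sketch of such a handler, assuming the register_trace_<name> helper that DECLARE_RESTRICTED_HOOK generates; the handler name and its decline-by-default policy are illustrative, not part of this change.

/* Hypothetical vendor-module handler for the new hook (sketch only). */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <trace/hooks/sched.h>

static void vendor_update_cpus_allowed(void *unused, struct task_struct *p,
				       cpumask_var_t cpus_requested,
				       const struct cpumask *new_mask,
				       int *ret)
{
	/*
	 * A handler that fully services the affinity change would apply
	 * its own policy and set *ret = 0; leaving *ret nonzero makes the
	 * kernel fall back to set_cpus_allowed_ptr() (see
	 * update_cpus_allowed() in the cpuset.c hunks below).
	 */
	*ret = -EINVAL;	/* decline: take the default path */
}

static int __init vendor_hook_init(void)
{
	/* Restricted hooks register like tracepoints but cannot be
	 * unregistered, which is why there is no cleanup path here. */
	return register_trace_android_rvh_update_cpus_allowed(
			vendor_update_cpus_allowed, NULL);
}
module_init(vendor_hook_init);
MODULE_LICENSE("GPL");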

View File

@@ -67,6 +67,8 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>

+#include <trace/hooks/sched.h>
+
 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
@@ -137,6 +139,7 @@ struct cpuset {
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;
 	nodemask_t mems_allowed;

 	/* effective CPUs and Memory Nodes allow to tasks */
@@ -565,7 +568,7 @@ static void cpuset_update_task_spread_flag(struct cpuset *cs,
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -602,8 +605,13 @@ static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 		goto free_two;

+	if (cs && !zalloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto free_three;
+
 	return 0;

+free_three:
+	free_cpumask_var(*pmask3);
 free_two:
 	free_cpumask_var(*pmask2);
 free_one:
@@ -620,6 +628,7 @@ static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 {
 	if (cs) {
 		free_cpumask_var(cs->cpus_allowed);
+		free_cpumask_var(cs->cpus_requested);
 		free_cpumask_var(cs->effective_cpus);
 		free_cpumask_var(cs->subparts_cpus);
 	}
@@ -648,6 +657,7 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 	}

 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
 }
@@ -762,7 +772,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -1188,6 +1198,18 @@ void rebuild_sched_domains(void)
 }
 EXPORT_SYMBOL_GPL(rebuild_sched_domains);

+static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
+			       const struct cpumask *new_mask)
+{
+	int ret = -EINVAL;
+
+	trace_android_rvh_update_cpus_allowed(p, cs->cpus_requested, new_mask, &ret);
+	if (!ret)
+		return ret;
+
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
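Note the convention in update_cpus_allowed(): ret starts out as -EINVAL, and only a handler that sets *ret to 0 short-circuits the wrapper; any other value, including the untouched initializer, falls through to the stock set_cpus_allowed_ptr(). Kernels with no handler attached therefore behave exactly as before (see the handler sketch after the hook declaration above).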
@@ -1210,7 +1232,7 @@ static void update_tasks_cpumask(struct cpuset *cs)
 		if (top_cs && (task->flags & PF_KTHREAD) &&
 		    kthread_is_per_cpu(task))
 			continue;
-		set_cpus_allowed_ptr(task, cs->effective_cpus);
+		update_cpus_allowed(cs, task, cs->effective_cpus);
 	}
 	css_task_iter_end(&it);
 }
@@ -1232,10 +1254,10 @@ static void compute_effective_cpumask(struct cpumask *new_cpus,
 	if (parent->nr_subparts_cpus) {
 		cpumask_or(new_cpus, parent->effective_cpus,
 			   parent->subparts_cpus);
-		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
+		cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
 	} else {
-		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
+		cpumask_and(new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
 	}
 }
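With this change a child's effective mask is computed from what the user requested rather than from the hotplug-trimmed cpus_allowed, while still being clipped to the parent's effective CPUs (which are themselves limited to online CPUs).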
@@ -1727,25 +1749,26 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 		return -EACCES;

 	/*
-	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
+	 * An empty cpus_requested is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing.  The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpumask_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_requested);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
-
-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
-			return -EINVAL;
 	}

+	if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
+		return -EINVAL;
+
+	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
+
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;

 #ifdef CONFIG_CPUMASK_OFFSTACK
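The effect of keeping cpus_requested separate from cpus_allowed is easiest to see with concrete masks. Below is a minimal userspace sketch of the same arithmetic, with hypothetical 8-CPU values; plain uint64_t bitmaps stand in for the kernel's cpumask_t.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* User writes "0-7" to cpuset.cpus: the request is kept verbatim. */
	uint64_t cpus_requested = 0xff;

	/* CPUs 6-7 are hotplugged out. */
	uint64_t cpu_active_mask = 0x3f;

	/* Mirrors cpumask_and(trialcs->cpus_allowed,
	 *                     trialcs->cpus_requested, cpu_active_mask). */
	uint64_t cpus_allowed = cpus_requested & cpu_active_mask;
	printf("allowed while 6-7 offline: %#llx\n",
	       (unsigned long long)cpus_allowed);	/* 0x3f */

	/* CPUs 6-7 come back online: recomputing from the preserved
	 * request restores the original mask without any userspace write. */
	cpu_active_mask = 0xff;
	cpus_allowed = cpus_requested & cpu_active_mask;
	printf("allowed after re-online:   %#llx\n",
	       (unsigned long long)cpus_allowed);	/* 0xff */
	return 0;
}

Previously the truncated mask overwrote the only copy of the user's request, so a cpuset shrunk by hotplug never grew back on its own, which appears to be the behavior this series addresses.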
@@ -1800,6 +1823,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 			       parent_cs(cs));
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);

 	/*
 	 * Make sure that subparts_cpus, if not empty, is a subset of
@@ -2517,7 +2541,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
 		 */
-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+		WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));

 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 		cpuset_update_task_spread_flag(cs, task);
@@ -2741,7 +2765,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -3130,6 +3154,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -3246,8 +3271,10 @@ int __init cpuset_init(void)
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));

 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);

View File

@@ -35,6 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_queue);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_migrate_queued_task);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_overutilized);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_setaffinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpus_allowed);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_ignore);
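Exporting the tracepoint symbol here is what makes the restricted hook reachable from a GPL vendor module, as in the registration sketch near the top; without this EXPORT_TRACEPOINT_SYMBOL_GPL entry, a module calling the register helper could not resolve the tracepoint symbol at load time.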