ANDROID: cgroup: Add android_rvh_cgroup_force_kthread_migration

In Android GKI, CONFIG_FAIR_GROUP_SCHED is enabled [1] to help
prioritize important work. Since the CPU shares of the root cgroup
cannot be changed, tasks left in the root cgroup get a higher share
than tasks placed in the important cgroups. This is mitigated by
moving all tasks out of the root cgroup into other cgroups once
Android has booted. However, many kernel tasks remain stuck in the
root cgroup after boot.

It is possible to relax the migration restrictions on kernel threads
and kworkers in certain scenarios, but the patch [2] posted upstream
was not accepted. Hence, add a restricted vendor hook that notifies
modules when a cgroup migration is requested for a kernel thread.
Modules can then relax the restrictions enforced by the kernel and
allow the migration.

[1] f08f049de1
[2] https://lore.kernel.org/lkml/1617714261-18111-1-git-send-email-pkondeti@codeaurora.org
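
Illustration only (not part of this change): a minimal sketch of how a
vendor module could attach to the new restricted hook and opt selected
kernel threads into cgroup migration. The probe name and the
PF_WQ_WORKER policy below are made-up examples; the
register_trace_android_rvh_cgroup_force_kthread_migration() helper is
the registration function generated for the restricted hook.

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cgroup.h>
#include <trace/hooks/cgroup.h>

/* Hypothetical policy: only workqueue workers may be migrated. */
static void relax_kthread_migration(void *unused, struct task_struct *tsk,
				    struct cgroup *dst_cgrp,
				    bool *force_migration)
{
	if (tsk->flags & PF_WQ_WORKER)
		*force_migration = true;
}

static int __init vendor_cgroup_init(void)
{
	/* Restricted hooks can be registered but never unregistered. */
	return register_trace_android_rvh_cgroup_force_kthread_migration(
			relax_kthread_migration, NULL);
}
module_init(vendor_cgroup_init);

MODULE_LICENSE("GPL");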

Bug: 184594949
Change-Id: I445a170ba797c8bece3b4b59b7a42cdd85438f1f
Signed-off-by: Pavankumar Kondeti <quic_pkondeti@quicinc.com>
[quic_dickey@quicinc.com: port to android-mainline kernel]
Signed-off-by: Stephen Dickey <quic_dickey@quicinc.com>
---
 5 files changed, 16 insertions(+), 5 deletions(-)

@@ -136,6 +136,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);

@@ -23,6 +23,10 @@ DECLARE_HOOK(android_vh_cgroup_attach,
 	TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
 	TP_ARGS(ss, tset));
+DECLARE_RESTRICTED_HOOK(android_rvh_cgroup_force_kthread_migration,
+	TP_PROTO(struct task_struct *tsk, struct cgroup *dst_cgrp, bool *force_migration),
+	TP_ARGS(tsk, dst_cgrp, force_migration), 1);
 DECLARE_RESTRICTED_HOOK(android_rvh_cpuset_fork,
 	TP_PROTO(struct task_struct *p, bool *inherit_cpus),
 	TP_ARGS(p, inherit_cpus), 1);

@@ -251,7 +251,8 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 void cgroup_attach_lock(bool lock_threadgroup);
 void cgroup_attach_unlock(bool lock_threadgroup);
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *locked)
+					     bool *locked,
+					     struct cgroup *dst_cgrp)
 	__acquires(&cgroup_threadgroup_rwsem);
 void cgroup_procs_write_finish(struct task_struct *task, bool locked)
 	__releases(&cgroup_threadgroup_rwsem);

@@ -501,7 +501,7 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
 	if (!cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, threadgroup, &locked);
+	task = cgroup_procs_write_start(buf, threadgroup, &locked, cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;

@@ -2920,10 +2920,12 @@ int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
 }
 
 struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
-					     bool *threadgroup_locked)
+					     bool *threadgroup_locked,
+					     struct cgroup *dst_cgrp)
 {
 	struct task_struct *tsk;
 	pid_t pid;
+	bool force_migration = false;
 
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return ERR_PTR(-EINVAL);
@@ -2954,13 +2956,16 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
 	if (threadgroup)
 		tsk = tsk->group_leader;
 
+	if (tsk->flags & PF_KTHREAD)
+		trace_android_rvh_cgroup_force_kthread_migration(tsk, dst_cgrp, &force_migration);
+
 	/*
 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
 	 * If userland migrates such a kthread to a non-root cgroup, it can
 	 * become trapped in a cpuset, or RT kthread may be born in a
 	 * cgroup with no rt_runtime allocated. Just say no.
 	 */
-	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
+	if (!force_migration && (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY))) {
 		tsk = ERR_PTR(-EINVAL);
 		goto out_unlock_threadgroup;
 	}
@@ -5147,7 +5152,7 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
 	if (!dst_cgrp)
 		return -ENODEV;
 
-	task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked);
+	task = cgroup_procs_write_start(buf, threadgroup, &threadgroup_locked, dst_cgrp);
 	ret = PTR_ERR_OR_ZERO(task);
 	if (ret)
 		goto out_unlock;