sched/numa: Drop sysctl_numa_balancing_settle_count sysctl
Commit 887c290e
("sched/numa: Decide whether to favour task or group weights
based on swap candidate relationships") dropped the check against
sysctl_numa_balancing_settle_count; this patch removes the sysctl.
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Link: http://lkml.kernel.org/r/1386833006-6600-1-git-send-email-liwanp@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
ffe732c243
commit
1bd53a7efd
@ -428,11 +428,6 @@ rate for each task.
|
||||
numa_balancing_scan_size_mb is how many megabytes worth of pages are
|
||||
scanned for a given scan.
|
||||
|
||||
numa_balancing_settle_count is how many scan periods must complete before
|
||||
the schedule balancer stops pushing the task towards a preferred node. This
|
||||
gives the scheduler a chance to place the task on an alternative node if the
|
||||
preferred node is overloaded.
|
||||
|
||||
numa_balancing_migrate_deferred is how many page migrations get skipped
|
||||
unconditionally, after a page migration is skipped because a page is shared
|
||||
with other tasks. This reduces page migration overhead, and determines
|
||||
|
@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay;
|
||||
extern unsigned int sysctl_numa_balancing_scan_period_min;
|
||||
extern unsigned int sysctl_numa_balancing_scan_period_max;
|
||||
extern unsigned int sysctl_numa_balancing_scan_size;
|
||||
extern unsigned int sysctl_numa_balancing_settle_count;
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
extern unsigned int sysctl_sched_migration_cost;
|
||||
|
@ -872,15 +872,6 @@ static unsigned int task_scan_max(struct task_struct *p)
|
||||
return max(smin, smax);
|
||||
}
|
||||
|
||||
/*
|
||||
* Once a preferred node is selected the scheduler balancer will prefer moving
|
||||
* a task to that node for sysctl_numa_balancing_settle_count number of PTE
|
||||
* scans. This will give the process the chance to accumulate more faults on
|
||||
* the preferred node but still allow the scheduler to move the task again if
|
||||
* the nodes CPUs are overloaded.
|
||||
*/
|
||||
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
|
||||
|
||||
static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
rq->nr_numa_running += (p->numa_preferred_nid != -1);
|
||||
|
@ -384,13 +384,6 @@ static struct ctl_table kern_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "numa_balancing_settle_count",
|
||||
.data = &sysctl_numa_balancing_settle_count,
|
||||
.maxlen = sizeof(unsigned int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec,
|
||||
},
|
||||
{
|
||||
.procname = "numa_balancing_migrate_deferred",
|
||||
.data = &sysctl_numa_balancing_migrate_deferred,
|
||||
|
Loading…
Reference in New Issue
Block a user