[PATCH] sched: filter affine wakeups
From: Nick Piggin <nickpiggin@yahoo.com.au> Track the last waker CPU, and only consider wakeup-balancing if there's a match between current waker CPU and the previous waker CPU. This ensures that there is some correlation between two subsequent wakeup events before we move the task. Should help random-wakeup workloads on large SMP systems, by reducing the migration attempts by a factor of nr_cpus. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
198e2f1811
commit
d7102e95b7
@ -696,8 +696,11 @@ struct task_struct {
|
|||||||
|
|
||||||
int lock_depth; /* BKL lock depth */
|
int lock_depth; /* BKL lock depth */
|
||||||
|
|
||||||
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
|
#if defined(CONFIG_SMP)
|
||||||
|
int last_waker_cpu; /* CPU that last woke this task up */
|
||||||
|
#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
|
||||||
int oncpu;
|
int oncpu;
|
||||||
|
#endif
|
||||||
#endif
|
#endif
|
||||||
int prio, static_prio;
|
int prio, static_prio;
|
||||||
struct list_head run_list;
|
struct list_head run_list;
|
||||||
|
@ -1290,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (p->last_waker_cpu != this_cpu)
|
||||||
|
goto out_set_cpu;
|
||||||
|
|
||||||
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
|
if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
|
||||||
goto out_set_cpu;
|
goto out_set_cpu;
|
||||||
|
|
||||||
@ -1360,6 +1363,8 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
|
|||||||
cpu = task_cpu(p);
|
cpu = task_cpu(p);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
p->last_waker_cpu = this_cpu;
|
||||||
|
|
||||||
out_activate:
|
out_activate:
|
||||||
#endif /* CONFIG_SMP */
|
#endif /* CONFIG_SMP */
|
||||||
if (old_state == TASK_UNINTERRUPTIBLE) {
|
if (old_state == TASK_UNINTERRUPTIBLE) {
|
||||||
@ -1441,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
|
|||||||
#ifdef CONFIG_SCHEDSTATS
|
#ifdef CONFIG_SCHEDSTATS
|
||||||
memset(&p->sched_info, 0, sizeof(p->sched_info));
|
memset(&p->sched_info, 0, sizeof(p->sched_info));
|
||||||
#endif
|
#endif
|
||||||
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
|
#if defined(CONFIG_SMP)
|
||||||
|
p->last_waker_cpu = cpu;
|
||||||
|
#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
|
||||||
p->oncpu = 0;
|
p->oncpu = 0;
|
||||||
#endif
|
#endif
|
||||||
|
#endif
|
||||||
#ifdef CONFIG_PREEMPT
|
#ifdef CONFIG_PREEMPT
|
||||||
/* Want to start with kernel preemption disabled. */
|
/* Want to start with kernel preemption disabled. */
|
||||||
task_thread_info(p)->preempt_count = 1;
|
task_thread_info(p)->preempt_count = 1;
|
||||||
|
Loading…
Reference in New Issue
Block a user