sched/idle: Remove stale old file
Commit cf37b6b484 ("sched/idle: Move cpu/idle.c to sched/idle.c") said
to simply move a file; somehow it got mangled and created an old
version of the file and forgot to remove the old file.
Fix this fail; add the lost change and remove the now identical old
file.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: rjw@rjwysocki.net
Cc: nicolas.pitre@linaro.org
Cc: preeti@linux.vnet.ibm.com
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: http://lkml.kernel.org/r/20140224172207.GC9987@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent f5f9739d7a
commit 06d50c65b1
--- a/kernel/cpu/idle.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Generic entry point for the idle threads
- */
-#include <linux/sched.h>
-#include <linux/cpu.h>
-#include <linux/cpuidle.h>
-#include <linux/tick.h>
-#include <linux/mm.h>
-#include <linux/stackprotector.h>
-
-#include <asm/tlb.h>
-
-#include <trace/events/power.h>
-
-static int __read_mostly cpu_idle_force_poll;
-
-void cpu_idle_poll_ctrl(bool enable)
-{
-	if (enable) {
-		cpu_idle_force_poll++;
-	} else {
-		cpu_idle_force_poll--;
-		WARN_ON_ONCE(cpu_idle_force_poll < 0);
-	}
-}
-
-#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
-static int __init cpu_idle_poll_setup(char *__unused)
-{
-	cpu_idle_force_poll = 1;
-	return 1;
-}
-__setup("nohlt", cpu_idle_poll_setup);
-
-static int __init cpu_idle_nopoll_setup(char *__unused)
-{
-	cpu_idle_force_poll = 0;
-	return 1;
-}
-__setup("hlt", cpu_idle_nopoll_setup);
-#endif
-
-static inline int cpu_idle_poll(void)
-{
-	rcu_idle_enter();
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!tif_need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-	rcu_idle_exit();
-	return 1;
-}
-
-/* Weak implementations for optional arch specific functions */
-void __weak arch_cpu_idle_prepare(void) { }
-void __weak arch_cpu_idle_enter(void) { }
-void __weak arch_cpu_idle_exit(void) { }
-void __weak arch_cpu_idle_dead(void) { }
-void __weak arch_cpu_idle(void)
-{
-	cpu_idle_force_poll = 1;
-	local_irq_enable();
-}
-
-/*
- * Generic idle loop implementation
- */
-static void cpu_idle_loop(void)
-{
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				arch_cpu_idle_dead();
-
-			local_irq_disable();
-			arch_cpu_idle_enter();
-
-			/*
-			 * In poll mode we reenable interrupts and spin.
-			 *
-			 * Also if we detected in the wakeup from idle
-			 * path that the tick broadcast device expired
-			 * for us, we don't want to go deep idle as we
-			 * know that the IPI is going to arrive right
-			 * away
-			 */
-			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
-				cpu_idle_poll();
-			} else {
-				if (!current_clr_polling_and_test()) {
-					stop_critical_timings();
-					rcu_idle_enter();
-					if (cpuidle_idle_call())
-						arch_cpu_idle();
-					if (WARN_ON_ONCE(irqs_disabled()))
-						local_irq_enable();
-					rcu_idle_exit();
-					start_critical_timings();
-				} else {
-					local_irq_enable();
-				}
-				__current_set_polling();
-			}
-			arch_cpu_idle_exit();
-		}
-
-		/*
-		 * Since we fell out of the loop above, we know
-		 * TIF_NEED_RESCHED must be set, propagate it into
-		 * PREEMPT_NEED_RESCHED.
-		 *
-		 * This is required because for polling idle loops we will
-		 * not have had an IPI to fold the state for us.
-		 */
-		preempt_set_need_resched();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
-}
-
-void cpu_startup_entry(enum cpuhp_state state)
-{
-	/*
-	 * This #ifdef needs to die, but it's too late in the cycle to
-	 * make this generic (arm and sh have never invoked the canary
-	 * init for the non boot cpus!). Will be fixed in 3.11
-	 */
-#ifdef CONFIG_X86
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us. The boot CPU already has it initialized but no harm
-	 * in doing it again. This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-#endif
-	__current_set_polling();
-	arch_cpu_idle_prepare();
-	cpu_idle_loop();
-}
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -108,14 +108,17 @@ static void cpu_idle_loop(void)
 				__current_set_polling();
 			}
 			arch_cpu_idle_exit();
-			/*
-			 * We need to test and propagate the TIF_NEED_RESCHED
-			 * bit here because we might not have send the
-			 * reschedule IPI to idle tasks.
-			 */
-			if (tif_need_resched())
-				set_preempt_need_resched();
 		}
+
+		/*
+		 * Since we fell out of the loop above, we know
+		 * TIF_NEED_RESCHED must be set, propagate it into
+		 * PREEMPT_NEED_RESCHED.
+		 *
+		 * This is required because for polling idle loops we will
+		 * not have had an IPI to fold the state for us.
+		 */
+		preempt_set_need_resched();
 		tick_nohz_idle_exit();
 		schedule_preempt_disabled();
 	}
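For reference, the operation cf37b6b484 intended amounts to a plain git rename; a minimal sketch of that workflow (an assumption about how the move would normally be done, not the literal commands used, and omitting the build-system updates a real move of this file likely also needs):

	git mv kernel/cpu/idle.c kernel/sched/idle.c	# rename, keeping exactly one copy
	git commit -s					# one commit, no stale duplicate left behind

Done this way, the rename and the removal land atomically, so no "now identical old file" can survive the move.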