x86: Call idle notifier after irq_enter()
Interrupts notify the idle exit state before calling irq_enter(). But the
notifier code calls rcu_read_lock() and this is not allowed while rcu is
in an extended quiescent state. We need to wait for
irq_enter() -> rcu_idle_exit() to be called before doing so otherwise
this results in a grumpy RCU:

[ 0.099991] WARNING: at include/linux/rcupdate.h:194 __atomic_notifier_call_chain+0xd2/0x110()
[ 0.099991] Hardware name: AMD690VM-FMH
[ 0.099991] Modules linked in:
[ 0.099991] Pid: 0, comm: swapper Not tainted 3.0.0-rc6+ #255
[ 0.099991] Call Trace:
[ 0.099991]  <IRQ>  [<ffffffff81051c8a>] warn_slowpath_common+0x7a/0xb0
[ 0.099991]  [<ffffffff81051cd5>] warn_slowpath_null+0x15/0x20
[ 0.099991]  [<ffffffff817d6fa2>] __atomic_notifier_call_chain+0xd2/0x110
[ 0.099991]  [<ffffffff817d6ff1>] atomic_notifier_call_chain+0x11/0x20
[ 0.099991]  [<ffffffff81001873>] exit_idle+0x43/0x50
[ 0.099991]  [<ffffffff81020439>] smp_apic_timer_interrupt+0x39/0xa0
[ 0.099991]  [<ffffffff817da253>] apic_timer_interrupt+0x13/0x20
[ 0.099991]  <EOI>  [<ffffffff8100ae67>] ? default_idle+0xa7/0x350
[ 0.099991]  [<ffffffff8100ae65>] ? default_idle+0xa5/0x350
[ 0.099991]  [<ffffffff8100b19b>] amd_e400_idle+0x8b/0x110
[ 0.099991]  [<ffffffff810cb01f>] ? rcu_enter_nohz+0x8f/0x160
[ 0.099991]  [<ffffffff810019a0>] cpu_idle+0xb0/0x110
[ 0.099991]  [<ffffffff817a7505>] rest_init+0xe5/0x140
[ 0.099991]  [<ffffffff817a7468>] ? rest_init+0x48/0x140
[ 0.099991]  [<ffffffff81cc5ca3>] start_kernel+0x3d1/0x3dc
[ 0.099991]  [<ffffffff81cc5321>] x86_64_start_reservations+0x131/0x135
[ 0.099991]  [<ffffffff81cc5412>] x86_64_start_kernel+0xed/0xf4

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Andy Henroid <andrew.d.henroid@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
commit 98ad1cc14a
parent e37e112de3
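For context, a minimal sketch of the entry ordering this commit enforces. The handler name and body below are hypothetical and not taken from the kernel tree; only irq_enter()/irq_exit() (<linux/hardirq.h>) and exit_idle() (<asm/idle.h>) are the real APIs being reordered in the diff that follows.

/*
 * Minimal sketch, assuming a 3.x-era x86 vector handler; the
 * function itself is illustrative only.
 */
#include <linux/hardirq.h>	/* irq_enter(), irq_exit() */
#include <asm/idle.h>		/* exit_idle() */

static void example_vector_handler(void)
{
	/*
	 * irq_enter() ends the idle extended quiescent state
	 * (irq_enter() -> ... -> rcu_idle_exit()), so RCU is
	 * watching again from this point on.
	 */
	irq_enter();

	/*
	 * Only now is it safe to run the idle-exit notifiers:
	 * exit_idle() fires an atomic notifier chain, which takes
	 * rcu_read_lock() and triggers the warning above if RCU is
	 * still in its extended quiescent state.
	 */
	exit_idle();

	/* ... per-vector handling would go here ... */

	irq_exit();
}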
@@ -876,8 +876,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	local_apic_timer_interrupt();
 	irq_exit();
 
@@ -1809,8 +1809,8 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	u32 v;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
 	 * if it is a vectored one. Just in case...
@@ -1846,8 +1846,8 @@ void smp_error_interrupt(struct pt_regs *regs)
 		"Illegal register address", /* APIC Error Bit 7 */
 	};
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v0 = apic_read(APIC_ESR);
 	apic_write(APIC_ESR, 0);
@@ -2421,8 +2421,8 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 	unsigned vector, me;
 
 	ack_APIC_irq();
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
 asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
 	irq_exit();
@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
 asmlinkage void smp_threshold_interrupt(void)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	irq_exit();
@@ -181,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	unsigned vector = ~regs->orig_ax;
 	unsigned irq;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	irq = __this_cpu_read(vector_irq[vector]);
 
@@ -209,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
 
 	ack_APIC_irq();
 
-	exit_idle();
-
 	irq_enter();
 
+	exit_idle();
+
 	inc_irq_stat(x86_platform_ipis);
 
 	if (x86_platform_ipi_callback)