x86, mce: rename mce_notify_user to mce_notify_irq

Rename the mce_notify_user() function to mce_notify_irq(). The next
patch will split the wakeup handling for interrupt context from the
wakeup handling for process context, so give this function a clearer
name beforehand.

Contains a fix from Ying Huang

[ Impact: cleanup ]

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Huang Ying <ying.huang@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Author:    Andi Kleen
Date:      2009-05-27 21:56:58 +02:00
Committer: H. Peter Anvin
Parent:    4ef702c10b
Commit:    9ff36ee966

5 changed files with 9 additions and 9 deletions
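
For context before the hunks: the function being renamed is the helper that wakes up consumers of the machine check log, callable from interrupt or process context but not from machine check/NMI context (see the comment preserved in the hunk at -989 below). The following is a minimal sketch of its shape, reconstructed from those hunks; the notify_user bit, the mce_wait wait queue, the mce_do_trigger stub and the TIF_MCE_NOTIFY handling are assumptions standing in for surrounding mce.c code that is not part of this diff.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

static unsigned long notify_user;		/* assumed "event pending" bit */
static DECLARE_WAIT_QUEUE_HEAD(mce_wait);	/* assumed wait queue for MCE log readers */
static void mce_do_trigger(struct work_struct *work) { /* stub: run the user trigger */ }
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);

int mce_notify_irq(void)
{
	/* Not more than two messages every minute */
	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);

	/* Assumed: acknowledge a process-context wakeup request. */
	clear_thread_flag(TIF_MCE_NOTIFY);

	if (test_and_clear_bit(0, &notify_user)) {
		/* Wake sleeping readers of the MCE log device ... */
		wake_up_interruptible(&mce_wait);
		/*
		 * ... and kick the trigger work (the real code only does
		 * this when a userspace trigger program is configured).
		 */
		schedule_work(&mce_trigger_work);

		if (__ratelimit(&ratelimit))
			printk(KERN_INFO "Machine check events logged\n");
		return 1;
	}
	return 0;
}

The return value (1 if anything was pending) is what lets the timer hunk further down shrink or grow the polling interval.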

[changed file 1 of 5]

@@ -159,7 +159,7 @@ enum mcp_flags {
 };
 void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
 
-int mce_notify_user(void);
+int mce_notify_irq(void);
 
 DECLARE_PER_CPU(struct mce, injectm);
 extern struct file_operations mce_chrdev_ops;

[changed file 2 of 5]

@@ -65,7 +65,7 @@ static void raise_mce(unsigned long data)
 		memset(&b, 0xff, sizeof(mce_banks_t));
 		printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
 		machine_check_poll(0, &b);
-		mce_notify_user();
+		mce_notify_irq();
 		printk(KERN_INFO "Finished machine check poll on CPU %d\n",
 		       cpu);
 	}

[changed file 3 of 5]

@@ -348,7 +348,7 @@ asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
 	ack_APIC_irq();
 	exit_idle();
 	irq_enter();
-	mce_notify_user();
+	mce_notify_irq();
 	irq_exit();
 }
 #endif
@@ -356,7 +356,7 @@ asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
 static void mce_report_event(struct pt_regs *regs)
 {
 	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
-		mce_notify_user();
+		mce_notify_irq();
 		return;
 	}
@@ -968,7 +968,7 @@ static void mcheck_timer(unsigned long data)
 	 * polling interval, otherwise increase the polling interval.
 	 */
 	n = &__get_cpu_var(next_interval);
-	if (mce_notify_user())
+	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
 		*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
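
As an aside, this timer hunk is the adaptive poll interval: a hit halves the per-CPU interval (floored at HZ/100 jiffies, i.e. 10 ms), a miss doubles it, capped at check_interval seconds. A hypothetical trace, assuming HZ=1000 and check_interval=300:

/*
 * Hypothetical evolution of *n (in jiffies), HZ = 1000, check_interval = 300:
 *   start:                  *n = 300000              (300 s)
 *   mce_notify_irq() == 1:  *n = max(150000, 10) = 150000   (150 s)
 *   mce_notify_irq() == 1:  *n = max(75000, 10)  = 75000    (75 s)
 *   mce_notify_irq() == 0:  *n = min(150000, 300000) = 150000, and so on
 *                           back up toward 300 s while nothing is pending.
 */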
@@ -989,7 +989,7 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
  * Can be called from interrupt context, but not from machine check/NMI
  * context.
  */
-int mce_notify_user(void)
+int mce_notify_irq(void)
 {
 	/* Not more than two messages every minute */
 	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
@@ -1014,7 +1014,7 @@ int mce_notify_user(void)
 	}
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mce_notify_user);
+EXPORT_SYMBOL_GPL(mce_notify_irq);
 
 /*
  * Initialize Machine Checks for a CPU.

[changed file 4 of 5]

@@ -80,7 +80,7 @@ static int cmci_supported(int *banks)
 static void intel_threshold_interrupt(void)
 {
 	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
-	mce_notify_user();
+	mce_notify_irq();
 }
 
 static void print_update(char *type, int *hdr, int num)

[changed file 5 of 5]

@@ -860,7 +860,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 #ifdef CONFIG_X86_NEW_MCE
 	/* notify userspace of pending MCEs */
 	if (thread_info_flags & _TIF_MCE_NOTIFY)
-		mce_notify_user();
+		mce_notify_irq();
 #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
 
 	/* deal with pending signal delivery */
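
Taken together with the mce_report_event() hunk earlier, this signal-return hook shows the deferral pattern the rename is preparing for: when the event is found in a context where an immediate wakeup is safe, mce_notify_irq() is called directly; otherwise the task is flagged and the notification runs from do_notify_resume() on the way back to user space. A simplified, non-standalone sketch of that pattern follows; the set_thread_flag() step is an assumption about the surrounding code, and the real deferral path also involves a self-IPI that is not shown here.

static void mce_report_event(struct pt_regs *regs)
{
	if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) {
		/* Interrupts were enabled (or vm86): safe to notify right away. */
		mce_notify_irq();
		return;
	}

	/*
	 * Otherwise defer: mark the current task so that do_notify_resume()
	 * sees _TIF_MCE_NOTIFY and calls mce_notify_irq() before returning
	 * to user space. (Assumed wiring, for illustration only.)
	 */
	set_thread_flag(TIF_MCE_NOTIFY);
}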