Merge branches 'cpus4096', 'x86/cleanups' and 'x86/urgent' into x86/percpu

Author/Committer: Ingo Molnar, 2009-01-15 13:18:57 +01:00
commit 7f268f4352

92 changed files with 890 additions and 593 deletions
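Most of the changes merged here follow a single conversion pattern: irq_desc[].affinity is no longer an embedded cpumask_t that can be assigned by value, but a struct cpumask * that may live off-stack under CONFIG_CPUMASK_OFFSTACK, so every struct assignment becomes an explicit copy or fill. A minimal sketch of the recurring pattern (hypothetical code, not taken from any file below):

#include <linux/cpumask.h>
#include <linux/irq.h>

static void demo_set_affinity(unsigned int irq, int cpu)
{
    /* Old: irq_desc[irq].affinity = cpumask_of_cpu(cpu);
     * New: affinity may point at off-stack storage, so it must be
     * copied into, never assigned. */
    cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
}

static void demo_reset_affinity(unsigned int irq)
{
    /* Old: irq_desc[irq].affinity = CPU_MASK_ALL; */
    cpumask_setall(irq_desc[irq].affinity);
}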


@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)

 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.

 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
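A consumer of the renamed macros now gets a cpumask pointer back rather than a cpumask_t by value, so no large copy is made at the call site. A hedged sketch of a caller (hypothetical function, not from this series):

#include <linux/topology.h>
#include <linux/cpumask.h>

/* Count the CPUs sharing a core with @cpu. */
static int demo_core_sibling_count(int cpu)
{
    const struct cpumask *core = topology_core_cpumask(cpu);

    return cpumask_weight(core);
}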


@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;

-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }


@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;

 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;

 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);

-			if (newcpu == NR_CPUS) {
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);

-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}

 			route_irq(desc, i, newcpu);


@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);

 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }


@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };

+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;


@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;


@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;

-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing


@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
 	ia64_srlz_d();
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			int irq = local_vector_to_irq(vector);
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
 	 * Perform normal interrupt style processing
 	 */
 	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+		struct irq_desc *desc = irq_to_desc(vector);
+
 		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
 			smp_local_flush_tlb();
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector)))
-			kstat_this_cpu.irqs[vector]++;
+			kstat_incr_irqs_this_cpu(vector, desc);
 		else {
 			struct pt_regs *old_regs = set_irq_regs(NULL);
 			int irq = local_vector_to_irq(vector);


@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));

 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */


@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);

 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */


@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-	if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 		smtc_forward_irq(irq);					\
 		irq_exit();						\
 		return;							\


@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 	}

-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 }


@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */

-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);

 	/*
 	 * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
+	int irq = MIPS_CPU_IRQ_BASE + 1;

 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
-		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+		kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 		irq_exit();


@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {

 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);

@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */

+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);

 	if (cpus_empty(tmask))
 		/*


@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
 	int irq = SGI_BUSERR_IRQ;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	ip22_be_interrupt(irq);
 	irq_exit();
 }


@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
 	char c;

 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
 	ArcRead(0, &c, 1, &cnt);
 	ArcEnterInteractiveMode();


@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_BCM1480_INT_MBOX_0_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;


@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
 	int cpu = smp_processor_id();
+	int irq = K_INT_MBOX_0;
 	unsigned int action;

-	kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	/* Load the mailbox register to figure out what we're supposed to do */
 	action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;


@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	 * the stack NMI-atomically, it's safe to use smp_processor_id().
 	 */
 	int sum, cpu = smp_processor_id();
+	int irq = NMIIRQ;
 	u8 wdt, tmp;

 	wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 	NMICR = NMICR_WDIF;

 	nmi_count(cpu)++;
-	kstat_this_cpu.irqs[NMIIRQ]++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 	sum = irq_stat[cpu].__irq_count;

 	if (last_irq_sums[cpu] == sum) {


@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision.  The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_check_affinity(irq, dest))
 		return;

-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif

 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);

 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);


@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;

-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;


@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;

+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 		       virq, cpu);

 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);


@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);


@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;

+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					&irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}


@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)

 	irq_enter();

-	kstat_this_cpu.irqs[0]++;
+	kstat_incr_irqs_this_cpu(0, irq_to_desc(0));

 	if (unlikely(!evt->event_handler)) {
 		printk(KERN_WARNING


@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */


@@ -3,6 +3,9 @@

 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */

 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }

-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
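The rationale is the one in the added comment: older gcc releases may decline to inline a multi-statement function based on size heuristics, which defeats the IS_IMMEDIATE() constant-folding these helpers rely on. A generic illustration of the attribute the kernel wraps as __always_inline (standalone example under that assumption, not kernel code):

/* Forces inlining regardless of the compiler's size heuristics
 * (GCC extension; the kernel spells this __always_inline). */
static inline __attribute__((always_inline)) int demo_scale(int x)
{
    int doubled = x + x;    /* more than one statement: an old gcc
                             * might otherwise emit an out-of-line
                             * copy and a real call */
    return doubled + 1;
}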


@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>

+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
+#define safe_smp_processor_id()		0
+#define stack_smp_processor_id()	0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
 	struct cpu cpu;
 };
@@ -17,4 +31,11 @@ extern void arch_unregister_cpu(int);
 #endif

 DECLARE_PER_CPU(int, cpu_state);

+#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
+extern unsigned char boot_cpu_id;
+#else
+#define boot_cpu_id	0
+#endif
+
 #endif /* _ASM_X86_CPU_H */


@@ -0,0 +1,28 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
+
+#endif /* CONFIG_X86_32 */
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
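cpumask_var_t is what makes the 64-bit branch above work: with CONFIG_CPUMASK_OFFSTACK it is a pointer that must be allocated explicitly, and without it a one-element array that decays to a pointer at no cost, so the same code compiles either way. A hedged sketch of the usage pattern (hypothetical function, not from this merge):

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical: count how many online CPUs intersect @src. */
static int demo_count_online(const struct cpumask *src)
{
    cpumask_var_t tmp;  /* pointer if OFFSTACK, array otherwise */
    int n;

    if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
        return -ENOMEM; /* cheap no-op allocation when !OFFSTACK */

    cpumask_and(tmp, src, cpu_online_mask);
    n = cpumask_weight(tmp);

    free_cpumask_var(tmp);
    return n;
}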


@@ -19,6 +19,9 @@ typedef struct {

 DECLARE_PER_CPU(irq_cpustat_t, irq_stat);

+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
 #define __ARCH_IRQ_STAT
 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)


@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];

-/*
- * MP-BIOS irq configuration table structures:
- */
-
 #define MP_MAX_IOAPIC_PIN 127

-struct mp_config_ioapic {
-	unsigned long mp_apicaddr;
-	unsigned int mp_apicid;
-	unsigned char mp_type;
-	unsigned char mp_apicver;
-	unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-	unsigned int mp_dstapic;
-	unsigned char mp_type;
-	unsigned char mp_irqtype;
-	unsigned short mp_irqflag;
-	unsigned char mp_srcbus;
-	unsigned char mp_srcbusirq;
-	unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];

 /* # of MP IRQ source entries */
 extern int mp_irq_entries;

 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;


@@ -105,6 +105,8 @@

 #if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)

+#include <asm/apicnum.h>	/* need MAX_IO_APICS */
+
 #ifndef CONFIG_SPARSE_IRQ
 # if NR_CPUS < MAX_IO_APICS
 #  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
@@ -112,11 +114,12 @@
 #  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
 # endif
 #else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+
+# define NR_IRQS					\
+	((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?		\
+		(NR_VECTORS + (8 * NR_CPUS)) :		\
+		(NR_VECTORS + (32 * MAX_IO_APICS)))
+
 #endif

 #elif defined(CONFIG_X86_VOYAGER)


@@ -24,17 +24,18 @@
 # endif
 #endif

-struct intel_mp_floating {
-	char mpf_signature[4];		/* "_MP_" */
-	unsigned int mpf_physptr;	/* Configuration table address */
-	unsigned char mpf_length;	/* Our length (paragraphs) */
-	unsigned char mpf_specification;/* Specification version */
-	unsigned char mpf_checksum;	/* Checksum (makes sum 0) */
-	unsigned char mpf_feature1;	/* Standard or configuration ? */
-	unsigned char mpf_feature2;	/* Bit7 set for IMCR|PIC */
-	unsigned char mpf_feature3;	/* Unused (0) */
-	unsigned char mpf_feature4;	/* Unused (0) */
-	unsigned char mpf_feature5;	/* Unused (0) */
+struct mpf_intel {
+	char signature[4];		/* "_MP_" */
+	unsigned int physptr;		/* Configuration table address */
+	unsigned char length;		/* Our length (paragraphs) */
+	unsigned char specification;	/* Specification version */
+	unsigned char checksum;		/* Checksum (makes sum 0) */
+	unsigned char feature1;		/* Standard or configuration ? */
+	unsigned char feature2;		/* Bit7 set for IMCR|PIC */
+	unsigned char feature3;		/* Unused (0) */
+	unsigned char feature4;		/* Unused (0) */
+	unsigned char feature5;		/* Unused (0) */
 };

 #define MPC_SIGNATURE "PCMP"


@@ -244,7 +244,8 @@ struct pv_mmu_ops {
 	void (*flush_tlb_user)(void);
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(unsigned long addr);
-	void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+	void (*flush_tlb_others)(const struct cpumask *cpus,
+				 struct mm_struct *mm,
 				 unsigned long va);

 	/* Hooks for allocating and freeing a pagetable top-level */
@@ -984,10 +985,11 @@ static inline void __flush_tlb_single(unsigned long addr)
 	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }

-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+				    struct mm_struct *mm,
 				    unsigned long va)
 {
-	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }

 static inline int paravirt_pgd_alloc(struct mm_struct *mm)


@@ -17,30 +17,7 @@
 #endif
 #include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask		((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask	((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask	((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask	((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
+#include <asm/cpumask.h>

 extern int __cpuinit get_local_pda(int cpu);
@@ -167,8 +144,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);

-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
@@ -177,10 +152,6 @@ static inline int num_booting_cpus(void)
 {
 	return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */

 extern unsigned disabled_cpus __cpuinitdata;
@@ -205,10 +176,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()	smp_processor_id()

-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)		boot_cpu_physical_apicid
-#define safe_smp_processor_id()		0
-#define stack_smp_processor_id()	0
 #endif

 #ifdef CONFIG_X86_LOCAL_APIC
@@ -251,11 +218,5 @@ static inline int hard_smp_processor_id(void)

 #endif /* CONFIG_X86_LOCAL_APIC */

-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id	0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */


@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	__flush_tlb();
 }

-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long va)
 {
@@ -142,8 +142,8 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	flush_tlb_mm(vma->vm_mm);
 }

-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-			     unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     struct mm_struct *mm, unsigned long va);

 #define TLBSTATE_OK	1
 #define TLBSTATE_LAZY	2
@@ -166,7 +166,7 @@ static inline void reset_lazy_tlbstate(void)
 #endif	/* SMP */

 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va)	native_flush_tlb_others(mask, mm, va)
 #endif

 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +175,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }

+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */


@@ -325,7 +325,8 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
 	test_bit((cpu), (bau_local_cpumask).bits)

-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
+extern int uv_flush_tlb_others(struct cpumask *,
+			       struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);


@@ -912,8 +912,8 @@ static u8 __init uniq_ioapic_id(u8 id)
 	DECLARE_BITMAP(used, 256);
 	bitmap_zero(used, 256);
 	for (i = 0; i < nr_ioapics; i++) {
-		struct mp_config_ioapic *ia = &mp_ioapics[i];
-		__set_bit(ia->mp_apicid, used);
+		struct mpc_ioapic *ia = &mp_ioapics[i];
+		__set_bit(ia->apicid, used);
 	}
 	if (!test_bit(id, used))
 		return id;
@@ -945,47 +945,47 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)

 	idx = nr_ioapics;

-	mp_ioapics[idx].mp_type = MP_IOAPIC;
-	mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-	mp_ioapics[idx].mp_apicaddr = address;
+	mp_ioapics[idx].type = MP_IOAPIC;
+	mp_ioapics[idx].flags = MPC_APIC_USABLE;
+	mp_ioapics[idx].apicaddr = address;

 	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-	mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+	mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-	mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+	mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-	mp_ioapics[idx].mp_apicver = 0;
+	mp_ioapics[idx].apicver = 0;
 #endif
 	/*
 	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
 	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
 	 */
-	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
 	mp_ioapic_routing[idx].gsi_base = gsi_base;
 	mp_ioapic_routing[idx].gsi_end = gsi_base +
 	    io_apic_get_redir_entries(idx);

-	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-	       "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-	       mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+	printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+	       "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+	       mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
 	       mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);

 	nr_ioapics++;
 }

-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-			     struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+			     struct mpc_intsrc *mp_irq)
 {
-	memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+	memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }

-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-		      struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+		      struct mpc_intsrc *m)
 {
-	return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+	return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }

-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
 	int i;
@@ -1003,7 +1003,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
 	int ioapic;
 	int pin;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;

 	/*
 	 * Convert 'gsi' to 'ioapic.pin'.
@@ -1021,13 +1021,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 	if ((bus_irq == 0) && (trigger == 3))
 		trigger = 1;

-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (trigger << 2) | polarity;
-	mp_irq.mp_srcbus = MP_ISA_BUS;
-	mp_irq.mp_srcbusirq = bus_irq;	/* IRQ */
-	mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-	mp_irq.mp_dstirq = pin;	/* INTIN# */
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (trigger << 2) | polarity;
+	mp_irq.srcbus = MP_ISA_BUS;
+	mp_irq.srcbusirq = bus_irq;	/* IRQ */
+	mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+	mp_irq.dstirq = pin;	/* INTIN# */

 	save_mp_irq(&mp_irq);
 }
@@ -1037,7 +1037,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	int i;
 	int ioapic;
 	unsigned int dstapic;
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;

 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
 	/*
@@ -1062,7 +1062,7 @@ void __init mp_config_acpi_legacy_irqs(void)
 	ioapic = mp_find_ioapic(0);
 	if (ioapic < 0)
 		return;
-	dstapic = mp_ioapics[ioapic].mp_apicid;
+	dstapic = mp_ioapics[ioapic].apicid;

 	/*
 	 * Use the default configuration for the IRQs 0-15.  Unless
@@ -1072,16 +1072,14 @@ void __init mp_config_acpi_legacy_irqs(void)
 		int idx;

 		for (idx = 0; idx < mp_irq_entries; idx++) {
-			struct mp_config_intsrc *irq = mp_irqs + idx;
+			struct mpc_intsrc *irq = mp_irqs + idx;

 			/* Do we already have a mapping for this ISA IRQ? */
-			if (irq->mp_srcbus == MP_ISA_BUS
-			    && irq->mp_srcbusirq == i)
+			if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
 				break;

 			/* Do we already have a mapping for this IOAPIC pin */
-			if (irq->mp_dstapic == dstapic &&
-			    irq->mp_dstirq == i)
+			if (irq->dstapic == dstapic && irq->dstirq == i)
 				break;
 		}

@@ -1090,13 +1088,13 @@ void __init mp_config_acpi_legacy_irqs(void)
 			continue;	/* IRQ already used */
 		}

-		mp_irq.mp_type = MP_INTSRC;
-		mp_irq.mp_irqflag = 0;	/* Conforming */
-		mp_irq.mp_srcbus = MP_ISA_BUS;
-		mp_irq.mp_dstapic = dstapic;
-		mp_irq.mp_irqtype = mp_INT;
-		mp_irq.mp_srcbusirq = i; /* Identity mapped */
-		mp_irq.mp_dstirq = i;
+		mp_irq.type = MP_INTSRC;
+		mp_irq.irqflag = 0;	/* Conforming */
+		mp_irq.srcbus = MP_ISA_BUS;
+		mp_irq.dstapic = dstapic;
+		mp_irq.irqtype = mp_INT;
+		mp_irq.srcbusirq = i; /* Identity mapped */
+		mp_irq.dstirq = i;

 		save_mp_irq(&mp_irq);
 	}
@@ -1207,22 +1205,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
 			u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-	struct mp_config_intsrc mp_irq;
+	struct mpc_intsrc mp_irq;
 	int ioapic;

 	if (!acpi_ioapic)
 		return 0;

 	/* print the entry should happen on mptable identically */
-	mp_irq.mp_type = MP_INTSRC;
-	mp_irq.mp_irqtype = mp_INT;
-	mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+	mp_irq.type = MP_INTSRC;
+	mp_irq.irqtype = mp_INT;
+	mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
 				(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-	mp_irq.mp_srcbus = number;
-	mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+	mp_irq.srcbus = number;
+	mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
 	ioapic = mp_find_ioapic(gsi);
-	mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-	mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+	mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+	mp_irq.dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;

 	save_mp_irq(&mp_irq);
 #endif


@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
 	unsigned int value;

+	/* APIC hasn't been mapped yet */
+	if (!apic_phys)
+		return;
+
 	clear_local_APIC();

 	/*
@@ -1126,6 +1130,11 @@ void __cpuinit setup_local_APIC(void)
 	unsigned int value;
 	int i, j;

+	if (disable_apic) {
+		disable_ioapic_setup();
+		return;
+	}
+
 #ifdef CONFIG_X86_32
 	/* Pound the ESR really hard over the head with a big hammer - mbligh */
 	if (lapic_is_integrated() && esr_disable) {
@@ -1566,11 +1575,11 @@ int apic_version[MAX_APICS];
 int __init APIC_init_uniprocessor(void)
 {
-#ifdef CONFIG_X86_64
 	if (disable_apic) {
 		pr_info("Apic disabled\n");
 		return -1;
 	}
+#ifdef CONFIG_X86_64
 	if (!cpu_has_apic) {
 		disable_apic = 1;
 		pr_info("Apic disabled by BIOS\n");


@@ -21,6 +21,8 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
#include <asm/apic.h> #include <asm/apic.h>

View File

@ -235,8 +235,6 @@ static u32 get_cur_val(const struct cpumask *mask)
return 0; return 0;
} }
cpumask_copy(cmd.mask, mask);
drv_read(&cmd); drv_read(&cmd);
dprintk("get_cur_val = %u\n", cmd.val); dprintk("get_cur_val = %u\n", cmd.val);


@@ -132,7 +132,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };

 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }

 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 }

 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 	return 0;
 }

+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 		 * parameters cpuid leaf to find the cache details
 		 */
 		for (i = 0; i < num_cache_leaves; i++) {
-			struct _cpuid4_info this_leaf;
+			struct _cpuid4_info_regs this_leaf;
 			int retval;

-			retval = cpuid4_cache_lookup(i, &this_leaf);
+			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 			if (retval >= 0) {
 				switch(this_leaf.eax.split.level) {
 				case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);

 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i))  {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;

 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;

 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;

+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)

 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
@@ -863,7 +888,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 	return -ENOMEM;
 }

-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)

 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
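The shared_cpu_map conversion above replaces an embedded cpumask_t with a raw bitmap plus to_cpumask() at the use sites, which keeps the structure layout fixed while still allowing the new accessors. A hedged sketch of the same idiom (hypothetical structure, not from this file):

#include <linux/cpumask.h>

struct demo_leaf {
    /* raw storage: an unsigned long[] sized for NR_CPUS bits */
    DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

static void demo_mark_shared(struct demo_leaf *leaf, int cpu)
{
    /* to_cpumask() reinterprets the bitmap as a struct cpumask *,
     * so the modern cpumask accessors apply to it. */
    cpumask_set_cpu(cpu, to_cpumask(leaf->shared_cpu_map));
}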


@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);

@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));

 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;

-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;

 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}

 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;

 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif

 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;

-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;

@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)

 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif

 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;

@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }


@@ -24,7 +24,7 @@
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>


@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/acpi.h>
@@ -82,11 +83,11 @@ static DEFINE_SPINLOCK(vector_lock);
 int nr_ioapic_registers[MAX_IO_APICS];

 /* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 int nr_ioapics;

 /* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

 /* # of MP IRQ source entries */
 int mp_irq_entries;
@@ -356,7 +357,7 @@ set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)

 	if (!cfg->move_in_progress) {
 		/* it means that domain is not changed */
-		if (!cpumask_intersects(&desc->affinity, mask))
+		if (!cpumask_intersects(desc->affinity, mask))
 			cfg->move_desc_pending = 1;
 	}
 }
@@ -386,7 +387,7 @@ struct io_apic {
 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
 {
 	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
+		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
 }

 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
@@ -579,9 +580,9 @@ set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 	if (assign_irq_vector(irq, cfg, mask))
 		return BAD_APICID;

-	cpumask_and(&desc->affinity, cfg->domain, mask);
+	cpumask_and(desc->affinity, cfg->domain, mask);
 	set_extra_move_desc(desc, mask);
-	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
+	return cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
 }

 static void
@@ -944,10 +945,10 @@ static int find_irq_entry(int apic, int pin, int type)
 	int i;

 	for (i = 0; i < mp_irq_entries; i++)
-		if (mp_irqs[i].mp_irqtype == type &&
-		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-		    mp_irqs[i].mp_dstirq == pin)
+		if (mp_irqs[i].irqtype == type &&
+		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
+		    mp_irqs[i].dstirq == pin)
 			return i;

 	return -1;
@@ -961,13 +962,13 @@ static int __init find_isa_irq_pin(int irq, int type)
 	int i;

 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;

 		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
+		    (mp_irqs[i].irqtype == type) &&
+		    (mp_irqs[i].srcbusirq == irq))

-			return mp_irqs[i].mp_dstirq;
+			return mp_irqs[i].dstirq;
 	}
 	return -1;
 }
@@ -977,17 +978,17 @@ static int __init find_isa_irq_apic(int irq, int type)
 	int i;

 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;

 		if (test_bit(lbus, mp_bus_not_pci) &&
-		    (mp_irqs[i].mp_irqtype == type) &&
-		    (mp_irqs[i].mp_srcbusirq == irq))
+		    (mp_irqs[i].irqtype == type) &&
+		    (mp_irqs[i].srcbusirq == irq))
 			break;
 	}
 	if (i < mp_irq_entries) {
 		int apic;
 		for(apic = 0; apic < nr_ioapics; apic++) {
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
+			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
 				return apic;
 		}
 	}
@@ -1012,23 +1013,23 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 		return -1;
 	}
 	for (i = 0; i < mp_irq_entries; i++) {
-		int lbus = mp_irqs[i].mp_srcbus;
+		int lbus = mp_irqs[i].srcbus;

 		for (apic = 0; apic < nr_ioapics; apic++)
-			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
+			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+			    mp_irqs[i].dstapic == MP_APIC_ALL)
 				break;

 		if (!test_bit(lbus, mp_bus_not_pci) &&
-		    !mp_irqs[i].mp_irqtype &&
+		    !mp_irqs[i].irqtype &&
 		    (bus == lbus) &&
-		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-			int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
+		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

 			if (!(apic || IO_APIC_IRQ(irq)))
 				continue;

-			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
+			if (pin == (mp_irqs[i].srcbusirq & 3))
 				return irq;
 			/*
 			 * Use the first all-but-pin matching entry as a
@@ -1071,7 +1072,7 @@ static int EISA_ELCR(unsigned int irq)
  * EISA conforming in the MP table, that means its trigger type must
  * be read in from the ELCR */

-#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
+#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
 #define default_EISA_polarity(idx)	default_ISA_polarity(idx)

 /* PCI interrupts are always polarity one level triggered,
@@ -1088,13 +1089,13 @@ static int EISA_ELCR(unsigned int irq)

 static int MPBIOS_polarity(int idx)
 {
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;
 	int polarity;

 	/*
 	 * Determine IRQ line polarity (high active or low active):
 	 */
-	switch (mp_irqs[idx].mp_irqflag & 3)
+	switch (mp_irqs[idx].irqflag & 3)
 	{
 		case 0: /* conforms, ie. bus-type dependent polarity */
 			if (test_bit(bus, mp_bus_not_pci))
@@ -1130,13 +1131,13 @@ static int MPBIOS_polarity(int idx)

 static int MPBIOS_trigger(int idx)
 {
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;
 	int trigger;

 	/*
 	 * Determine IRQ trigger mode (edge or level sensitive):
 	 */
-	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
+	switch ((mp_irqs[idx].irqflag>>2) & 3)
 	{
 		case 0: /* conforms, ie. bus-type dependent */
 			if (test_bit(bus, mp_bus_not_pci))
@@ -1214,16 +1215,16 @@ int (*ioapic_renumber_irq)(int ioapic, int irq);
 static int pin_2_irq(int idx, int apic, int pin)
 {
 	int irq, i;
-	int bus = mp_irqs[idx].mp_srcbus;
+	int bus = mp_irqs[idx].srcbus;

 	/*
 	 * Debugging check, we are in big trouble if this message pops up!
 	 */
-	if (mp_irqs[idx].mp_dstirq != pin)
+	if (mp_irqs[idx].dstirq != pin)
 		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

 	if (test_bit(bus, mp_bus_not_pci)) {
-		irq = mp_irqs[idx].mp_srcbusirq;
+		irq = mp_irqs[idx].srcbusirq;
 	} else {
 		/*
 		 * PCI IRQs are mapped in order
@@ -1566,14 +1567,14 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
 	apic_printk(APIC_VERBOSE,KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
 		    "IRQ %d Mode:%i Active:%i)\n",
-		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
+		    apic, mp_ioapics[apic].apicid, pin, cfg->vector,
 		    irq, trigger, polarity);

-	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
+	if (setup_ioapic_entry(mp_ioapics[apic].apicid, irq, &entry,
dest, trigger, polarity, cfg->vector)) { dest, trigger, polarity, cfg->vector)) {
printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mp_ioapics[apic].mp_apicid, pin); mp_ioapics[apic].apicid, pin);
__clear_irq_vector(irq, cfg); __clear_irq_vector(irq, cfg);
return; return;
} }
@ -1604,12 +1605,10 @@ static void __init setup_IO_APIC_irqs(void)
notcon = 1; notcon = 1;
apic_printk(APIC_VERBOSE, apic_printk(APIC_VERBOSE,
KERN_DEBUG " %d-%d", KERN_DEBUG " %d-%d",
mp_ioapics[apic].mp_apicid, mp_ioapics[apic].apicid, pin);
pin);
} else } else
apic_printk(APIC_VERBOSE, " %d-%d", apic_printk(APIC_VERBOSE, " %d-%d",
mp_ioapics[apic].mp_apicid, mp_ioapics[apic].apicid, pin);
pin);
continue; continue;
} }
if (notcon) { if (notcon) {
@ -1699,7 +1698,7 @@ __apicdebuginit(void) print_IO_APIC(void)
printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
for (i = 0; i < nr_ioapics; i++) for (i = 0; i < nr_ioapics; i++)
printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]); mp_ioapics[i].apicid, nr_ioapic_registers[i]);
/* /*
* We are a bit conservative about what we expect. We have to * We are a bit conservative about what we expect. We have to
@ -1719,7 +1718,7 @@ __apicdebuginit(void) print_IO_APIC(void)
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
printk("\n"); printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid); printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw); printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
@ -2121,14 +2120,14 @@ static void __init setup_ioapic_ids_from_mpc(void)
reg_00.raw = io_apic_read(apic, 0); reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
old_id = mp_ioapics[apic].mp_apicid; old_id = mp_ioapics[apic].apicid;
if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) { if (mp_ioapics[apic].apicid >= get_physical_broadcast()) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
apic, mp_ioapics[apic].mp_apicid); apic, mp_ioapics[apic].apicid);
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
reg_00.bits.ID); reg_00.bits.ID);
mp_ioapics[apic].mp_apicid = reg_00.bits.ID; mp_ioapics[apic].apicid = reg_00.bits.ID;
} }
/* /*
@ -2137,9 +2136,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
* 'stuck on smp_invalidate_needed IPI wait' messages. * 'stuck on smp_invalidate_needed IPI wait' messages.
*/ */
if (check_apicid_used(phys_id_present_map, if (check_apicid_used(phys_id_present_map,
mp_ioapics[apic].mp_apicid)) { mp_ioapics[apic].apicid)) {
printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
apic, mp_ioapics[apic].mp_apicid); apic, mp_ioapics[apic].apicid);
for (i = 0; i < get_physical_broadcast(); i++) for (i = 0; i < get_physical_broadcast(); i++)
if (!physid_isset(i, phys_id_present_map)) if (!physid_isset(i, phys_id_present_map))
break; break;
@ -2148,13 +2147,13 @@ static void __init setup_ioapic_ids_from_mpc(void)
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i); i);
physid_set(i, phys_id_present_map); physid_set(i, phys_id_present_map);
mp_ioapics[apic].mp_apicid = i; mp_ioapics[apic].apicid = i;
} else { } else {
physid_mask_t tmp; physid_mask_t tmp;
tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid); tmp = apicid_to_cpu_present(mp_ioapics[apic].apicid);
apic_printk(APIC_VERBOSE, "Setting %d in the " apic_printk(APIC_VERBOSE, "Setting %d in the "
"phys_id_present_map\n", "phys_id_present_map\n",
mp_ioapics[apic].mp_apicid); mp_ioapics[apic].apicid);
physids_or(phys_id_present_map, phys_id_present_map, tmp); physids_or(phys_id_present_map, phys_id_present_map, tmp);
} }
@ -2163,11 +2162,11 @@ static void __init setup_ioapic_ids_from_mpc(void)
* We need to adjust the IRQ routing table * We need to adjust the IRQ routing table
* if the ID changed. * if the ID changed.
*/ */
if (old_id != mp_ioapics[apic].mp_apicid) if (old_id != mp_ioapics[apic].apicid)
for (i = 0; i < mp_irq_entries; i++) for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].mp_dstapic == old_id) if (mp_irqs[i].dstapic == old_id)
mp_irqs[i].mp_dstapic mp_irqs[i].dstapic
= mp_ioapics[apic].mp_apicid; = mp_ioapics[apic].apicid;
/* /*
* Read the right value from the MPC table and * Read the right value from the MPC table and
@ -2175,9 +2174,9 @@ static void __init setup_ioapic_ids_from_mpc(void)
*/ */
apic_printk(APIC_VERBOSE, KERN_INFO apic_printk(APIC_VERBOSE, KERN_INFO
"...changing IO-APIC physical APIC ID to %d ...", "...changing IO-APIC physical APIC ID to %d ...",
mp_ioapics[apic].mp_apicid); mp_ioapics[apic].apicid);
reg_00.bits.ID = mp_ioapics[apic].mp_apicid; reg_00.bits.ID = mp_ioapics[apic].apicid;
spin_lock_irqsave(&ioapic_lock, flags); spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0, reg_00.raw); io_apic_write(apic, 0, reg_00.raw);
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
@ -2188,7 +2187,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
spin_lock_irqsave(&ioapic_lock, flags); spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(apic, 0); reg_00.raw = io_apic_read(apic, 0);
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid) if (reg_00.bits.ID != mp_ioapics[apic].apicid)
printk("could not set ID!\n"); printk("could not set ID!\n");
else else
apic_printk(APIC_VERBOSE, " ok.\n"); apic_printk(APIC_VERBOSE, " ok.\n");
@ -2383,7 +2382,7 @@ migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
if (cfg->move_in_progress) if (cfg->move_in_progress)
send_cleanup_vector(cfg); send_cleanup_vector(cfg);
cpumask_copy(&desc->affinity, mask); cpumask_copy(desc->affinity, mask);
} }
static int migrate_irq_remapped_level_desc(struct irq_desc *desc) static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@ -2405,11 +2404,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
} }
/* everything is clear. we have right of way */ /* everything is clear. we have right of way */
migrate_ioapic_irq_desc(desc, &desc->pending_mask); migrate_ioapic_irq_desc(desc, desc->pending_mask);
ret = 0; ret = 0;
desc->status &= ~IRQ_MOVE_PENDING; desc->status &= ~IRQ_MOVE_PENDING;
cpumask_clear(&desc->pending_mask); cpumask_clear(desc->pending_mask);
unmask: unmask:
unmask_IO_APIC_irq_desc(desc); unmask_IO_APIC_irq_desc(desc);
@ -2434,7 +2433,7 @@ static void ir_irq_migration(struct work_struct *work)
continue; continue;
} }
desc->chip->set_affinity(irq, &desc->pending_mask); desc->chip->set_affinity(irq, desc->pending_mask);
spin_unlock_irqrestore(&desc->lock, flags); spin_unlock_irqrestore(&desc->lock, flags);
} }
} }
@ -2448,7 +2447,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
{ {
if (desc->status & IRQ_LEVEL) { if (desc->status & IRQ_LEVEL) {
desc->status |= IRQ_MOVE_PENDING; desc->status |= IRQ_MOVE_PENDING;
cpumask_copy(&desc->pending_mask, mask); cpumask_copy(desc->pending_mask, mask);
migrate_irq_remapped_level_desc(desc); migrate_irq_remapped_level_desc(desc);
return; return;
} }
@ -2516,7 +2515,7 @@ static void irq_complete_move(struct irq_desc **descp)
/* domain has not changed, but affinity did */ /* domain has not changed, but affinity did */
me = smp_processor_id(); me = smp_processor_id();
if (cpu_isset(me, desc->affinity)) { if (cpumask_test_cpu(me, desc->affinity)) {
*descp = desc = move_irq_desc(desc, me); *descp = desc = move_irq_desc(desc, me);
/* get the new one */ /* get the new one */
cfg = desc->chip_data; cfg = desc->chip_data;
@ -3117,8 +3116,8 @@ static int ioapic_resume(struct sys_device *dev)
spin_lock_irqsave(&ioapic_lock, flags); spin_lock_irqsave(&ioapic_lock, flags);
reg_00.raw = io_apic_read(dev->id, 0); reg_00.raw = io_apic_read(dev->id, 0);
if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) { if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid; reg_00.bits.ID = mp_ioapics[dev->id].apicid;
io_apic_write(dev->id, 0, reg_00.raw); io_apic_write(dev->id, 0, reg_00.raw);
} }
spin_unlock_irqrestore(&ioapic_lock, flags); spin_unlock_irqrestore(&ioapic_lock, flags);
@ -3183,7 +3182,7 @@ unsigned int create_irq_nr(unsigned int irq_want)
irq = 0; irq = 0;
spin_lock_irqsave(&vector_lock, flags); spin_lock_irqsave(&vector_lock, flags);
for (new = irq_want; new < NR_IRQS; new++) { for (new = irq_want; new < nr_irqs; new++) {
if (platform_legacy_irq(new)) if (platform_legacy_irq(new))
continue; continue;
@ -3258,6 +3257,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
int err; int err;
unsigned dest; unsigned dest;
if (disable_apic)
return -ENXIO;
cfg = irq_cfg(irq); cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS); err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (err) if (err)
@ -3726,6 +3728,9 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
struct irq_cfg *cfg; struct irq_cfg *cfg;
int err; int err;
if (disable_apic)
return -ENXIO;
cfg = irq_cfg(irq); cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, TARGET_CPUS); err = assign_irq_vector(irq, cfg, TARGET_CPUS);
if (!err) { if (!err) {
@ -3850,6 +3855,22 @@ void __init probe_nr_irqs_gsi(void)
nr_irqs_gsi = nr; nr_irqs_gsi = nr;
} }
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
int nr;
nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
(NR_VECTORS + (8 * nr_cpu_ids)) :
(NR_VECTORS + (32 * nr_ioapics)));
if (nr < nr_irqs && nr > nr_irqs_gsi)
nr_irqs = nr;
return 0;
}
#endif
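The new arch_probe_nr_irqs() takes whichever is larger of 8 vectors per CPU or 32 pins per IO-APIC, on top of the fixed vector space, and only shrinks nr_irqs with the result. A standalone sketch of the arithmetic, assuming illustrative values (NR_VECTORS, the CPU count and the IO-APIC count below are made up for the demo):

#include <stdio.h>

#define NR_VECTORS 256	/* assumed value, for illustration */

static int probe_nr_irqs(int nr_cpu_ids, int nr_ioapics)
{
	/* larger of 8 per CPU or 32 per IO-APIC, plus the vector space */
	return (8 * nr_cpu_ids) > (32 * nr_ioapics) ?
		NR_VECTORS + (8 * nr_cpu_ids) :
		NR_VECTORS + (32 * nr_ioapics);
}

int main(void)
{
	/* 16 CPUs, 2 IO-APICs: 8*16 = 128 > 32*2 = 64, so 256 + 128 */
	printf("%d\n", probe_nr_irqs(16, 2));	/* prints 384 */
	return 0;
}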
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
ACPI-based IOAPIC Configuration ACPI-based IOAPIC Configuration
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
@ -3984,8 +4005,8 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
return -1; return -1;
for (i = 0; i < mp_irq_entries; i++) for (i = 0; i < mp_irq_entries; i++)
if (mp_irqs[i].mp_irqtype == mp_INT && if (mp_irqs[i].irqtype == mp_INT &&
mp_irqs[i].mp_srcbusirq == bus_irq) mp_irqs[i].srcbusirq == bus_irq)
break; break;
if (i >= mp_irq_entries) if (i >= mp_irq_entries)
return -1; return -1;
@ -4039,7 +4060,7 @@ void __init setup_ioapic_dest(void)
*/ */
if (desc->status & if (desc->status &
(IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
mask = &desc->affinity; mask = desc->affinity;
else else
mask = TARGET_CPUS; mask = TARGET_CPUS;
@ -4100,7 +4121,7 @@ void __init ioapic_init_mappings(void)
ioapic_res = ioapic_setup_resources(); ioapic_res = ioapic_setup_resources();
for (i = 0; i < nr_ioapics; i++) { for (i = 0; i < nr_ioapics; i++) {
if (smp_found_config) { if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mp_apicaddr; ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
if (!ioapic_phys) { if (!ioapic_phys) {
printk(KERN_ERR printk(KERN_ERR

View File

@ -248,7 +248,7 @@ void fixup_irqs(void)
if (irq == 2) if (irq == 2)
continue; continue;
affinity = &desc->affinity; affinity = desc->affinity;
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq); printk("Breaking affinity for irq %i\n", irq);
affinity = cpu_all_mask; affinity = cpu_all_mask;
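fixup_irqs() now relies on the convention that cpumask_any_and() returns a value of nr_cpu_ids or more when the two masks have no CPU in common. A toy model of that check (the helper, NR_CPU_IDS and the mask values are invented for illustration):

#include <stdio.h>

#define NR_CPU_IDS 8	/* assumed CPU count for the demo */

/* Toy cpumask_any_and(): first CPU set in both masks, or NR_CPU_IDS
 * when the intersection is empty. */
static int any_and(unsigned long a, unsigned long b)
{
	unsigned long both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPU_IDS; cpu++)
		if (both & (1UL << cpu))
			return cpu;
	return NR_CPU_IDS;	/* caller tests >= nr_cpu_ids */
}

int main(void)
{
	unsigned long affinity = 1UL << 4;	/* IRQ bound to CPU 4 */
	unsigned long online = 0x0f;		/* CPUs 0-3 online    */

	if (any_and(affinity, online) >= NR_CPU_IDS)
		printf("Breaking affinity\n");
	return 0;
}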

View File

@ -100,7 +100,7 @@ void fixup_irqs(void)
/* interrupts are disabled at this point */ /* interrupts are disabled at this point */
spin_lock(&desc->lock); spin_lock(&desc->lock);
affinity = &desc->affinity; affinity = desc->affinity;
if (!irq_has_action(irq) || if (!irq_has_action(irq) ||
cpumask_equal(affinity, cpu_online_mask)) { cpumask_equal(affinity, cpu_online_mask)) {
spin_unlock(&desc->lock); spin_unlock(&desc->lock);

View File

@ -87,9 +87,9 @@
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/microcode.h> #include <asm/microcode.h>
@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1; return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
} }
static inline int static inline int
update_match_revision(struct microcode_header_intel *mc_header, int rev) update_match_revision(struct microcode_header_intel *mc_header, int rev)
{ {
return (mc_header->rev <= rev) ? 0 : 1; return (mc_header->rev <= rev) ? 0 : 1;
@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
return ret; return ret;
} }
ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size, ret = generic_load_microcode(cpu, (void *)firmware->data,
&get_ucode_fw); firmware->size, &get_ucode_fw);
release_firmware(firmware); release_firmware(firmware);
@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
/* We should bind the task to the CPU */ /* We should bind the task to the CPU */
BUG_ON(cpu != raw_smp_processor_id()); BUG_ON(cpu != raw_smp_processor_id());
return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user); return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
} }
static void microcode_fini_cpu(int cpu) static void microcode_fini_cpu(int cpu)

View File

@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
{ {
vfree(module_region); vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception /* FIXME: If module_region == mod->init_region, trim exception
table entries. */ table entries. */
} }
/* We don't need anything special. */ /* We don't need anything special. */
@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
*para = NULL; *para = NULL;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (!strcmp(".text", secstrings + s->sh_name)) if (!strcmp(".text", secstrings + s->sh_name))
text = s; text = s;
if (!strcmp(".altinstructions", secstrings + s->sh_name)) if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s; alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name)) if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s; locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name)) if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s; para = s;
} }

View File

@ -30,14 +30,14 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#define DEBUGP(fmt...) #define DEBUGP(fmt...)
#ifndef CONFIG_UML #ifndef CONFIG_UML
void module_free(struct module *mod, void *module_region) void module_free(struct module *mod, void *module_region)
{ {
vfree(module_region); vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception /* FIXME: If module_region == mod->init_region, trim exception
table entries. */ table entries. */
} }
void *module_alloc(unsigned long size) void *module_alloc(unsigned long size)
@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf64_Sym *sym; Elf64_Sym *sym;
void *loc; void *loc;
u64 val; u64 val;
DEBUGP("Applying relocate section %u to %u\n", relsec, DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info); sechdrs[relsec].sh_info);
@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
sym = (Elf64_Sym *)sechdrs[symindex].sh_addr sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
+ ELF64_R_SYM(rel[i].r_info); + ELF64_R_SYM(rel[i].r_info);
DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n", DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), (int)ELF64_R_TYPE(rel[i].r_info),
sym->st_value, rel[i].r_addend, (u64)loc); sym->st_value, rel[i].r_addend, (u64)loc);
val = sym->st_value + rel[i].r_addend; val = sym->st_value + rel[i].r_addend;
switch (ELF64_R_TYPE(rel[i].r_info)) { switch (ELF64_R_TYPE(rel[i].r_info)) {
case R_X86_64_NONE: case R_X86_64_NONE:
@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if ((s64)val != *(s32 *)loc) if ((s64)val != *(s32 *)loc)
goto overflow; goto overflow;
break; break;
case R_X86_64_PC32: case R_X86_64_PC32:
val -= (u64)loc; val -= (u64)loc;
*(u32 *)loc = val; *(u32 *)loc = val;
#if 0 #if 0
if ((s64)val != *(s32 *)loc) if ((s64)val != *(s32 *)loc)
goto overflow; goto overflow;
#endif #endif
break; break;
default: default:
printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n", printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info)); me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC; return -ENOEXEC;
} }
@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0; return 0;
overflow: overflow:
printk(KERN_ERR "overflow in relocation type %d val %Lx\n", printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val); (int)ELF64_R_TYPE(rel[i].r_info), val);
printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n", printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
me->name); me->name);
@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
unsigned int relsec, unsigned int relsec,
struct module *me) struct module *me)
{ {
printk("non add relocation not supported\n"); printk(KERN_ERR "non add relocation not supported\n");
return -ENOSYS; return -ENOSYS;
} }
int module_finalize(const Elf_Ehdr *hdr, int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs, const Elf_Shdr *sechdrs,
struct module *me) struct module *me)
{ {
const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
*para = NULL; *para = NULL;
@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
if (!strcmp(".altinstructions", secstrings + s->sh_name)) if (!strcmp(".altinstructions", secstrings + s->sh_name))
alt = s; alt = s;
if (!strcmp(".smp_locks", secstrings + s->sh_name)) if (!strcmp(".smp_locks", secstrings + s->sh_name))
locks= s; locks = s;
if (!strcmp(".parainstructions", secstrings + s->sh_name)) if (!strcmp(".parainstructions", secstrings + s->sh_name))
para = s; para = s;
} }

View File

@ -144,11 +144,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
if (bad_ioapic(m->apicaddr)) if (bad_ioapic(m->apicaddr))
return; return;
mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr; mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
mp_ioapics[nr_ioapics].mp_apicid = m->apicid; mp_ioapics[nr_ioapics].apicid = m->apicid;
mp_ioapics[nr_ioapics].mp_type = m->type; mp_ioapics[nr_ioapics].type = m->type;
mp_ioapics[nr_ioapics].mp_apicver = m->apicver; mp_ioapics[nr_ioapics].apicver = m->apicver;
mp_ioapics[nr_ioapics].mp_flags = m->flags; mp_ioapics[nr_ioapics].flags = m->flags;
nr_ioapics++; nr_ioapics++;
} }
@ -160,55 +160,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
m->srcbusirq, m->dstapic, m->dstirq); m->srcbusirq, m->dstapic, m->dstirq);
} }
static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq) static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
{ {
apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x," apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
" IRQ %02x, APIC ID %x, APIC INT %02x\n", " IRQ %02x, APIC ID %x, APIC INT %02x\n",
mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3, mp_irq->irqtype, mp_irq->irqflag & 3,
(mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus, (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq); mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
} }
static void __init assign_to_mp_irq(struct mpc_intsrc *m, static void __init assign_to_mp_irq(struct mpc_intsrc *m,
struct mp_config_intsrc *mp_irq) struct mpc_intsrc *mp_irq)
{ {
mp_irq->mp_dstapic = m->dstapic; mp_irq->dstapic = m->dstapic;
mp_irq->mp_type = m->type; mp_irq->type = m->type;
mp_irq->mp_irqtype = m->irqtype; mp_irq->irqtype = m->irqtype;
mp_irq->mp_irqflag = m->irqflag; mp_irq->irqflag = m->irqflag;
mp_irq->mp_srcbus = m->srcbus; mp_irq->srcbus = m->srcbus;
mp_irq->mp_srcbusirq = m->srcbusirq; mp_irq->srcbusirq = m->srcbusirq;
mp_irq->mp_dstirq = m->dstirq; mp_irq->dstirq = m->dstirq;
} }
static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq, static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m) struct mpc_intsrc *m)
{ {
m->dstapic = mp_irq->mp_dstapic; m->dstapic = mp_irq->dstapic;
m->type = mp_irq->mp_type; m->type = mp_irq->type;
m->irqtype = mp_irq->mp_irqtype; m->irqtype = mp_irq->irqtype;
m->irqflag = mp_irq->mp_irqflag; m->irqflag = mp_irq->irqflag;
m->srcbus = mp_irq->mp_srcbus; m->srcbus = mp_irq->srcbus;
m->srcbusirq = mp_irq->mp_srcbusirq; m->srcbusirq = mp_irq->srcbusirq;
m->dstirq = mp_irq->mp_dstirq; m->dstirq = mp_irq->dstirq;
} }
static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq, static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
struct mpc_intsrc *m) struct mpc_intsrc *m)
{ {
if (mp_irq->mp_dstapic != m->dstapic) if (mp_irq->dstapic != m->dstapic)
return 1; return 1;
if (mp_irq->mp_type != m->type) if (mp_irq->type != m->type)
return 2; return 2;
if (mp_irq->mp_irqtype != m->irqtype) if (mp_irq->irqtype != m->irqtype)
return 3; return 3;
if (mp_irq->mp_irqflag != m->irqflag) if (mp_irq->irqflag != m->irqflag)
return 4; return 4;
if (mp_irq->mp_srcbus != m->srcbus) if (mp_irq->srcbus != m->srcbus)
return 5; return 5;
if (mp_irq->mp_srcbusirq != m->srcbusirq) if (mp_irq->srcbusirq != m->srcbusirq)
return 6; return 6;
if (mp_irq->mp_dstirq != m->dstirq) if (mp_irq->dstirq != m->dstirq)
return 7; return 7;
return 0; return 0;
@ -417,7 +417,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
intsrc.type = MP_INTSRC; intsrc.type = MP_INTSRC;
intsrc.irqflag = 0; /* conforming */ intsrc.irqflag = 0; /* conforming */
intsrc.srcbus = 0; intsrc.srcbus = 0;
intsrc.dstapic = mp_ioapics[0].mp_apicid; intsrc.dstapic = mp_ioapics[0].apicid;
intsrc.irqtype = mp_INT; intsrc.irqtype = mp_INT;
@ -570,14 +570,14 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
} }
} }
static struct intel_mp_floating *mpf_found; static struct mpf_intel *mpf_found;
/* /*
* Scan the memory blocks for an SMP configuration block. * Scan the memory blocks for an SMP configuration block.
*/ */
static void __init __get_smp_config(unsigned int early) static void __init __get_smp_config(unsigned int early)
{ {
struct intel_mp_floating *mpf = mpf_found; struct mpf_intel *mpf = mpf_found;
if (!mpf) if (!mpf)
return; return;
@ -598,9 +598,9 @@ static void __init __get_smp_config(unsigned int early)
} }
printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
mpf->mpf_specification); mpf->specification);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32) #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
if (mpf->mpf_feature2 & (1 << 7)) { if (mpf->feature2 & (1 << 7)) {
printk(KERN_INFO " IMCR and PIC compatibility mode.\n"); printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
pic_mode = 1; pic_mode = 1;
} else { } else {
@ -611,7 +611,7 @@ static void __init __get_smp_config(unsigned int early)
/* /*
* Now see if we need to read further. * Now see if we need to read further.
*/ */
if (mpf->mpf_feature1 != 0) { if (mpf->feature1 != 0) {
if (early) { if (early) {
/* /*
* local APIC has default address * local APIC has default address
@ -621,16 +621,16 @@ static void __init __get_smp_config(unsigned int early)
} }
printk(KERN_INFO "Default MP configuration #%d\n", printk(KERN_INFO "Default MP configuration #%d\n",
mpf->mpf_feature1); mpf->feature1);
construct_default_ISA_mptable(mpf->mpf_feature1); construct_default_ISA_mptable(mpf->feature1);
} else if (mpf->mpf_physptr) { } else if (mpf->physptr) {
/* /*
* Read the physical hardware table. Anything here will * Read the physical hardware table. Anything here will
* override the defaults. * override the defaults.
*/ */
if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) { if (!smp_read_mpc(phys_to_virt(mpf->physptr), early)) {
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 0; smp_found_config = 0;
#endif #endif
@ -688,19 +688,19 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
unsigned reserve) unsigned reserve)
{ {
unsigned int *bp = phys_to_virt(base); unsigned int *bp = phys_to_virt(base);
struct intel_mp_floating *mpf; struct mpf_intel *mpf;
apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
bp, length); bp, length);
BUILD_BUG_ON(sizeof(*mpf) != 16); BUILD_BUG_ON(sizeof(*mpf) != 16);
while (length > 0) { while (length > 0) {
mpf = (struct intel_mp_floating *)bp; mpf = (struct mpf_intel *)bp;
if ((*bp == SMP_MAGIC_IDENT) && if ((*bp == SMP_MAGIC_IDENT) &&
(mpf->mpf_length == 1) && (mpf->length == 1) &&
!mpf_checksum((unsigned char *)bp, 16) && !mpf_checksum((unsigned char *)bp, 16) &&
((mpf->mpf_specification == 1) ((mpf->specification == 1)
|| (mpf->mpf_specification == 4))) { || (mpf->specification == 4))) {
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
smp_found_config = 1; smp_found_config = 1;
#endif #endif
@ -713,7 +713,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
return 1; return 1;
reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE, reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
BOOTMEM_DEFAULT); BOOTMEM_DEFAULT);
if (mpf->mpf_physptr) { if (mpf->physptr) {
unsigned long size = PAGE_SIZE; unsigned long size = PAGE_SIZE;
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
/* /*
@ -722,14 +722,14 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
* the bottom is mapped now. * the bottom is mapped now.
* PC-9800's MPC table sits at the very end * PC-9800's MPC table sits at the very end
* of physical memory, so simply reserving * of physical memory, so simply reserving
* PAGE_SIZE from mpg->mpf_physptr yields BUG() * PAGE_SIZE from mpf->physptr yields BUG()
* in reserve_bootmem. * in reserve_bootmem.
*/ */
unsigned long end = max_low_pfn * PAGE_SIZE; unsigned long end = max_low_pfn * PAGE_SIZE;
if (mpf->mpf_physptr + size > end) if (mpf->physptr + size > end)
size = end - mpf->mpf_physptr; size = end - mpf->physptr;
#endif #endif
reserve_bootmem_generic(mpf->mpf_physptr, size, reserve_bootmem_generic(mpf->physptr, size,
BOOTMEM_DEFAULT); BOOTMEM_DEFAULT);
} }
@ -809,15 +809,15 @@ static int __init get_MP_intsrc_index(struct mpc_intsrc *m)
/* not legacy */ /* not legacy */
for (i = 0; i < mp_irq_entries; i++) { for (i = 0; i < mp_irq_entries; i++) {
if (mp_irqs[i].mp_irqtype != mp_INT) if (mp_irqs[i].irqtype != mp_INT)
continue; continue;
if (mp_irqs[i].mp_irqflag != 0x0f) if (mp_irqs[i].irqflag != 0x0f)
continue; continue;
if (mp_irqs[i].mp_srcbus != m->srcbus) if (mp_irqs[i].srcbus != m->srcbus)
continue; continue;
if (mp_irqs[i].mp_srcbusirq != m->srcbusirq) if (mp_irqs[i].srcbusirq != m->srcbusirq)
continue; continue;
if (irq_used[i]) { if (irq_used[i]) {
/* already claimed */ /* already claimed */
@ -922,10 +922,10 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
if (irq_used[i]) if (irq_used[i])
continue; continue;
if (mp_irqs[i].mp_irqtype != mp_INT) if (mp_irqs[i].irqtype != mp_INT)
continue; continue;
if (mp_irqs[i].mp_irqflag != 0x0f) if (mp_irqs[i].irqflag != 0x0f)
continue; continue;
if (nr_m_spare > 0) { if (nr_m_spare > 0) {
@ -1001,7 +1001,7 @@ static int __init update_mp_table(void)
{ {
char str[16]; char str[16];
char oem[10]; char oem[10];
struct intel_mp_floating *mpf; struct mpf_intel *mpf;
struct mpc_table *mpc, *mpc_new; struct mpc_table *mpc, *mpc_new;
if (!enable_update_mptable) if (!enable_update_mptable)
@ -1014,19 +1014,19 @@ static int __init update_mp_table(void)
/* /*
* Now see if we need to go further. * Now see if we need to go further.
*/ */
if (mpf->mpf_feature1 != 0) if (mpf->feature1 != 0)
return 0; return 0;
if (!mpf->mpf_physptr) if (!mpf->physptr)
return 0; return 0;
mpc = phys_to_virt(mpf->mpf_physptr); mpc = phys_to_virt(mpf->physptr);
if (!smp_check_mpc(mpc, oem, str)) if (!smp_check_mpc(mpc, oem, str))
return 0; return 0;
printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf)); printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr); printk(KERN_INFO "physptr: %x\n", mpf->physptr);
if (mpc_new_phys && mpc->length > mpc_new_length) { if (mpc_new_phys && mpc->length > mpc_new_length) {
mpc_new_phys = 0; mpc_new_phys = 0;
@ -1047,23 +1047,23 @@ static int __init update_mp_table(void)
} }
printk(KERN_INFO "use in-positon replacing\n"); printk(KERN_INFO "use in-positon replacing\n");
} else { } else {
mpf->mpf_physptr = mpc_new_phys; mpf->physptr = mpc_new_phys;
mpc_new = phys_to_virt(mpc_new_phys); mpc_new = phys_to_virt(mpc_new_phys);
memcpy(mpc_new, mpc, mpc->length); memcpy(mpc_new, mpc, mpc->length);
mpc = mpc_new; mpc = mpc_new;
/* check if we can modify that */ /* check if we can modify that */
if (mpc_new_phys - mpf->mpf_physptr) { if (mpc_new_phys - mpf->physptr) {
struct intel_mp_floating *mpf_new; struct mpf_intel *mpf_new;
/* steal 16 bytes from [0, 1k) */ /* steal 16 bytes from [0, 1k) */
printk(KERN_INFO "mpf new: %x\n", 0x400 - 16); printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
mpf_new = phys_to_virt(0x400 - 16); mpf_new = phys_to_virt(0x400 - 16);
memcpy(mpf_new, mpf, 16); memcpy(mpf_new, mpf, 16);
mpf = mpf_new; mpf = mpf_new;
mpf->mpf_physptr = mpc_new_phys; mpf->physptr = mpc_new_phys;
} }
mpf->mpf_checksum = 0; mpf->checksum = 0;
mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16); mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr); printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
} }
/* /*

View File

@ -35,10 +35,10 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/uaccess.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
static struct class *msr_class; static struct class *msr_class;

View File

@ -14,6 +14,7 @@
#include <asm/reboot.h> #include <asm/reboot.h>
#include <asm/pci_x86.h> #include <asm/pci_x86.h>
#include <asm/virtext.h> #include <asm/virtext.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_32 #ifdef CONFIG_X86_32
# include <linux/dmi.h> # include <linux/dmi.h>

View File

@ -89,7 +89,7 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/vsyscall.h> #include <asm/vsyscall.h>
#include <asm/smp.h> #include <asm/cpu.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/iommu.h> #include <asm/iommu.h>

View File

@ -13,6 +13,7 @@
#include <asm/mpspec.h> #include <asm/mpspec.h>
#include <asm/apicdef.h> #include <asm/apicdef.h>
#include <asm/highmem.h> #include <asm/highmem.h>
#include <asm/cpumask.h>
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors; unsigned int num_processors;

View File

@ -53,7 +53,6 @@
#include <asm/nmi.h> #include <asm/nmi.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/idle.h> #include <asm/idle.h>
#include <asm/smp.h>
#include <asm/trampoline.h> #include <asm/trampoline.h>
#include <asm/cpu.h> #include <asm/cpu.h>
#include <asm/numa.h> #include <asm/numa.h>
@ -1125,6 +1124,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
printk(KERN_ERR "... forcing use of dummy APIC emulation." printk(KERN_ERR "... forcing use of dummy APIC emulation."
"(tell your hw vendor)\n"); "(tell your hw vendor)\n");
smpboot_clear_io_apic(); smpboot_clear_io_apic();
disable_ioapic_setup();
return -1; return -1;
} }

View File

@ -20,7 +20,7 @@ DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
* Optimizations Manfred Spraul <manfred@colorfullife.com> * Optimizations Manfred Spraul <manfred@colorfullife.com>
*/ */
static cpumask_t flush_cpumask; static cpumask_var_t flush_cpumask;
static struct mm_struct *flush_mm; static struct mm_struct *flush_mm;
static unsigned long flush_va; static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock); static DEFINE_SPINLOCK(tlbstate_lock);
@ -92,7 +92,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = get_cpu(); cpu = get_cpu();
if (!cpu_isset(cpu, flush_cpumask)) if (!cpumask_test_cpu(cpu, flush_cpumask))
goto out; goto out;
/* /*
* This was a BUG() but until someone can quote me the * This was a BUG() but until someone can quote me the
@ -114,35 +114,22 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
} }
ack_APIC_irq(); ack_APIC_irq();
smp_mb__before_clear_bit(); smp_mb__before_clear_bit();
cpu_clear(cpu, flush_cpumask); cpumask_clear_cpu(cpu, flush_cpumask);
smp_mb__after_clear_bit(); smp_mb__after_clear_bit();
out: out:
put_cpu_no_resched(); put_cpu_no_resched();
inc_irq_stat(irq_tlb_count); inc_irq_stat(irq_tlb_count);
} }
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, void native_flush_tlb_others(const struct cpumask *cpumask,
unsigned long va) struct mm_struct *mm, unsigned long va)
{ {
cpumask_t cpumask = *cpumaskp;
/* /*
* A couple of (to be removed) sanity checks:
*
* - current CPU must not be in mask
* - mask must exist :) * - mask must exist :)
*/ */
BUG_ON(cpus_empty(cpumask)); BUG_ON(cpumask_empty(cpumask));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
BUG_ON(!mm); BUG_ON(!mm);
#ifdef CONFIG_HOTPLUG_CPU
/* If a CPU which we ran on has gone down, OK. */
cpus_and(cpumask, cpumask, cpu_online_map);
if (unlikely(cpus_empty(cpumask)))
return;
#endif
/* /*
* I'm not happy about this global shared spinlock in the * I'm not happy about this global shared spinlock in the
* MM hot path, but we'll see how contended it is. * MM hot path, but we'll see how contended it is.
@ -150,9 +137,17 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
*/ */
spin_lock(&tlbstate_lock); spin_lock(&tlbstate_lock);
cpumask_andnot(flush_cpumask, cpumask, cpumask_of(smp_processor_id()));
#ifdef CONFIG_HOTPLUG_CPU
/* If a CPU which we ran on has gone down, OK. */
cpumask_and(flush_cpumask, flush_cpumask, cpu_online_mask);
if (unlikely(cpumask_empty(flush_cpumask))) {
spin_unlock(&tlbstate_lock);
return;
}
#endif
flush_mm = mm; flush_mm = mm;
flush_va = va; flush_va = va;
cpus_or(flush_cpumask, cpumask, flush_cpumask);
/* /*
* Make the above memory operations globally visible before * Make the above memory operations globally visible before
@ -163,9 +158,9 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to * We have to send the IPI only to
* CPUs affected. * CPUs affected.
*/ */
send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR); send_IPI_mask(flush_cpumask, INVALIDATE_TLB_VECTOR);
while (!cpus_empty(flush_cpumask)) while (!cpumask_empty(flush_cpumask))
/* nothing. lockup detection does not belong here */ /* nothing. lockup detection does not belong here */
cpu_relax(); cpu_relax();
@ -177,25 +172,19 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
void flush_tlb_current_task(void) void flush_tlb_current_task(void)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
local_flush_tlb(); local_flush_tlb();
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_mm(struct mm_struct *mm)
{ {
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) { if (current->active_mm == mm) {
if (current->mm) if (current->mm)
@ -203,8 +192,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else else
leave_mm(smp_processor_id()); leave_mm(smp_processor_id());
} }
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
@ -212,11 +201,8 @@ void flush_tlb_mm(struct mm_struct *mm)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) { if (current->active_mm == mm) {
if (current->mm) if (current->mm)
@ -225,9 +211,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id()); leave_mm(smp_processor_id());
} }
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, va); flush_tlb_others(&mm->cpu_vm_mask, mm, va);
preempt_enable(); preempt_enable();
} }
EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_page);
@ -254,3 +239,9 @@ void reset_lazy_tlbstate(void)
per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm; per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
} }
static int init_flush_cpumask(void)
{
alloc_cpumask_var(&flush_cpumask, GFP_KERNEL);
return 0;
}
early_initcall(init_flush_cpumask);
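With flush_cpumask converted to cpumask_var_t, the variable is a plain array when CONFIG_CPUMASK_OFFSTACK is off, but a pointer that must be allocated, as the early_initcall above does, when it is on. A toy userspace model of the off-stack flavour; every name here is a stand-in, not the kernel API. (Note the kernel call site above ignores alloc_cpumask_var()'s return value, while the sketch checks it.)

#include <stdio.h>
#include <stdlib.h>

#define TOY_NR_CPUS 64

/* Off-stack flavour: the "var" type is a pointer, backed by an
 * allocation of the fixed-size mask. */
typedef unsigned long toy_mask[TOY_NR_CPUS / (8 * sizeof(unsigned long))];
typedef unsigned long *toy_mask_var;

static int toy_alloc_mask(toy_mask_var *m)	/* mirrors alloc_cpumask_var() */
{
	*m = calloc(1, sizeof(toy_mask));
	return *m != NULL;
}

int main(void)
{
	toy_mask_var flush;

	if (!toy_alloc_mask(&flush))
		return 1;
	flush[0] |= 1UL << 3;		/* mirrors cpumask_set_cpu(3, flush) */
	printf("%#lx\n", flush[0]);
	free(flush);
	return 0;
}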

View File

@ -43,10 +43,10 @@
union smp_flush_state { union smp_flush_state {
struct { struct {
cpumask_t flush_cpumask;
struct mm_struct *flush_mm; struct mm_struct *flush_mm;
unsigned long flush_va; unsigned long flush_va;
spinlock_t tlbstate_lock; spinlock_t tlbstate_lock;
DECLARE_BITMAP(flush_cpumask, NR_CPUS);
}; };
char pad[SMP_CACHE_BYTES]; char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned; } ____cacheline_aligned;
@ -131,7 +131,7 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START; sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender); f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask)) if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
goto out; goto out;
/* /*
* This was a BUG() but until someone can quote me the * This was a BUG() but until someone can quote me the
@ -153,19 +153,15 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
} }
out: out:
ack_APIC_irq(); ack_APIC_irq();
cpu_clear(cpu, f->flush_cpumask); cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
inc_irq_stat(irq_tlb_count); inc_irq_stat(irq_tlb_count);
} }
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm, static void flush_tlb_others_ipi(const struct cpumask *cpumask,
unsigned long va) struct mm_struct *mm, unsigned long va)
{ {
int sender; int sender;
union smp_flush_state *f; union smp_flush_state *f;
cpumask_t cpumask = *cpumaskp;
if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
return;
/* Caller has disabled preemption */ /* Caller has disabled preemption */
sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS; sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
@ -180,7 +176,8 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
f->flush_mm = mm; f->flush_mm = mm;
f->flush_va = va; f->flush_va = va;
cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask); cpumask_andnot(to_cpumask(f->flush_cpumask),
cpumask, cpumask_of(smp_processor_id()));
/* /*
* Make the above memory operations globally visible before * Make the above memory operations globally visible before
@ -191,9 +188,10 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
* We have to send the IPI only to * We have to send the IPI only to
* CPUs affected. * CPUs affected.
*/ */
send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender); send_IPI_mask(to_cpumask(f->flush_cpumask),
INVALIDATE_TLB_VECTOR_START + sender);
while (!cpus_empty(f->flush_cpumask)) while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
cpu_relax(); cpu_relax();
f->flush_mm = NULL; f->flush_mm = NULL;
@ -201,6 +199,25 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
spin_unlock(&f->tlbstate_lock); spin_unlock(&f->tlbstate_lock);
} }
void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm, unsigned long va)
{
if (is_uv_system()) {
/* FIXME: could be a percpu_alloc'd thing */
static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
struct cpumask *after_uv_flush = &get_cpu_var(flush_tlb_mask);
cpumask_andnot(after_uv_flush, cpumask,
cpumask_of(smp_processor_id()));
if (!uv_flush_tlb_others(after_uv_flush, mm, va))
flush_tlb_others_ipi(after_uv_flush, mm, va);
put_cpu_var(flush_tlb_mask);
return;
}
flush_tlb_others_ipi(cpumask, mm, va);
}
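Both the 32-bit and 64-bit paths now strip the sending CPU with cpumask_andnot() rather than copying the whole mask and clearing a bit. The operation itself, sketched with a single word standing in for struct cpumask (values invented):

#include <stdio.h>

/* Illustrative: cpumask_andnot(dst, src, self) is dst = src & ~self. */
int main(void)
{
	unsigned long requested = 0x0f;	/* CPUs 0-3 want the flush */
	unsigned long self = 1UL << 2;	/* we are CPU 2            */
	unsigned long targets = requested & ~self;

	printf("IPI targets: %#lx\n", targets);	/* 0xb: CPUs 0, 1, 3 */
	return 0;
}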
static int __cpuinit init_smp_flush(void) static int __cpuinit init_smp_flush(void)
{ {
int i; int i;
@ -215,25 +232,18 @@ core_initcall(init_smp_flush);
void flush_tlb_current_task(void) void flush_tlb_current_task(void)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
local_flush_tlb(); local_flush_tlb();
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_mm(struct mm_struct *mm)
{ {
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) { if (current->active_mm == mm) {
if (current->mm) if (current->mm)
@ -241,8 +251,8 @@ void flush_tlb_mm(struct mm_struct *mm)
else else
leave_mm(smp_processor_id()); leave_mm(smp_processor_id());
} }
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL); flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
preempt_enable(); preempt_enable();
} }
@ -250,11 +260,8 @@ void flush_tlb_mm(struct mm_struct *mm)
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va) void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
cpumask_t cpu_mask;
preempt_disable(); preempt_disable();
cpu_mask = mm->cpu_vm_mask;
cpu_clear(smp_processor_id(), cpu_mask);
if (current->active_mm == mm) { if (current->active_mm == mm) {
if (current->mm) if (current->mm)
@ -263,8 +270,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
leave_mm(smp_processor_id()); leave_mm(smp_processor_id());
} }
if (!cpus_empty(cpu_mask)) if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(cpu_mask, mm, va); flush_tlb_others(&mm->cpu_vm_mask, mm, va);
preempt_enable(); preempt_enable();
} }

View File

@ -212,11 +212,11 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
* The cpumaskp mask contains the cpus the broadcast was sent to. * The cpumaskp mask contains the cpus the broadcast was sent to.
* *
* Returns 1 if all remote flushing was done. The mask is zeroed. * Returns 1 if all remote flushing was done. The mask is zeroed.
* Returns 0 if some remote flushing remains to be done. The mask is left * Returns 0 if some remote flushing remains to be done. The mask will have
* unchanged. * some bits still set.
*/ */
int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc, int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
cpumask_t *cpumaskp) struct cpumask *cpumaskp)
{ {
int completion_status = 0; int completion_status = 0;
int right_shift; int right_shift;
@ -263,13 +263,13 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
* Success, so clear the remote cpus from the mask so we don't * Success, so clear the remote cpus from the mask so we don't
* use the IPI method of shootdown on them. * use the IPI method of shootdown on them.
*/ */
for_each_cpu_mask(bit, *cpumaskp) { for_each_cpu(bit, cpumaskp) {
blade = uv_cpu_to_blade_id(bit); blade = uv_cpu_to_blade_id(bit);
if (blade == this_blade) if (blade == this_blade)
continue; continue;
cpu_clear(bit, *cpumaskp); cpumask_clear_cpu(bit, cpumaskp);
} }
if (!cpus_empty(*cpumaskp)) if (!cpumask_empty(cpumaskp))
return 0; return 0;
return 1; return 1;
} }
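Per the reworded comment, a partial uv_flush_send_and_wait() now returns the mask with the remotely flushed CPUs cleared instead of untouched. A toy version of that clearing loop (one unsigned long models the cpumask; the blade layout is invented):

#include <stdio.h>

int main(void)
{
	unsigned long mask = 0x2d;	/* CPUs 0, 2, 3, 5 requested      */
	int local_cpu = 3;		/* pretend CPU 3 shares our blade */
	int bit;

	for (bit = 0; bit < 8; bit++) {
		if (!(mask & (1UL << bit)))
			continue;
		if (bit == local_cpu)
			continue;	/* local blade: leave for IPI  */
		mask &= ~(1UL << bit);	/* remote flush done: drop bit */
	}
	printf("left for IPI: %#lx\n", mask);	/* 0x8: CPU 3 only */
	return 0;
}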
@ -296,7 +296,7 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
* Returns 1 if all remote flushing was done. * Returns 1 if all remote flushing was done.
* Returns 0 if some remote flushing remains to be done. * Returns 0 if some remote flushing remains to be done.
*/ */
int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm, int uv_flush_tlb_others(struct cpumask *cpumaskp, struct mm_struct *mm,
unsigned long va) unsigned long va)
{ {
int i; int i;
@ -315,7 +315,7 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
i = 0; i = 0;
for_each_cpu_mask(bit, *cpumaskp) { for_each_cpu(bit, cpumaskp) {
blade = uv_cpu_to_blade_id(bit); blade = uv_cpu_to_blade_id(bit);
BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1)); BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
if (blade == this_blade) { if (blade == this_blade) {

View File

@ -9,6 +9,7 @@
#include <asm/e820.h> #include <asm/e820.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/cpu.h>
void __init pre_intr_init_hook(void) void __init pre_intr_init_hook(void)
{ {

View File

@ -49,7 +49,6 @@
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/smp.h>
unsigned int __VMALLOC_RESERVE = 128 << 20; unsigned int __VMALLOC_RESERVE = 128 << 20;

View File

@ -333,11 +333,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
req_type & _PAGE_CACHE_MASK); req_type & _PAGE_CACHE_MASK);
} }
is_range_ram = pagerange_is_ram(start, end); /*
if (is_range_ram == 1) * For legacy reasons, some parts of the physical address range in the
return reserve_ram_pages_type(start, end, req_type, new_type); * legacy 1MB region are treated as non-RAM (even when listed as RAM in
else if (is_range_ram < 0) * the e820 tables). So we will always track the memory attributes of this
return -EINVAL; * legacy 1MB region using the linear memtype_list.
*/
if (end >= ISA_END_ADDRESS) {
is_range_ram = pagerange_is_ram(start, end);
if (is_range_ram == 1)
return reserve_ram_pages_type(start, end, req_type,
new_type);
else if (is_range_ram < 0)
return -EINVAL;
}
new = kmalloc(sizeof(struct memtype), GFP_KERNEL); new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new) if (!new)
@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
if (is_ISA_range(start, end - 1)) if (is_ISA_range(start, end - 1))
return 0; return 0;
is_range_ram = pagerange_is_ram(start, end); /*
if (is_range_ram == 1) * For legacy reasons, some parts of the physical address range in the
return free_ram_pages_type(start, end); * legacy 1MB region are treated as non-RAM (even when listed as RAM in
else if (is_range_ram < 0) * the e820 tables). So we will always track the memory attributes of this
return -EINVAL; * legacy 1MB region using the linear memtype_list.
*/
if (end >= ISA_END_ADDRESS) {
is_range_ram = pagerange_is_ram(start, end);
if (is_range_ram == 1)
return free_ram_pages_type(start, end);
else if (is_range_ram < 0)
return -EINVAL;
}
spin_lock(&memtype_lock); spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) { list_for_each_entry(entry, &memtype_list, nd) {
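To summarize the PAT change in this excerpt: RAM ranges ending at or above ISA_END_ADDRESS use struct-page tracking, while anything in the legacy sub-1MB region always stays on the linear memtype_list. A self-contained sketch of that dispatch; the helper name and 1/0 return convention are invented, and the -EINVAL error path is omitted:

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000	/* 1MB, as in the kernel */

/* 1 = struct-page tracking, 0 = legacy linear memtype_list */
static int uses_page_tracking(unsigned long long start,
			      unsigned long long end, int is_range_ram)
{
	(void)start;	/* the kernel check keys off the end address */
	if (end >= ISA_END_ADDRESS && is_range_ram == 1)
		return 1;
	return 0;
}

int main(void)
{
	/* RAM entirely below 1MB stays on the linear list */
	printf("%d\n", uses_page_tracking(0xa0000, 0xc0000, 1));   /* 0 */
	/* RAM above 1MB moves to per-page tracking */
	printf("%d\n", uses_page_tracking(0x200000, 0x300000, 1)); /* 1 */
	return 0;
}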

View File

@ -634,35 +634,27 @@ static void xen_flush_tlb_single(unsigned long addr)
preempt_enable(); preempt_enable();
} }
static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm, static void xen_flush_tlb_others(const struct cpumask *cpus,
unsigned long va) struct mm_struct *mm, unsigned long va)
{ {
struct { struct {
struct mmuext_op op; struct mmuext_op op;
cpumask_t mask; DECLARE_BITMAP(mask, NR_CPUS);
} *args; } *args;
cpumask_t cpumask = *cpus;
struct multicall_space mcs; struct multicall_space mcs;
/* BUG_ON(cpumask_empty(cpus));
* A couple of (to be removed) sanity checks:
*
* - current CPU must not be in mask
* - mask must exist :)
*/
BUG_ON(cpus_empty(cpumask));
BUG_ON(cpu_isset(smp_processor_id(), cpumask));
BUG_ON(!mm); BUG_ON(!mm);
/* If a CPU which we ran on has gone down, OK. */
cpus_and(cpumask, cpumask, cpu_online_map);
if (cpus_empty(cpumask))
return;
mcs = xen_mc_entry(sizeof(*args)); mcs = xen_mc_entry(sizeof(*args));
args = mcs.args; args = mcs.args;
args->mask = cpumask; args->op.arg2.vcpumask = to_cpumask(args->mask);
args->op.arg2.vcpumask = &args->mask;
/* Remove us, and any offline CPUS. */
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
if (unlikely(cpumask_empty(to_cpumask(args->mask))))
goto issue;
if (va == TLB_FLUSH_ALL) { if (va == TLB_FLUSH_ALL) {
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
@ -673,6 +665,7 @@ static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
issue:
xen_mc_issue(PARAVIRT_LAZY_MMU); xen_mc_issue(PARAVIRT_LAZY_MMU);
} }
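The Xen path now embeds a fixed-size bitmap inside the multicall argument block via DECLARE_BITMAP rather than copying a cpumask_t by value. A compilable toy of that struct layout; the macro, NR_CPUS value and field names below are simplified stand-ins for the kernel's:

#include <stdio.h>

#define NR_CPUS 64
#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define DECLARE_BITMAP(name, bits) \
	unsigned long name[((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG]

struct args {
	int cmd;			/* stands in for struct mmuext_op  */
	DECLARE_BITMAP(mask, NR_CPUS);	/* lives inside the argument block */
};

int main(void)
{
	struct args a = { .cmd = 1 };	/* mask starts zeroed */

	a.mask[0] |= 1UL << 5;		/* mirrors cpumask_set_cpu(5, ...) */
	printf("mask word 0: %#lx\n", a.mask[0]);	/* 0x20 */
	return 0;
}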

View File

@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
/* /*
* Print cpu online, possible, present, and system maps * Print cpu online, possible, present, and system maps
*/ */
static ssize_t print_cpus_map(char *buf, cpumask_t *map) static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
{ {
int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);

View File

@@ -31,7 +31,10 @@
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name) \
+#define define_one_ro_named(_name, _func) \
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name) \
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name) \
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \
 	return sprintf(buf, "%d\n", topology_##name(cpu)); \
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
 	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
 	int n = 0;
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev, \
 			   struct sysdev_attribute *attr, char *buf) \
 { \
 	unsigned int cpu = dev->id; \
-	return show_cpumap(0, &(topology_##name(cpu)), buf); \
+	return show_cpumap(0, topology_##name(cpu), buf); \
 }
 
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
 				  char *buf) \
 { \
 	unsigned int cpu = dev->id; \
-	return show_cpumap(1, &(topology_##name(cpu)), buf); \
+	return show_cpumap(1, topology_##name(cpu), buf); \
 }
 
 #else
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
 static ssize_t show_##name(struct sys_device *dev, \
 			   struct sysdev_attribute *attr, char *buf) \
 { \
-	unsigned int cpu = dev->id; \
-	cpumask_t mask = topology_##name(cpu); \
-	return show_cpumap(0, &mask, buf); \
+	return show_cpumap(0, topology_##name(dev->id), buf); \
 }
 
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
 				  struct sysdev_attribute *attr, \
 				  char *buf) \
 { \
-	unsigned int cpu = dev->id; \
-	cpumask_t mask = topology_##name(cpu); \
-	return show_cpumap(1, &mask, buf); \
+	return show_cpumap(1, topology_##name(dev->id), buf); \
 }
 #endif
 
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
 	&attr_physical_package_id.attr,
View File

@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-	cpumask_t old_mask;
+	cpumask_var_t old_mask;
 	int ret = 0;
 
 	if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	}
 
 	/* SMI requires CPU 0 */
-	old_mask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+	if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+		return -ENOMEM;
+	cpumask_copy(old_mask, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current, cpumask_of(0));
+
 	if (smp_processor_id() != 0) {
 		dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
 			__func__);
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 	);
 
 out:
-	set_cpus_allowed_ptr(current, &old_mask);
+	set_cpus_allowed_ptr(current, old_mask);
+	free_cpumask_var(old_mask);
 	return ret;
 }
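
The dcdbas change above is the standard save/pin/restore dance, now with an allocated mask instead of a stack copy. The same idea can be sketched from userspace with sched_getaffinity()/sched_setaffinity(); this assumes a Linux host with CPU 0 online and trims error handling to the minimum:

/* Userspace analogue of dcdbas_smi_request(): stash the current CPU
 * affinity, pin to CPU 0, do the CPU-0-only work, then restore. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t old_mask, cpu0;

	if (sched_getaffinity(0, sizeof(old_mask), &old_mask))
		return 1;

	CPU_ZERO(&cpu0);
	CPU_SET(0, &cpu0);
	if (sched_setaffinity(0, sizeof(cpu0), &cpu0))
		return 1;

	printf("now pinned to CPU %d\n", sched_getcpu());
	/* ... CPU-0-only work would happen here ... */

	return sched_setaffinity(0, sizeof(old_mask), &old_mask);
}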

View File

@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
 	/* this thread was marked active by xpc_hb_init() */
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+	set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
 	/* set our heartbeating to other partitions into motion */
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);

View File

@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-	cpumask_t core_mask;
+	cpumask_var_t core_mask;
 	int count;
 	int cpu;
 
-	cpus_clear(core_mask);
+	if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+		printk(KERN_WARNING
+		       "efx.c: allocation failure, irq balancing hobbled\n");
+		return 1;
+	}
+
+	cpumask_clear(core_mask);
 	count = 0;
 	for_each_online_cpu(cpu) {
-		if (!cpu_isset(cpu, core_mask)) {
+		if (!cpumask_test_cpu(cpu, core_mask)) {
 			++count;
-			cpus_or(core_mask, core_mask,
-				topology_core_siblings(cpu));
+			cpumask_or(core_mask, core_mask,
+				   topology_core_cpumask(cpu));
 		}
 	}
 
+	free_cpumask_var(core_mask);
 	return count;
 }
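
efx_wanted_rx_queues() counts physical cores by visiting each online CPU once and OR-ing that CPU's core-sibling mask into a "seen" set, so hyperthread siblings are never counted twice. A self-contained model of the counting loop, with a made-up sibling table describing two hyperthreaded cores:

/* Model of the core-counting loop; the sibling table is invented for
 * illustration (CPUs {0,2} share a core, as do CPUs {1,3}). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t core_sibling[4] = { 0x5, 0xa, 0x5, 0xa };
	uint64_t seen = 0;
	int count = 0;

	for (int cpu = 0; cpu < 4; cpu++) {
		if (!(seen & (1ULL << cpu))) {
			++count;
			seen |= core_sibling[cpu];
		}
	}
	printf("distinct cores: %d\n", count);	/* prints 2 */
	return 0;
}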

View File

@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
 	int i;
 
-	cpu_set(cpu, marked_cpus);
+	cpumask_set_cpu(cpu, marked_cpus);
 
 	for_each_online_cpu(i) {
-		if (!cpu_isset(i, marked_cpus))
+		if (!cpumask_test_cpu(i, marked_cpus))
 			return;
 	}
 
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
 	 */
 	process_task_mortuary();
 
-	cpus_clear(marked_cpus);
+	cpumask_clear(marked_cpus);
 }
 
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
 	mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+	if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+		return -ENOMEM;
+
+	cpumask_clear(marked_cpus);
+	return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+	free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping

View File

@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */

View File

@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
 	int err;
 
+	err = buffer_sync_init();
+	if (err)
+		return err;
+
 	err = oprofile_arch_init(&oprofile_ops);
 
 	if (err < 0 || timer) {
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
 	}
 
 	err = oprofilefs_register();
-	if (err)
+	if (err) {
 		oprofile_arch_exit();
+		buffer_sync_cleanup();
+	}
 
 	return err;
 }
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
 	oprofilefs_unregister();
 	oprofile_arch_exit();
+	buffer_sync_cleanup();
 }

View File

@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"

View File

@@ -26,6 +26,7 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
@@ -75,7 +76,14 @@ enum {
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
 	[0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
+struct cpu_evtchn_s {
+	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+	return cpu_evtchn_mask_p[cpu].bits;
+}
 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
 
 /* Reference counts for bindings to IRQs. */
@@ -115,7 +123,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
 					   unsigned int idx)
 {
 	return (sh->evtchn_pending[idx] &
-		cpu_evtchn_mask[cpu][idx] &
+		cpu_evtchn_mask(cpu)[idx] &
 		~sh->evtchn_mask[idx]);
 }
 
@@ -125,11 +133,11 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-	__clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-	__set_bit(chn, cpu_evtchn_mask[cpu]);
+	__clear_bit(chn, cpu_evtchn_mask(cpu_evtchn[chn]));
+	__set_bit(chn, cpu_evtchn_mask(cpu));
 
 	cpu_evtchn[chn] = cpu;
 }
 
@@ -142,12 +150,12 @@ static void init_evtchn_cpu_bindings(void)
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		desc->affinity = cpumask_of_cpu(0);
+		cpumask_copy(desc->affinity, cpumask_of(0));
 	}
 #endif
 
 	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-	memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
+	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
 }
 
@@ -822,6 +830,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
 	int i;
+	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+	cpu_evtchn_mask_p = alloc_bootmem(size);
+	BUG_ON(cpu_evtchn_mask_p == NULL);
 
 	init_evtchn_cpu_bindings();
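
The cpu_evtchn_mask() accessor above replaces a static [NR_CPUS] matrix with a boot-time allocation sized by the number of CPUs actually present. A compilable userspace model of the same layout, with toy sizes and calloc() standing in for alloc_bootmem():

#include <stdio.h>
#include <stdlib.h>

#define NR_EVENT_CHANNELS 128
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS / BITS_PER_LONG];
};

static struct cpu_evtchn_s *cpu_evtchn_mask_p;

static unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;	/* one row per CPU */
}

int main(void)
{
	int nr_cpu_ids = 4;	/* stand-in for the boot-time CPU count */

	cpu_evtchn_mask_p = calloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s));
	if (!cpu_evtchn_mask_p)
		return 1;

	cpu_evtchn_mask(2)[0] |= 1UL;	/* bind event channel 0 to CPU 2 */
	printf("cpu2 word0: %#lx\n", cpu_evtchn_mask(2)[0]);
	free(cpu_evtchn_mask_p);
	return 0;
}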

View File

@@ -100,7 +100,7 @@ static void do_suspend(void)
 	/* XXX use normal device tree? */
 	xenbus_suspend();
 
-	err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+	err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
 	if (err) {
 		printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
 		goto out;

View File

@@ -9,7 +9,7 @@
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
 	int num = 0;

View File

@@ -9,7 +9,7 @@
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
 	int num = BITS_PER_LONG - 1;

View File

@@ -9,7 +9,7 @@
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
 	int r = 32;

View File

@@ -15,7 +15,7 @@
  * at position 64.
  */
 #if BITS_PER_LONG == 32
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
 	__u32 h = x >> 32;
 	if (h)
@@ -23,7 +23,7 @@ static inline int fls64(__u64 x)
 	return fls(x);
 }
 #elif BITS_PER_LONG == 64
-static inline int fls64(__u64 x)
+static __always_inline int fls64(__u64 x)
 {
 	if (x == 0)
 		return 0;
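
The __always_inline annotations force GCC to inline these helpers even when its size heuristics would not; the semantics are unchanged. The documented contract (fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32) can be checked with a portable bit-scan loop; this model is only for verifying the contract, not a replacement for the arch-optimized versions:

#include <stdio.h>

static int fls_model(unsigned int x)
{
	int r = 0;

	while (x) {		/* count positions up to the highest set bit */
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	printf("%d %d %d\n", fls_model(0), fls_model(1),
	       fls_model(0x80000000u));	/* prints: 0 1 32 */
	return 0;
}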

View File

@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);

View File

@@ -182,11 +182,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+								bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
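
These helpers lean on the dual nature of cpumask_var_t: depending on the config it is either a real pointer or a one-element array, and the same desc->affinity syntax works for both. A simplified sketch of the two underlying definitions (paraphrased from <linux/cpumask.h>, not a verbatim copy):

#ifdef CONFIG_CPUMASK_OFFSTACK
/* a real pointer: alloc_cpumask_var() must allocate it and
 * free_cpumask_var() must free it */
typedef struct cpumask *cpumask_var_t;
#else
/* a one-element array: the storage is inline, alloc/free degenerate to
 * no-ops, and the array name decays to the same pointer type */
typedef struct cpumask cpumask_var_t[1];
#endif

In the array configuration the allocation calls in init_alloc_desc_masks() simply succeed without doing anything, which is why the kernel-doc says "successful (or not required)".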

View File

@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)			\
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;

View File

@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)		cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)		cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)		cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */

View File

@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }

View File

@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -57,6 +58,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq	    = -1,
 	.status	    = IRQ_DISABLED,
@@ -64,9 +66,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth	    = 1,
 	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -101,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -109,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -119,14 +122,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth	    = 1,
 		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity   = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -134,18 +133,30 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
 	return arch_early_irq_init();
@@ -153,7 +164,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -162,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-			irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -207,9 +220,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
@@ -219,12 +229,15 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
-	for (i = 0; i < count; i++)
+	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
+		init_alloc_desc_masks(&desc[i], 0, true);
+	}
 
 	return arch_early_irq_init();
 }
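
With CONFIG_SPARSE_IRQ the descriptor table becomes a boot-allocated pointer array indexed up to nr_irqs, with unpopulated slots left NULL until first use. A minimal userspace model of the lookup, with calloc() standing in for alloc_bootmem() and an invented table size:

#include <stdio.h>
#include <stdlib.h>

struct irq_desc { int irq; };

static struct irq_desc **irq_desc_ptrs;
static int nr_irqs = 16;

static struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < (unsigned int)nr_irqs)
		return irq_desc_ptrs[irq];
	return NULL;	/* out of range, or table not set up yet */
}

int main(void)
{
	irq_desc_ptrs = calloc(nr_irqs, sizeof(*irq_desc_ptrs));
	if (!irq_desc_ptrs)
		return 1;

	/* never populated, so the lookup reports "no descriptor" */
	printf("desc for irq 5: %p\n", (void *)irq_to_desc(5));
	free(irq_desc_ptrs);
	return 0;
}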

View File

@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);

View File

@@ -98,14 +98,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(&desc->affinity, cpumask);
+		cpumask_copy(desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(&desc->pending_mask, cpumask);
+		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(&desc->affinity, cpumask);
+	cpumask_copy(desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -127,16 +127,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		if (cpumask_any_and(desc->affinity, cpu_online_mask)
 		    < nr_cpu_ids)
 			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-	desc->chip->set_affinity(irq, &desc->affinity);
+	desc->chip->set_affinity(irq, desc->affinity);
 
 	return 0;
 }

View File

@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)

View File

@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
 	old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
 		 struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+				"for migration.\n", irq);
+		return false;
+	}
 	spin_lock_init(&desc->lock);
 	desc->cpu = cpu;
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	init_copy_desc_masks(old_desc, desc);
 	arch_init_copy_chip_data(old_desc, desc, cpu);
+	return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		printk(KERN_ERR "irq %d: can not get new irq_desc "
+				"for migration.\n", irq);
 		/* still use old one */
 		desc = old_desc;
 		goto out_unlock;
 	}
-	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
+		/* still use old one */
+		kfree(desc);
+		desc = old_desc;
+		goto out_unlock;
+	}
 
 	irq_desc_ptrs[irq] = desc;

View File

@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	const struct cpumask *mask = &desc->affinity;
+	const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
-		mask = &desc->pending_mask;
+		mask = desc->pending_mask;
 #endif
 	seq_cpumask(m, mask);
 	seq_putc(m, '\n');

View File

@@ -960,16 +960,17 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
 	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
-
-			cpumask_and(&domain_mask, sched_domain_span(sd),
-				    lowest_mask);
-
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
+
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
+
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
+
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
/* /*

View File

@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
 	return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+	return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
 	return 0;

View File

@@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void)
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
		goto out;
 
 	/*
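
The rule encoded above: a task that can only ever run on one CPU may read the raw processor id from preemptible context, because migration can never change the answer. A userspace analogue of the same single-CPU test, using the affinity mask's population count:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t allowed;

	if (sched_getaffinity(0, sizeof(allowed), &allowed))
		return 1;

	if (CPU_COUNT(&allowed) == 1)
		printf("bound to a single CPU: raw CPU id is stable\n");
	else
		printf("migratable: CPU id may change under us\n");
	return 0;
}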