commit 2620bf06f1
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
 "The usual collection of random fixes. Also some further fixes to the
  last set of security fixes, and some more from Will (which you may
  already have in a slightly different form)"

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7807/1: kexec: validate CPU hotplug support
  ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
  ARM: 7811/1: locks: use early clobber in arch_spin_trylock
  ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()
  ARM: 7809/1: perf: fix event validation for software group leaders
  ARM: Fix FIQ code on VIVT CPUs
  ARM: Fix !kuser helpers case
  ARM: Fix the world famous typo with is_gate_vma()
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 "	subs	%1, %0, %0, ror #16\n"
 "	addeq	%0, %0, %4\n"
 "	strexeq	%2, %0, [%3]"
-	: "=&r" (slock), "=&r" (contended), "=r" (res)
+	: "=&r" (slock), "=&r" (contended), "=&r" (res)
 	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 	: "cc");
 	} while (res);
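The one-character constraint change above is the whole of ARM: 7811/1: res is written by the strexeq before the input operands have been consumed for the last time, so without the early-clobber marker "&" the compiler may allocate res to the same register as an input such as &lock->slock, and the retry loop would then reuse a corrupted register. A minimal sketch of the constraint semantics, assuming an ARM32 GCC toolchain; the function and names are illustrative, not kernel code:

/*
 * "out" is written by the first ldr before "addr" has been read for
 * the last time, so "out" must be early-clobbered: "=&r" forbids the
 * compiler from giving "out" and "addr" the same register.
 */
static inline unsigned long load_load(unsigned long *addr)
{
	unsigned long out;

	__asm__ __volatile__(
	"	ldr	%0, [%1]\n"	/* writes %0 ...               */
	"	ldr	%0, [%1]\n"	/* ... %1 must still be intact */
	: "=&r" (out)			/* early clobber: no aliasing  */
	: "r" (addr)
	: "memory");
	return out;
}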
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
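This hunk carries the reasoning behind ARM: 7812/1: strex can fail even when the lock was observed free (for instance if the exclusive monitor is cleared between the ldrex and the strex), and the old code reported that spurious failure as "lock contended". The new res output separates "strex failed, retry" from "lock genuinely held". A standalone analogue using C11 weak compare-exchange, which has the same may-fail-spuriously property; the names and the 0x80000000 writer bit mirror the kernel, the rest is illustrative:

#include <stdatomic.h>
#include <stdbool.h>

static bool write_trylock(atomic_uint *lock)
{
	unsigned int expected;

	do {
		expected = 0;	/* only a free lock can be claimed */
		if (atomic_compare_exchange_weak(lock, &expected, 0x80000000u))
			return true;	/* observed free, now owned */
	} while (expected == 0);	/* weak CAS failed spuriously: retry */

	return false;	/* someone really holds it */
}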
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
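The read side follows the same retry pattern with one extra subtlety, preserved from the original code: readers increment the word and the top bit doubles as the writer flag, so a sign-bit result means the lock is already write-held. A companion sketch in the same C11 style, illustrative rather than kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static bool read_trylock(atomic_uint *lock)
{
	unsigned int old = atomic_load(lock);

	do {
		if (old & 0x80000000u)	/* sign bit set: held for write */
			return false;
	} while (!atomic_compare_exchange_weak(lock, &old, old + 1));

	return true;	/* reader count bumped */
}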
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range(base + offset, offset + length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
|
@ -15,6 +15,7 @@
|
|||||||
#include <asm/mmu_context.h>
|
#include <asm/mmu_context.h>
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
#include <asm/mach-types.h>
|
#include <asm/mach-types.h>
|
||||||
|
#include <asm/smp_plat.h>
|
||||||
#include <asm/system_misc.h>
|
#include <asm/system_misc.h>
|
||||||
|
|
||||||
extern const unsigned char relocate_new_kernel[];
|
extern const unsigned char relocate_new_kernel[];
|
||||||
@@ -38,6 +39,14 @@ int machine_kexec_prepare(struct kimage *image)
 	__be32 header;
 	int i, err;
 
+	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
 	/*
 	 * No segment at default ATAGs address. try to locate
 	 * a dtb using magic.
@@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
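Together, the two kexec hunks form a fail-early/assert-late pair: machine_kexec_prepare() refuses the load when secondary CPUs exist but can never be hot-unplugged, which in turn lets machine_kexec() treat a still-online secondary as a kernel bug (BUG_ON) rather than a recoverable runtime error. A standalone toy model of that two-phase contract; every name and value here is illustrative, not the kernel's call chain:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int possible_cpus = 4;	/* CPUs the hardware could run  */
static int online_cpus = 4;	/* CPUs currently running       */
static bool can_hotplug = true;	/* platform implements cpu_kill */

/* prepare time: fail early if the invariant can never be established */
static int prepare(void)
{
	if (possible_cpus > 1 && !can_hotplug)
		return -1;	/* the kernel returns -EINVAL here */
	return 0;
}

/* execute time: the invariant must hold now, so violation is a bug */
static void execute(void)
{
	online_cpus = 1;		/* stands in for machine_shutdown() */
	assert(online_cpus == 1);	/* the kernel uses BUG_ON() */
	puts("reboot into new kernel");
}

int main(void)
{
	if (prepare() == 0)
		execute();
	return 0;
}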
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 	int mapping;
 
 	if (config >= PERF_COUNT_HW_MAX)
-		return -ENOENT;
+		return -EINVAL;
 
 	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
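The errno change above encodes a useful distinction: -EINVAL for a config index that is out of range for the event map altogether (config is a u64 that ultimately comes from userspace), versus -ENOENT for an in-range event that this particular PMU simply does not implement. A standalone sketch of the guard with a fake map; PERF_COUNT_HW_MAX and HW_OP_UNSUPPORTED mirror the kernel's names, but the values are made up:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_HW_MAX	4	/* toy value */
#define HW_OP_UNSUPPORTED	0xFFFF

static const unsigned int event_map[PERF_COUNT_HW_MAX] = {
	0x11, HW_OP_UNSUPPORTED, 0x12, 0x13,
};

static int map_hw_event(uint64_t config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;		/* out of range: never indexable */

	mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

int main(void)
{
	printf("%d %d %d\n",
	       map_hw_event(0),			/* 0x11    */
	       map_hw_event(1),			/* -ENOENT */
	       map_hw_event(1ULL << 40));	/* -EINVAL */
	return 0;
}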
@@ -258,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
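The "world famous typo": with a single "=", the macro body ((vma) = &gate_vma) assigns rather than compares, so it both corrupts the caller's pointer and evaluates to &gate_vma, which is non-NULL and therefore always true. A short demonstration that compiles and runs anywhere; the struct here is a stand-in, not the kernel's vm_area_struct:

#include <stdio.h>

struct vm_area_struct { int dummy; };
static struct vm_area_struct gate_vma, other_vma;

#define is_gate_vma_buggy(vma)	((vma) = &gate_vma)	/* assignment! */
#define is_gate_vma_fixed(vma)	((vma) == &gate_vma)	/* comparison  */

int main(void)
{
	struct vm_area_struct *vma = &other_vma;

	printf("fixed: %d\n", is_gate_vma_fixed(vma));		/* 0 */
	printf("buggy: %d\n", is_gate_vma_buggy(vma) != NULL);	/* 1, and vma now points at gate_vma */
	return 0;
}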
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 