Merge 5.4.194 into android11-5.4-lts

Changes in 5.4.194
	MIPS: Use address-of operator on section symbols
	block: drbd: drbd_nl: Make conversion to 'enum drbd_ret_code' explicit
	drm/amd/display/dc/gpio/gpio_service: Pass around correct dce_{version, environment} types
	drm/i915: Cast remain to unsigned long in eb_relocate_vma
	nfp: bpf: silence bitwise vs. logical OR warning
	can: grcan: grcan_probe(): fix broken system id check for errata workaround needs
	can: grcan: only use the NAPI poll budget for RX
	arm: remove CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
	KVM: x86/pmu: Refactoring find_arch_event() to pmc_perf_hw_id()
	x86/asm: Allow to pass macros to __ASM_FORM()
	x86: xen: kvm: Gather the definition of emulate prefixes
	x86: xen: insn: Decode Xen and KVM emulate-prefix signature
	x86: kprobes: Prohibit probing on instruction which has emulate prefix
	KVM: x86/svm: Account for family 17h event renumberings in amd_pmc_perf_hw_id
	Bluetooth: Fix the creation of hdev->name
	mm: fix missing cache flush for all tail pages of compound page
	mm: hugetlb: fix missing cache flush in copy_huge_page_from_user()
	mm: userfaultfd: fix missing cache flush in mcopy_atomic_pte() and __mcopy_atomic()
	Linux 5.4.194

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib6799ab085043b5cc60cf8e39a22f48dc4520378
commit e44bd11b47 by Greg Kroah-Hartman, 2022-05-18 16:45:20 +02:00
45 changed files with 226 additions and 145 deletions


@ -52,8 +52,7 @@ wrapper :c:func:`free_area_init`. Yet, the mappings array is not
usable until the call to :c:func:`memblock_free_all` that hands all
the memory to the page allocator.
If an architecture enables `CONFIG_ARCH_HAS_HOLES_MEMORYMODEL` option,
it may free parts of the `mem_map` array that do not cover the
An architecture may free parts of the `mem_map` array that do not cover the
actual physical pages. In such case, the architecture specific
:c:func:`pfn_valid` implementation should take the holes in the
`mem_map` into account.
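
The paragraph above is easier to picture with a toy model. The sketch below is plain userspace C with an invented bank layout, not kernel code; it only shows that a hole-aware pfn_valid() has to consult the real memory layout instead of assuming every PFN between the first and last bank is backed by a struct page.

#include <stdbool.h>
#include <stdio.h>

struct mem_range { unsigned long start_pfn, end_pfn; };	/* [start, end) */

/* Banks actually backed by mem_map; the gap between them is a hole. */
static const struct mem_range banks[] = {
	{ 0x00000, 0x20000 },
	{ 0x40000, 0x48000 },
};

static bool pfn_valid(unsigned long pfn)
{
	size_t i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++)
		if (pfn >= banks[i].start_pfn && pfn < banks[i].end_pfn)
			return true;
	return false;	/* inside a hole: no struct page behind this PFN */
}

int main(void)
{
	printf("0x10000 valid: %d\n", pfn_valid(0x10000));	/* 1 */
	printf("0x30000 valid: %d\n", pfn_valid(0x30000));	/* 0, in the hole */
	return 0;
}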


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 193
SUBLEVEL = 194
EXTRAVERSION =
NAME = Kleptomaniac Octopus


@ -26,7 +26,7 @@ config ARM
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_KEEP_MEMBLOCK if HAVE_ARCH_PFN_VALID || KEXEC
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_NO_SG_CHAIN if !ARM_HAS_SG_CHAIN
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
@ -521,7 +521,6 @@ config ARCH_S3C24XX
config ARCH_OMAP1
bool "TI OMAP1"
depends on MMU
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
select CLKDEV_LOOKUP
select CLKSRC_MMIO
@ -1519,9 +1518,6 @@ config OABI_COMPAT
UNPREDICTABLE (in fact it can be predicted that it won't work
at all). If in doubt say N.
config ARCH_HAS_HOLES_MEMORYMODEL
bool
config ARCH_SPARSEMEM_ENABLE
bool
@ -1529,7 +1525,7 @@ config ARCH_SPARSEMEM_DEFAULT
def_bool ARCH_SPARSEMEM_ENABLE
config HAVE_ARCH_PFN_VALID
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
def_bool y
config HIGHMEM
bool "High Memory Support"


@ -214,7 +214,6 @@ config ARCH_BRCMSTB
select HAVE_ARM_ARCH_TIMER
select BRCMSTB_L2_IRQ
select BCM7120_L2_IRQ
select ARCH_HAS_HOLES_MEMORYMODEL
select ZONE_DMA if ARM_LPAE
select SOC_BRCMSTB
select SOC_BUS


@ -5,7 +5,6 @@ menuconfig ARCH_DAVINCI
depends on ARCH_MULTI_V5
select DAVINCI_TIMER
select ZONE_DMA
select ARCH_HAS_HOLES_MEMORYMODEL
select PM_GENERIC_DOMAINS if PM
select PM_GENERIC_DOMAINS_OF if PM && OF
select REGMAP_MMIO


@ -8,7 +8,6 @@
menuconfig ARCH_EXYNOS
bool "Samsung EXYNOS"
depends on ARCH_MULTI_V7
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC


@ -2,7 +2,6 @@
config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)"
depends on ARCH_MULTI_V7
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_ERRATA_764369 if SMP


@ -94,7 +94,7 @@ config SOC_DRA7XX
config ARCH_OMAP2PLUS
bool
select ARCH_HAS_BANDGAP
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_HAS_RESET_CONTROLLER
select ARCH_OMAP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP


@ -8,7 +8,6 @@
config ARCH_S5PV210
bool "Samsung S5PV210/S5PC110"
depends on ARCH_MULTI_V7
select ARCH_HAS_HOLES_MEMORYMODEL
select ARM_VIC
select CLKSRC_SAMSUNG_PWM
select COMMON_CLK_SAMSUNG


@ -3,7 +3,6 @@ config ARCH_TANGO
bool "Sigma Designs Tango4 (SMP87xx)"
depends on ARCH_MULTI_V7
# Cortex-A9 MPCore r3p0, PL310 r3p2
select ARCH_HAS_HOLES_MEMORYMODEL
select ARM_ERRATA_754322
select ARM_ERRATA_764369 if SMP
select ARM_ERRATA_775420


@ -167,7 +167,7 @@ void __init plat_mem_setup(void)
dtb = phys_to_virt(fw_arg2);
else if (fw_passed_dtb) /* UHI interface or appended dtb */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
else if (&__dtb_start != &__dtb_end)
dtb = (void *)__dtb_start;
else
panic("no dtb found");


@ -79,7 +79,7 @@ void __init plat_mem_setup(void)
if (fw_passed_dtb) /* UHI interface */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
else if (&__dtb_start != &__dtb_end)
dtb = (void *)__dtb_start;
else
panic("no dtb found");


@ -28,7 +28,7 @@ static ulong get_fdtaddr(void)
if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
return (ulong)fw_passed_dtb;
if (__dtb_start < __dtb_end)
if (&__dtb_start < &__dtb_end)
ftaddr = (ulong)__dtb_start;
return ftaddr;


@ -77,7 +77,7 @@ void __init plat_mem_setup(void)
*/
if (fw_passed_dtb)
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
else if (&__dtb_start != &__dtb_end)
dtb = (void *)__dtb_start;
__dt_setup_arch(dtb);
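
All four MIPS hunks above make the same change: the built-in dtb section markers are now compared through an explicit address-of operator, which is what the code meant all along and avoids clang's complaint about comparing the arrays themselves. The standalone sketch below uses locally defined stand-in symbols (in the kernel they come from the linker script), so it illustrates only the pattern, not the real boot path.

#include <stdio.h>

extern char __dtb_start[], __dtb_end[];

/* Stand-in definitions so this sketch links on its own. */
char __dtb_start[16];
char __dtb_end[16];

int main(void)
{
	void *dtb = NULL;

	/* Old form: __dtb_start != __dtb_end, which relies on array decay
	 * and trips clang's tautological-comparison style warning. */
	if (&__dtb_start != &__dtb_end)		/* new form: explicit addresses */
		dtb = (void *)__dtb_start;

	printf("dtb at %p\n", dtb);
	return 0;
}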


@ -7,9 +7,11 @@
# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
#else
# define __ASM_FORM(x) " " #x " "
# define __ASM_FORM_RAW(x) #x
# define __ASM_FORM_COMMA(x) " " #x ","
#include <linux/stringify.h>
# define __ASM_FORM(x) " " __stringify(x) " "
# define __ASM_FORM_RAW(x) __stringify(x)
# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
#endif
#ifndef __x86_64__
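
The point of routing __ASM_FORM() and friends through __stringify() is that plain #x stringizes its argument before macro expansion. The userspace sketch below (macro names other than __stringify are invented) shows the difference; it is what later lets __XEN_EMULATE_PREFIX and __KVM_EMULATE_PREFIX be passed through these helpers.

#include <stdio.h>

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define OLD_FORM(x)	" " #x " "		/* stringize without expansion */
#define NEW_FORM(x)	" " __stringify(x) " "	/* expand first, then stringize */

#define MY_PREFIX	0x0f,0x0b,0x78,0x65,0x6e	/* hypothetical argument */

int main(void)
{
	printf("old:%s\n", OLD_FORM(MY_PREFIX));	/* old: MY_PREFIX */
	printf("new:%s\n", NEW_FORM(MY_PREFIX));	/* new: 0x0f,0x0b,0x78,0x65,0x6e */
	return 0;
}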


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EMULATE_PREFIX_H
#define _ASM_X86_EMULATE_PREFIX_H
/*
* Virt escape sequences to trigger instruction emulation;
* ideally these would decode to 'whole' instruction and not destroy
* the instruction stream; sadly this is not true for the 'kvm' one :/
*/
#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
#endif
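
For context, a guest consumes these signatures roughly as in the sketch below (x86 inline asm, function name invented): with KVM's force_emulation_prefix parameter enabled, the "ud2; kvm" bytes raise #UD in the guest and handle_ud() strips the signature and emulates the instruction that follows. On bare metal the same sequence is simply an invalid opcode.

static inline unsigned int emulated_cpuid_eax(unsigned int leaf)
{
	unsigned int eax, ebx, ecx, edx;

	asm volatile(".byte 0x0f, 0x0b, 0x6b, 0x76, 0x6d\n\t"	/* __KVM_EMULATE_PREFIX */
		     "cpuid"
		     : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
		     : "0"(leaf));
	(void)ebx; (void)ecx; (void)edx;
	return eax;
}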


@ -45,6 +45,7 @@ struct insn {
struct insn_field immediate2; /* for 64bit imm or seg16 */
};
int emulate_prefix_size;
insn_attr_t attr;
unsigned char opnd_bytes;
unsigned char addr_bytes;
@ -128,6 +129,11 @@ static inline int insn_is_evex(struct insn *insn)
return (insn->vex_prefix.nbytes == 4);
}
static inline int insn_has_emulate_prefix(struct insn *insn)
{
return !!insn->emulate_prefix_size;
}
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{


@ -379,12 +379,9 @@ struct xen_pmu_arch {
* Prefix forces emulation of some non-trapping instructions.
* Currently only CPUID.
*/
#ifdef __ASSEMBLY__
#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
#else
#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
#endif
#include <asm/emulate_prefix.h>
#define XEN_EMULATE_PREFIX __ASM_FORM(.byte __XEN_EMULATE_PREFIX ;)
#define XEN_CPUID XEN_EMULATE_PREFIX __ASM_FORM(cpuid)
#endif /* _ASM_X86_XEN_INTERFACE_H */


@ -358,6 +358,10 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
kernel_insn_init(insn, dest, MAX_INSN_SIZE);
insn_get_length(insn);
/* We can not probe force emulate prefixed instruction */
if (insn_has_emulate_prefix(insn))
return 0;
/* Another subsystem puts a breakpoint, failed to recover */
if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
return 0;


@ -143,7 +143,6 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
unsigned config, type = PERF_TYPE_RAW;
u8 event_select, unit_mask;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
int i;
@ -175,17 +174,12 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
if (!allow_event)
return;
event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
ARCH_PERFMON_EVENTSEL_CMASK |
HSW_IN_TX |
HSW_IN_TX_CHECKPOINTED))) {
config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
event_select,
unit_mask);
config = kvm_x86_ops->pmu_ops->pmc_perf_hw_id(pmc);
if (config != PERF_COUNT_HW_MAX)
type = PERF_TYPE_HARDWARE;
}


@ -22,8 +22,7 @@ struct kvm_event_hw_type_mapping {
};
struct kvm_pmu_ops {
unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
u8 unit_mask);
unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
unsigned (*find_fixed_event)(int idx);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);


@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
ARRAY_SIZE(amd_f17h_event_mapping));
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@ -126,21 +142,27 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
return &pmu->gp_counters[msr_to_index(msr)];
}
static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
u8 event_select,
u8 unit_mask)
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_event_hw_type_mapping *event_mapping;
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
if (guest_cpuid_family(pmc->vcpu) >= 0x17)
event_mapping = amd_f17h_event_mapping;
else
event_mapping = amd_event_mapping;
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
if (event_mapping[i].eventsel == event_select
&& event_mapping[i].unit_mask == unit_mask)
break;
if (i == ARRAY_SIZE(amd_event_mapping))
return PERF_COUNT_HW_MAX;
return amd_event_mapping[i].event_type;
return event_mapping[i].event_type;
}
/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
@ -300,7 +322,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
}
struct kvm_pmu_ops amd_pmu_ops = {
.find_arch_event = amd_find_arch_event,
.pmc_perf_hw_id = amd_pmc_perf_hw_id,
.find_fixed_event = amd_find_fixed_event,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,


@ -64,10 +64,11 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
reprogram_counter(pmu, bit);
}
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
u8 event_select,
u8 unit_mask)
static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
@ -374,7 +375,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)
}
struct kvm_pmu_ops intel_pmu_ops = {
.find_arch_event = intel_find_arch_event,
.pmc_perf_hw_id = intel_pmc_perf_hw_id,
.find_fixed_event = intel_find_fixed_event,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,


@ -68,6 +68,7 @@
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
@ -5583,6 +5584,7 @@ EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
int handle_ud(struct kvm_vcpu *vcpu)
{
static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
int emul_type = EMULTYPE_TRAP_UD;
char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e;
@ -5590,7 +5592,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
if (force_emulation_prefix &&
kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
sig, sizeof(sig), &e) == 0 &&
memcmp(sig, "\xf\xbkvm", sizeof(sig)) == 0) {
memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
emul_type = EMULTYPE_TRAP_UD_FORCED;
}


@ -13,6 +13,8 @@
#include <asm/inat.h>
#include <asm/insn.h>
#include <asm/emulate_prefix.h>
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
insn->addr_bytes = 4;
}
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
static int __insn_get_emulate_prefix(struct insn *insn,
const insn_byte_t *prefix, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
goto err_out;
}
insn->emulate_prefix_size = len;
insn->next_byte += len;
return 1;
err_out:
return 0;
}
static void insn_get_emulate_prefix(struct insn *insn)
{
if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
return;
__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}
/**
* insn_get_prefixes - scan x86 instruction prefix bytes
* @insn: &struct insn containing instruction
@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn)
if (prefixes->got)
return;
insn_get_emulate_prefix(insn);
nb = 0;
lb = 0;
b = peek_next(insn_byte_t, insn);
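
What the new insn_get_emulate_prefix() step does, in isolation: peek at the upcoming bytes, and if they spell one of the two virt escape signatures, record the length and move the decode cursor past it before ordinary prefix decoding starts. The sketch below is a self-contained model with simplified buffer handling, not the decoder itself.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

static const unsigned char xen_prefix[] = { 0x0f, 0x0b, 0x78, 0x65, 0x6e };
static const unsigned char kvm_prefix[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d };

struct toy_insn {
	const unsigned char *next_byte;
	const unsigned char *end;
	int emulate_prefix_size;
};

static int strip_prefix(struct toy_insn *insn, const unsigned char *p, size_t len)
{
	if ((size_t)(insn->end - insn->next_byte) < len ||
	    memcmp(insn->next_byte, p, len) != 0)
		return 0;

	insn->emulate_prefix_size = len;
	insn->next_byte += len;		/* decoding resumes after the signature */
	return 1;
}

int main(void)
{
	/* ud2; "kvm"; then cpuid (0x0f 0xa2) */
	static const unsigned char code[] = { 0x0f, 0x0b, 0x6b, 0x76, 0x6d, 0x0f, 0xa2 };
	struct toy_insn insn = { code, code + sizeof(code), 0 };

	if (!strip_prefix(&insn, xen_prefix, sizeof(xen_prefix)))
		strip_prefix(&insn, kvm_prefix, sizeof(kvm_prefix));

	printf("emulate prefix size %d, next opcode byte %#x\n",
	       insn.emulate_prefix_size, *insn.next_byte);
	return 0;
}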


@ -791,9 +791,11 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&adm_ctx.resource->adm_mutex);
if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
R_PRIMARY, parms.assume_uptodate);
else
retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
retcode = (enum drbd_ret_code)drbd_set_role(adm_ctx.device,
R_SECONDARY, 0);
mutex_unlock(&adm_ctx.resource->adm_mutex);
genl_lock();
@ -1971,7 +1973,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
drbd_flush_workqueue(&connection->sender_work);
rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
retcode = rv; /* FIXME: Type mismatch. */
retcode = (enum drbd_ret_code)rv;
drbd_resume_io(device);
if (rv < SS_SUCCESS)
goto fail;
@ -2696,7 +2698,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
}
rcu_read_unlock();
retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
retcode = (enum drbd_ret_code)conn_request_state(connection,
NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(connection);
mutex_unlock(&adm_ctx.resource->adm_mutex);
@ -2809,7 +2812,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&adm_ctx.resource->adm_mutex);
rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */
retcode = (enum drbd_ret_code)rv;
else
retcode = NO_ERROR;
mutex_unlock(&adm_ctx.resource->adm_mutex);
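
These drbd hunks all do the same thing: drbd_set_role(), _drbd_request_state() and conn_request_state() return enum drbd_state_rv while retcode is an enum drbd_ret_code, so the assignments now carry an explicit cast. The toy below (invented enums, values meaningless) shows the shape of the implicit enum conversion compilers warn about and how the cast records the intent without changing the stored value.

enum ret_code { NO_ERROR = 101, ERR_INVALID_REQUEST = 102 };
enum state_rv { SS_SUCCESS = 1, SS_UNKNOWN_ERROR = -1 };

static enum state_rv try_set_role(void) { return SS_UNKNOWN_ERROR; }

static enum ret_code adm_set_role(void)
{
	enum state_rv rv = try_set_role();

	/* return rv;                  implicit enum conversion, warns */
	return (enum ret_code)rv;	/* explicit cast, same value */
}

int main(void) { return adm_set_role() == NO_ERROR ? 0 : 1; }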


@ -53,8 +53,8 @@
*/
struct gpio_service *dal_gpio_service_create(
enum dce_version dce_version_major,
enum dce_version dce_version_minor,
enum dce_version dce_version,
enum dce_environment dce_environment,
struct dc_context *ctx)
{
struct gpio_service *service;
@ -67,14 +67,14 @@ struct gpio_service *dal_gpio_service_create(
return NULL;
}
if (!dal_hw_translate_init(&service->translate, dce_version_major,
dce_version_minor)) {
if (!dal_hw_translate_init(&service->translate, dce_version,
dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}
if (!dal_hw_factory_init(&service->factory, dce_version_major,
dce_version_minor)) {
if (!dal_hw_factory_init(&service->factory, dce_version,
dce_environment)) {
BREAK_TO_DEBUGGER();
goto failure_1;
}


@ -42,8 +42,8 @@ void dal_gpio_destroy(
struct gpio **ptr);
struct gpio_service *dal_gpio_service_create(
enum dce_version dce_version_major,
enum dce_version dce_version_minor,
enum dce_version dce_version,
enum dce_environment dce_environment,
struct dc_context *ctx);
struct gpio *dal_gpio_service_create_irq(


@ -1452,7 +1452,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
urelocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
if (unlikely(remain > N_RELOC(ULONG_MAX)))
if (unlikely((unsigned long)remain > N_RELOC(ULONG_MAX)))
return -EINVAL;
/*

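The i915 cast above addresses a warning rather than a behaviour change: remain is a 32-bit unsigned variable, so comparing it against a bound derived from ULONG_MAX is always false on 64-bit targets and clang flags the comparison. Widening the operand first keeps the guard meaningful on 32-bit builds and quiets the diagnostic. The toy below uses an invented N_RELOC() stand-in.

#include <limits.h>
#include <stdio.h>

#define N_RELOC(x)	((x) / 32)	/* stand-in for the driver's macro */

int main(void)
{
	unsigned int remain = 1000;

	if ((unsigned long)remain > N_RELOC(ULONG_MAX))
		puts("relocation count out of range");
	else
		puts("relocation count ok");
	return 0;
}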

@ -241,7 +241,7 @@ struct grcan_device_config {
.rxsize = GRCAN_DEFAULT_BUFFER_SIZE, \
}
#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 0x4100
#define GRCAN_TXBUG_SAFE_GRLIB_VERSION 4100
#define GRLIB_VERSION_MASK 0xffff
/* GRCAN private data structure */
@ -1137,7 +1137,7 @@ static int grcan_close(struct net_device *dev)
return 0;
}
static int grcan_transmit_catch_up(struct net_device *dev, int budget)
static void grcan_transmit_catch_up(struct net_device *dev)
{
struct grcan_priv *priv = netdev_priv(dev);
unsigned long flags;
@ -1145,7 +1145,7 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
spin_lock_irqsave(&priv->lock, flags);
work_done = catch_up_echo_skb(dev, budget, true);
work_done = catch_up_echo_skb(dev, -1, true);
if (work_done) {
if (!priv->resetting && !priv->closing &&
!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
@ -1159,8 +1159,6 @@ static int grcan_transmit_catch_up(struct net_device *dev, int budget)
}
spin_unlock_irqrestore(&priv->lock, flags);
return work_done;
}
static int grcan_receive(struct net_device *dev, int budget)
@ -1242,19 +1240,13 @@ static int grcan_poll(struct napi_struct *napi, int budget)
struct net_device *dev = priv->dev;
struct grcan_registers __iomem *regs = priv->regs;
unsigned long flags;
int tx_work_done, rx_work_done;
int rx_budget = budget / 2;
int tx_budget = budget - rx_budget;
int work_done;
/* Half of the budget for receiveing messages */
rx_work_done = grcan_receive(dev, rx_budget);
work_done = grcan_receive(dev, budget);
/* Half of the budget for transmitting messages as that can trigger echo
* frames being received
*/
tx_work_done = grcan_transmit_catch_up(dev, tx_budget);
grcan_transmit_catch_up(dev);
if (rx_work_done < rx_budget && tx_work_done < tx_budget) {
if (work_done < budget) {
napi_complete(napi);
/* Guarantee no interference with a running reset that otherwise
@ -1271,7 +1263,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&priv->lock, flags);
}
return rx_work_done + tx_work_done;
return work_done;
}
/* Work tx bug by waiting while for the risky situation to clear. If that fails,
@ -1656,6 +1648,7 @@ static int grcan_setup_netdev(struct platform_device *ofdev,
static int grcan_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
struct device_node *sysid_parent;
struct resource *res;
u32 sysid, ambafreq;
int irq, err;
@ -1665,10 +1658,15 @@ static int grcan_probe(struct platform_device *ofdev)
/* Compare GRLIB version number with the first that does not
* have the tx bug (see start_xmit)
*/
err = of_property_read_u32(np, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK)
>= GRCAN_TXBUG_SAFE_GRLIB_VERSION))
txbug = false;
sysid_parent = of_find_node_by_path("/ambapp0");
if (sysid_parent) {
of_node_get(sysid_parent);
err = of_property_read_u32(sysid_parent, "systemid", &sysid);
if (!err && ((sysid & GRLIB_VERSION_MASK) >=
GRCAN_TXBUG_SAFE_GRLIB_VERSION))
txbug = false;
of_node_put(sysid_parent);
}
err = of_property_read_u32(np, "freq", &ambafreq);
if (err) {

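The grcan probe fix above reads the GRLIB build id from the AMBA plug&play root node /ambapp0 instead of from the grcan node, and compares it against the decimal revision constant 4100. A kernel-style sketch of just that lookup, with an invented helper name (it mirrors the hunk, it is not the driver function):

#include <linux/of.h>

static bool grlib_txbug_safe(void)
{
	struct device_node *amba = of_find_node_by_path("/ambapp0");
	u32 sysid;
	bool safe = false;

	if (!amba)
		return false;	/* no bus node: assume the workaround is needed */

	if (!of_property_read_u32(amba, "systemid", &sysid))
		safe = (sysid & 0xffff) >= 4100;	/* GRCAN_TXBUG_SAFE_GRLIB_VERSION */

	of_node_put(amba);
	return safe;
}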

@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
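
swreg_lmextn() returns bool in this driver, so combining two results with | or || stores the same value; the change simply uses the logical form that clang's bitwise-vs-logical OR warning asks for. The helper below is an invented stand-in, not the driver's.

#include <stdbool.h>
#include <stdio.h>

static bool lmextn(unsigned int swreg) { return swreg & 0x80000000u; }

int main(void)
{
	unsigned int lreg = 0x80000001u, rreg = 0x00000002u;
	bool src_lmextn;

	/* src_lmextn = lmextn(lreg) | lmextn(rreg);   bitwise, warns */
	src_lmextn = lmextn(lreg) || lmextn(rreg);	/* logical, same result */

	printf("src_lmextn = %d\n", src_lmextn);
	return 0;
}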


@ -193,8 +193,6 @@ kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
return 1;
p = pfn_to_page(pfn);
if (!memmap_valid_within(pfn, p, page_zone(p)))
return 1;
ent = kmalloc(sizeof(*ent), GFP_KERNEL);
if (!ent)


@ -1453,37 +1453,6 @@ void memory_present(int nid, unsigned long start, unsigned long end);
#define pfn_valid_within(pfn) (1)
#endif
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
* pfn_valid() is meant to be able to tell if a given PFN has valid memmap
* associated with it or not. This means that a struct page exists for this
* pfn. The caller cannot assume the page is fully initialized in general.
* Hotplugable pages might not have been onlined yet. pfn_to_online_page()
* will ensure the struct page is fully online and initialized. Special pages
* (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
*
* In FLATMEM, it is expected that holes always have valid memmap as long as
* there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
* that a valid section has a memmap for the entire section.
*
* However, an ARM, and maybe other embedded architectures in the future
* free memmap backing holes to save memory on the assumption the memmap is
* never used. The page_zone linkages are then broken even though pfn_valid()
* returns true. A walker of the full memmap must then do this additional
* check to ensure the memmap they are looking at is sane by making sure
* the zone and PFN linkages are still valid. This is expensive, but walkers
* of the full memmap are extremely rare.
*/
bool memmap_valid_within(unsigned long pfn,
struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
struct page *page, struct zone *zone)
{
return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */


@ -34,6 +34,9 @@
/* HCI priority */
#define HCI_PRIO_MAX 7
/* HCI maximum id value */
#define HCI_MAX_ID 10000
/* HCI Core structures */
struct inquiry_data {
bdaddr_t bdaddr;


@ -4820,6 +4820,8 @@ long copy_huge_page_from_user(struct page *dst_page,
if (rc)
break;
flush_dcache_page(subpage);
cond_resched();
}
return ret_val;


@ -994,9 +994,12 @@ static int move_to_new_page(struct page *newpage, struct page *page,
if (!PageMappingFlags(page))
page->mapping = NULL;
if (likely(!is_zone_device_page(newpage)))
flush_dcache_page(newpage);
if (likely(!is_zone_device_page(newpage))) {
int i, nr = compound_nr(newpage);
for (i = 0; i < nr; i++)
flush_dcache_page(newpage + i);
}
}
out:
return rc;
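
This merge carries the same kind of fix in hugetlb, migrate and userfaultfd: once the kernel has written page contents through its own mapping, every constituent page of a possibly compound page needs flush_dcache_page() so user space does not see stale cache lines on architectures with aliasing D-caches. A kernel-style sketch of that rule, with a hypothetical helper name; it mirrors the loop added just above rather than introducing anything new.

#include <linux/mm.h>
#include <linux/highmem.h>

static inline void flush_copied_page(struct page *page)
{
	int i, nr = compound_nr(page);

	for (i = 0; i < nr; i++)
		flush_dcache_page(page + i);
}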


@ -72,20 +72,6 @@ struct zoneref *__next_zones_zonelist(struct zoneref *z,
return z;
}
#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
bool memmap_valid_within(unsigned long pfn,
struct page *page, struct zone *zone)
{
if (page_to_pfn(page) != pfn)
return false;
if (page_zone(page) != zone)
return false;
return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
void lruvec_init(struct lruvec *lruvec)
{
enum lru_list lru;


@ -53,6 +53,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
/* don't free the page */
goto out;
}
flush_dcache_page(page);
} else {
page = *pagep;
*pagep = NULL;
@ -572,6 +574,7 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
err = -EFAULT;
goto out;
}
flush_dcache_page(page);
goto retry;
} else
BUG_ON(page);


@ -1447,10 +1447,6 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
if (!page)
continue;
/* Watch for unexpected holes punched in the memmap */
if (!memmap_valid_within(pfn, page, zone))
continue;
if (page_zone(page) != zone)
continue;


@ -3304,10 +3304,10 @@ int hci_register_dev(struct hci_dev *hdev)
*/
switch (hdev->dev_type) {
case HCI_PRIMARY:
id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
break;
case HCI_AMP:
id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
break;
default:
return -EINVAL;
@ -3316,7 +3316,7 @@ int hci_register_dev(struct hci_dev *hdev)
if (id < 0)
return id;
sprintf(hdev->name, "hci%d", id);
snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
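
The two Bluetooth hunks work together: hdev->name is a small fixed-size array (8 bytes in struct hci_dev), so the id space handed out by ida_simple_get() is capped below HCI_MAX_ID and the formatting switches to snprintf(). The userspace sketch below only mimics the sizing argument; the structure is a stand-in, not the real hci_dev layout.

#include <stdio.h>

#define HCI_MAX_ID 10000	/* ids stay below this, so at most "hci9999" */

struct toy_hdev { char name[8]; };

int main(void)
{
	struct toy_hdev hdev;
	int id = HCI_MAX_ID - 1;

	/* sprintf(hdev.name, "hci%d", 123456);  would overrun name[8] */
	snprintf(hdev.name, sizeof(hdev.name), "hci%d", id);	/* "hci9999" */

	printf("%s\n", hdev.name);
	return 0;
}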


@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_EMULATE_PREFIX_H
#define _ASM_X86_EMULATE_PREFIX_H
/*
* Virt escape sequences to trigger instruction emulation;
* ideally these would decode to 'whole' instruction and not destroy
* the instruction stream; sadly this is not true for the 'kvm' one :/
*/
#define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */
#define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */
#endif


@ -45,6 +45,7 @@ struct insn {
struct insn_field immediate2; /* for 64bit imm or seg16 */
};
int emulate_prefix_size;
insn_attr_t attr;
unsigned char opnd_bytes;
unsigned char addr_bytes;
@ -128,6 +129,11 @@ static inline int insn_is_evex(struct insn *insn)
return (insn->vex_prefix.nbytes == 4);
}
static inline int insn_has_emulate_prefix(struct insn *insn)
{
return !!insn->emulate_prefix_size;
}
/* Ensure this instruction is decoded completely */
static inline int insn_complete(struct insn *insn)
{


@ -13,6 +13,8 @@
#include "../include/asm/inat.h"
#include "../include/asm/insn.h"
#include "../include/asm/emulate_prefix.h"
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
@ -58,6 +60,36 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
insn->addr_bytes = 4;
}
static const insn_byte_t xen_prefix[] = { __XEN_EMULATE_PREFIX };
static const insn_byte_t kvm_prefix[] = { __KVM_EMULATE_PREFIX };
static int __insn_get_emulate_prefix(struct insn *insn,
const insn_byte_t *prefix, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
if (peek_nbyte_next(insn_byte_t, insn, i) != prefix[i])
goto err_out;
}
insn->emulate_prefix_size = len;
insn->next_byte += len;
return 1;
err_out:
return 0;
}
static void insn_get_emulate_prefix(struct insn *insn)
{
if (__insn_get_emulate_prefix(insn, xen_prefix, sizeof(xen_prefix)))
return;
__insn_get_emulate_prefix(insn, kvm_prefix, sizeof(kvm_prefix));
}
/**
* insn_get_prefixes - scan x86 instruction prefix bytes
* @insn: &struct insn containing instruction
@ -76,6 +108,8 @@ void insn_get_prefixes(struct insn *insn)
if (prefixes->got)
return;
insn_get_emulate_prefix(insn);
nb = 0;
lb = 0;
b = peek_next(insn_byte_t, insn);


@ -4,6 +4,7 @@
FILES='
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
'
@ -46,4 +47,4 @@ done
check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'


@ -28,6 +28,7 @@ arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/include/uapi/asm/prctl.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
@ -116,7 +117,7 @@ check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B
check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]"'
check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl