Merge 5.10.33 into android12-5.10
Changes in 5.10.33
    vhost-vdpa: protect concurrent access to vhost device iotlb
    gpio: omap: Save and restore sysconfig
    KEYS: trusted: Fix TPM reservation for seal/unseal
    vdpa/mlx5: Set err = -ENOMEM in case dma_map_sg_attrs fails
    pinctrl: lewisburg: Update number of pins in community
    block: return -EBUSY when there are open partitions in blkdev_reread_part
    pinctrl: core: Show pin numbers for the controllers with base = 0
    arm64: dts: allwinner: Revert SD card CD GPIO for Pine64-LTS
    bpf: Permits pointers on stack for helper calls
    bpf: Allow variable-offset stack access
    bpf: Refactor and streamline bounds check into helper
    bpf: Tighten speculative pointer arithmetic mask
    locking/qrwlock: Fix ordering in queued_write_lock_slowpath()
    perf/x86/intel/uncore: Remove uncore extra PCI dev HSWEP_PCI_PCU_3
    perf/x86/kvm: Fix Broadwell Xeon stepping in isolation_ucodes[]
    perf auxtrace: Fix potential NULL pointer dereference
    perf map: Fix error return code in maps__clone()
    HID: google: add don USB id
    HID: alps: fix error return code in alps_input_configured()
    HID: cp2112: fix support for multiple gpiochips
    HID: wacom: Assign boolean values to a bool variable
    soc: qcom: geni: shield geni_icc_get() for ACPI boot
    dmaengine: xilinx: dpdma: Fix descriptor issuing on video group
    dmaengine: xilinx: dpdma: Fix race condition in done IRQ
    ARM: dts: Fix swapped mmc order for omap3
    net: geneve: check skb is large enough for IPv4/IPv6 header
    dmaengine: tegra20: Fix runtime PM imbalance on error
    s390/entry: save the caller of psw_idle
    arm64: kprobes: Restore local irqflag if kprobes is cancelled
    xen-netback: Check for hotplug-status existence before watching
    cavium/liquidio: Fix duplicate argument
    kasan: fix hwasan build for gcc
    csky: change a Kconfig symbol name to fix e1000 build error
    ia64: fix discontig.c section mismatches
    ia64: tools: remove duplicate definition of ia64_mf() on ia64
    x86/crash: Fix crash_setup_memmap_entries() out-of-bounds access
    net: hso: fix NULL-deref on disconnect regression
    USB: CDC-ACM: fix poison/unpoison imbalance
    Linux 5.10.33

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I638db3c919ad938eaaaac3d687175252edcd7990
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 32
+SUBLEVEL = 33
 EXTRAVERSION =
 NAME = Dare mighty things
@@ -24,6 +24,9 @@ aliases {
 i2c0 = &i2c1;
 i2c1 = &i2c2;
 i2c2 = &i2c3;
+mmc0 = &mmc1;
+mmc1 = &mmc2;
+mmc2 = &mmc3;
 serial0 = &uart1;
 serial1 = &uart2;
 serial2 = &uart3;
@@ -10,5 +10,5 @@ / {
 };

 &mmc0 {
-cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 push-push switch */
+broken-cd; /* card detect is broken on *some* boards */
 };
@@ -286,10 +286,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 if (!instruction_pointer(regs))
 BUG();

-if (kcb->kprobe_status == KPROBE_REENTER)
+if (kcb->kprobe_status == KPROBE_REENTER) {
 restore_previous_kprobe(kcb);
-else
+} else {
+kprobes_restore_local_irqflag(kcb, regs);
 reset_current_kprobe();
+}

 break;
 case KPROBE_HIT_ACTIVE:
@@ -292,7 +292,7 @@ config FORCE_MAX_ZONEORDER
 int "Maximum zone order"
 default "11"

-config RAM_BASE
+config DRAM_BASE
 hex "DRAM start addr (the same with memory-section in dts)"
 default 0x0
@@ -28,7 +28,7 @@
 #define SSEG_SIZE 0x20000000
 #define LOWMEM_LIMIT (SSEG_SIZE * 2)

-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))

 #ifndef __ASSEMBLY__
@@ -94,7 +94,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet. Note that node 0 will also count all non-existent cpus.
 */
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
 {
 int cpu, n = 0;
@@ -109,7 +109,7 @@ static int __meminit early_nr_cpus_node(int node)
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
 {
 unsigned long pernodesize = 0, cpus;
@@ -366,7 +366,7 @@ static void __init reserve_pernode_space(void)
 }
 }

-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
 {
 pg_data_t **dst;
 int node;
@@ -994,6 +994,7 @@ ENDPROC(ext_int_handler)
 * Load idle PSW.
 */
 ENTRY(psw_idle)
+stg %r14,(__SF_GPRS+8*8)(%r15)
 stg %r3,__SF_EMPTY(%r15)
 larl %r1,.Lpsw_idle_exit
 stg %r1,__SF_EMPTY+8(%r15)
@@ -4387,7 +4387,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
 INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
-INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
+INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
 INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
@@ -1159,7 +1159,6 @@ enum {
 SNBEP_PCI_QPI_PORT0_FILTER,
 SNBEP_PCI_QPI_PORT1_FILTER,
 BDX_PCI_QPI_PORT2_FILTER,
-HSWEP_PCI_PCU_3,
 };

 static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2816,22 +2815,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
 NULL,
 };

+#define HSWEP_PCU_DID 0x2fc0
+#define HSWEP_PCU_CAPID4_OFFET 0x94
+#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
+
+static bool hswep_has_limit_sbox(unsigned int device)
+{
+struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+u32 capid4;
+
+if (!dev)
+return false;
+
+pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFET, &capid4);
+if (!hswep_get_chop(capid4))
+return true;
+
+return false;
+}
+
 void hswep_uncore_cpu_init(void)
 {
-int pkg = boot_cpu_data.logical_proc_id;
-
 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

 /* Detect 6-8 core systems with only two SBOXes */
-if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
-u32 capid4;
-
-pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
-0x94, &capid4);
-if (((capid4 >> 6) & 0x3) == 0)
-hswep_uncore_sbox.num_boxes = 2;
-}
+if (hswep_has_limit_sbox(HSWEP_PCU_DID))
+hswep_uncore_sbox.num_boxes = 2;

 uncore_msr_uncores = hswep_msr_uncores;
 }
@@ -3094,11 +3104,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
 SNBEP_PCI_QPI_PORT1_FILTER),
 },
-{ /* PCU.3 (for Capability registers) */
-PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
-.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
-HSWEP_PCI_PCU_3),
-},
 { /* end: all zeroes */ }
 };
@@ -3190,27 +3195,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 EVENT_CONSTRAINT_END
 };

+#define BDX_PCU_DID 0x6fc0
+
 void bdx_uncore_cpu_init(void)
 {
-int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
-
 if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
 bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 uncore_msr_uncores = bdx_msr_uncores;

-/* BDX-DE doesn't have SBOX */
-if (boot_cpu_data.x86_model == 86) {
-uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
 /* Detect systems with no SBOXes */
-} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
-struct pci_dev *pdev;
-u32 capid4;
+if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
+uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;

-pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
-pci_read_config_dword(pdev, 0x94, &capid4);
-if (((capid4 >> 6) & 0x3) == 0)
-bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
-}
 hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
 }
@@ -3431,11 +3427,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
 BDX_PCI_QPI_PORT2_FILTER),
 },
-{ /* PCU.3 (for Capability registers) */
-PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
-.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
-HSWEP_PCI_PCU_3),
-},
 { /* end: all zeroes */ }
 };
@@ -337,7 +337,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
 struct crash_memmap_data cmd;
 struct crash_mem *cmem;

-cmem = vzalloc(sizeof(struct crash_mem));
+cmem = vzalloc(struct_size(cmem, ranges, 1));
 if (!cmem)
 return -ENOMEM;
@@ -98,6 +98,8 @@ static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
 return -EINVAL;
 if (!capable(CAP_SYS_ADMIN))
 return -EACCES;
+if (bdev->bd_part_count)
+return -EBUSY;

 /*
 * Reopen the device to revalidate the driver state and force a
@@ -723,7 +723,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
 goto end;
 }
 if (!tdc->busy) {
-err = pm_runtime_get_sync(tdc->tdma->dev);
+err = pm_runtime_resume_and_get(tdc->tdma->dev);
 if (err < 0) {
 dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
 goto end;
@@ -818,7 +818,7 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
 struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 int err;

-err = pm_runtime_get_sync(tdc->tdma->dev);
+err = pm_runtime_resume_and_get(tdc->tdma->dev);
 if (err < 0) {
 dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
 return;
@@ -839,6 +839,7 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
 struct xilinx_dpdma_tx_desc *desc;
 struct virt_dma_desc *vdesc;
 u32 reg, channels;
+bool first_frame;

 lockdep_assert_held(&chan->lock);
@@ -852,14 +853,6 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
 chan->running = true;
 }

-if (chan->video_group)
-channels = xilinx_dpdma_chan_video_group_ready(chan);
-else
-channels = BIT(chan->id);
-
-if (!channels)
-return;
-
 vdesc = vchan_next_desc(&chan->vchan);
 if (!vdesc)
 return;
@@ -884,13 +877,26 @@ static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
 FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
 upper_32_bits(sw_desc->dma_addr)));

-if (chan->first_frame)
+first_frame = chan->first_frame;
+chan->first_frame = false;
+
+if (chan->video_group) {
+channels = xilinx_dpdma_chan_video_group_ready(chan);
+/*
+* Trigger the transfer only when all channels in the group are
+* ready.
+*/
+if (!channels)
+return;
+} else {
+channels = BIT(chan->id);
+}
+
+if (first_frame)
 reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
 else
 reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

-chan->first_frame = false;
-
 dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
 }
@@ -1042,13 +1048,14 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
 */
 static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
 {
-struct xilinx_dpdma_tx_desc *active = chan->desc.active;
+struct xilinx_dpdma_tx_desc *active;
 unsigned long flags;

 spin_lock_irqsave(&chan->lock, flags);

 xilinx_dpdma_debugfs_desc_done_irq(chan);

+active = chan->desc.active;
 if (active)
 vchan_cyclic_callback(&active->vdesc);
 else
@@ -29,6 +29,7 @@
 #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

 struct gpio_regs {
+u32 sysconfig;
 u32 irqenable1;
 u32 irqenable2;
 u32 wake_en;
@@ -1072,6 +1073,7 @@ static void omap_gpio_init_context(struct gpio_bank *p)
 const struct omap_gpio_reg_offs *regs = p->regs;
 void __iomem *base = p->base;

+p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
 p->context.ctrl = readl_relaxed(base + regs->ctrl);
 p->context.oe = readl_relaxed(base + regs->direction);
 p->context.wake_en = readl_relaxed(base + regs->wkup_en);
@@ -1091,6 +1093,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
 const struct omap_gpio_reg_offs *regs = bank->regs;
 void __iomem *base = bank->base;

+writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
 writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
 writel_relaxed(bank->context.ctrl, base + regs->ctrl);
 writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
@@ -1118,6 +1121,10 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)

 bank->saved_datain = readl_relaxed(base + bank->regs->datain);

+/* Save syconfig, it's runtime value can be different from init value */
+if (bank->loses_context)
+bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);
+
 if (!bank->enabled_non_wakeup_gpios)
 goto update_gpio_context_count;
@@ -1282,6 +1289,7 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb,

 static const struct omap_gpio_reg_offs omap2_gpio_regs = {
 .revision = OMAP24XX_GPIO_REVISION,
+.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
 .direction = OMAP24XX_GPIO_OE,
 .datain = OMAP24XX_GPIO_DATAIN,
 .dataout = OMAP24XX_GPIO_DATAOUT,
@@ -1305,6 +1313,7 @@ static const struct omap_gpio_reg_offs omap2_gpio_regs = {

 static const struct omap_gpio_reg_offs omap4_gpio_regs = {
 .revision = OMAP4_GPIO_REVISION,
+.sysconfig = OMAP4_GPIO_SYSCONFIG,
 .direction = OMAP4_GPIO_OE,
 .datain = OMAP4_GPIO_DATAIN,
 .dataout = OMAP4_GPIO_DATAOUT,
@@ -761,6 +761,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)

 if (input_register_device(data->input2)) {
 input_free_device(input2);
+ret = -ENOENT;
 goto exit;
 }
 }
@@ -161,6 +161,7 @@ struct cp2112_device {
 atomic_t read_avail;
 atomic_t xfer_avail;
 struct gpio_chip gc;
+struct irq_chip irq;
 u8 *in_out_buffer;
 struct mutex lock;
@@ -1175,16 +1176,6 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
 return 0;
 }

-static struct irq_chip cp2112_gpio_irqchip = {
-.name = "cp2112-gpio",
-.irq_startup = cp2112_gpio_irq_startup,
-.irq_shutdown = cp2112_gpio_irq_shutdown,
-.irq_ack = cp2112_gpio_irq_ack,
-.irq_mask = cp2112_gpio_irq_mask,
-.irq_unmask = cp2112_gpio_irq_unmask,
-.irq_set_type = cp2112_gpio_irq_type,
-};
-
 static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
 int pin)
 {
|
||||
dev->gc.can_sleep = 1;
|
||||
dev->gc.parent = &hdev->dev;
|
||||
|
||||
dev->irq.name = "cp2112-gpio";
|
||||
dev->irq.irq_startup = cp2112_gpio_irq_startup;
|
||||
dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
|
||||
dev->irq.irq_ack = cp2112_gpio_irq_ack;
|
||||
dev->irq.irq_mask = cp2112_gpio_irq_mask;
|
||||
dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
|
||||
dev->irq.irq_set_type = cp2112_gpio_irq_type;
|
||||
dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
|
||||
|
||||
girq = &dev->gc.irq;
|
||||
girq->chip = &cp2112_gpio_irqchip;
|
||||
girq->chip = &dev->irq;
|
||||
/* The event comes from the outside so no parent handler */
|
||||
girq->parent_handler = NULL;
|
||||
girq->num_parents = 0;
|
||||
|
@@ -526,6 +526,8 @@ static void hammer_remove(struct hid_device *hdev)
 }

 static const struct hid_device_id hammer_devices[] = {
+{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
@@ -486,6 +486,7 @@
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 #define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
+#define USB_DEVICE_ID_GOOGLE_DON 0x5050

 #define USB_VENDOR_ID_GOTOP 0x08f2
 #define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -2533,7 +2533,7 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
 !wacom_wac->shared->is_touch_on) {
 if (!wacom_wac->shared->touch_down)
 return;
-prox = 0;
+prox = false;
 }

 wacom_wac->hid_data.num_received++;
@@ -412,7 +412,7 @@
 | CN6XXX_INTR_M0UNWI_ERR \
 | CN6XXX_INTR_M1UPB0_ERR \
 | CN6XXX_INTR_M1UPWI_ERR \
-| CN6XXX_INTR_M1UPB0_ERR \
+| CN6XXX_INTR_M1UNB0_ERR \
 | CN6XXX_INTR_M1UNWI_ERR \
 | CN6XXX_INTR_INSTR_DB_OF_ERR \
 | CN6XXX_INTR_SLIST_DB_OF_ERR \
@@ -890,6 +890,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 __be16 sport;
 int err;

+if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+return -EINVAL;
+
 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info,
 geneve->cfg.info.key.tp_dst, sport);
@@ -984,6 +987,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 __be16 sport;
 int err;

+if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+return -EINVAL;
+
 sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info,
 geneve->cfg.info.key.tp_dst, sport);
@@ -3104,7 +3104,7 @@ static void hso_free_interface(struct usb_interface *interface)
 cancel_work_sync(&serial_table[i]->async_put_intf);
 cancel_work_sync(&serial_table[i]->async_get_intf);
 hso_serial_tty_unregister(serial);
-kref_put(&serial_table[i]->ref, hso_serial_ref_free);
+kref_put(&serial->parent->ref, hso_serial_ref_free);
 }
 }
@@ -824,11 +824,15 @@ static void connect(struct backend_info *be)
 xenvif_carrier_on(be->vif);

 unregister_hotplug_status_watch(be);
-err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
-hotplug_status_changed,
-"%s/%s", dev->nodename, "hotplug-status");
-if (!err)
+if (xenbus_exists(XBT_NIL, dev->nodename, "hotplug-status")) {
+err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
+NULL, hotplug_status_changed,
+"%s/%s", dev->nodename,
+"hotplug-status");
+if (err)
+goto err;
 be->have_hotplug_status_watch = 1;
+}

 netif_tx_wake_all_queues(be->vif->dev);
@@ -1604,8 +1604,8 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
 unsigned i, pin;
 #ifdef CONFIG_GPIOLIB
 struct pinctrl_gpio_range *range;
-unsigned int gpio_num;
 struct gpio_chip *chip;
+int gpio_num;
 #endif

 seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
@@ -1625,7 +1625,7 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
 seq_printf(s, "pin %d (%s) ", pin, desc->name);

 #ifdef CONFIG_GPIOLIB
-gpio_num = 0;
+gpio_num = -1;
 list_for_each_entry(range, &pctldev->gpio_ranges, node) {
 if ((pin >= range->pin_base) &&
 (pin < (range->pin_base + range->npins))) {
@@ -1633,10 +1633,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
 break;
 }
 }
-chip = gpio_to_chip(gpio_num);
-if (chip && chip->gpiodev && chip->gpiodev->base)
-seq_printf(s, "%u:%s ", gpio_num -
-chip->gpiodev->base, chip->label);
+if (gpio_num >= 0)
+chip = gpio_to_chip(gpio_num);
+else
+chip = NULL;
+if (chip)
+seq_printf(s, "%u:%s ", gpio_num - chip->gpiodev->base, chip->label);
 else
 seq_puts(s, "0:? ");
 #endif
@@ -299,9 +299,9 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
 static const struct intel_community lbg_communities[] = {
 LBG_COMMUNITY(0, 0, 71),
 LBG_COMMUNITY(1, 72, 132),
-LBG_COMMUNITY(3, 133, 144),
-LBG_COMMUNITY(4, 145, 180),
-LBG_COMMUNITY(5, 181, 246),
+LBG_COMMUNITY(3, 133, 143),
+LBG_COMMUNITY(4, 144, 178),
+LBG_COMMUNITY(5, 179, 246),
 };

 static const struct intel_pinctrl_soc_data lbg_soc_data = {
@@ -741,6 +741,9 @@ int geni_icc_get(struct geni_se *se, const char *icc_ddr)
 int i, err;
 const char *icc_names[] = {"qup-core", "qup-config", icc_ddr};

+if (has_acpi_companion(se->dev))
+return 0;
+
 for (i = 0; i < ARRAY_SIZE(se->icc_paths); i++) {
 if (!icc_names[i])
 continue;
@@ -1637,12 +1637,13 @@ static int acm_resume(struct usb_interface *intf)
 struct urb *urb;
 int rv = 0;

+acm_unpoison_urbs(acm);
 spin_lock_irq(&acm->write_lock);

 if (--acm->susp_count)
 goto out;

-acm_unpoison_urbs(acm);
-
 if (tty_port_initialized(&acm->port)) {
 rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
@@ -273,8 +273,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
 mr->log_size = log_entity_size;
 mr->nsg = nsg;
 mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
-if (!mr->nent)
+if (!mr->nent) {
+err = -ENOMEM;
 goto err_map;
+}

 err = create_direct_mr(mvdev, mr);
 if (err)
@@ -749,9 +749,11 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 const struct vdpa_config_ops *ops = vdpa->config;
 int r = 0;

+mutex_lock(&dev->mutex);
+
 r = vhost_dev_check_owner(dev);
 if (r)
-return r;
+goto unlock;

 switch (msg->type) {
 case VHOST_IOTLB_UPDATE:
@@ -772,6 +774,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 r = -EINVAL;
 break;
 }
+unlock:
+mutex_unlock(&dev->mutex);

 return r;
 }
@@ -1259,6 +1259,11 @@ static inline bool bpf_allow_ptr_leaks(void)
 return perfmon_capable();
 }

+static inline bool bpf_allow_uninit_stack(void)
+{
+return perfmon_capable();
+}
+
 static inline bool bpf_allow_ptr_to_map_access(void)
 {
 return perfmon_capable();
@@ -187,7 +187,7 @@ struct bpf_func_state {
 * 0 = main function, 1 = first callee.
 */
 u32 frameno;
-/* subprog number == index within subprog_stack_depth
+/* subprog number == index within subprog_info
 * zero == main subprog
 */
 u32 subprogno;
@@ -390,6 +390,7 @@ struct bpf_verifier_env {
 u32 used_map_cnt; /* number of used maps */
 u32 id_gen; /* used to generate unique reg IDs */
 bool allow_ptr_leaks;
+bool allow_uninit_stack;
 bool allow_ptr_to_map_access;
 bool bpf_capable;
 bool bypass_spec_v1;
@@ -85,6 +85,7 @@
 * omap2+ specific GPIO registers
 */
 #define OMAP24XX_GPIO_REVISION 0x0000
+#define OMAP24XX_GPIO_SYSCONFIG 0x0010
 #define OMAP24XX_GPIO_IRQSTATUS1 0x0018
 #define OMAP24XX_GPIO_IRQSTATUS2 0x0028
 #define OMAP24XX_GPIO_IRQENABLE2 0x002c
@@ -108,6 +109,7 @@
 #define OMAP24XX_GPIO_SETDATAOUT 0x0094

 #define OMAP4_GPIO_REVISION 0x0000
+#define OMAP4_GPIO_SYSCONFIG 0x0010
 #define OMAP4_GPIO_EOI 0x0020
 #define OMAP4_GPIO_IRQSTATUSRAW0 0x0024
 #define OMAP4_GPIO_IRQSTATUSRAW1 0x0028
@@ -148,6 +150,7 @@
 #ifndef __ASSEMBLER__
 struct omap_gpio_reg_offs {
 u16 revision;
+u16 sysconfig;
 u16 direction;
 u16 datain;
 u16 dataout;
File diff suppressed because it is too large
@@ -61,6 +61,8 @@ EXPORT_SYMBOL(queued_read_lock_slowpath);
 */
 void queued_write_lock_slowpath(struct qrwlock *lock)
 {
+int cnts;
+
 /* Put the writer into the wait queue */
 arch_spin_lock(&lock->wait_lock);
@@ -74,9 +76,8 @@ void queued_write_lock_slowpath(struct qrwlock *lock)

 /* When no more readers or writers, set the locked flag */
 do {
-atomic_cond_read_acquire(&lock->cnts, VAL == _QW_WAITING);
-} while (atomic_cmpxchg_relaxed(&lock->cnts, _QW_WAITING,
-_QW_LOCKED) != _QW_WAITING);
+cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
+} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
 unlock:
 arch_spin_unlock(&lock->wait_lock);
 }
@@ -2,6 +2,8 @@
 CFLAGS_KASAN_NOSANITIZE := -fno-builtin
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)

+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
 ifdef CONFIG_KASAN_STACK
 stack_enable := 1
 else
@@ -18,8 +20,6 @@ endif

 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address

-cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
-
 # -fasan-shadow-offset fails without -fsanitize
 CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
 -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
@@ -42,14 +42,14 @@ endif # CONFIG_KASAN_GENERIC
 ifdef CONFIG_KASAN_SW_TAGS

 ifdef CONFIG_KASAN_INLINE
-instrumentation_flags := -mllvm -hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
 else
-instrumentation_flags := -mllvm -hwasan-instrument-with-calls=1
+instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
 endif

 CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
--mllvm -hwasan-instrument-stack=$(stack_enable) \
--mllvm -hwasan-use-short-granules=0 \
+$(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
+$(call cc-param,hwasan-use-short-granules=0) \
 $(instrumentation_flags)

 endif # CONFIG_KASAN_SW_TAGS
@@ -79,7 +79,7 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
 if (i == ARRAY_SIZE(tpm2_hash_map))
 return -EINVAL;

-rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_CREATE);
+rc = tpm_try_get_ops(chip);
 if (rc)
 return rc;
@@ -39,9 +39,6 @@
 * sequential memory pages only.
 */

-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf() asm volatile ("mf" ::: "memory")
-
 #define mb() ia64_mf()
 #define rmb() mb()
 #define wmb() mb()
@@ -636,7 +636,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 break;
 }

-if (itr)
+if (itr && itr->parse_snapshot_options)
 return itr->parse_snapshot_options(itr, opts, str);

 pr_err("No AUX area tracing to snapshot\n");
@@ -836,15 +836,18 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 int maps__clone(struct thread *thread, struct maps *parent)
 {
 struct maps *maps = thread->maps;
-int err = -ENOMEM;
+int err;
 struct map *map;

 down_read(&parent->lock);

 maps__for_each_entry(parent, map) {
 struct map *new = map__clone(map);
-if (new == NULL)
+
+if (new == NULL) {
+err = -ENOMEM;
 goto out_unlock;
+}

 err = unwind__prepare_access(maps, new, NULL);
 if (err)