This is the 5.10.159 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmOZpiwACgkQONu9yGCS
 aT6whRAArWRCd5yEvuYtdCIPQh70Yz3vhHKkeqKU3AzAOYxYB+UbmRf8i6Cgv5S7
 b0Mmla4vV1w+tRZcwdPHXoNrwxQ+r6b89mywResfp+FLAti/Ak1wMNR1l0FGQTeM
 z2dIeuhNtVIatvpBw7E1KKGXpSRYfJuzbkT3npKRmWqv1hmcwcqkQ9uZSkFCf3dO
 YETpGjOk6Z8/Ml4z6gEWy54+W3nLf3X8G0i1CfDwxkxcCf4fqIgNCaGaT26Q+Yv1
 GDKtAzmF/FfxwwIdwxx+Y/Iq3+ccEf2WTRQEDWb8K62TBbNjR+q1+Y3IJCfrBj+H
 6sVfnyQm9fTQd7gKLy3gipJxphS4sAZ+OcwY+gMfRQBBHSmccHOC9MhQFgd+wN39
 vBnCG0g6x+9J/DESPOXwrTDnWuGW1Grv7avYlJo2L4WTUFsAuDrtGdCRJ+Bwd9PH
 VVD6eSmXQdrFe9ttf8CQUERlWmNkAmFvT135Hf+qclNsvp7PbtCX2rjgo27NxVbo
 mdOkLLNXBwMNcHjhWhN7MDzoEw7gUqvWWC6vdRgQicrLJphrcV82+C0QIW3A/Uft
 tp/HEsHhGCX6mTXN0x9faz9WMXrfI+j6rUs0UoSVBfP+rwtCqweX5qgz5eUj2jjJ
 v+edcnh/2t4dVwK2dbsniw8y19tI/VHfyfMJiGSY18LsLlMrHiw=
 =QY26
 -----END PGP SIGNATURE-----

Merge 5.10.159 into android12-5.10-lts

Changes in 5.10.159
	arm64: dts: rockchip: keep I2S1 disabled for GPIO function on ROCK Pi 4 series
	arm: dts: rockchip: fix node name for hym8563 rtc
	ARM: dts: rockchip: fix ir-receiver node names
	arm64: dts: rockchip: fix ir-receiver node names
	ARM: dts: rockchip: rk3188: fix lcdc1-rgb24 node name
	ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2 kernels
	ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation
	ASoC: wm8962: Wait for updated value of WM8962_CLOCKING1 register
	ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188
	9p/fd: Use P9_HDRSZ for header size
	regulator: slg51000: Wait after asserting CS pin
	ALSA: seq: Fix function prototype mismatch in snd_seq_expand_var_event
	btrfs: send: avoid unaligned encoded writes when attempting to clone range
	ASoC: soc-pcm: Add NULL check in BE reparenting
	regulator: twl6030: fix get status of twl6032 regulators
	fbcon: Use kzalloc() in fbcon_prepare_logo()
	usb: dwc3: gadget: Disable GUSB2PHYCFG.SUSPHY for End Transfer
	9p/xen: check logical size for buffer size
	net: usb: qmi_wwan: add u-blox 0x1342 composition
	mm/khugepaged: take the right locks for page table retraction
	mm/khugepaged: fix GUP-fast interaction by sending IPI
	mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
	rtc: mc146818: Prevent reading garbage
	rtc: mc146818: Detect and handle broken RTCs
	rtc: mc146818: Dont test for bit 0-5 in Register D
	rtc: cmos: remove stale REVISIT comments
	rtc: mc146818-lib: change return values of mc146818_get_time()
	rtc: Check return value from mc146818_get_time()
	rtc: mc146818-lib: fix RTC presence check
	rtc: mc146818-lib: extract mc146818_avoid_UIP
	rtc: cmos: avoid UIP when writing alarm time
	rtc: cmos: avoid UIP when reading alarm time
	rtc: cmos: Replace spin_lock_irqsave with spin_lock in hard IRQ
	rtc: mc146818: Reduce spinlock section in mc146818_set_time()
	xen/netback: Ensure protocol headers don't fall in the non-linear area
	xen/netback: do some code cleanup
	xen/netback: don't call kfree_skb() with interrupts disabled
	media: videobuf2-core: take mmap_lock in vb2_get_unmapped_area()
	Revert "ARM: dts: imx7: Fix NAND controller size-cells"
	media: v4l2-dv-timings.c: fix too strict blanking sanity checks
	memcg: fix possible use-after-free in memcg_write_event_control()
	mm/gup: fix gup_pud_range() for dax
	Bluetooth: btusb: Add debug message for CSR controllers
	Bluetooth: Fix crash when replugging CSR fake controllers
	KVM: s390: vsie: Fix the initialization of the epoch extension (epdx) field
	drm/vmwgfx: Don't use screen objects when SEV is active
	drm/shmem-helper: Remove errant put in error path
	drm/shmem-helper: Avoid vm_open error paths
	HID: usbhid: Add ALWAYS_POLL quirk for some mice
	HID: hid-lg4ff: Add check for empty lbuf
	HID: core: fix shift-out-of-bounds in hid_report_raw_event
	can: af_can: fix NULL pointer dereference in can_rcv_filter
	mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb page
	rtc: cmos: Disable irq around direct invocation of cmos_interrupt()
	rtc: mc146818-lib: fix locking in mc146818_set_time
	rtc: mc146818-lib: fix signedness bug in mc146818_get_time()
	netfilter: nft_set_pipapo: Actually validate intervals in fields after the first one
	ieee802154: cc2520: Fix error return code in cc2520_hw_init()
	ca8210: Fix crash by zero initializing data
	netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark
	drm/bridge: ti-sn65dsi86: Fix output polarity setting bug
	gpio: amd8111: Fix PCI device reference count leak
	e1000e: Fix TX dispatch condition
	igb: Allocate MSI-X vector when testing
	drm: bridge: dw_hdmi: fix preference of RGB modes over YUV420
	af_unix: Get user_ns from in_skb in unix_diag_get_exact().
	vmxnet3: correctly report encapsulated LRO packet
	Bluetooth: 6LoWPAN: add missing hci_dev_put() in get_l2cap_conn()
	Bluetooth: Fix not cleanup led when bt_init fails
	net: dsa: ksz: Check return value
	selftests: rtnetlink: correct xfrm policy rule in kci_test_ipsec_offload
	mac802154: fix missing INIT_LIST_HEAD in ieee802154_if_add()
	net: encx24j600: Add parentheses to fix precedence
	net: encx24j600: Fix invalid logic in reading of MISTAT register
	xen-netfront: Fix NULL sring after live migration
	net: mvneta: Prevent out of bounds read in mvneta_config_rss()
	i40e: Fix not setting default xps_cpus after reset
	i40e: Fix for VF MAC address 0
	i40e: Disallow ip4 and ip6 l4_4_bytes
	NFC: nci: Bounds check struct nfc_target arrays
	nvme initialize core quirks before calling nvme_init_subsystem
	net: stmmac: fix "snps,axi-config" node property parsing
	ip_gre: do not report erspan version on GRE interface
	net: thunderx: Fix missing destroy_workqueue of nicvf_rx_mode_wq
	net: hisilicon: Fix potential use-after-free in hisi_femac_rx()
	net: hisilicon: Fix potential use-after-free in hix5hd2_rx()
	tipc: Fix potential OOB in tipc_link_proto_rcv()
	ipv4: Fix incorrect route flushing when source address is deleted
	ipv4: Fix incorrect route flushing when table ID 0 is used
	net: dsa: sja1105: fix memory leak in sja1105_setup_devlink_regions()
	tipc: call tipc_lxc_xmit without holding node_read_lock
	ethernet: aeroflex: fix potential skb leak in greth_init_rings()
	xen/netback: fix build warning
	net: plip: don't call kfree_skb/dev_kfree_skb() under spin_lock_irq()
	ipv6: avoid use-after-free in ip6_fragment()
	net: mvneta: Fix an out of bounds check
	macsec: add missing attribute validation for offload
	can: esd_usb: Allow REC and TEC to return to zero
	Linux 5.10.159

Change-Id: I3ec26473c358ffda0ea8a8dd91ee265f58739029
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

Author: Greg Kroah-Hartman
Date:   2022-12-14 13:09:14 +01:00
Commit: 01ef2d0b53
98 changed files with 982 additions and 438 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 158
SUBLEVEL = 159
EXTRAVERSION =
NAME = Dare mighty things


@ -80,7 +80,12 @@ init_rtc_epoch(void)
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
mc146818_get_time(tm);
int ret = mc146818_get_time(tm);
if (ret < 0) {
dev_err_ratelimited(dev, "unable to read current time\n");
return ret;
}
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than create
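
Several callers are updated the same way in this release: mc146818_get_time() now returns a negative value when the RTC cannot be read, and callers are expected to log and propagate that error instead of silently using a garbage time. A minimal sketch of the resulting caller convention, using a hypothetical read_time callback and assuming the usual <linux/mc146818rtc.h> and <linux/rtc.h> declarations:

static int example_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	int ret = mc146818_get_time(tm);	/* < 0 when the RTC is unreadable */

	if (ret < 0) {
		dev_err_ratelimited(dev, "unable to read current time\n");
		return ret;
	}
	return 0;
}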


@ -1221,10 +1221,10 @@ dma_apbh: dma-apbh@33000000 {
clocks = <&clks IMX7D_NAND_USDHC_BUS_RAWNAND_CLK>;
};
gpmi: nand-controller@33002000 {
gpmi: nand-controller@33002000{
compatible = "fsl,imx7d-gpmi-nand";
#address-cells = <1>;
#size-cells = <0>;
#size-cells = <1>;
reg = <0x33002000 0x2000>, <0x33004000 0x4000>;
reg-names = "gpmi-nand", "bch";
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;


@ -31,7 +31,7 @@ phy0: ethernet-phy@0 {
&i2c1 {
status = "okay";
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -67,7 +67,7 @@ spdif_out: spdif-out {
#sound-dai-cells = <0>;
};
ir_recv: gpio-ir-receiver {
ir_recv: ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";


@ -402,7 +402,7 @@ lcdc1_vsync: lcdc1-vsync {
rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
};
lcdc1_rgb24: ldcd1-rgb24 {
lcdc1_rgb24: lcdc1-rgb24 {
rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
<2 RK_PA1 1 &pcfg_pull_none>,
<2 RK_PA2 1 &pcfg_pull_none>,
@ -630,7 +630,6 @@ &emac {
&global_timer {
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
status = "disabled";
};
&local_timer {


@ -54,7 +54,7 @@ vdd_gpu: syr828@41 {
vin-supply = <&vcc_sys>;
};
hym8563@51 {
rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;


@ -233,7 +233,7 @@ vdd_gpu: syr828@41 {
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -157,7 +157,7 @@ vdd_gpu: syr828@41 {
vin-supply = <&vcc_sys>;
};
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -165,7 +165,7 @@ &hdmi {
};
&i2c0 {
hym8563: hym8563@51 {
hym8563: rtc@51 {
compatible = "haoyu,hym8563";
reg = <0x51>;
#clock-cells = <0>;


@ -111,6 +111,13 @@ global_timer: global-timer@1013c200 {
reg = <0x1013c200 0x20>;
interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cru CORE_PERI>;
status = "disabled";
/* The clock source and the sched_clock provided by the arm_global_timer
* on Rockchip rk3066a/rk3188 are quite unstable because their rates
* depend on the CPU frequency.
* Keep the arm_global_timer disabled in order to have the
* DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
*/
};
local_timer: local-timer@1013c600 {


@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->ARM_pc = (__ip); \
(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
(regs)->ARM_sp = current_stack_pointer; \
(regs)->ARM_cpsr = SVC_MODE; \
}
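
The one-line change above matters on Thumb-2 builds because the unwinder does not follow r11 there. frame_pointer() is the arch-level accessor for whichever register the ARM unwinder actually treats as the frame pointer; a rough sketch of the convention it abstracts, as conventionally defined in arch/arm's ptrace header (shown only for illustration, not taken from this diff):

/* illustrative: Thumb-2 kernels unwind via r7, ARM-mode kernels via fp/r11 */
#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs)	((regs)->ARM_r7)
#else
#define frame_pointer(regs)	((regs)->ARM_fp)
#endif

Using the accessor keeps the registers perf synthesizes for tracepoint events consistent with what the unwinder expects on both instruction sets.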


@ -44,12 +44,6 @@
typedef pte_t *pte_addr_t;
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(0))
/*
* Mark the prot value as uncacheable and unbufferable.
*/


@ -10,6 +10,15 @@
#include <linux/const.h>
#include <asm/proc-fns.h>
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
#endif
#ifndef CONFIG_MMU
#include <asm-generic/pgtable-nopud.h>
@ -156,13 +165,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define __S111 __PAGE_SHARED_EXEC
#ifndef __ASSEMBLY__
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];


@ -26,6 +26,13 @@
unsigned long vectors_base;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
*/
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif
@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
*/
void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
early_trap_init((void *)vectors_base);
mpu_setup();
/* allocate the zero page. */
zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
/*


@ -13,7 +13,7 @@ chosen {
stdout-path = "serial2:1500000n8";
};
ir_rx {
ir-receiver {
compatible = "gpio-ir-receiver";
gpios = <&gpio0 RK_PC0 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";


@ -448,7 +448,6 @@ &i2s0 {
&i2s1 {
rockchip,playback-channels = <2>;
rockchip,capture-channels = <2>;
status = "okay";
};
&i2s2 {


@ -535,8 +535,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
scb_s->eca |= scb_o->eca & ECA_CEI;
/* Epoch Extension */
if (test_kvm_facility(vcpu->kvm, 139))
if (test_kvm_facility(vcpu->kvm, 139)) {
scb_s->ecd |= scb_o->ecd & ECD_MEF;
scb_s->epdx = scb_o->epdx;
}
/* etoken */
if (test_kvm_facility(vcpu->kvm, 156))


@ -1325,8 +1325,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
hpet_rtc_timer_reinit();
memset(&curr_time, 0, sizeof(struct rtc_time));
if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
mc146818_get_time(&curr_time);
if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
if (unlikely(mc146818_get_time(&curr_time) < 0)) {
pr_err_ratelimited("unable to read current time from RTC\n");
return IRQ_HANDLED;
}
}
if (hpet_rtc_flags & RTC_UIE &&
curr_time.tm_sec != hpet_prev_update_sec) {


@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
mc146818_get_time(&time);
if (mc146818_get_time(&time) < 0) {
pr_err("Unable to read current time from RTC\n");
return 0;
}
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)


@ -1833,6 +1833,11 @@ static int btusb_setup_csr(struct hci_dev *hdev)
rp = (struct hci_rp_read_local_version *)skb->data;
bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x; LMP ver=%u subver=%04x; manufacturer=%u",
le16_to_cpu(rp->hci_ver), le16_to_cpu(rp->hci_rev),
le16_to_cpu(rp->lmp_ver), le16_to_cpu(rp->lmp_subver),
le16_to_cpu(rp->manufacturer));
/* Detect a wide host of Chinese controllers that aren't CSR.
*
* Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891


@ -231,7 +231,10 @@ static int __init amd_gpio_init(void)
ioport_unmap(gp.pm);
goto out;
}
return 0;
out:
pci_dev_put(pdev);
return err;
}
@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void)
{
gpiochip_remove(&gp.chip);
ioport_unmap(gp.pm);
pci_dev_put(gp.pdev);
}
module_init(amd_gpio_init);


@ -2609,6 +2609,9 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
* if supported. In any case the default RGB888 format is added
*/
/* Default 8bit RGB fallback */
output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
if (max_bpc >= 16 && info->bpc == 16) {
if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
@ -2642,9 +2645,6 @@ static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
/* Default 8bit RGB fallback */
output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
*num_output_fmts = i;
return output_fmts;


@ -621,9 +621,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
&pdata->bridge.encoder->crtc->state->adjusted_mode;
u8 hsync_polarity = 0, vsync_polarity = 0;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
hsync_polarity = CHA_HSYNC_POLARITY;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
vsync_polarity = CHA_VSYNC_POLARITY;
ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,


@ -563,12 +563,20 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
int ret;
WARN_ON(shmem->base.import_attach);
ret = drm_gem_shmem_get_pages(shmem);
WARN_ON_ONCE(ret != 0);
mutex_lock(&shmem->pages_lock);
/*
* We should have already pinned the pages when the buffer was first
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
if (!WARN_ON_ONCE(!shmem->pages_use_count))
shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock);
drm_gem_vm_open(vma);
}
@ -616,10 +624,8 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
if (ret) {
drm_gem_vm_close(vma);
if (ret)
return ret;
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);


@ -949,6 +949,10 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
int i, ret;
/* Screen objects won't work if GMR's aren't available */
if (!dev_priv->has_gmr)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
DRM_INFO("Not using screen objects,"
" missing cap SCREEN_OBJECT_2\n");


@ -1310,6 +1310,9 @@ static s32 snto32(__u32 value, unsigned n)
if (!value || !n)
return 0;
if (n > 32)
n = 32;
switch (n) {
case 8: return ((__s8)value);
case 16: return ((__s16)value);


@ -257,6 +257,7 @@
#define USB_DEVICE_ID_CH_AXIS_295 0x001c
#define USB_VENDOR_ID_CHERRY 0x046a
#define USB_DEVICE_ID_CHERRY_MOUSE_000C 0x000c
#define USB_DEVICE_ID_CHERRY_CYMOTION 0x0023
#define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR 0x0027
@ -874,6 +875,7 @@
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
#define USB_DEVICE_ID_MS_MOUSE_0783 0x0783
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@ -1306,6 +1308,7 @@
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_MOUSE_4D22 0x4d22
#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A 0x4e2a
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
#define USB_DEVICE_ID_PRIMAX_REZEL 0x4e72
#define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F 0x4d0f


@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
return -ENOMEM;
i = strlen(lbuf);
if (i == 0) {
kfree(lbuf);
return -EINVAL;
}
if (lbuf[i-1] == '\n') {
if (i == 1) {
kfree(lbuf);


@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },


@ -787,7 +787,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
/*
* Set this now to ensure that drivers see the correct q->memory value
* in the queue_setup op.
*/
mutex_lock(&q->mmap_lock);
q->memory = memory;
mutex_unlock(&q->mmap_lock);
/*
* Ask the driver how many buffers and planes per buffer it requires.
@ -796,22 +802,27 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
plane_sizes, q->alloc_devs);
if (ret)
return ret;
goto error;
/* Check that driver has set sane values */
if (WARN_ON(!num_planes))
return -EINVAL;
if (WARN_ON(!num_planes)) {
ret = -EINVAL;
goto error;
}
for (i = 0; i < num_planes; i++)
if (WARN_ON(!plane_sizes[i]))
return -EINVAL;
if (WARN_ON(!plane_sizes[i])) {
ret = -EINVAL;
goto error;
}
/* Finally, allocate buffers and video memory */
allocated_buffers =
__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
if (allocated_buffers == 0) {
dprintk(q, 1, "memory allocation failed\n");
return -ENOMEM;
ret = -ENOMEM;
goto error;
}
/*
@ -852,7 +863,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
if (ret < 0) {
/*
* Note: __vb2_queue_free() will subtract 'allocated_buffers'
* from q->num_buffers.
* from q->num_buffers and it will reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
__vb2_queue_free(q, allocated_buffers);
mutex_unlock(&q->mmap_lock);
@ -868,6 +880,12 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
q->waiting_for_buffers = !q->is_output;
return 0;
error:
mutex_lock(&q->mmap_lock);
q->memory = VB2_MEMORY_UNKNOWN;
mutex_unlock(&q->mmap_lock);
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
@ -878,6 +896,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
bool no_previous_buffers = !q->num_buffers;
int ret;
if (q->num_buffers == VB2_MAX_FRAME) {
@ -885,13 +904,19 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
return -ENOBUFS;
}
if (!q->num_buffers) {
if (no_previous_buffers) {
if (q->waiting_in_dqbuf && *count) {
dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
return -EBUSY;
}
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
/*
* Set this now to ensure that drivers see the correct q->memory
* value in the queue_setup op.
*/
mutex_lock(&q->mmap_lock);
q->memory = memory;
mutex_unlock(&q->mmap_lock);
q->waiting_for_buffers = !q->is_output;
} else {
if (q->memory != memory) {
@ -914,14 +939,15 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
ret = call_qop(q, queue_setup, q, &num_buffers,
&num_planes, plane_sizes, q->alloc_devs);
if (ret)
return ret;
goto error;
/* Finally, allocate buffers and video memory */
allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
num_planes, plane_sizes);
if (allocated_buffers == 0) {
dprintk(q, 1, "memory allocation failed\n");
return -ENOMEM;
ret = -ENOMEM;
goto error;
}
/*
@ -952,7 +978,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
if (ret < 0) {
/*
* Note: __vb2_queue_free() will subtract 'allocated_buffers'
* from q->num_buffers.
* from q->num_buffers and it will reset q->memory to
* VB2_MEMORY_UNKNOWN.
*/
__vb2_queue_free(q, allocated_buffers);
mutex_unlock(&q->mmap_lock);
@ -967,6 +994,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
*count = allocated_buffers;
return 0;
error:
if (no_previous_buffers) {
mutex_lock(&q->mmap_lock);
q->memory = VB2_MEMORY_UNKNOWN;
mutex_unlock(&q->mmap_lock);
}
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
@ -2120,6 +2155,22 @@ static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
struct vb2_buffer *vb;
unsigned int buffer, plane;
/*
* Sanity checks to ensure the lock is held, MEMORY_MMAP is
* used and fileio isn't active.
*/
lockdep_assert_held(&q->mmap_lock);
if (q->memory != VB2_MEMORY_MMAP) {
dprintk(q, 1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
if (vb2_fileio_is_active(q)) {
dprintk(q, 1, "file io in progress\n");
return -EBUSY;
}
/*
* Go over all buffers and their planes, comparing the given offset
* with an offset assigned to each plane. If a match is found,
@ -2219,11 +2270,6 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
int ret;
unsigned long length;
if (q->memory != VB2_MEMORY_MMAP) {
dprintk(q, 1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
/*
* Check memory area access mode.
*/
@ -2245,14 +2291,9 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
mutex_lock(&q->mmap_lock);
if (vb2_fileio_is_active(q)) {
dprintk(q, 1, "mmap: file io in progress\n");
ret = -EBUSY;
goto unlock;
}
/*
* Find the plane corresponding to the offset passed by userspace.
* Find the plane corresponding to the offset passed by userspace. This
* will return an error if not MEMORY_MMAP or file I/O is in progress.
*/
ret = __find_plane_by_offset(q, off, &buffer, &plane);
if (ret)
@ -2305,22 +2346,25 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
void *vaddr;
int ret;
if (q->memory != VB2_MEMORY_MMAP) {
dprintk(q, 1, "queue is not currently set up for mmap\n");
return -EINVAL;
}
mutex_lock(&q->mmap_lock);
/*
* Find the plane corresponding to the offset passed by userspace.
* Find the plane corresponding to the offset passed by userspace. This
* will return an error if not MEMORY_MMAP or file I/O is in progress.
*/
ret = __find_plane_by_offset(q, off, &buffer, &plane);
if (ret)
return ret;
goto unlock;
vb = q->bufs[buffer];
vaddr = vb2_plane_vaddr(vb, plane);
mutex_unlock(&q->mmap_lock);
return vaddr ? (unsigned long)vaddr : -EINVAL;
unlock:
mutex_unlock(&q->mmap_lock);
return ret;
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif


@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
const struct v4l2_bt_timings *bt = &t->bt;
const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
u32 caps = cap->capabilities;
const u32 max_vert = 10240;
u32 max_hor = 3 * bt->width;
if (t->type != V4L2_DV_BT_656_1120)
return false;
@ -166,14 +168,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
if (!bt->interlaced &&
(bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
return false;
if (bt->hfrontporch > 2 * bt->width ||
bt->hsync > 1024 || bt->hbackporch > 1024)
/*
* Some video receivers cannot properly separate the frontporch,
* backporch and sync values, and instead they only have the total
* blanking. That can be assigned to any of these three fields.
* So just check that none of these are way out of range.
*/
if (bt->hfrontporch > max_hor ||
bt->hsync > max_hor || bt->hbackporch > max_hor)
return false;
if (bt->vfrontporch > 4096 ||
bt->vsync > 128 || bt->vbackporch > 4096)
if (bt->vfrontporch > max_vert ||
bt->vsync > max_vert || bt->vbackporch > max_vert)
return false;
if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
bt->il_vsync > 128 || bt->il_vbackporch > 4096))
if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
return false;
return fnc == NULL || fnc(t, fnc_handle);
}


@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
u8 rxerr = msg->msg.rx.data[2];
u8 txerr = msg->msg.rx.data[3];
netdev_dbg(priv->netdev,
"CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
msg->msg.rx.dlc, state, ecc, rxerr, txerr);
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb == NULL) {
stats->rx_dropped++;
@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
break;
default:
priv->can.state = CAN_STATE_ERROR_ACTIVE;
txerr = 0;
rxerr = 0;
break;
}
} else {


@ -95,6 +95,8 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
if (IS_ERR(region)) {
while (--i >= 0)
dsa_devlink_region_destroy(priv->regions[i]);
kfree(priv->regions);
return PTR_ERR(region);
}


@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
if (dma_mapping_error(greth->dev, dma_addr)) {
if (netif_msg_ifup(greth))
dev_err(greth->dev, "Could not create initial DMA mapping\n");
dev_kfree_skb(skb);
goto cleanup;
}
greth->rx_skbuff[i] = skb;


@ -2258,7 +2258,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
goto err_unregister_interrupts;
goto err_destroy_workqueue;
}
nic->msg_enable = debug;
@ -2267,6 +2267,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_destroy_workqueue:
destroy_workqueue(nic->nicvf_rx_mode_wq);
err_unregister_interrupts:
nicvf_unregister_interrupts(nic);
err_free_netdev:


@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_bytes += len;
next:
pos = (pos + 1) % rxq->num;
if (rx_pkts_num >= limit)


@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&priv->napi, skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_bytes += len;
next:
pos = dma_ring_incr(pos, RX_DESC_NUM);
}
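
Both hisilicon hunks above fix the same ordering bug: napi_gro_receive() hands the skb to the network stack, which may free it, so reading skb->len afterwards is a potential use-after-free. The length has to be captured first and the saved value used for the byte counter. A hedged sketch of the fixed shape of the receive accounting (made-up function name, standard netdev types):

/* illustrative only: capture the length before the skb is handed off */
static void rx_account_one(struct net_device *dev, struct napi_struct *napi,
			   struct sk_buff *skb)
{
	unsigned int len = skb->len;

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);		/* skb may be freed from here on */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;		/* saved value, not skb->len */
}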


@ -5937,9 +5937,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(tx_ring,
(MAX_SKB_FRAGS *
((MAX_SKB_FRAGS + 1) *
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
adapter->tx_fifo_limit) + 4));
if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {


@ -4234,11 +4234,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
/* First 4 bytes of L4 header */
if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
else if (!usr_ip4_spec->l4_4_bytes)
new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
else
if (usr_ip4_spec->l4_4_bytes)
return -EOPNOTSUPP;
/* Filtering on Type of Service is not supported. */


@ -10065,6 +10065,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
return 0;
}
/**
* i40e_clean_xps_state - clean xps state for every tx_ring
* @vsi: ptr to the VSI
**/
static void i40e_clean_xps_state(struct i40e_vsi *vsi)
{
int i;
if (vsi->tx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->tx_rings[i])
clear_bit(__I40E_TX_XPS_INIT_DONE,
vsi->tx_rings[i]->state);
}
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
@ -10096,8 +10111,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
if (pf->vsi[v]) {
i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
}
}
i40e_shutdown_adminq(&pf->hw);


@ -1525,6 +1525,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
i40e_cleanup_reset_vf(vf);
i40e_flush(hw);
usleep_range(20000, 40000);
clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
return true;
@ -1648,6 +1649,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
}
i40e_flush(hw);
usleep_range(20000, 40000);
clear_bit(__I40E_VF_DISABLE, pf->state);
return true;


@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
*data = 1;
return -1;
}
wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
wr32(E1000_EIMS, BIT(0));
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,


@ -4140,7 +4140,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
/* Use the cpu associated to the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline.
*/
if (cpu_online(pp->rxq_def))
if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
elected_cpu = pp->rxq_def;
max_cpu = num_present_cpus();


@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
goto err_out;
usleep_range(26, 100);
while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
(mistat & BUSY))
cpu_relax();
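
The two encx24j600 hunks above fix an assignment-versus-comparison precedence bug and the inverted poll condition that came with it. Because != binds tighter than =, the old code stored the boolean result of the comparison in ret rather than regmap_read()'s return value, so a failed register read was silently reported as success. A self-contained illustration with a stand-in poll_status() in place of regmap_read() (hypothetical names, plain C so it compiles and runs as-is):

#include <stdio.h>

#define BUSY 0x1

static int calls;

/* stand-in for regmap_read(): the first read fails, the second succeeds */
static int poll_status(unsigned int *val)
{
	if (calls++ == 0) {
		*val = BUSY;
		return -5;		/* e.g. -EIO */
	}
	*val = 0;
	return 0;
}

int main(void)
{
	unsigned int mistat;
	int ret;

	/* buggy: ret = (poll_status(&mistat) != 0); keeps polling past the
	 * failed read and ends up reporting ret == 0 as if it had succeeded
	 */
	calls = 0;
	while ((ret = poll_status(&mistat) != 0) && (mistat & BUSY))
		;
	printf("buggy ret = %d\n", ret);	/* prints 0: the error was lost */

	/* fixed: ret holds the real return value; poll only while the read
	 * succeeds and the controller is still BUSY
	 */
	calls = 0;
	while (((ret = poll_status(&mistat)) == 0) && (mistat & BUSY))
		;
	printf("fixed ret = %d\n", ret);	/* prints -5: the error is propagated */

	return 0;
}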


@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
axi->axi_fb = of_property_read_bool(np, "snps,fb");
axi->axi_mb = of_property_read_bool(np, "snps,mb");
axi->axi_rb = of_property_read_bool(np, "snps,rb");
if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
axi->axi_wr_osr_lmt = 1;


@ -926,7 +926,7 @@ static int ca8210_spi_transfer(
dev_dbg(&spi->dev, "%s called\n", __func__);
cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
if (!cas_ctl)
return -ENOMEM;


@ -970,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
if (timeout-- <= 0) {
dev_err(&priv->spi->dev, "oscillator start failed!\n");
return ret;
return -ETIMEDOUT;
}
udelay(1);
} while (!(status & CC2520_STATUS_XOSC32M_STABLE));


@ -3680,6 +3680,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
};
static void macsec_free_netdev(struct net_device *dev)


@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
}
rcv->state = PLIP_PK_DONE;
if (rcv->skb) {
kfree_skb(rcv->skb);
dev_kfree_skb_irq(rcv->skb);
rcv->skb = NULL;
}
snd->state = PLIP_PK_DONE;
if (snd->skb) {
dev_kfree_skb(snd->skb);
dev_consume_skb_irq(snd->skb);
snd->skb = NULL;
}
spin_unlock_irq(&nl->lock);


@ -1352,6 +1352,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/
{QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */


@ -1356,6 +1356,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
};
u32 num_pkts = 0;
bool skip_page_frags = false;
bool encap_lro = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
u16 segCnt = 0, mss = 0;
@ -1496,13 +1497,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
if (VMXNET3_VERSION_GE_2(adapter) &&
rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
struct Vmxnet3_RxCompDescExt *rcdlro;
union Vmxnet3_GenericDesc *gdesc;
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
gdesc = (union Vmxnet3_GenericDesc *)rcd;
segCnt = rcdlro->segCnt;
WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
encap_lro = (le32_to_cpu(gdesc->dword[0]) &
(1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
} else {
segCnt = 0;
}
@ -1570,7 +1576,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
if (!rcd->tcp ||
if ((!rcd->tcp && !encap_lro) ||
!(adapter->netdev->features & NETIF_F_LRO))
goto not_lro;
@ -1579,7 +1585,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb_shinfo(skb)->gso_size = mss;
skb_shinfo(skb)->gso_segs = segCnt;
} else if (segCnt != 0 || skb->len > mtu) {
} else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
u32 hlen;
hlen = vmxnet3_get_hdr_len(adapter, skb,
@ -1608,6 +1614,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
napi_gro_receive(&rq->napi, skb);
ctx->skb = NULL;
encap_lro = false;
num_pkts++;
}


@ -48,7 +48,6 @@
#include <linux/debugfs.h>
typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
struct xen_netif_tx_request req; /* tx request */
@ -82,8 +81,6 @@ struct xenvif_rx_meta {
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
/* The maximum number of frags is derived from the size of a grant (same
@ -367,11 +364,6 @@ void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
@ -394,17 +386,13 @@ int xenvif_dealloc_kthread(void *data);
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
return MAX_PENDING_REQS -


@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
wake_up(&queue->dealloc_wq);
}
int xenvif_schedulable(struct xenvif *vif)
static int xenvif_schedulable(struct xenvif *vif)
{
return netif_running(vif->dev) &&
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
unsigned int id = queue->id;
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}
void xenvif_wake_queue(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
unsigned int id = queue->id;
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
skb_clear_hash(skb);
xenvif_rx_queue_tail(queue, skb);
if (!xenvif_rx_queue_tail(queue, skb))
goto drop;
xenvif_kick_thread(queue);
return NETDEV_TX_OK;
drop:
vif->dev->stats.tx_dropped++;
dev_kfree_skb(skb);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}


@ -112,6 +112,8 @@ static void make_tx_response(struct xenvif_queue *queue,
s8 st);
static void push_tx_responses(struct xenvif_queue *queue);
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
@ -330,10 +332,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
u16 pending_idx;
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
u16 pending_idx,
@ -368,31 +373,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_map_grant_ref *gop,
unsigned int frag_overflow,
struct sk_buff *nskb)
static void xenvif_get_requests(struct xenvif_queue *queue,
struct sk_buff *skb,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txfrags,
unsigned *copy_ops,
unsigned *map_ops,
unsigned int frag_overflow,
struct sk_buff *nskb,
unsigned int extra_count,
unsigned int data_len)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
int start;
u16 pending_idx;
pending_ring_idx_t index;
unsigned int nr_slots;
struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
struct xen_netif_tx_request *txp = first;
nr_slots = shinfo->nr_frags;
nr_slots = shinfo->nr_frags + 1;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
copy_count(skb) = 0;
for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, txp++, gop++) {
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
while (data_len > 0) {
int amount = data_len > txp->size ? txp->size : data_len;
cop->source.u.ref = txp->gref;
cop->source.domid = queue->vif->domid;
cop->source.offset = txp->offset;
cop->dest.domid = DOMID_SELF;
cop->dest.offset = (offset_in_page(skb->data +
skb_headlen(skb) -
data_len)) & ~XEN_PAGE_MASK;
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
- data_len);
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
callback_param(queue, pending_idx).ctx = NULL;
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
copy_count(skb)++;
cop++;
data_len -= amount;
if (amount == txp->size) {
/* The copy op covered the full tx_request */
memcpy(&queue->pending_tx_info[pending_idx].req,
txp, sizeof(*txp));
queue->pending_tx_info[pending_idx].extra_count =
(txp == first) ? extra_count : 0;
if (txp == first)
txp = txfrags;
else
txp++;
queue->pending_cons++;
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
* The remainder will be mapped.
*/
txp->offset += amount;
txp->size -= amount;
}
}
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
shinfo->nr_frags++, gop++) {
index = pending_index(queue->pending_cons++);
pending_idx = queue->pending_ring[index];
xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
xenvif_tx_create_map_op(queue, pending_idx, txp,
txp == first ? extra_count : 0, gop);
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
if (txp == first)
txp = txfrags;
else
txp++;
}
if (frag_overflow) {
@ -413,7 +480,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
skb_shinfo(skb)->frag_list = nskb;
}
return gop;
(*copy_ops) = cop - queue->tx_copy_ops;
(*map_ops) = gop - queue->tx_map_ops;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
@ -449,7 +517,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct gnttab_copy **gopp_copy)
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
u16 pending_idx;
/* This always points to the shinfo of the skb being checked, which
* could be either the first or the one on the frag_list
*/
@ -460,24 +528,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
const bool sharedslot = nr_frags &&
frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
frag_get_pending_idx(&shinfo->frags[0]) ==
copy_pending_idx(skb, copy_count(skb) - 1);
int i, err = 0;
/* Check status of header. */
err = (*gopp_copy)->status;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
if (!sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
for (i = 0; i < copy_count(skb); i++) {
int newerr;
/* Check status of header. */
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
} else {
err = newerr;
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
}
(*gopp_copy)++;
}
(*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@ -524,14 +605,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
if (err)
continue;
/* First error: if the header haven't shared a slot with the
* first frag, release it as well.
*/
if (!sharedslot)
xenvif_idx_release(queue,
XENVIF_TX_CB(skb)->pending_idx,
XEN_NETIF_RSP_OKAY);
/* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
@ -801,7 +874,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
unsigned *copy_ops,
unsigned *map_ops)
{
struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
struct sk_buff *skb, *nskb;
int ret;
unsigned int frag_overflow;
@ -883,8 +955,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
continue;
}
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
XEN_NETBACK_TX_COPY_LEN : txreq.size;
ret = xenvif_count_requests(queue, &txreq, extra_count,
txfrags, work_to_do);
if (unlikely(ret < 0))
break;
@ -910,9 +986,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
index = pending_index(queue->pending_cons);
pending_idx = queue->pending_ring[index];
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
XEN_NETBACK_TX_COPY_LEN : txreq.size;
if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
data_len = txreq.size;
skb = xenvif_alloc_skb(data_len);
if (unlikely(skb == NULL)) {
@ -923,8 +998,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size)
skb_shinfo(skb)->nr_frags++;
/* At this point shinfo->nr_frags is in fact the number of
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
*/
@ -986,54 +1059,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
type);
}
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
virt_to_gfn(skb->data);
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
queue->tx_copy_ops[*copy_ops].dest.offset =
offset_in_page(skb->data) & ~XEN_PAGE_MASK;
queue->tx_copy_ops[*copy_ops].len = data_len;
queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
(*copy_ops)++;
if (data_len < txreq.size) {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
xenvif_tx_create_map_op(queue, pending_idx, &txreq,
extra_count, gop);
gop++;
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
memcpy(&queue->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
queue->pending_tx_info[pending_idx].extra_count =
extra_count;
}
queue->pending_cons++;
gop = xenvif_get_requests(queue, skb, txfrags, gop,
frag_overflow, nskb);
xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
map_ops, frag_overflow, nskb, extra_count,
data_len);
__skb_queue_tail(&queue->tx_queue, skb);
queue->tx.req_cons = idx;
if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
(*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
break;
}
(*map_ops) = gop - queue->tx_map_ops;
return;
}
@ -1112,9 +1150,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
pending_idx = copy_pending_idx(skb, 0);
txp = &queue->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
@ -1133,18 +1170,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
continue;
}
data_len = skb->len;
callback_param(queue, pending_idx).ctx = NULL;
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
@ -1330,7 +1355,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
unsigned nr_mops, nr_cops = 0;
unsigned nr_mops = 0, nr_cops = 0;
int work_done, ret;
if (unlikely(!tx_work_todo(queue)))
@ -1417,7 +1442,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
notify_remote_via_irq(queue->tx_irq);
}
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
int ret;
struct gnttab_unmap_grant_ref tx_unmap_op;


@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
return false;
}
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
unsigned long flags;
bool ret = true;
spin_lock_irqsave(&queue->rx_queue.lock, flags);
@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
struct net_device *dev = queue->vif->dev;
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
kfree_skb(skb);
queue->vif->dev->stats.rx_dropped++;
ret = false;
} else {
if (skb_queue_empty(&queue->rx_queue))
xenvif_update_needed_slots(queue, skb);
@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
}
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return ret;
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
@ -486,7 +488,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
#define RX_BATCH_SIZE 64
void xenvif_rx_action(struct xenvif_queue *queue)
static void xenvif_rx_action(struct xenvif_queue *queue)
{
struct sk_buff_head completed_skbs;
unsigned int work_done = 0;


@ -1868,6 +1868,12 @@ static int netfront_resume(struct xenbus_device *dev)
netif_tx_unlock_bh(info->netdev);
xennet_disconnect_backend(info);
rtnl_lock();
if (info->queues)
xennet_destroy_queues(info);
rtnl_unlock();
return 0;
}


@ -3092,10 +3092,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (!ctrl->identified) {
int i;
ret = nvme_init_subsystem(ctrl, id);
if (ret)
goto out_free;
/*
* Check for quirks. Quirk can depend on firmware version,
* so, in principle, the set of quirks present can change
@ -3108,6 +3104,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (quirk_matches(id, &core_quirks[i]))
ctrl->quirks |= core_quirks[i].quirks;
}
ret = nvme_init_subsystem(ctrl, id);
if (ret)
goto out_free;
}
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));


@ -457,6 +457,8 @@ static int slg51000_i2c_probe(struct i2c_client *client)
chip->cs_gpiod = cs_gpiod;
}
usleep_range(10000, 11000);
i2c_set_clientdata(client, chip);
chip->chip_irq = client->irq;
chip->dev = dev;


@ -67,6 +67,7 @@ struct twlreg_info {
#define TWL6030_CFG_STATE_SLEEP 0x03
#define TWL6030_CFG_STATE_GRP_SHIFT 5
#define TWL6030_CFG_STATE_APP_SHIFT 2
#define TWL6030_CFG_STATE_MASK 0x03
#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
TWL6030_CFG_STATE_APP_SHIFT)
@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
if (grp < 0)
return grp;
grp &= P1_GRP_6030;
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val = TWL6030_CFG_STATE_APP(val);
} else {
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val &= TWL6030_CFG_STATE_MASK;
grp = 1;
}
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
val = TWL6030_CFG_STATE_APP(val);
return grp && (val == TWL6030_CFG_STATE_ON);
}
@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
switch (TWL6030_CFG_STATE_APP(val)) {
if (info->features & TWL6032_SUBCLASS)
val &= TWL6030_CFG_STATE_MASK;
else
val = TWL6030_CFG_STATE_APP(val);
switch (val) {
case TWL6030_CFG_STATE_ON:
return REGULATOR_STATUS_NORMAL;


@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
int ret;
/*
* If pm_trace abused the RTC for storage, set the timespec to 0,
* which tells the caller that this RTC value is unusable.
@ -229,29 +231,64 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
/* REVISIT: if the clock has a "century" register, use
* that instead of the heuristic in mc146818_get_time().
* That'll make Y3K compatility (year > 2070) easy!
*/
mc146818_get_time(t);
ret = mc146818_get_time(t);
if (ret < 0) {
dev_err_ratelimited(dev, "unable to read current time\n");
return ret;
}
return 0;
}
static int cmos_set_time(struct device *dev, struct rtc_time *t)
{
/* REVISIT: set the "century" register if available
*
* NOTE: this ignores the issue whereby updating the seconds
/* NOTE: this ignores the issue whereby updating the seconds
* takes effect exactly 500ms after we write the register.
* (Also queueing and other delays before we get this far.)
*/
return mc146818_set_time(t);
}
struct cmos_read_alarm_callback_param {
struct cmos_rtc *cmos;
struct rtc_time *time;
unsigned char rtc_control;
};
static void cmos_read_alarm_callback(unsigned char __always_unused seconds,
void *param_in)
{
struct cmos_read_alarm_callback_param *p =
(struct cmos_read_alarm_callback_param *)param_in;
struct rtc_time *time = p->time;
time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
time->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
time->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
if (p->cmos->day_alrm) {
/* ignore upper bits on readback per ACPI spec */
time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f;
if (!time->tm_mday)
time->tm_mday = -1;
if (p->cmos->mon_alrm) {
time->tm_mon = CMOS_READ(p->cmos->mon_alrm);
if (!time->tm_mon)
time->tm_mon = -1;
}
}
p->rtc_control = CMOS_READ(RTC_CONTROL);
}
static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char rtc_control;
struct cmos_read_alarm_callback_param p = {
.cmos = cmos,
.time = &t->time,
};
/* This not only a rtc_op, but also called directly */
if (!is_valid_irq(cmos->irq))
@ -262,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
* the future.
*/
spin_lock_irq(&rtc_lock);
t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM);
t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM);
/* Some Intel chipsets disconnect the alarm registers when the clock
* update is in progress - during this time reads return bogus values
* and writes may fail silently. See for example "7th Generation Intel®
* Processor Family I/O for U/Y Platforms [...] Datasheet", section
* 27.7.1
*
* Use the mc146818_avoid_UIP() function to avoid this.
*/
if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
return -EIO;
if (cmos->day_alrm) {
/* ignore upper bits on readback per ACPI spec */
t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f;
if (!t->time.tm_mday)
t->time.tm_mday = -1;
if (cmos->mon_alrm) {
t->time.tm_mon = CMOS_READ(cmos->mon_alrm);
if (!t->time.tm_mon)
t->time.tm_mon = -1;
}
}
rtc_control = CMOS_READ(RTC_CONTROL);
spin_unlock_irq(&rtc_lock);
if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
if (((unsigned)t->time.tm_sec) < 0x60)
t->time.tm_sec = bcd2bin(t->time.tm_sec);
else
@ -312,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
}
}
t->enabled = !!(rtc_control & RTC_AIE);
t->enabled = !!(p.rtc_control & RTC_AIE);
t->pending = 0;
return 0;
@ -443,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
struct cmos_set_alarm_callback_param {
struct cmos_rtc *cmos;
unsigned char mon, mday, hrs, min, sec;
struct rtc_wkalrm *t;
};
/* Note: this function may be executed by mc146818_avoid_UIP() more than
* once
*/
static void cmos_set_alarm_callback(unsigned char __always_unused seconds,
void *param_in)
{
struct cmos_set_alarm_callback_param *p =
(struct cmos_set_alarm_callback_param *)param_in;
/* next rtc irq must not be from previous alarm setting */
cmos_irq_disable(p->cmos, RTC_AIE);
/* update alarm */
CMOS_WRITE(p->hrs, RTC_HOURS_ALARM);
CMOS_WRITE(p->min, RTC_MINUTES_ALARM);
CMOS_WRITE(p->sec, RTC_SECONDS_ALARM);
/* the system may support an "enhanced" alarm */
if (p->cmos->day_alrm) {
CMOS_WRITE(p->mday, p->cmos->day_alrm);
if (p->cmos->mon_alrm)
CMOS_WRITE(p->mon, p->cmos->mon_alrm);
}
if (use_hpet_alarm()) {
/*
* FIXME the HPET alarm glue currently ignores day_alrm
* and mon_alrm ...
*/
hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min,
p->t->time.tm_sec);
}
if (p->t->enabled)
cmos_irq_enable(p->cmos, RTC_AIE);
}
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char mon, mday, hrs, min, sec, rtc_control;
struct cmos_set_alarm_callback_param p = {
.cmos = cmos,
.t = t
};
unsigned char rtc_control;
int ret;
/* This is not only an rtc_op, but is also called directly */
@ -457,11 +531,11 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (ret < 0)
return ret;
mon = t->time.tm_mon + 1;
mday = t->time.tm_mday;
hrs = t->time.tm_hour;
min = t->time.tm_min;
sec = t->time.tm_sec;
p.mon = t->time.tm_mon + 1;
p.mday = t->time.tm_mday;
p.hrs = t->time.tm_hour;
p.min = t->time.tm_min;
p.sec = t->time.tm_sec;
spin_lock_irq(&rtc_lock);
rtc_control = CMOS_READ(RTC_CONTROL);
@ -469,43 +543,21 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
/* Writing 0xff means "don't care" or "match all". */
mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
min = (min < 60) ? bin2bcd(min) : 0xff;
sec = (sec < 60) ? bin2bcd(sec) : 0xff;
p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff;
p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff;
p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff;
p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff;
p.sec = (p.sec < 60) ? bin2bcd(p.sec) : 0xff;
}
spin_lock_irq(&rtc_lock);
/* next rtc irq must not be from previous alarm setting */
cmos_irq_disable(cmos, RTC_AIE);
/* update alarm */
CMOS_WRITE(hrs, RTC_HOURS_ALARM);
CMOS_WRITE(min, RTC_MINUTES_ALARM);
CMOS_WRITE(sec, RTC_SECONDS_ALARM);
/* the system may support an "enhanced" alarm */
if (cmos->day_alrm) {
CMOS_WRITE(mday, cmos->day_alrm);
if (cmos->mon_alrm)
CMOS_WRITE(mon, cmos->mon_alrm);
}
if (use_hpet_alarm()) {
/*
* FIXME the HPET alarm glue currently ignores day_alrm
* and mon_alrm ...
*/
hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
t->time.tm_sec);
}
if (t->enabled)
cmos_irq_enable(cmos, RTC_AIE);
spin_unlock_irq(&rtc_lock);
/*
* Some Intel chipsets disconnect the alarm registers when the clock
* update is in progress - during this time writes fail silently.
*
* Use mc146818_avoid_UIP() to avoid this.
*/
if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
return -EIO;
cmos->alarm_expires = rtc_tm_to_time64(&t->time);
@ -652,11 +704,10 @@ static struct cmos_rtc cmos_rtc;
static irqreturn_t cmos_interrupt(int irq, void *p)
{
unsigned long flags;
u8 irqstat;
u8 rtc_control;
spin_lock_irqsave(&rtc_lock, flags);
spin_lock(&rtc_lock);
/* When the HPET interrupt handler calls us, the interrupt
* status is passed as arg1 instead of the irq number. But
@ -690,7 +741,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
spin_unlock_irqrestore(&rtc_lock, flags);
spin_unlock(&rtc_lock);
if (is_intr(irqstat)) {
rtc_update_irq(p, 1, irqstat);
@ -806,6 +857,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
if (!mc146818_does_rtc_work()) {
dev_warn(dev, "broken or not accessible\n");
retval = -ENXIO;
goto cleanup1;
}
spin_lock_irq(&rtc_lock);
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
@ -1054,7 +1111,9 @@ static void cmos_check_wkalrm(struct device *dev)
* ACK the rtc irq here
*/
if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) {
local_irq_disable();
cmos_interrupt(0, (void *)cmos->rtc);
local_irq_enable();
return;
}

View File

@ -9,40 +9,143 @@
#endif
/*
* Returns true if a clock update is in progress
* Execute a function while the UIP (Update-in-progress) bit of the RTC is
* unset.
*
* Warning: callback may be executed more than once.
*/
static inline unsigned char mc146818_is_updating(void)
bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
void *param)
{
unsigned char uip;
int i;
unsigned long flags;
unsigned char seconds;
for (i = 0; i < 10; i++) {
spin_lock_irqsave(&rtc_lock, flags);
/*
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
* every msec for completion.
*
* Store the second value before checking UIP so a long lasting
* NMI which happens to hit after the UIP check cannot make
* an update cycle invisible.
*/
seconds = CMOS_READ(RTC_SECONDS);
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
mdelay(1);
continue;
}
/* Revalidate the above readout */
if (seconds != CMOS_READ(RTC_SECONDS)) {
spin_unlock_irqrestore(&rtc_lock, flags);
continue;
}
if (callback)
callback(seconds, param);
/*
* Check for the UIP bit again. If it is set now then
* the above values may contain garbage.
*/
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
mdelay(1);
continue;
}
/*
* A NMI might have interrupted the above sequence so check
* whether the seconds value has changed which indicates that
* the NMI took longer than the UIP bit was set. Unlikely, but
* possible and there is also virt...
*/
if (seconds != CMOS_READ(RTC_SECONDS)) {
spin_unlock_irqrestore(&rtc_lock, flags);
continue;
}
spin_unlock_irqrestore(&rtc_lock, flags);
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
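
The ordering inside mc146818_avoid_UIP() is the whole point: sample the seconds register, check UIP, do the caller's work, then re-check UIP and re-read the seconds so that an update cycle (or a long NMI) that slipped in between cannot go unnoticed. Below is a compressed userspace sketch of that retry discipline; read_uip() and read_seconds() are hypothetical stand-ins for the CMOS accessors, and the rtc_lock spinlock is omitted:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool read_uip(void) { return false; }		/* stand-in */
static unsigned char read_seconds(void) { return 42; }	/* stand-in */

static bool run_outside_update(void (*cb)(unsigned char seconds, void *param),
			       void *param)
{
	for (int i = 0; i < 10; i++) {
		unsigned char seconds = read_seconds();

		if (read_uip()) {		/* update window about to start */
			usleep(1000);
			continue;
		}
		if (seconds != read_seconds())	/* a long NMI hid an update */
			continue;

		if (cb)
			cb(seconds, param);

		if (read_uip()) {		/* an update raced with the work */
			usleep(1000);
			continue;
		}
		if (seconds != read_seconds())
			continue;

		return true;			/* the work ran in a quiet window */
	}
	return false;				/* RTC likely broken or absent */
}

static void demo_cb(unsigned char seconds, void *param)
{
	printf("ran at second %u, param=%s\n", (unsigned)seconds,
	       (const char *)param);
}

int main(void)
{
	return run_outside_update(demo_cb, "demo") ? 0 : 1;
}
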
/*
* If the UIP (Update-in-progress) bit of the RTC is set for more than
* 10ms, the RTC is apparently broken or not present.
*/
bool mc146818_does_rtc_work(void)
{
int i;
unsigned char val;
unsigned long flags;
spin_lock_irqsave(&rtc_lock, flags);
uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
spin_unlock_irqrestore(&rtc_lock, flags);
return uip;
}
for (i = 0; i < 10; i++) {
spin_lock_irqsave(&rtc_lock, flags);
val = CMOS_READ(RTC_FREQ_SELECT);
spin_unlock_irqrestore(&rtc_lock, flags);
unsigned int mc146818_get_time(struct rtc_time *time)
if ((val & RTC_UIP) == 0)
return true;
mdelay(1);
}
return false;
}
EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
int mc146818_get_time(struct rtc_time *time)
{
unsigned char ctrl;
unsigned long flags;
unsigned int iter_count = 0;
unsigned char century = 0;
bool retry;
#ifdef CONFIG_MACH_DECSTATION
unsigned int real_year;
#endif
again:
if (iter_count > 10) {
memset(time, 0, sizeof(*time));
return -EIO;
}
iter_count++;
spin_lock_irqsave(&rtc_lock, flags);
/*
* read RTC once any update in progress is done. The update
* can take just over 2ms. We wait 20ms. There is no need to
* to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
* If you need to know *exactly* when a second has started, enable
* periodic update complete interrupts, (via ioctl) and then
* immediately read /dev/rtc which will block until you get the IRQ.
* Once the read clears, read the RTC time (again via ioctl). Easy.
* Check whether there is an update in progress during which the
* readout is unspecified. The maximum update time is ~2ms. Poll
* every msec for completion.
*
* Store the second value before checking UIP so a long lasting NMI
* which happens to hit after the UIP check cannot make an update
* cycle invisible.
*/
if (mc146818_is_updating())
mdelay(20);
time->tm_sec = CMOS_READ(RTC_SECONDS);
if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
spin_unlock_irqrestore(&rtc_lock, flags);
mdelay(1);
goto again;
}
/* Revalidate the above readout */
if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
spin_unlock_irqrestore(&rtc_lock, flags);
goto again;
}
/*
* Only the values that we read from the RTC are set. We leave
@ -50,8 +153,6 @@ unsigned int mc146818_get_time(struct rtc_time *time)
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
spin_lock_irqsave(&rtc_lock, flags);
time->tm_sec = CMOS_READ(RTC_SECONDS);
time->tm_min = CMOS_READ(RTC_MINUTES);
time->tm_hour = CMOS_READ(RTC_HOURS);
time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
@ -66,8 +167,24 @@ unsigned int mc146818_get_time(struct rtc_time *time)
century = CMOS_READ(acpi_gbl_FADT.century);
#endif
ctrl = CMOS_READ(RTC_CONTROL);
/*
* Check for the UIP bit again. If it is set now then
* the above values may contain garbage.
*/
retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
/*
* A NMI might have interrupted the above sequence so check whether
* the seconds value has changed which indicates that the NMI took
* longer than the UIP bit was set. Unlikely, but possible and
* there is also virt...
*/
retry |= time->tm_sec != CMOS_READ(RTC_SECONDS);
spin_unlock_irqrestore(&rtc_lock, flags);
if (retry)
goto again;
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
@ -95,7 +212,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
time->tm_mon--;
return RTC_24H;
return 0;
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
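
Both the get and set paths keep converting between binary and BCD because the MC146818 can run in either mode (RTC_DM_BINARY). For reference, a tiny standalone example of the bcd2bin()/bin2bcd() arithmetic the driver relies on; the helpers below are local re-implementations, not the kernel ones:

#include <stdio.h>

/* Each decimal digit occupies its own nibble: 0x59 encodes 59. */
static unsigned int bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
	return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
	printf("0x59 -> %u\n", bcd2bin(0x59));   /* 59   */
	printf("7    -> 0x%02x\n", bin2bcd(7));  /* 0x07 */
	printf("23   -> 0x%02x\n", bin2bcd(23)); /* 0x23 */
	return 0;
}
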
@ -132,7 +249,6 @@ int mc146818_set_time(struct rtc_time *time)
if (yrs > 255) /* They are unsigned */
return -EINVAL;
spin_lock_irqsave(&rtc_lock, flags);
#ifdef CONFIG_MACH_DECSTATION
real_yrs = yrs;
leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
@ -161,16 +277,16 @@ int mc146818_set_time(struct rtc_time *time)
/* These limits and adjustments are independent of
* whether the chip is in binary mode or not.
*/
if (yrs > 169) {
spin_unlock_irqrestore(&rtc_lock, flags);
if (yrs > 169)
return -EINVAL;
}
if (yrs >= 100)
yrs -= 100;
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
spin_lock_irqsave(&rtc_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
spin_unlock_irqrestore(&rtc_lock, flags);
if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
sec = bin2bcd(sec);
min = bin2bcd(min);
hrs = bin2bcd(hrs);
@ -180,6 +296,7 @@ int mc146818_set_time(struct rtc_time *time)
century = bin2bcd(century);
}
spin_lock_irqsave(&rtc_lock, flags);
save_control = CMOS_READ(RTC_CONTROL);
CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
save_freq_select = CMOS_READ(RTC_FREQ_SELECT);

View File

@ -291,7 +291,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
*
* DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
if (dwc->gadget->speed <= USB_SPEED_HIGH) {
if (dwc->gadget->speed <= USB_SPEED_HIGH ||
DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_ENDTRANSFER) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;

View File

@ -609,7 +609,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
if (scr_readw(r) != vc->vc_video_erase_char)
break;
if (r != q && new_rows >= rows + logo_lines) {
save = kmalloc(array3_size(logo_lines, new_cols, 2),
save = kzalloc(array3_size(logo_lines, new_cols, 2),
GFP_KERNEL);
if (save) {
int i = cols < new_cols ? cols : new_cols;
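
The kmalloc() to kzalloc() switch matters because parts of this buffer can end up on the visible console: with kzalloc() any portion that is never explicitly filled is guaranteed to read back as zero instead of stale heap contents. A small userspace analogue of the same choice, malloc() versus calloc():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 16;
	unsigned char *a = malloc(n);	 /* contents indeterminate   */
	unsigned char *b = calloc(n, 1); /* guaranteed to be all zero */

	if (!a || !b)
		return 1;

	/* Only b is safe to expose without filling every byte first. */
	printf("b[0..3] = %d %d %d %d\n", b[0], b[1], b[2], b[3]);

	free(a);
	free(b);
	return 0;
}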

View File

@ -5398,6 +5398,7 @@ static int clone_range(struct send_ctx *sctx,
u64 ext_len;
u64 clone_len;
u64 clone_data_offset;
bool crossed_src_i_size = false;
if (slot >= btrfs_header_nritems(leaf)) {
ret = btrfs_next_leaf(clone_root->root, path);
@ -5454,8 +5455,10 @@ static int clone_range(struct send_ctx *sctx,
if (key.offset >= clone_src_i_size)
break;
if (key.offset + ext_len > clone_src_i_size)
if (key.offset + ext_len > clone_src_i_size) {
ext_len = clone_src_i_size - key.offset;
crossed_src_i_size = true;
}
clone_data_offset = btrfs_file_extent_offset(leaf, ei);
if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
@ -5515,6 +5518,25 @@ static int clone_range(struct send_ctx *sctx,
ret = send_clone(sctx, offset, clone_len,
clone_root);
}
} else if (crossed_src_i_size && clone_len < len) {
/*
* If we are at i_size of the clone source inode and we
* can not clone from it, terminate the loop. This is
* to avoid sending two write operations, one with a
* length matching clone_len and the final one after
* this loop with a length of len - clone_len.
*
* When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
* was passed to the send ioctl), this helps avoid
* sending an encoded write for an offset that is not
* sector size aligned, in case the i_size of the source
* inode is not sector size aligned. That will make the
* receiver fallback to decompression of the data and
* writing it using regular buffered IO, therefore while
* not incorrect, it's not optimal due to decompression and
* possible re-compression at the receiver.
*/
break;
} else {
ret = send_extent_data(sctx, offset, clone_len);
}
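
The comment above boils down to alignment arithmetic: when the clone source's i_size is not a multiple of the sector size, clamping ext_len to i_size leaves the follow-up write starting at an unaligned offset, which is what forces the receiver of a compressed (encoded) stream to decompress and fall back to buffered IO. With the fix, the loop breaks instead and the whole remainder is sent as a single write. A worked example of the arithmetic, with made-up numbers and the common 4096-byte sector size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t clone_src_i_size = 70000;	/* not sector aligned         */
	uint64_t key_offset = 65536;		/* start of the last extent   */
	uint64_t ext_len = 8192;		/* extent reaches past i_size */

	if (key_offset + ext_len > clone_src_i_size)
		ext_len = clone_src_i_size - key_offset;	/* 4464 */

	uint64_t next_write_offset = key_offset + ext_len;	/* 70000 */

	printf("clamped ext_len=%llu, next write offset %llu, aligned=%s\n",
	       (unsigned long long)ext_len,
	       (unsigned long long)next_write_offset,
	       (next_write_offset % sectorsize == 0) ? "yes" : "no");
	return 0;
}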

View File

@ -205,12 +205,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
void tlb_remove_table_sync_one(void);
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
static inline void tlb_remove_table_sync_one(void) { }
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

View File

@ -71,6 +71,7 @@ struct css_task_iter {
ANDROID_KABI_RESERVE(1);
};
extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct ext_css_set init_ext_css_set;
#define init_css_set init_ext_css_set.cset

View File

@ -179,8 +179,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
struct page *follow_huge_pd(struct vm_area_struct *vma,
unsigned long address, hugepd_t hpd,
int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags);
struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@ -267,8 +267,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
return NULL;
}
static inline struct page *follow_huge_pmd(struct mm_struct *mm,
unsigned long address, pmd_t *pmd, int flags)
static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
unsigned long address, int flags)
{
return NULL;
}

View File

@ -125,7 +125,11 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
unsigned int mc146818_get_time(struct rtc_time *time);
bool mc146818_does_rtc_work(void);
int mc146818_get_time(struct rtc_time *time);
int mc146818_set_time(struct rtc_time *time);
bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
void *param);
#endif /* _MC146818RTC_H */

View File

@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;
/* iterate across the hierarchies */
#define for_each_root(root) \

View File

@ -436,6 +436,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
(FOLL_PIN | FOLL_GET)))
return ERR_PTR(-EINVAL);
/*
* Considering PTE level hugetlb, like continuous-PTE hugetlb on
* ARM64 architecture.
*/
if (is_vm_hugetlb_page(vma)) {
page = follow_huge_pmd_pte(vma, address, flags);
if (page)
return page;
return no_page_table(vma, flags);
}
retry:
if (unlikely(pmd_bad(*pmd)))
return no_page_table(vma, flags);
@ -591,7 +603,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
if (pmd_none(pmdval))
return no_page_table(vma, flags);
if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
page = follow_huge_pmd(mm, address, pmd, flags);
page = follow_huge_pmd_pte(vma, address, flags);
if (page)
return page;
return no_page_table(vma, flags);
@ -2542,7 +2554,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
next = pud_addr_end(addr, end);
if (unlikely(!pud_present(pud)))
return 0;
if (unlikely(pud_huge(pud))) {
if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
if (!gup_huge_pud(pud, pudp, addr, next, flags,
pages, nr))
return 0;

View File

@ -5626,12 +5626,13 @@ follow_huge_pd(struct vm_area_struct *vma,
}
struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int flags)
follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
{
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
spinlock_t *ptl;
pte_t pte;
pte_t *ptep, pte;
/* FOLL_GET and FOLL_PIN are mutually exclusive. */
if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
@ -5639,17 +5640,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
return NULL;
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
/*
* make sure that the address range covered by this pmd is not
* unmapped from other threads.
*/
if (!pmd_huge(*pmd))
goto out;
pte = huge_ptep_get((pte_t *)pmd);
ptep = huge_pte_offset(mm, address, huge_page_size(h));
if (!ptep)
return NULL;
ptl = huge_pte_lock(h, mm, ptep);
pte = huge_ptep_get(ptep);
if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
page = pte_page(pte) +
((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
/*
* try_grab_page() should always succeed here, because: a) we
* hold the pmd (ptl) lock, and b) we've just checked that the
@ -5665,7 +5664,7 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
} else {
if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
__migration_entry_wait(mm, (pte_t *)pmd, ptl);
__migration_entry_wait(mm, ptep, ptl);
goto retry;
}
/*

View File

@ -1157,6 +1157,7 @@ static void collapse_huge_page(struct mm_struct *mm,
_pmd = pmdp_collapse_flush(vma, address, pmd);
spin_unlock(pmd_ptl);
mmu_notifier_invalidate_range_end(&range);
tlb_remove_table_sync_one();
spin_lock(pte_ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte,
@ -1448,6 +1449,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
spinlock_t *ptl;
int count = 0;
int i;
struct mmu_notifier_range range;
if (!vma || !vma->vm_file ||
vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
@ -1462,6 +1464,14 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
return;
/*
* Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
* that got written to. Without this, we'd have to also lock the
* anon_vma if one exists.
*/
if (vma->anon_vma)
return;
hpage = find_lock_page(vma->vm_file->f_mapping,
linear_page_index(vma, haddr));
if (!hpage)
@ -1474,6 +1484,19 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
if (!pmd)
goto drop_hpage;
/*
* We need to lock the mapping so that from here on, only GUP-fast and
* hardware page walks can access the parts of the page tables that
* we're operating on.
*/
i_mmap_lock_write(vma->vm_file->f_mapping);
/*
* This spinlock should be unnecessary: Nobody else should be accessing
* the page tables under spinlock protection here, only
* lockless_pages_from_mm() and the hardware page walker can access page
* tables while all the high-level locks are held in write mode.
*/
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
/* step 1: check all mapped PTEs are to the right huge page */
@ -1520,12 +1543,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
}
/* step 4: collapse pmd */
ptl = pmd_lock(vma->vm_mm, pmd);
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
haddr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
_pmd = pmdp_collapse_flush(vma, haddr, pmd);
spin_unlock(ptl);
mm_dec_nr_ptes(mm);
tlb_remove_table_sync_one();
mmu_notifier_invalidate_range_end(&range);
pte_free(mm, pmd_pgtable(_pmd));
i_mmap_unlock_write(vma->vm_file->f_mapping);
drop_hpage:
unlock_page(hpage);
put_page(hpage);
@ -1533,6 +1561,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
abort:
pte_unmap_unlock(start_pte, ptl);
i_mmap_unlock_write(vma->vm_file->f_mapping);
goto drop_hpage;
}
@ -1582,7 +1611,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* An alternative would be to drop the check, but check that page
* table is clear before calling pmdp_collapse_flush() under
* ptl. It has higher chance to recover THP for the VMA, but
* has higher cost too.
* has higher cost too. It would also probably require locking
* the anon_vma.
*/
if (vma->anon_vma)
continue;
@ -1604,12 +1634,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
*/
if (mmap_write_trylock(mm)) {
if (!khugepaged_test_exit(mm)) {
spinlock_t *ptl = pmd_lock(mm, pmd);
struct mmu_notifier_range range;
mmu_notifier_range_init(&range,
MMU_NOTIFY_CLEAR, 0,
NULL, mm, addr,
addr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
spin_unlock(ptl);
mm_dec_nr_ptes(mm);
tlb_remove_table_sync_one();
pte_free(mm, pmd_pgtable(_pmd));
mmu_notifier_invalidate_range_end(&range);
}
mmap_write_unlock(mm);
} else {

View File

@ -4899,6 +4899,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
unsigned int efd, cfd;
struct fd efile;
struct fd cfile;
struct dentry *cdentry;
const char *name;
char *endp;
int ret;
@ -4949,6 +4950,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
if (ret < 0)
goto out_put_cfile;
/*
* The control file must be a regular cgroup1 file. As a regular cgroup
* file can't be renamed, it's safe to access its name afterwards.
*/
cdentry = cfile.file->f_path.dentry;
if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
ret = -EINVAL;
goto out_put_cfile;
}
/*
* Determine the event callbacks and set them in @event. This used
* to be done via struct cftype but cgroup core no longer knows
@ -4957,7 +4968,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
*
* DO NOT ADD NEW FILES.
*/
name = cfile.file->f_path.dentry->d_name.name;
name = cdentry->d_name.name;
if (!strcmp(name, "memory.usage_in_bytes")) {
event->register_event = mem_cgroup_usage_register_event;
@ -4981,7 +4992,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* automatically removed on cgroup destruction but the removal is
* asynchronous, so take an extra ref on @css.
*/
cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
&memory_cgrp_subsys);
ret = -EINVAL;
if (IS_ERR(cfile_css))

View File

@ -139,7 +139,7 @@ static void tlb_remove_table_smp_sync(void *arg)
/* Simply deliver the interrupt */
}
static void tlb_remove_table_sync_one(void)
void tlb_remove_table_sync_one(void)
{
/*
* This isn't an RCU grace period and hence the page-tables cannot be
@ -163,8 +163,6 @@ static void tlb_remove_table_free(struct mmu_table_batch *batch)
#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */
static void tlb_remove_table_sync_one(void) { }
static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
__tlb_remove_table_free(batch);

View File

@ -118,7 +118,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
char tmp_buf[7];
char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@ -291,7 +291,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
m->rc.capacity = 7; /* start by reading header */
m->rc.capacity = P9_HDRSZ; /* start by reading header */
}
clear_bit(Rpending, &m->wsched);
@ -314,7 +314,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");
/* Header size */
m->rc.size = 7;
m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
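
P9_HDRSZ replaces the bare 7 that used to be scattered through the transport code. The value is simply the size of the fixed 9P message header: a 4-byte little-endian length, a 1-byte type and a 2-byte tag. A minimal standalone sketch of decoding such a header from a byte buffer (this is not the kernel's p9_parse_header(), just an illustration); the Xen transport hunk that follows applies the same constant and additionally rejects replies whose advertised size exceeds the preallocated capacity:

#include <stdio.h>
#include <stdint.h>

#define P9_HDRSZ 7	/* size[4] + type[1] + tag[2] */

struct p9_hdr {
	uint32_t size;
	uint8_t  type;
	uint16_t tag;
};

static void parse_hdr(const uint8_t buf[P9_HDRSZ], struct p9_hdr *h)
{
	/* 9P integers are little-endian on the wire. */
	h->size = buf[0] | buf[1] << 8 | buf[2] << 16 | (uint32_t)buf[3] << 24;
	h->type = buf[4];
	h->tag  = buf[5] | buf[6] << 8;
}

int main(void)
{
	/* A made-up header: size=19, type=101 (Rversion), tag=0xffff. */
	const uint8_t buf[P9_HDRSZ] = { 19, 0, 0, 0, 101, 0xff, 0xff };
	struct p9_hdr h;

	parse_hdr(buf, &h);
	printf("size=%u type=%u tag=0x%04x\n", h.size, h.type, h.tag);
	return 0;
}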

View File

@ -231,6 +231,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}
if (h.size > req->rc.capacity) {
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
req->status = REQ_STATUS_ERROR;
goto recv_error;
}
memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;
@ -240,6 +248,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE(ring));
recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;

View File

@ -1010,6 +1010,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);
hci_dev_put(hdev);
if (!hcon)
return -ENOENT;

View File

@ -736,7 +736,7 @@ static int __init bt_init(void)
err = bt_sysfs_init();
if (err < 0)
return err;
goto cleanup_led;
err = sock_register(&bt_sock_family_ops);
if (err)
@ -772,6 +772,8 @@ static int __init bt_init(void)
sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
bt_sysfs_cleanup();
cleanup_led:
bt_leds_cleanup();
return err;
}

View File

@ -3796,7 +3796,8 @@ int hci_register_dev(struct hci_dev *hdev)
hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
if (!hdev->suspend_notifier.notifier_call &&
!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
error = register_pm_notifier(&hdev->suspend_notifier);
if (error)

View File

@ -680,7 +680,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;
@ -706,7 +706,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;

View File

@ -22,7 +22,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;
pskb_trim_rcsum(skb, skb->len - len);
if (pskb_trim_rcsum(skb, skb->len - len))
return NULL;
skb->offload_fwd_mark = true;

View File

@ -830,6 +830,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
return -EINVAL;
}
if (!cfg->fc_table)
cfg->fc_table = RT_TABLE_MAIN;
return 0;
errout:
return err;

View File

@ -423,6 +423,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
nfi->fib_type == fi->fib_type &&
nfi->fib_tb_id == fi->fib_tb_id &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX) == 0 &&
!((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
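
The added fib_tb_id comparison is what the fib_tests.sh additions further down exercise: two fib_info entries that are identical except for the routing table they belong to must not be deduplicated, otherwise deleting a source address in one VRF can tear down the route that lives in another table. A reduced sketch of the idea; the struct and field names here are illustrative, not the kernel layout:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* A cut-down stand-in for the identity fields of struct fib_info. */
struct fib_info_key {
	uint32_t tb_id;
	uint32_t prefsrc;
	uint32_t priority;
};

/* Two routes are "the same" only if every identity field matches,
 * including the table ID -- the point of the fix above. */
static bool fib_info_same(const struct fib_info_key *a,
			  const struct fib_info_key *b)
{
	return a->tb_id == b->tb_id &&
	       a->prefsrc == b->prefsrc &&
	       a->priority == b->priority;
}

int main(void)
{
	struct fib_info_key main_tbl = { .tb_id = 254, .prefsrc = 1, .priority = 0 };
	struct fib_info_key vrf_tbl  = { .tb_id = 10,  .prefsrc = 1, .priority = 0 };

	/* Identical except for the table ID: must compare as different. */
	printf("same=%d\n", fib_info_same(&main_tbl, &vrf_tbl));
	return 0;
}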

View File

@ -1493,24 +1493,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;
if (t->erspan_ver <= 2) {
if (t->erspan_ver != 0 && !t->collect_md)
o_flags |= TUNNEL_KEY;
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
goto nla_put_failure;
if (t->erspan_ver == 1) {
if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
goto nla_put_failure;
} else if (t->erspan_ver == 2) {
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
goto nla_put_failure;
}
}
if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
@ -1551,6 +1533,34 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
return -EMSGSIZE;
}
static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
if (t->erspan_ver <= 2) {
if (t->erspan_ver != 0 && !t->collect_md)
t->parms.o_flags |= TUNNEL_KEY;
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
goto nla_put_failure;
if (t->erspan_ver == 1) {
if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
goto nla_put_failure;
} else if (t->erspan_ver == 2) {
if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
goto nla_put_failure;
}
}
return ipgre_fill_info(skb, dev);
nla_put_failure:
return -EMSGSIZE;
}
static void erspan_setup(struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
@ -1629,7 +1639,7 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
.changelink = erspan_changelink,
.dellink = ip_tunnel_dellink,
.get_size = ipgre_get_size,
.fill_info = ipgre_fill_info,
.fill_info = erspan_fill_info,
.get_link_net = ip_tunnel_get_link_net,
};

View File

@ -919,6 +919,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err < 0)
goto fail;
/* We prevent @rt from being freed. */
rcu_read_lock();
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
@ -942,6 +945,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
rcu_read_unlock();
return 0;
}
@ -949,6 +953,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
rcu_read_unlock();
return err;
slow_path_clean:

View File

@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
sdata->dev = ndev;
sdata->wpan_dev.wpan_phy = local->hw.phy;
sdata->local = local;
INIT_LIST_HEAD(&sdata->wpan_dev.list);
/* setup type-dependent data */
ret = ieee802154_setup_sdata(sdata, type);

View File

@ -317,8 +317,13 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark)
static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
u32 mark = READ_ONCE(ct->mark);
if (!mark)
return 0;
if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
goto nla_put_failure;
return 0;
@ -532,7 +537,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{
if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 ||
ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
@ -711,7 +716,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
struct sk_buff *skb;
unsigned int type;
unsigned int flags = 0, group;
u32 mark;
int err;
if (events & (1 << IPCT_DESTROY)) {
@ -812,9 +816,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
}
#ifdef CONFIG_NF_CONNTRACK_MARK
mark = READ_ONCE(ct->mark);
if ((events & (1 << IPCT_MARK) || mark) &&
ctnetlink_dump_mark(skb, mark) < 0)
if (events & (1 << IPCT_MARK) &&
ctnetlink_dump_mark(skb, ct) < 0)
goto nla_put_failure;
#endif
nlmsg_end(skb, nlh);
@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{
const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms;
u32 mark;
zone = nf_ct_zone(ct);
@ -2729,8 +2731,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
mark = READ_ONCE(ct->mark);
if (mark && ctnetlink_dump_mark(skb, mark) < 0)
if (ctnetlink_dump_mark(skb, ct) < 0)
goto nla_put_failure;
#endif
if (ctnetlink_dump_labels(skb, ct) < 0)

View File

@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
struct nft_pipapo_match *m = priv->clone;
u8 genmask = nft_genmask_next(net);
struct nft_pipapo_field *f;
const u8 *start_p, *end_p;
int i, bsize_max, err = 0;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
}
/* Validate */
start_p = start;
end_p = end;
nft_pipapo_for_each_field(f, i, m) {
const u8 *start_p = start, *end_p = end;
if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
return -ENOSPC;

View File

@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
target->sens_res = nfca_poll->sens_res;
target->sel_res = nfca_poll->sel_res;
target->nfcid1_len = nfca_poll->nfcid1_len;
if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
return -EPROTO;
if (target->nfcid1_len > 0) {
memcpy(target->nfcid1, nfca_poll->nfcid1,
target->nfcid1_len);
@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;
target->sensb_res_len = nfcb_poll->sensb_res_len;
if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
return -EPROTO;
if (target->sensb_res_len > 0) {
memcpy(target->sensb_res, nfcb_poll->sensb_res,
target->sensb_res_len);
@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;
target->sensf_res_len = nfcf_poll->sensf_res_len;
if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
return -EPROTO;
if (target->sensf_res_len > 0) {
memcpy(target->sensf_res, nfcf_poll->sensf_res,
target->sensf_res_len);
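
All three hunks in this file follow the same pattern: the length reported by the remote end is checked against the size of the fixed destination array before it is passed to memcpy(), so a broken or malicious controller can no longer overflow the target structure. A generic userspace sketch of that check; the buffer size and names are illustrative:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct target {
	uint8_t nfcid1[10];
	size_t  nfcid1_len;
};

/* Copy only when the peer-supplied length fits the destination buffer. */
static int store_nfcid1(struct target *t, const uint8_t *src, size_t len)
{
	if (len > ARRAY_SIZE(t->nfcid1))
		return -1;	/* the kernel returns -EPROTO here */
	t->nfcid1_len = len;
	if (len > 0)
		memcpy(t->nfcid1, src, len);
	return 0;
}

int main(void)
{
	struct target t = { 0 };
	uint8_t payload[32] = { 0 };

	printf("len=4  -> %d\n", store_nfcid1(&t, payload, 4));  /* accepted */
	printf("len=32 -> %d\n", store_nfcid1(&t, payload, 32)); /* rejected */
	return 0;
}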

View File

@ -2188,7 +2188,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
skb_linearize(skb);
if (skb_linearize(skb))
goto exit;
hdr = buf_msg(skb);
data = msg_data(hdr);

View File

@ -1660,6 +1660,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
struct tipc_node *n;
struct sk_buff_head xmitq;
bool node_up = false;
struct net *peer_net;
int bearer_id;
int rc;
@ -1676,18 +1677,23 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
return -EHOSTUNREACH;
}
rcu_read_lock();
tipc_node_read_lock(n);
node_up = node_is_up(n);
if (node_up && n->peer_net && check_net(n->peer_net)) {
peer_net = n->peer_net;
tipc_node_read_unlock(n);
if (node_up && peer_net && check_net(peer_net)) {
/* xmit inner linux container */
tipc_lxc_xmit(n->peer_net, list);
tipc_lxc_xmit(peer_net, list);
if (likely(skb_queue_empty(list))) {
tipc_node_read_unlock(n);
rcu_read_unlock();
tipc_node_put(n);
return 0;
}
}
rcu_read_unlock();
tipc_node_read_lock(n);
bearer_id = n->active_links[selector & 1];
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
tipc_node_read_unlock(n);

View File

@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 portid, u32 seq, u32 flags, int sk_ino)
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
sk_diag_dump_uid(sk, skb))
sk_diag_dump_uid(sk, skb, user_ns))
goto out_nlmsg_trim;
nlmsg_end(skb, nlh);
@ -178,7 +180,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
}
static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 portid, u32 seq, u32 flags)
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags)
{
int sk_ino;
@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if (!sk_ino)
return 0;
return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}
static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
goto next;
if (sk_diag_dump(sk, skb, req,
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0)
@ -285,7 +288,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
if (!rep)
goto out;
err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
nlmsg_free(rep);

View File

@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
* expand the variable length event to linear buffer space.
*/
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
char **bufptr = ptr;
memcpy(*bufptr, src, size);
*bufptr += size;
return 0;
}
static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
static int seq_copy_in_user(void *ptr, void *src, int size)
{
char __user **bufptr = ptr;
if (copy_to_user(*bufptr, src, size))
return -EFAULT;
*bufptr += size;
@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
return newlen;
}
err = snd_seq_dump_var_event(event,
in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
(snd_seq_dump_func_t)seq_copy_in_user,
in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
&buf);
return err < 0 ? err : newlen;
}
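
The ALSA change is a Control Flow Integrity (CFI) fix: instead of casting two differently-typed copy helpers to snd_seq_dump_func_t at the call site, both helpers now carry exactly the prototype the dump function expects and recover their real argument type internally. Calling through a function pointer of a mismatched type is undefined behaviour and is precisely what CFI traps on. A standalone sketch of the corrected pattern; the type and helper names are local to this example:

#include <stdio.h>
#include <string.h>

/* One callback type taking an opaque context -- what the dispatcher uses. */
typedef int (*dump_func_t)(void *ptr, const void *src, int size);

static int copy_to_buffer(void *ptr, const void *src, int size)
{
	char **bufptr = ptr;		/* recover the concrete type inside */

	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int dump(dump_func_t fn, void *ctx)
{
	/* The dispatcher only ever sees the common prototype. */
	return fn(ctx, "hello", 5);
}

int main(void)
{
	char out[16] = { 0 };
	char *cursor = out;

	if (dump(copy_to_buffer, &cursor))
		return 1;
	printf("copied: %.5s\n", out);
	return 0;
}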

View File

@ -2489,6 +2489,14 @@ static void wm8962_configure_bclk(struct snd_soc_component *component)
snd_soc_component_update_bits(component, WM8962_CLOCKING2,
WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
/* The DSPCLK_DIV field in the WM8962_CLOCKING1 register is used to
* generate the correct LRCLK and BCLK frequencies. Sometimes the
* read-only value is not updated in time after SYSCLK is enabled, which
* leads to wrong calculations. A delay is introduced here to wait for
* the newest register value; according to testing, the delay should be
* at least 500~1000us.
*/
usleep_range(500, 1000);
dspclk = snd_soc_component_read(component, WM8962_CLOCKING1);
if (snd_soc_component_get_bias_level(component) != SND_SOC_BIAS_ON)

View File

@ -1155,6 +1155,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
return;
be_substream = snd_soc_dpcm_get_substream(be, stream);
if (!be_substream)
return;
for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)

View File

@ -1622,13 +1622,21 @@ ipv4_del_addr_test()
$IP addr add dev dummy1 172.16.104.1/24
$IP addr add dev dummy1 172.16.104.11/24
$IP addr add dev dummy1 172.16.104.12/24
$IP addr add dev dummy1 172.16.104.13/24
$IP addr add dev dummy2 172.16.104.1/24
$IP addr add dev dummy2 172.16.104.11/24
$IP addr add dev dummy2 172.16.104.12/24
$IP route add 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
$IP route add 172.16.106.0/24 dev lo src 172.16.104.12
$IP route add table 0 172.16.107.0/24 via 172.16.104.2 src 172.16.104.13
$IP route add vrf red 172.16.105.0/24 via 172.16.104.2 src 172.16.104.11
$IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
set +e
# removing address from device in vrf should only remove route from vrf table
echo " Regular FIB info"
$IP addr del dev dummy2 172.16.104.11/24
$IP ro ls vrf red | grep -q 172.16.105.0/24
log_test $? 1 "Route removed from VRF when source address deleted"
@ -1646,6 +1654,35 @@ ipv4_del_addr_test()
$IP ro ls vrf red | grep -q 172.16.105.0/24
log_test $? 0 "Route in VRF is not removed by address delete"
# removing address from device in vrf should only remove route from vrf
# table even when the associated fib info only differs in table ID
echo " Identical FIB info with different table ID"
$IP addr del dev dummy2 172.16.104.12/24
$IP ro ls vrf red | grep -q 172.16.106.0/24
log_test $? 1 "Route removed from VRF when source address deleted"
$IP ro ls | grep -q 172.16.106.0/24
log_test $? 0 "Route in default VRF not removed"
$IP addr add dev dummy2 172.16.104.12/24
$IP route add vrf red 172.16.106.0/24 dev lo src 172.16.104.12
$IP addr del dev dummy1 172.16.104.12/24
$IP ro ls | grep -q 172.16.106.0/24
log_test $? 1 "Route removed in default VRF when source address deleted"
$IP ro ls vrf red | grep -q 172.16.106.0/24
log_test $? 0 "Route in VRF is not removed by address delete"
# removing address from device in default vrf should remove route from
# the default vrf even when route was inserted with a table ID of 0.
echo " Table ID 0"
$IP addr del dev dummy1 172.16.104.13/24
$IP ro ls | grep -q 172.16.107.0/24
log_test $? 1 "Route removed in default VRF when source address deleted"
$IP li del dummy1
$IP li del dummy2
cleanup

View File

@ -782,7 +782,7 @@ kci_test_ipsec_offload()
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
check_err $?
ip x p add dir out src $dstip/24 dst $srcip/24 \
ip x p add dir in src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
check_err $?