Merge 5.4.227 into android11-5.4-lts
Changes in 5.4.227
	arm64: dts: rockchip: keep I2S1 disabled for GPIO function on ROCK Pi 4 series
	arm: dts: rockchip: fix node name for hym8563 rtc
	ARM: dts: rockchip: fix ir-receiver node names
	ARM: dts: rockchip: rk3188: fix lcdc1-rgb24 node name
	ARM: 9251/1: perf: Fix stacktraces for tracepoint events in THUMB2 kernels
	ARM: 9266/1: mm: fix no-MMU ZERO_PAGE() implementation
	ARM: dts: rockchip: disable arm_global_timer on rk3066 and rk3188
	9p/fd: Use P9_HDRSZ for header size
	regulator: slg51000: Wait after asserting CS pin
	ALSA: seq: Fix function prototype mismatch in snd_seq_expand_var_event
	btrfs: send: avoid unaligned encoded writes when attempting to clone range
	ASoC: soc-pcm: Add NULL check in BE reparenting
	regulator: twl6030: fix get status of twl6032 regulators
	fbcon: Use kzalloc() in fbcon_prepare_logo()
	9p/xen: check logical size for buffer size
	net: usb: qmi_wwan: add u-blox 0x1342 composition
	mm/khugepaged: take the right locks for page table retraction
	mm/khugepaged: fix GUP-fast interaction by sending IPI
	mm/khugepaged: invoke MMU notifiers in shmem/file collapse paths
	xen/netback: Ensure protocol headers don't fall in the non-linear area
	xen/netback: do some code cleanup
	xen/netback: don't call kfree_skb() with interrupts disabled
	Revert "net: dsa: b53: Fix valid setting for MDB entries"
	media: v4l2-dv-timings.c: fix too strict blanking sanity checks
	memcg: fix possible use-after-free in memcg_write_event_control()
	mm/gup: fix gup_pud_range() for dax
	KVM: s390: vsie: Fix the initialization of the epoch extension (epdx) field
	drm/shmem-helper: Remove errant put in error path
	HID: usbhid: Add ALWAYS_POLL quirk for some mice
	HID: hid-lg4ff: Add check for empty lbuf
	HID: core: fix shift-out-of-bounds in hid_report_raw_event
	can: af_can: fix NULL pointer dereference in can_rcv_filter
	ieee802154: cc2520: Fix error return code in cc2520_hw_init()
	ca8210: Fix crash by zero initializing data
	drm/bridge: ti-sn65dsi86: Fix output polarity setting bug
	gpio: amd8111: Fix PCI device reference count leak
	e1000e: Fix TX dispatch condition
	igb: Allocate MSI-X vector when testing
	af_unix: Get user_ns from in_skb in unix_diag_get_exact().
	Bluetooth: 6LoWPAN: add missing hci_dev_put() in get_l2cap_conn()
	Bluetooth: Fix not cleanup led when bt_init fails
	net: dsa: ksz: Check return value
	selftests: rtnetlink: correct xfrm policy rule in kci_test_ipsec_offload
	mac802154: fix missing INIT_LIST_HEAD in ieee802154_if_add()
	net: encx24j600: Add parentheses to fix precedence
	net: encx24j600: Fix invalid logic in reading of MISTAT register
	xen-netfront: Fix NULL sring after live migration
	net: mvneta: Prevent out of bounds read in mvneta_config_rss()
	i40e: Fix not setting default xps_cpus after reset
	i40e: Fix for VF MAC address 0
	i40e: Disallow ip4 and ip6 l4_4_bytes
	NFC: nci: Bounds check struct nfc_target arrays
	nvme initialize core quirks before calling nvme_init_subsystem
	net: stmmac: fix "snps,axi-config" node property parsing
	net: thunderx: Fix missing destroy_workqueue of nicvf_rx_mode_wq
	net: hisilicon: Fix potential use-after-free in hisi_femac_rx()
	net: hisilicon: Fix potential use-after-free in hix5hd2_rx()
	tipc: Fix potential OOB in tipc_link_proto_rcv()
	ipv4: Fix incorrect route flushing when source address is deleted
	ipv4: Fix incorrect route flushing when table ID 0 is used
	ethernet: aeroflex: fix potential skb leak in greth_init_rings()
	xen/netback: fix build warning
	net: plip: don't call kfree_skb/dev_kfree_skb() under spin_lock_irq()
	ipv6: avoid use-after-free in ip6_fragment()
	net: mvneta: Fix an out of bounds check
	can: esd_usb: Allow REC and TEC to return to zero
	Linux 5.4.227

Change-Id: Idd4fa0e113a2b94326764baa669ff6fb02797adb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 4ae923b7c6

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 226
+SUBLEVEL = 227
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -31,7 +31,7 @@
 &i2c1 {
 	status = "okay";
 
-	hym8563: hym8563@51 {
+	hym8563: rtc@51 {
 		compatible = "haoyu,hym8563";
 		reg = <0x51>;
 		#clock-cells = <0>;
@@ -67,7 +67,7 @@
 		#sound-dai-cells = <0>;
 	};
 
-	ir_recv: gpio-ir-receiver {
+	ir_recv: ir-receiver {
 		compatible = "gpio-ir-receiver";
 		gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
 		pinctrl-names = "default";
@@ -404,7 +404,7 @@
 			rockchip,pins = <2 RK_PD3 1 &pcfg_pull_none>;
 		};
 
-		lcdc1_rgb24: ldcd1-rgb24 {
+		lcdc1_rgb24: lcdc1-rgb24 {
 			rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
 					<2 RK_PA1 1 &pcfg_pull_none>,
 					<2 RK_PA2 1 &pcfg_pull_none>,
@@ -632,7 +632,6 @@
 
 &global_timer {
 	interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
-	status = "disabled";
 };
 
 &local_timer {
@@ -53,7 +53,7 @@
 		vin-supply = <&vcc_sys>;
 	};
 
-	hym8563@51 {
+	rtc@51 {
 		compatible = "haoyu,hym8563";
 		reg = <0x51>;
 
@@ -233,7 +233,7 @@
 		vin-supply = <&vcc_sys>;
 	};
 
-	hym8563: hym8563@51 {
+	hym8563: rtc@51 {
 		compatible = "haoyu,hym8563";
 		reg = <0x51>;
 		#clock-cells = <0>;
@@ -145,7 +145,7 @@
 		vin-supply = <&vcc_sys>;
 	};
 
-	hym8563: hym8563@51 {
+	hym8563: rtc@51 {
 		compatible = "haoyu,hym8563";
 		reg = <0x51>;
 		#clock-cells = <0>;
@@ -165,7 +165,7 @@
 };
 
 &i2c0 {
-	hym8563: hym8563@51 {
+	hym8563: rtc@51 {
 		compatible = "haoyu,hym8563";
 		reg = <0x51>;
 		#clock-cells = <0>;
@@ -108,6 +108,13 @@
 		reg = <0x1013c200 0x20>;
 		interrupts = <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>;
 		clocks = <&cru CORE_PERI>;
+		status = "disabled";
+		/* The clock source and the sched_clock provided by the arm_global_timer
+		 * on Rockchip rk3066a/rk3188 are quite unstable because their rates
+		 * depend on the CPU frequency.
+		 * Keep the arm_global_timer disabled in order to have the
+		 * DW_APB_TIMER (rk3066a) or ROCKCHIP_TIMER (rk3188) selected by default.
+		 */
 	};
 
 	local_timer: local-timer@1013c600 {
@@ -17,7 +17,7 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 
 #define perf_arch_fetch_caller_regs(regs, __ip) { \
 	(regs)->ARM_pc = (__ip); \
-	(regs)->ARM_fp = (unsigned long) __builtin_frame_address(0); \
+	frame_pointer((regs)) = (unsigned long) __builtin_frame_address(0); \
 	(regs)->ARM_sp = current_stack_pointer; \
 	(regs)->ARM_cpsr = SVC_MODE; \
 }
@@ -51,12 +51,6 @@
 
 typedef pte_t *pte_addr_t;
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)	(virt_to_page(0))
-
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
@@ -10,6 +10,15 @@
 #include <linux/const.h>
 #include <asm/proc-fns.h>
 
+#ifndef __ASSEMBLY__
+/*
+ * ZERO_PAGE is a global shared page that is always zero: used
+ * for zero-mapped memory areas etc..
+ */
+extern struct page *empty_zero_page;
+#define ZERO_PAGE(vaddr)	(empty_zero_page)
+#endif
+
 #ifndef CONFIG_MMU
 
 #include <asm-generic/4level-fixup.h>
@@ -166,13 +175,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define __S111  __PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)	(empty_zero_page)
-
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
@@ -26,6 +26,13 @@
 
 unsigned long vectors_base;
 
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
 #ifdef CONFIG_ARM_MPU
 struct mpu_rgn_info mpu_rgn_info;
 #endif
@@ -148,9 +155,21 @@ void __init adjust_lowmem_bounds(void)
  */
 void __init paging_init(const struct machine_desc *mdesc)
 {
+	void *zero_page;
+
 	early_trap_init((void *)vectors_base);
 	mpu_setup();
+
+	/* allocate the zero page. */
+	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!zero_page)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
+
 	bootmem_init();
+
+	empty_zero_page = virt_to_page(zero_page);
+	flush_dcache_page(empty_zero_page);
 }
 
 /*
@@ -441,7 +441,6 @@
 &i2s1 {
 	rockchip,playback-channels = <2>;
 	rockchip,capture-channels = <2>;
-	status = "okay";
 };
 
 &i2s2 {
@@ -540,8 +540,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
 		scb_s->eca |= scb_o->eca & ECA_CEI;
 	/* Epoch Extension */
-	if (test_kvm_facility(vcpu->kvm, 139))
+	if (test_kvm_facility(vcpu->kvm, 139)) {
 		scb_s->ecd |= scb_o->ecd & ECD_MEF;
+		scb_s->epdx = scb_o->epdx;
+	}
 
 	/* etoken */
 	if (test_kvm_facility(vcpu->kvm, 156))
@@ -231,7 +231,10 @@ static int __init amd_gpio_init(void)
 		ioport_unmap(gp.pm);
 		goto out;
 	}
+	return 0;
+
 out:
+	pci_dev_put(pdev);
 	return err;
 }
 
@@ -239,6 +242,7 @@ static void __exit amd_gpio_exit(void)
 {
 	gpiochip_remove(&gp.chip);
 	ioport_unmap(gp.pm);
+	pci_dev_put(gp.pdev);
 }
 
 module_init(amd_gpio_init);
@@ -460,9 +460,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
 		&pdata->bridge.encoder->crtc->state->adjusted_mode;
 	u8 hsync_polarity = 0, vsync_polarity = 0;
 
-	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
 		hsync_polarity = CHA_HSYNC_POLARITY;
-	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
 		vsync_polarity = CHA_VSYNC_POLARITY;
 
 	ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
@@ -554,10 +554,8 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
 	shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
 
 	ret = drm_gem_shmem_get_pages(shmem);
-	if (ret) {
-		drm_gem_vm_close(vma);
+	if (ret)
 		return ret;
-	}
 
 	/* VM_PFNMAP was set by drm_gem_mmap() */
 	vma->vm_flags &= ~VM_PFNMAP;
@@ -1303,6 +1303,9 @@ static s32 snto32(__u32 value, unsigned n)
 	if (!value || !n)
 		return 0;
 
+	if (n > 32)
+		n = 32;
+
 	switch (n) {
 	case 8: return ((__s8)value);
 	case 16: return ((__s16)value);
@@ -259,6 +259,7 @@
 #define USB_DEVICE_ID_CH_AXIS_295	0x001c
 
 #define USB_VENDOR_ID_CHERRY		0x046a
+#define USB_DEVICE_ID_CHERRY_MOUSE_000C	0x000c
 #define USB_DEVICE_ID_CHERRY_CYMOTION	0x0023
 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR	0x0027
 
@@ -866,6 +867,7 @@
 #define SPI_DEVICE_ID_MS_SURFACE_D6_0	0x0c1d
 #define SPI_DEVICE_ID_MS_SURFACE_D6_1	0x0c42
 #define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS	0x02e0
+#define USB_DEVICE_ID_MS_MOUSE_0783	0x0783
 
 #define USB_VENDOR_ID_MOJO		0x8282
 #define USB_DEVICE_ID_RETRO_ADAPTER	0x3201
@@ -1298,6 +1300,7 @@
 
 #define USB_VENDOR_ID_PRIMAX	0x0461
 #define USB_DEVICE_ID_PRIMAX_MOUSE_4D22	0x4d22
+#define USB_DEVICE_ID_PRIMAX_MOUSE_4E2A	0x4e2a
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD	0x4e05
 #define USB_DEVICE_ID_PRIMAX_REZEL	0x4e72
 #define USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F	0x4d0f
@@ -872,6 +872,12 @@ static ssize_t lg4ff_alternate_modes_store(struct device *dev, struct device_att
 		return -ENOMEM;
 
 	i = strlen(lbuf);
+
+	if (i == 0) {
+		kfree(lbuf);
+		return -EINVAL;
+	}
+
 	if (lbuf[i-1] == '\n') {
 		if (i == 1) {
 			kfree(lbuf);
@@ -54,6 +54,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
@@ -122,6 +123,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_MOUSE_0783), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
@@ -146,6 +148,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN), HID_QUIRK_NO_INIT_REPORTS },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22), HID_QUIRK_ALWAYS_POLL },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4E2A), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D0F), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4D65), HID_QUIRK_ALWAYS_POLL },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_PIXART_MOUSE_4E22), HID_QUIRK_ALWAYS_POLL },
@@ -145,6 +145,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
 	const struct v4l2_bt_timings *bt = &t->bt;
 	const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
 	u32 caps = cap->capabilities;
+	const u32 max_vert = 10240;
+	u32 max_hor = 3 * bt->width;
 
 	if (t->type != V4L2_DV_BT_656_1120)
 		return false;
@@ -166,14 +168,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
 	if (!bt->interlaced &&
 	    (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch))
 		return false;
-	if (bt->hfrontporch > 2 * bt->width ||
-	    bt->hsync > 1024 || bt->hbackporch > 1024)
+	/*
+	 * Some video receivers cannot properly separate the frontporch,
+	 * backporch and sync values, and instead they only have the total
+	 * blanking. That can be assigned to any of these three fields.
+	 * So just check that none of these are way out of range.
+	 */
+	if (bt->hfrontporch > max_hor ||
+	    bt->hsync > max_hor || bt->hbackporch > max_hor)
 		return false;
-	if (bt->vfrontporch > 4096 ||
-	    bt->vsync > 128 || bt->vbackporch > 4096)
+	if (bt->vfrontporch > max_vert ||
+	    bt->vsync > max_vert || bt->vbackporch > max_vert)
 		return false;
-	if (bt->interlaced && (bt->il_vfrontporch > 4096 ||
-			       bt->il_vsync > 128 || bt->il_vbackporch > 4096))
+	if (bt->interlaced && (bt->il_vfrontporch > max_vert ||
+			       bt->il_vsync > max_vert || bt->il_vbackporch > max_vert))
 		return false;
 	return fnc == NULL || fnc(t, fnc_handle);
 }
@@ -227,6 +227,10 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
 		u8 rxerr = msg->msg.rx.data[2];
 		u8 txerr = msg->msg.rx.data[3];
 
+		netdev_dbg(priv->netdev,
+			   "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+			   msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+
 		skb = alloc_can_err_skb(priv->netdev, &cf);
 		if (skb == NULL) {
 			stats->rx_dropped++;
@@ -253,6 +257,8 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
 			break;
 		default:
 			priv->can.state = CAN_STATE_ERROR_ACTIVE;
+			txerr = 0;
+			rxerr = 0;
 			break;
 		}
 	} else {
@@ -1551,6 +1551,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
 
 	memset(&ent, 0, sizeof(ent));
 	ent.port = port;
+	ent.is_valid = is_valid;
 	ent.vid = vid;
 	ent.is_static = true;
 	memcpy(ent.mac, addr, ETH_ALEN);
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
 			if (dma_mapping_error(greth->dev, dma_addr)) {
 				if (netif_msg_ifup(greth))
 					dev_err(greth->dev, "Could not create initial DMA mapping\n");
+				dev_kfree_skb(skb);
 				goto cleanup;
 			}
 			greth->rx_skbuff[i] = skb;
@@ -2268,7 +2268,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(dev, "Failed to register netdevice\n");
-		goto err_unregister_interrupts;
+		goto err_destroy_workqueue;
 	}
 
 	nic->msg_enable = debug;
@@ -2277,6 +2277,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+err_destroy_workqueue:
+	destroy_workqueue(nic->nicvf_rx_mode_wq);
 err_unregister_interrupts:
 	nicvf_unregister_interrupts(nic);
 err_free_netdev:
@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&priv->napi, skb);
 		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += skb->len;
+		dev->stats.rx_bytes += len;
 next:
 		pos = (pos + 1) % rxq->num;
 		if (rx_pkts_num >= limit)
@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&priv->napi, skb);
 		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += skb->len;
+		dev->stats.rx_bytes += len;
 next:
 		pos = dma_ring_incr(pos, RX_DESC_NUM);
 	}
@@ -5916,9 +5916,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(tx_ring,
-				    (MAX_SKB_FRAGS *
+				    ((MAX_SKB_FRAGS + 1) *
 				     DIV_ROUND_UP(PAGE_SIZE,
-						  adapter->tx_fifo_limit) + 2));
+						  adapter->tx_fifo_limit) + 4));
 
 		if (!netdev_xmit_more() ||
 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
@@ -4233,11 +4233,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 		return -EOPNOTSUPP;
 
 	/* First 4 bytes of L4 header */
-	if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
-		new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
-	else if (!usr_ip4_spec->l4_4_bytes)
-		new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
-	else
+	if (usr_ip4_spec->l4_4_bytes)
 		return -EOPNOTSUPP;
 
 	/* Filtering on Type of Service is not supported. */
@@ -9953,6 +9953,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
 	return 0;
 }
 
+/**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+	int i;
+
+	if (vsi->tx_rings)
+		for (i = 0; i < vsi->num_queue_pairs; i++)
+			if (vsi->tx_rings[i])
+				clear_bit(__I40E_TX_XPS_INIT_DONE,
+					  vsi->tx_rings[i]->state);
+}
+
 /**
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
@@ -9984,8 +9999,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
 		rtnl_unlock();
 
 	for (v = 0; v < pf->num_alloc_vsi; v++) {
-		if (pf->vsi[v])
+		if (pf->vsi[v]) {
+			i40e_clean_xps_state(pf->vsi[v]);
 			pf->vsi[v]->seid = 0;
+		}
 	}
 
 	i40e_shutdown_adminq(&pf->hw);
@@ -1394,6 +1394,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
 	i40e_cleanup_reset_vf(vf);
 
 	i40e_flush(hw);
+	usleep_range(20000, 40000);
 	clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
 
 	return true;
@@ -1517,6 +1518,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 	}
 
 	i40e_flush(hw);
+	usleep_range(20000, 40000);
 	clear_bit(__I40E_VF_DISABLE, pf->state);
 
 	return true;
@@ -1402,6 +1402,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 			*data = 1;
 			return -1;
 		}
+		wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
+		wr32(E1000_EIMS, BIT(0));
 	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
 		if (request_irq(irq,
@@ -3717,7 +3717,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 	/* Use the cpu associated to the rxq when it is online, in all
 	 * the other cases, use the cpu 0 which can't be offline.
 	 */
-	if (cpu_online(pp->rxq_def))
+	if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
 		elected_cpu = pp->rxq_def;
 
 	max_cpu = num_present_cpus();
@@ -364,7 +364,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
 		goto err_out;
 
 	usleep_range(26, 100);
-	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
 	       (mistat & BUSY))
 		cpu_relax();
 
@@ -402,7 +402,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
 		goto err_out;
 
 	usleep_range(26, 100);
-	while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+	while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
 	       (mistat & BUSY))
 		cpu_relax();
 
@@ -107,10 +107,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 
 	axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
 	axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
-	axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
-	axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
-	axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
-	axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
+	axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+	axi->axi_fb = of_property_read_bool(np, "snps,fb");
+	axi->axi_mb = of_property_read_bool(np, "snps,mb");
+	axi->axi_rb = of_property_read_bool(np, "snps,rb");
 
 	if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
 		axi->axi_wr_osr_lmt = 1;
@@ -926,7 +926,7 @@ static int ca8210_spi_transfer(
 
 	dev_dbg(&spi->dev, "%s called\n", __func__);
 
-	cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+	cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
 	if (!cas_ctl)
 		return -ENOMEM;
 
@@ -973,7 +973,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
 
 		if (timeout-- <= 0) {
 			dev_err(&priv->spi->dev, "oscillator start failed!\n");
-			return ret;
+			return -ETIMEDOUT;
 		}
 		udelay(1);
 	} while (!(status & CC2520_STATUS_XOSC32M_STABLE));
@@ -444,12 +444,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 	}
 	rcv->state = PLIP_PK_DONE;
 	if (rcv->skb) {
-		kfree_skb(rcv->skb);
+		dev_kfree_skb_irq(rcv->skb);
 		rcv->skb = NULL;
 	}
 	snd->state = PLIP_PK_DONE;
 	if (snd->skb) {
-		dev_kfree_skb(snd->skb);
+		dev_consume_skb_irq(snd->skb);
 		snd->skb = NULL;
 	}
 	spin_unlock_irq(&nl->lock);
@@ -1374,6 +1374,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},	/* Foxconn T77W968 LTE */
 	{QMI_FIXED_INTF(0x0489, 0xe0b5, 0)},	/* Foxconn T77W968 LTE with eSIM support*/
 	{QMI_FIXED_INTF(0x2692, 0x9025, 4)},	/* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */
+	{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)},	/* u-blox LARA-L6 */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
@ -48,7 +48,6 @@
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
typedef unsigned int pending_ring_idx_t;
|
||||
#define INVALID_PENDING_RING_IDX (~0U)
|
||||
|
||||
struct pending_tx_info {
|
||||
struct xen_netif_tx_request req; /* tx request */
|
||||
@ -82,8 +81,6 @@ struct xenvif_rx_meta {
|
||||
/* Discriminate from any valid pending_idx value. */
|
||||
#define INVALID_PENDING_IDX 0xFFFF
|
||||
|
||||
#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE
|
||||
|
||||
#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
|
||||
|
||||
/* The maximum number of frags is derived from the size of a grant (same
|
||||
@ -364,11 +361,6 @@ void xenvif_free(struct xenvif *vif);
|
||||
int xenvif_xenbus_init(void);
|
||||
void xenvif_xenbus_fini(void);
|
||||
|
||||
int xenvif_schedulable(struct xenvif *vif);
|
||||
|
||||
int xenvif_queue_stopped(struct xenvif_queue *queue);
|
||||
void xenvif_wake_queue(struct xenvif_queue *queue);
|
||||
|
||||
/* (Un)Map communication rings. */
|
||||
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
|
||||
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
|
||||
@ -391,17 +383,13 @@ int xenvif_dealloc_kthread(void *data);
|
||||
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);
|
||||
|
||||
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread);
|
||||
void xenvif_rx_action(struct xenvif_queue *queue);
|
||||
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
|
||||
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
|
||||
|
||||
void xenvif_carrier_on(struct xenvif *vif);
|
||||
|
||||
/* Callback from stack when TX packet can be released */
|
||||
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
|
||||
|
||||
/* Unmap a pending page and release it back to the guest */
|
||||
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
|
||||
|
||||
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
|
||||
{
|
||||
return MAX_PENDING_REQS -
|
||||
|
@ -70,7 +70,7 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
|
||||
wake_up(&queue->dealloc_wq);
|
||||
}
|
||||
|
||||
int xenvif_schedulable(struct xenvif *vif)
|
||||
static int xenvif_schedulable(struct xenvif *vif)
|
||||
{
|
||||
return netif_running(vif->dev) &&
|
||||
test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
|
||||
@ -178,20 +178,6 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
int xenvif_queue_stopped(struct xenvif_queue *queue)
|
||||
{
|
||||
struct net_device *dev = queue->vif->dev;
|
||||
unsigned int id = queue->id;
|
||||
return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
|
||||
}
|
||||
|
||||
void xenvif_wake_queue(struct xenvif_queue *queue)
|
||||
{
|
||||
struct net_device *dev = queue->vif->dev;
|
||||
unsigned int id = queue->id;
|
||||
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
|
||||
}
|
||||
|
||||
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev)
|
||||
{
|
||||
@ -269,14 +255,16 @@ xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
|
||||
skb_clear_hash(skb);
|
||||
|
||||
xenvif_rx_queue_tail(queue, skb);
|
||||
if (!xenvif_rx_queue_tail(queue, skb))
|
||||
goto drop;
|
||||
|
||||
xenvif_kick_thread(queue);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
drop:
|
||||
vif->dev->stats.tx_dropped++;
|
||||
dev_kfree_skb(skb);
|
||||
dev_kfree_skb_any(skb);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
|
@ -105,6 +105,8 @@ static void make_tx_response(struct xenvif_queue *queue,
|
||||
s8 st);
|
||||
static void push_tx_responses(struct xenvif_queue *queue);
|
||||
|
||||
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
|
||||
|
||||
static inline int tx_work_todo(struct xenvif_queue *queue);
|
||||
|
||||
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
|
||||
@ -323,10 +325,13 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
|
||||
|
||||
|
||||
struct xenvif_tx_cb {
|
||||
u16 pending_idx;
|
||||
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
|
||||
u8 copy_count;
|
||||
};
|
||||
|
||||
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
|
||||
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
|
||||
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
|
||||
|
||||
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
|
||||
u16 pending_idx,
|
||||
@ -361,31 +366,93 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
|
||||
return skb;
|
||||
}
|
||||
|
||||
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
|
||||
struct sk_buff *skb,
|
||||
struct xen_netif_tx_request *txp,
|
||||
struct gnttab_map_grant_ref *gop,
|
||||
unsigned int frag_overflow,
|
||||
struct sk_buff *nskb)
|
||||
static void xenvif_get_requests(struct xenvif_queue *queue,
|
||||
struct sk_buff *skb,
|
||||
struct xen_netif_tx_request *first,
|
||||
struct xen_netif_tx_request *txfrags,
|
||||
unsigned *copy_ops,
|
||||
unsigned *map_ops,
|
||||
unsigned int frag_overflow,
|
||||
struct sk_buff *nskb,
|
||||
unsigned int extra_count,
|
||||
unsigned int data_len)
|
||||
{
|
||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||
skb_frag_t *frags = shinfo->frags;
|
||||
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||
int start;
|
||||
u16 pending_idx;
|
||||
pending_ring_idx_t index;
|
||||
unsigned int nr_slots;
|
||||
struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
|
||||
struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
|
||||
struct xen_netif_tx_request *txp = first;
|
||||
|
||||
nr_slots = shinfo->nr_frags;
|
||||
nr_slots = shinfo->nr_frags + 1;
|
||||
|
||||
/* Skip first skb fragment if it is on same page as header fragment. */
|
||||
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
|
||||
copy_count(skb) = 0;
|
||||
|
||||
for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
|
||||
shinfo->nr_frags++, txp++, gop++) {
|
||||
/* Create copy ops for exactly data_len bytes into the skb head. */
|
||||
__skb_put(skb, data_len);
|
||||
while (data_len > 0) {
|
||||
int amount = data_len > txp->size ? txp->size : data_len;
|
||||
|
||||
cop->source.u.ref = txp->gref;
|
||||
cop->source.domid = queue->vif->domid;
|
||||
cop->source.offset = txp->offset;
|
||||
|
||||
cop->dest.domid = DOMID_SELF;
|
||||
cop->dest.offset = (offset_in_page(skb->data +
|
||||
skb_headlen(skb) -
|
||||
data_len)) & ~XEN_PAGE_MASK;
|
||||
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
|
||||
- data_len);
|
||||
|
||||
cop->len = amount;
|
||||
cop->flags = GNTCOPY_source_gref;
|
||||
|
||||
index = pending_index(queue->pending_cons);
|
||||
pending_idx = queue->pending_ring[index];
|
||||
callback_param(queue, pending_idx).ctx = NULL;
|
||||
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
|
||||
copy_count(skb)++;
|
||||
|
||||
cop++;
|
||||
data_len -= amount;
|
||||
|
||||
if (amount == txp->size) {
|
||||
/* The copy op covered the full tx_request */
|
||||
|
||||
memcpy(&queue->pending_tx_info[pending_idx].req,
|
||||
txp, sizeof(*txp));
|
||||
queue->pending_tx_info[pending_idx].extra_count =
|
||||
(txp == first) ? extra_count : 0;
|
||||
|
||||
if (txp == first)
|
||||
txp = txfrags;
|
||||
else
|
||||
txp++;
|
||||
queue->pending_cons++;
|
||||
nr_slots--;
|
||||
} else {
|
||||
/* The copy op partially covered the tx_request.
|
||||
* The remainder will be mapped.
|
||||
*/
|
||||
txp->offset += amount;
|
||||
txp->size -= amount;
|
||||
}
|
||||
}
|
||||
|
||||
for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
|
||||
shinfo->nr_frags++, gop++) {
|
||||
index = pending_index(queue->pending_cons++);
|
||||
pending_idx = queue->pending_ring[index];
|
||||
xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
|
||||
xenvif_tx_create_map_op(queue, pending_idx, txp,
|
||||
txp == first ? extra_count : 0, gop);
|
||||
frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
|
||||
|
||||
if (txp == first)
|
||||
txp = txfrags;
|
||||
else
|
||||
txp++;
|
||||
}
|
||||
|
||||
if (frag_overflow) {
|
||||
@ -406,7 +473,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
|
||||
skb_shinfo(skb)->frag_list = nskb;
|
||||
}
|
||||
|
||||
return gop;
|
||||
(*copy_ops) = cop - queue->tx_copy_ops;
|
||||
(*map_ops) = gop - queue->tx_map_ops;
|
||||
}
|
||||
|
||||
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
|
||||
@ -442,7 +510,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
|
||||
struct gnttab_copy **gopp_copy)
|
||||
{
|
||||
struct gnttab_map_grant_ref *gop_map = *gopp_map;
|
||||
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||
u16 pending_idx;
|
||||
/* This always points to the shinfo of the skb being checked, which
|
||||
* could be either the first or the one on the frag_list
|
||||
*/
|
||||
@ -453,24 +521,37 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
|
||||
struct skb_shared_info *first_shinfo = NULL;
|
||||
int nr_frags = shinfo->nr_frags;
|
||||
const bool sharedslot = nr_frags &&
|
||||
frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
|
||||
int i, err;
|
||||
frag_get_pending_idx(&shinfo->frags[0]) ==
|
||||
copy_pending_idx(skb, copy_count(skb) - 1);
|
||||
int i, err = 0;
|
||||
|
||||
/* Check status of header. */
|
||||
err = (*gopp_copy)->status;
|
||||
if (unlikely(err)) {
|
||||
if (net_ratelimit())
|
||||
netdev_dbg(queue->vif->dev,
|
||||
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
|
||||
(*gopp_copy)->status,
|
||||
pending_idx,
|
||||
(*gopp_copy)->source.u.ref);
|
||||
/* The first frag might still have this slot mapped */
|
||||
if (!sharedslot)
|
||||
xenvif_idx_release(queue, pending_idx,
|
||||
XEN_NETIF_RSP_ERROR);
|
||||
for (i = 0; i < copy_count(skb); i++) {
|
||||
int newerr;
|
||||
|
||||
/* Check status of header. */
|
||||
pending_idx = copy_pending_idx(skb, i);
|
||||
|
||||
newerr = (*gopp_copy)->status;
|
||||
if (likely(!newerr)) {
|
||||
/* The first frag might still have this slot mapped */
|
||||
if (i < copy_count(skb) - 1 || !sharedslot)
|
||||
xenvif_idx_release(queue, pending_idx,
|
||||
XEN_NETIF_RSP_OKAY);
|
||||
} else {
|
||||
err = newerr;
|
||||
if (net_ratelimit())
|
||||
netdev_dbg(queue->vif->dev,
|
||||
"Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
|
||||
(*gopp_copy)->status,
|
||||
pending_idx,
|
||||
(*gopp_copy)->source.u.ref);
|
||||
/* The first frag might still have this slot mapped */
|
||||
if (i < copy_count(skb) - 1 || !sharedslot)
|
||||
xenvif_idx_release(queue, pending_idx,
|
||||
XEN_NETIF_RSP_ERROR);
|
||||
}
|
||||
(*gopp_copy)++;
|
||||
}
|
||||
(*gopp_copy)++;
|
||||
|
||||
check_frags:
|
||||
for (i = 0; i < nr_frags; i++, gop_map++) {
|
||||
@ -517,14 +598,6 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
|
||||
if (err)
|
||||
continue;
|
||||
|
||||
/* First error: if the header haven't shared a slot with the
|
||||
* first frag, release it as well.
|
||||
*/
|
||||
if (!sharedslot)
|
||||
xenvif_idx_release(queue,
|
||||
XENVIF_TX_CB(skb)->pending_idx,
|
||||
XEN_NETIF_RSP_OKAY);
|
||||
|
||||
/* Invalidate preceding fragments of this skb. */
|
||||
for (j = 0; j < i; j++) {
|
||||
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
|
||||
@ -794,7 +867,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
unsigned *copy_ops,
|
||||
unsigned *map_ops)
|
||||
{
|
||||
struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
|
||||
struct sk_buff *skb, *nskb;
|
||||
int ret;
|
||||
unsigned int frag_overflow;
|
||||
@ -876,8 +948,12 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
continue;
|
||||
}
|
||||
|
||||
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
|
||||
XEN_NETBACK_TX_COPY_LEN : txreq.size;
|
||||
|
||||
ret = xenvif_count_requests(queue, &txreq, extra_count,
|
||||
txfrags, work_to_do);
|
||||
|
||||
if (unlikely(ret < 0))
|
||||
break;
|
||||
|
||||
@ -903,9 +979,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
index = pending_index(queue->pending_cons);
|
||||
pending_idx = queue->pending_ring[index];
|
||||
|
||||
data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
|
||||
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
|
||||
XEN_NETBACK_TX_COPY_LEN : txreq.size;
|
||||
if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
|
||||
data_len = txreq.size;
|
||||
|
||||
skb = xenvif_alloc_skb(data_len);
|
||||
if (unlikely(skb == NULL)) {
|
||||
@ -916,8 +991,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
}
|
||||
|
||||
skb_shinfo(skb)->nr_frags = ret;
|
||||
if (data_len < txreq.size)
|
||||
skb_shinfo(skb)->nr_frags++;
|
||||
/* At this point shinfo->nr_frags is in fact the number of
|
||||
* slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
|
||||
*/
|
||||
@ -979,54 +1052,19 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
|
||||
type);
|
||||
}
|
||||
|
||||
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
|
||||
|
||||
__skb_put(skb, data_len);
|
||||
queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
|
||||
queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
|
||||
queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
|
||||
|
||||
queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
|
||||
virt_to_gfn(skb->data);
|
||||
queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
|
||||
queue->tx_copy_ops[*copy_ops].dest.offset =
|
||||
offset_in_page(skb->data) & ~XEN_PAGE_MASK;
|
||||
|
||||
queue->tx_copy_ops[*copy_ops].len = data_len;
|
||||
queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
|
||||
|
||||
(*copy_ops)++;
|
||||
|
||||
if (data_len < txreq.size) {
|
||||
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
|
||||
pending_idx);
|
||||
xenvif_tx_create_map_op(queue, pending_idx, &txreq,
|
||||
extra_count, gop);
|
||||
gop++;
|
||||
} else {
|
||||
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
|
||||
INVALID_PENDING_IDX);
|
||||
memcpy(&queue->pending_tx_info[pending_idx].req,
|
||||
&txreq, sizeof(txreq));
|
||||
queue->pending_tx_info[pending_idx].extra_count =
|
||||
extra_count;
|
||||
}
|
||||
|
||||
queue->pending_cons++;
|
||||
|
||||
gop = xenvif_get_requests(queue, skb, txfrags, gop,
|
||||
frag_overflow, nskb);
|
||||
xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
|
||||
map_ops, frag_overflow, nskb, extra_count,
|
||||
data_len);
|
||||
|
||||
__skb_queue_tail(&queue->tx_queue, skb);
|
||||
|
||||
queue->tx.req_cons = idx;
|
||||
|
||||
if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
|
||||
if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
|
||||
(*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
|
||||
break;
|
||||
}
|
||||
|
||||
(*map_ops) = gop - queue->tx_map_ops;
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1105,9 +1143,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
|
||||
while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
|
||||
struct xen_netif_tx_request *txp;
|
||||
u16 pending_idx;
|
||||
unsigned data_len;
|
||||
|
||||
pending_idx = XENVIF_TX_CB(skb)->pending_idx;
|
||||
pending_idx = copy_pending_idx(skb, 0);
|
||||
txp = &queue->pending_tx_info[pending_idx].req;
|
||||
|
||||
/* Check the remap error code. */
|
||||
@ -1126,18 +1163,6 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
|
||||
continue;
|
||||
}
|
||||
|
||||
data_len = skb->len;
|
||||
callback_param(queue, pending_idx).ctx = NULL;
|
||||
if (data_len < txp->size) {
|
||||
/* Append the packet payload as a fragment. */
|
||||
txp->offset += data_len;
|
||||
txp->size -= data_len;
|
||||
} else {
|
||||
/* Schedule a response immediately. */
|
||||
xenvif_idx_release(queue, pending_idx,
|
||||
XEN_NETIF_RSP_OKAY);
|
||||
}
|
||||
|
||||
if (txp->flags & XEN_NETTXF_csum_blank)
|
||||
skb->ip_summed = CHECKSUM_PARTIAL;
|
||||
else if (txp->flags & XEN_NETTXF_data_validated)
|
||||
@ -1323,7 +1348,7 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
|
||||
/* Called after netfront has transmitted */
|
||||
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
|
||||
{
|
||||
unsigned nr_mops, nr_cops = 0;
|
||||
unsigned nr_mops = 0, nr_cops = 0;
|
||||
int work_done, ret;
|
||||
|
||||
if (unlikely(!tx_work_todo(queue)))
|
||||
@ -1410,7 +1435,7 @@ static void push_tx_responses(struct xenvif_queue *queue)
|
||||
notify_remote_via_irq(queue->tx_irq);
|
||||
}
|
||||
|
||||
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
|
||||
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
|
||||
{
|
||||
int ret;
|
||||
struct gnttab_unmap_grant_ref tx_unmap_op;
|
||||
|
@ -82,9 +82,10 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
|
||||
return false;
|
||||
}
|
||||
|
||||
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
{
|
||||
unsigned long flags;
|
||||
bool ret = true;
|
||||
|
||||
spin_lock_irqsave(&queue->rx_queue.lock, flags);
|
||||
|
||||
@ -92,8 +93,7 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
struct net_device *dev = queue->vif->dev;
|
||||
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
|
||||
kfree_skb(skb);
|
||||
queue->vif->dev->stats.rx_dropped++;
|
||||
ret = false;
|
||||
} else {
|
||||
if (skb_queue_empty(&queue->rx_queue))
|
||||
xenvif_update_needed_slots(queue, skb);
|
||||
@ -104,6 +104,8 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
|
||||
@ -473,7 +475,7 @@ static void xenvif_rx_skb(struct xenvif_queue *queue)
|
||||
|
||||
#define RX_BATCH_SIZE 64
|
||||
|
||||
void xenvif_rx_action(struct xenvif_queue *queue)
|
||||
static void xenvif_rx_action(struct xenvif_queue *queue)
|
||||
{
|
||||
struct sk_buff_head completed_skbs;
|
||||
unsigned int work_done = 0;
|
||||
|
@ -1621,6 +1621,12 @@ static int netfront_resume(struct xenbus_device *dev)
|
||||
netif_tx_unlock_bh(info->netdev);
|
||||
|
||||
xennet_disconnect_backend(info);
|
||||
|
||||
rtnl_lock();
|
||||
if (info->queues)
|
||||
xennet_destroy_queues(info);
|
||||
rtnl_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -2806,10 +2806,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
|
||||
if (!ctrl->identified) {
|
||||
int i;
|
||||
|
||||
ret = nvme_init_subsystem(ctrl, id);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
/*
|
||||
* Check for quirks. Quirk can depend on firmware version,
|
||||
* so, in principle, the set of quirks present can change
|
||||
@ -2822,6 +2818,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
|
||||
if (quirk_matches(id, &core_quirks[i]))
|
||||
ctrl->quirks |= core_quirks[i].quirks;
|
||||
}
|
||||
|
||||
ret = nvme_init_subsystem(ctrl, id);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
memcpy(ctrl->subsys->firmware_rev, id->fr,
|
||||
sizeof(ctrl->subsys->firmware_rev));
|
||||
|
@ -465,6 +465,8 @@ static int slg51000_i2c_probe(struct i2c_client *client,
|
||||
chip->cs_gpiod = cs_gpiod;
|
||||
}
|
||||
|
||||
usleep_range(10000, 11000);
|
||||
|
||||
i2c_set_clientdata(client, chip);
|
||||
chip->chip_irq = client->irq;
|
||||
chip->dev = dev;
|
||||
|
@ -67,6 +67,7 @@ struct twlreg_info {
|
||||
#define TWL6030_CFG_STATE_SLEEP 0x03
|
||||
#define TWL6030_CFG_STATE_GRP_SHIFT 5
|
||||
#define TWL6030_CFG_STATE_APP_SHIFT 2
|
||||
#define TWL6030_CFG_STATE_MASK 0x03
|
||||
#define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
|
||||
#define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
|
||||
TWL6030_CFG_STATE_APP_SHIFT)
|
||||
@ -128,13 +129,14 @@ static int twl6030reg_is_enabled(struct regulator_dev *rdev)
|
||||
if (grp < 0)
|
||||
return grp;
|
||||
grp &= P1_GRP_6030;
|
||||
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
|
||||
val = TWL6030_CFG_STATE_APP(val);
|
||||
} else {
|
||||
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
|
||||
val &= TWL6030_CFG_STATE_MASK;
|
||||
grp = 1;
|
||||
}
|
||||
|
||||
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
|
||||
val = TWL6030_CFG_STATE_APP(val);
|
||||
|
||||
return grp && (val == TWL6030_CFG_STATE_ON);
|
||||
}
|
||||
|
||||
@ -187,7 +189,12 @@ static int twl6030reg_get_status(struct regulator_dev *rdev)
|
||||
|
||||
val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
|
||||
|
||||
switch (TWL6030_CFG_STATE_APP(val)) {
|
||||
if (info->features & TWL6032_SUBCLASS)
|
||||
val &= TWL6030_CFG_STATE_MASK;
|
||||
else
|
||||
val = TWL6030_CFG_STATE_APP(val);
|
||||
|
||||
switch (val) {
|
||||
case TWL6030_CFG_STATE_ON:
|
||||
return REGULATOR_STATUS_NORMAL;
|
||||
|
||||
|
@ -604,7 +604,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
|
||||
if (scr_readw(r) != vc->vc_video_erase_char)
|
||||
break;
|
||||
if (r != q && new_rows >= rows + logo_lines) {
|
||||
save = kmalloc(array3_size(logo_lines, new_cols, 2),
|
||||
save = kzalloc(array3_size(logo_lines, new_cols, 2),
|
||||
GFP_KERNEL);
|
||||
if (save) {
|
||||
int i = cols < new_cols ? cols : new_cols;
|
||||
|
@ -5405,6 +5405,7 @@ static int clone_range(struct send_ctx *sctx,
|
||||
u64 ext_len;
|
||||
u64 clone_len;
|
||||
u64 clone_data_offset;
|
||||
bool crossed_src_i_size = false;
|
||||
|
||||
if (slot >= btrfs_header_nritems(leaf)) {
|
||||
ret = btrfs_next_leaf(clone_root->root, path);
|
||||
@ -5461,8 +5462,10 @@ static int clone_range(struct send_ctx *sctx,
|
||||
if (key.offset >= clone_src_i_size)
|
||||
break;
|
||||
|
||||
if (key.offset + ext_len > clone_src_i_size)
|
||||
if (key.offset + ext_len > clone_src_i_size) {
|
||||
ext_len = clone_src_i_size - key.offset;
|
||||
crossed_src_i_size = true;
|
||||
}
|
||||
|
||||
clone_data_offset = btrfs_file_extent_offset(leaf, ei);
|
||||
if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
|
||||
@ -5522,6 +5525,25 @@ static int clone_range(struct send_ctx *sctx,
|
||||
ret = send_clone(sctx, offset, clone_len,
|
||||
clone_root);
|
||||
}
|
||||
} else if (crossed_src_i_size && clone_len < len) {
|
||||
/*
|
||||
* If we are at i_size of the clone source inode and we
|
||||
* can not clone from it, terminate the loop. This is
|
||||
* to avoid sending two write operations, one with a
|
||||
* length matching clone_len and the final one after
|
||||
* this loop with a length of len - clone_len.
|
||||
*
|
||||
* When using encoded writes (BTRFS_SEND_FLAG_COMPRESSED
|
||||
* was passed to the send ioctl), this helps avoid
|
||||
* sending an encoded write for an offset that is not
|
||||
* sector size aligned, in case the i_size of the source
|
||||
* inode is not sector size aligned. That will make the
|
||||
* receiver fallback to decompression of the data and
|
||||
* writing it using regular buffered IO, therefore while
|
||||
* not incorrect, it's not optimal due decompression and
|
||||
* possible re-compression at the receiver.
|
||||
*/
|
||||
break;
|
||||
} else {
|
||||
ret = send_extent_data(sctx, offset, clone_len);
|
||||
}
|
||||
|
@ -190,12 +190,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
|
||||
#define tlb_needs_table_invalidate() (true)
|
||||
#endif
|
||||
|
||||
void tlb_remove_table_sync_one(void);
|
||||
|
||||
#else
|
||||
|
||||
#ifdef tlb_needs_table_invalidate
|
||||
#error tlb_needs_table_invalidate() requires HAVE_RCU_TABLE_FREE
|
||||
#endif
|
||||
|
||||
static inline void tlb_remove_table_sync_one(void) { }
|
||||
|
||||
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
|
||||
|
||||
|
||||
|
@ -69,6 +69,7 @@ struct css_task_iter {
|
||||
struct list_head iters_node; /* css_set->task_iters */
|
||||
};
|
||||
|
||||
extern struct file_system_type cgroup_fs_type;
|
||||
extern struct cgroup_root cgrp_dfl_root;
|
||||
extern struct css_set init_css_set;
|
||||
|
||||
|
@ -169,7 +169,6 @@ extern struct mutex cgroup_mutex;
|
||||
extern spinlock_t css_set_lock;
|
||||
extern struct cgroup_subsys *cgroup_subsys[];
|
||||
extern struct list_head cgroup_roots;
|
||||
extern struct file_system_type cgroup_fs_type;
|
||||
|
||||
/* iterate across the hierarchies */
|
||||
#define for_each_root(root) \
|
||||
|
2
mm/gup.c
2
mm/gup.c
@ -2240,7 +2240,7 @@ static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned lo
|
||||
next = pud_addr_end(addr, end);
|
||||
if (pud_none(pud))
|
||||
return 0;
|
||||
if (unlikely(pud_huge(pud))) {
|
||||
if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
|
||||
if (!gup_huge_pud(pud, pudp, addr, next, flags,
|
||||
pages, nr))
|
||||
return 0;
|
||||
|
@ -1060,6 +1060,7 @@ static void collapse_huge_page(struct mm_struct *mm,
|
||||
_pmd = pmdp_collapse_flush(vma, address, pmd);
|
||||
spin_unlock(pmd_ptl);
|
||||
mmu_notifier_invalidate_range_end(&range);
|
||||
tlb_remove_table_sync_one();
|
||||
|
||||
spin_lock(pte_ptl);
|
||||
isolated = __collapse_huge_page_isolate(vma, address, pte);
|
||||
@ -1312,6 +1313,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
|
||||
spinlock_t *ptl;
|
||||
int count = 0;
|
||||
int i;
|
||||
struct mmu_notifier_range range;
|
||||
|
||||
if (!vma || !vma->vm_file ||
|
||||
vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
|
||||
@ -1326,6 +1328,14 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
|
||||
if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
|
||||
* that got written to. Without this, we'd have to also lock the
|
||||
* anon_vma if one exists.
|
||||
*/
|
||||
if (vma->anon_vma)
|
||||
return;
|
||||
|
||||
hpage = find_lock_page(vma->vm_file->f_mapping,
|
||||
linear_page_index(vma, haddr));
|
||||
if (!hpage)
|
||||
@ -1338,6 +1348,19 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
|
||||
if (!pmd)
|
||||
goto drop_hpage;
|
||||
|
||||
/*
|
||||
* We need to lock the mapping so that from here on, only GUP-fast and
|
||||
* hardware page walks can access the parts of the page tables that
|
||||
* we're operating on.
|
||||
*/
|
||||
i_mmap_lock_write(vma->vm_file->f_mapping);
|
||||
|
||||
/*
|
||||
* This spinlock should be unnecessary: Nobody else should be accessing
|
||||
* the page tables under spinlock protection here, only
|
||||
* lockless_pages_from_mm() and the hardware page walker can access page
|
||||
* tables while all the high-level locks are held in write mode.
|
||||
*/
|
||||
start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
|
||||
|
||||
/* step 1: check all mapped PTEs are to the right huge page */
|
||||
@ -1384,12 +1407,17 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
|
||||
}
|
||||
|
||||
/* step 4: collapse pmd */
|
||||
ptl = pmd_lock(vma->vm_mm, pmd);
|
||||
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
|
||||
haddr + HPAGE_PMD_SIZE);
|
||||
mmu_notifier_invalidate_range_start(&range);
|
||||
_pmd = pmdp_collapse_flush(vma, haddr, pmd);
|
||||
spin_unlock(ptl);
|
||||
mm_dec_nr_ptes(mm);
|
||||
tlb_remove_table_sync_one();
|
||||
mmu_notifier_invalidate_range_end(&range);
|
||||
pte_free(mm, pmd_pgtable(_pmd));
|
||||
|
||||
i_mmap_unlock_write(vma->vm_file->f_mapping);
|
||||
|
||||
drop_hpage:
|
||||
unlock_page(hpage);
|
||||
put_page(hpage);
|
||||
@ -1397,6 +1425,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
|
||||
|
||||
abort:
|
||||
pte_unmap_unlock(start_pte, ptl);
|
||||
i_mmap_unlock_write(vma->vm_file->f_mapping);
|
||||
goto drop_hpage;
|
||||
}
|
||||
|
||||
@ -1446,7 +1475,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
|
||||
* An alternative would be drop the check, but check that page
|
||||
* table is clear before calling pmdp_collapse_flush() under
|
||||
* ptl. It has higher chance to recover THP for the VMA, but
|
||||
* has higher cost too.
|
||||
* has higher cost too. It would also probably require locking
|
||||
* the anon_vma.
|
||||
*/
|
||||
if (vma->anon_vma)
|
||||
continue;
|
||||
@ -1468,12 +1498,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
|
||||
*/
|
||||
if (down_write_trylock(&mm->mmap_sem)) {
|
||||
if (!khugepaged_test_exit(mm)) {
|
||||
spinlock_t *ptl = pmd_lock(mm, pmd);
|
||||
struct mmu_notifier_range range;
|
||||
|
||||
mmu_notifier_range_init(&range,
|
||||
MMU_NOTIFY_CLEAR, 0,
|
||||
NULL, mm, addr,
|
||||
addr + HPAGE_PMD_SIZE);
|
||||
mmu_notifier_invalidate_range_start(&range);
|
||||
/* assume page table is clear */
|
||||
_pmd = pmdp_collapse_flush(vma, addr, pmd);
|
||||
spin_unlock(ptl);
|
||||
mm_dec_nr_ptes(mm);
|
||||
tlb_remove_table_sync_one();
|
||||
pte_free(mm, pmd_pgtable(_pmd));
|
||||
mmu_notifier_invalidate_range_end(&range);
|
||||
}
|
||||
up_write(&mm->mmap_sem);
|
||||
} else {
|
||||
|
@@ -4709,6 +4709,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
unsigned int efd, cfd;
struct fd efile;
struct fd cfile;
struct dentry *cdentry;
const char *name;
char *endp;
int ret;
@@ -4759,6 +4760,16 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
if (ret < 0)
goto out_put_cfile;

/*
* The control file must be a regular cgroup1 file. As a regular cgroup
* file can't be renamed, it's safe to access its name afterwards.
*/
cdentry = cfile.file->f_path.dentry;
if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
ret = -EINVAL;
goto out_put_cfile;
}

/*
* Determine the event callbacks and set them in @event. This used
* to be done via struct cftype but cgroup core no longer knows
@@ -4767,7 +4778,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
*
* DO NOT ADD NEW FILES.
*/
name = cfile.file->f_path.dentry->d_name.name;
name = cdentry->d_name.name;

if (!strcmp(name, "memory.usage_in_bytes")) {
event->register_event = mem_cgroup_usage_register_event;
@@ -4791,7 +4802,7 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
* automatically removed on cgroup destruction but the removal is
* asynchronous, so take an extra ref on @css.
*/
cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
&memory_cgrp_subsys);
ret = -EINVAL;
if (IS_ERR(cfile_css))
@@ -117,6 +117,11 @@ static void tlb_remove_table_smp_sync(void *arg)
/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_one(void *table)
{
/*
@@ -118,7 +118,7 @@ struct p9_conn {
struct list_head unsent_req_list;
struct p9_req_t *rreq;
struct p9_req_t *wreq;
char tmp_buf[7];
char tmp_buf[P9_HDRSZ];
struct p9_fcall rc;
int wpos;
int wsize;
@@ -291,7 +291,7 @@ static void p9_read_work(struct work_struct *work)
if (!m->rc.sdata) {
m->rc.sdata = m->tmp_buf;
m->rc.offset = 0;
m->rc.capacity = 7; /* start by reading header */
m->rc.capacity = P9_HDRSZ; /* start by reading header */
}

clear_bit(Rpending, &m->wsched);
@@ -314,7 +314,7 @@ static void p9_read_work(struct work_struct *work)
p9_debug(P9_DEBUG_TRANS, "got new header\n");

/* Header size */
m->rc.size = 7;
m->rc.size = P9_HDRSZ;
err = p9_parse_header(&m->rc, &m->rc.size, NULL, NULL, 0);
if (err) {
p9_debug(P9_DEBUG_ERROR,
@@ -230,6 +230,14 @@ static void p9_xen_response(struct work_struct *work)
continue;
}

if (h.size > req->rc.capacity) {
dev_warn(&priv->dev->dev,
"requested packet size too big: %d for tag %d with capacity %zd\n",
h.size, h.tag, req->rc.capacity);
req->status = REQ_STATUS_ERROR;
goto recv_error;
}

memcpy(&req->rc, &h, sizeof(h));
req->rc.offset = 0;

@@ -239,6 +247,7 @@ static void p9_xen_response(struct work_struct *work)
masked_prod, &masked_cons,
XEN_9PFS_RING_SIZE);

recv_error:
virt_mb();
cons += h.size;
ring->intf->in_cons = cons;
@@ -1002,6 +1002,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);
hci_dev_put(hdev);

if (!hcon)
return -ENOENT;
@@ -735,7 +735,7 @@ static int __init bt_init(void)

err = bt_sysfs_init();
if (err < 0)
return err;
goto cleanup_led;

err = sock_register(&bt_sock_family_ops);
if (err)
@@ -771,6 +771,8 @@ static int __init bt_init(void)
sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
bt_sysfs_cleanup();
cleanup_led:
bt_leds_cleanup();
return err;
}

@@ -678,7 +678,7 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU)) {
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CAN_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;
@@ -704,7 +704,7 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU)) {
if (unlikely(dev->type != ARPHRD_CAN || !can_get_ml_priv(dev) || skb->len != CANFD_MTU)) {
pr_warn_once("PF_CAN: dropped non conform CAN FD skbuff: dev type %d, len %d\n",
dev->type, skb->len);
goto free_skb;
@@ -62,7 +62,8 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;

pskb_trim_rcsum(skb, skb->len - len);
if (pskb_trim_rcsum(skb, skb->len - len))
return NULL;

skb->offload_fwd_mark = true;

@@ -840,6 +840,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
return -EINVAL;
}

if (!cfg->fc_table)
cfg->fc_table = RT_TABLE_MAIN;

return 0;
errout:
return err;
@@ -420,6 +420,7 @@ static struct fib_info *fib_find_info(struct fib_info *nfi)
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
nfi->fib_type == fi->fib_type &&
nfi->fib_tb_id == fi->fib_tb_id &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX) == 0 &&
!((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
@@ -919,6 +919,9 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err < 0)
goto fail;

/* We prevent @rt from being freed. */
rcu_read_lock();

for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
@@ -942,6 +945,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
if (err == 0) {
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGOKS);
rcu_read_unlock();
return 0;
}

@@ -949,6 +953,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,

IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
rcu_read_unlock();
return err;

slow_path_clean:
@@ -661,6 +661,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
sdata->dev = ndev;
sdata->wpan_dev.wpan_phy = local->hw.phy;
sdata->local = local;
INIT_LIST_HEAD(&sdata->wpan_dev.list);

/* setup type-dependent data */
ret = ieee802154_setup_sdata(sdata, type);
@@ -218,6 +218,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
target->sens_res = nfca_poll->sens_res;
target->sel_res = nfca_poll->sel_res;
target->nfcid1_len = nfca_poll->nfcid1_len;
if (target->nfcid1_len > ARRAY_SIZE(target->nfcid1))
return -EPROTO;
if (target->nfcid1_len > 0) {
memcpy(target->nfcid1, nfca_poll->nfcid1,
target->nfcid1_len);
@@ -226,6 +228,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcb_poll = (struct rf_tech_specific_params_nfcb_poll *)params;

target->sensb_res_len = nfcb_poll->sensb_res_len;
if (target->sensb_res_len > ARRAY_SIZE(target->sensb_res))
return -EPROTO;
if (target->sensb_res_len > 0) {
memcpy(target->sensb_res, nfcb_poll->sensb_res,
target->sensb_res_len);
@@ -234,6 +238,8 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
nfcf_poll = (struct rf_tech_specific_params_nfcf_poll *)params;

target->sensf_res_len = nfcf_poll->sensf_res_len;
if (target->sensf_res_len > ARRAY_SIZE(target->sensf_res))
return -EPROTO;
if (target->sensf_res_len > 0) {
memcpy(target->sensf_res, nfcf_poll->sensf_res,
target->sensf_res_len);
@@ -1971,7 +1971,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);

skb_linearize(skb);
if (skb_linearize(skb))
goto exit;

hdr = buf_msg(skb);
data = msg_data(hdr);

@@ -113,14 +113,16 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}

static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb,
struct user_namespace *user_ns)
{
uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
uid_t uid = from_kuid_munged(user_ns, sock_i_uid(sk));
return nla_put(nlskb, UNIX_DIAG_UID, sizeof(uid_t), &uid);
}

static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 portid, u32 seq, u32 flags, int sk_ino)
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags, int sk_ino)
{
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
@@ -166,7 +168,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
goto out_nlmsg_trim;

if ((req->udiag_show & UDIAG_SHOW_UID) &&
sk_diag_dump_uid(sk, skb))
sk_diag_dump_uid(sk, skb, user_ns))
goto out_nlmsg_trim;

nlmsg_end(skb, nlh);
@@ -178,7 +180,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
}

static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 portid, u32 seq, u32 flags)
struct user_namespace *user_ns,
u32 portid, u32 seq, u32 flags)
{
int sk_ino;

@@ -189,7 +192,7 @@ static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if (!sk_ino)
return 0;

return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino);
}

static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
@@ -217,7 +220,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
goto next;
if (sk_diag_dump(sk, skb, req,
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI) < 0)
@@ -285,7 +288,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
if (!rep)
goto out;

err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
err = sk_diag_fill(sk, rep, req, sk_user_ns(NETLINK_CB(in_skb).sk),
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
nlmsg_free(rep);
@@ -112,15 +112,19 @@ EXPORT_SYMBOL(snd_seq_dump_var_event);
* expand the variable length event to linear buffer space.
*/

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
char **bufptr = ptr;

memcpy(*bufptr, src, size);
*bufptr += size;
return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
static int seq_copy_in_user(void *ptr, void *src, int size)
{
char __user **bufptr = ptr;

if (copy_to_user(*bufptr, src, size))
return -EFAULT;
*bufptr += size;
@@ -149,8 +153,7 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
return newlen;
}
err = snd_seq_dump_var_event(event,
in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
(snd_seq_dump_func_t)seq_copy_in_user,
in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
&buf);
return err < 0 ? err : newlen;
}
@@ -1254,6 +1254,8 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
return;

be_substream = snd_soc_dpcm_get_substream(be, stream);
if (!be_substream)
return;

for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
File diff suppressed because it is too large
@@ -780,7 +780,7 @@ kci_test_ipsec_offload()
tmpl proto esp src $srcip dst $dstip spi 9 \
mode transport reqid 42
check_err $?
ip x p add dir out src $dstip/24 dst $srcip/24 \
ip x p add dir in src $dstip/24 dst $srcip/24 \
tmpl proto esp src $dstip dst $srcip spi 9 \
mode transport reqid 42
check_err $?