Merge 5.10.184 into android12-5.10-lts

Changes in 5.10.184
	ata: ahci: fix enum constants for gcc-13
	gcc-plugins: Reorganize gimple includes for GCC 13
	sfc (gcc13): synchronize ef100_enqueue_skb()'s return type
	remove the sx8 block driver
	bonding (gcc13): synchronize bond_{a,t}lb_xmit() types
	f2fs: fix iostat lock protection
	blk-iocost: avoid 64-bit division in ioc_timer_fn
	block/blk-iocost (gcc13): keep large values in a new enum
	i40iw: fix build warning in i40iw_manage_apbvt()
	i40e: fix build warnings in i40e_alloc.h
	i40e: fix build warning in ice_fltr_add_mac_to_list()
	staging: vchiq_core: drop vchiq_status from vchiq_initialise
	spi: qup: Request DMA before enabling clocks
	afs: Fix setting of mtime when creating a file/dir/symlink
	wifi: mt76: mt7615: fix possible race in mt7615_mac_sta_poll
	neighbour: fix unaligned access to pneigh_entry
	net: dsa: lan9303: allow vid != 0 in port_fdb_{add|del} methods
	net/smc: Avoid to access invalid RMBs' MRs in SMCRv1 ADD LINK CONT
	net/sched: fq_pie: ensure reasonable TCA_FQ_PIE_QUANTUM values
	Bluetooth: Fix l2cap_disconnect_req deadlock
	Bluetooth: L2CAP: Add missing checks for invalid DCID
	qed/qede: Fix scheduling while atomic
	netfilter: conntrack: fix NULL pointer dereference in nf_confirm_cthelper
	netfilter: ipset: Add schedule point in call_ad().
	ipv6: rpl: Fix Route of Death.
	rfs: annotate lockless accesses to sk->sk_rxhash
	rfs: annotate lockless accesses to RFS sock flow table
	net: sched: move rtm_tca_policy declaration to include file
	net: sched: fix possible refcount leak in tc_chain_tmplt_add()
	bpf: Add extra path pointer check to d_path helper
	lib: cpu_rmap: Fix potential use-after-free in irq_cpu_rmap_release()
	bnxt_en: Don't issue AP reset during ethtool's reset operation
	bnxt_en: Query default VLAN before VNIC setup on a VF
	bnxt_en: Implement .set_port / .unset_port UDP tunnel callbacks
	batman-adv: Broken sync while rescheduling delayed work
	Input: xpad - delete a Razer DeathAdder mouse VID/PID entry
	Input: psmouse - fix OOB access in Elantech protocol
	ALSA: hda/realtek: Add a quirk for HP Slim Desktop S01
	ALSA: hda/realtek: Add Lenovo P3 Tower platform
	drm/amdgpu: fix xclk freq on CHIP_STONEY
	can: j1939: j1939_sk_send_loop_abort(): improved error queue handling in J1939 Socket
	can: j1939: change j1939_netdev_lock type to mutex
	can: j1939: avoid possible use-after-free when j1939_can_rx_register fails
	ceph: fix use-after-free bug for inodes when flushing capsnaps
	s390/dasd: Use correct lock while counting channel queue length
	Bluetooth: Fix use-after-free in hci_remove_ltk/hci_remove_irk
	Bluetooth: hci_qca: fix debugfs registration
	tee: amdtee: Add return_origin to 'struct tee_cmd_load_ta'
	rbd: move RBD_OBJ_FLAG_COPYUP_ENABLED flag setting
	rbd: get snapshot context after exclusive lock is ensured to be held
	pinctrl: meson-axg: add missing GPIOA_18 gpio group
	usb: usbfs: Enforce page requirements for mmap
	usb: usbfs: Use consistent mmap functions
	staging: vc04_services: fix gcc-13 build warning
	ASoC: codecs: wsa881x: do not set can_multi_write flag
	i2c: sprd: Delete i2c adapter in .remove's error path
	eeprom: at24: also select REGMAP
	riscv: fix kprobe __user string arg print fault issue
	vhost: support PACKED when setting-getting vring_base
	Revert "ext4: don't clear SB_RDONLY when remounting r/w until quota is re-enabled"
	ext4: only check dquot_initialize_needed() when debugging
	tcp: fix tcp_min_tso_segs sysctl
	xfs: verify buffer contents when we skip log replay
	MIPS: locking/atomic: Fix atomic{_64,}_sub_if_positive
	drm/atomic: Don't pollute crtc_state->mode_blob with error pointers
	btrfs: check return value of btrfs_commit_transaction in relocation
	btrfs: unset reloc control if transaction commit fails in prepare_to_relocate()
	Revert "staging: rtl8192e: Replace macro RTL_PCI_DEVICE with PCI_DEVICE"
	Linux 5.10.184

Change-Id: If2d013f1bba8d713f8935810a5887f80eabae81c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-06-16 10:02:56 +00:00
commit b7321283a9
73 changed files with 601 additions and 1929 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 183
SUBLEVEL = 184
EXTRAVERSION =
NAME = Dare mighty things


@ -203,7 +203,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i.
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
type temp, result; \
\
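The one-line change above (int → type) matters for the 64-bit variant: a minimal userspace sketch of the truncation it fixes, with hypothetical names and no kernel dependencies:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the buggy macro expansion: a 64-bit result returned
 * through an int, so the high 32 bits are silently dropped. */
static int sub_if_positive_buggy(int64_t i, int64_t *v)
{
	int64_t result = *v - i;

	if (result >= 0)
		*v = result;
	return result;
}

int main(void)
{
	int64_t v = INT64_C(1) << 32;

	/* True result is 2^32 - 1 (positive), but the caller sees -1. */
	printf("%d\n", sub_if_positive_buggy(1, &v));
	return 0;
}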


@ -22,6 +22,7 @@ config RISCV
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
select ARCH_HAS_MMIOWB
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY


@ -232,7 +232,9 @@ enum {
/* 1/64k is granular enough and can easily be handled w/ u32 */
WEIGHT_ONE = 1 << 16,
};
enum {
/*
* As vtime is used to calculate the cost of each IO, it needs to
* be fairly high precision. For example, it should be able to
@ -256,6 +258,11 @@ enum {
VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
VRATE_CLAMP_ADJ_PCT = 4,
/* switch iff the conditions are met for longer than this */
AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
};
enum {
/* if IOs end up waiting for requests, issue less */
RQ_WAIT_BUSY_PCT = 5,
@ -294,9 +301,6 @@ enum {
/* don't let cmds which take a very long time pin lagging for too long */
MAX_LAGGING_PERIODS = 10,
/* switch iff the conditions are met for longer than this */
AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
/*
* Count IO size in 4k pages. The 12bit shift helps keeping
* size-proportional components of cost calculation in closer
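Context for the enum split above, as I understand the gcc-13 change (not text from the patch): once any enumerator exceeds the range of int, gcc-13 widens the type of every constant in that enum, so the 64-bit AUTOP_CYCLE_NSEC was dragging the neighbouring vrate constants to 64 bits and breaking ioc_timer_fn() on 32-bit builds. Isolating it keeps the rest int-sized; illustrative only:

enum {
	SMALL_TOGETHER = 500,			/* widened past int by the next line */
	HUGE_NSEC = 10LLU * 1000000000ULL,	/* > INT_MAX */
};

enum {
	SMALL_ALONE = 500,			/* its own enum: stays int-sized */
};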


@ -24,6 +24,7 @@
#include <linux/libata.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/bits.h>
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE 0x000f0000
@ -54,12 +55,12 @@ enum {
AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
AHCI_CMD_TBL_AR_SZ +
(AHCI_RX_FIS_SZ * 16),
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
AHCI_CMD_WRITE = (1 << 6),
AHCI_CMD_PREFETCH = (1 << 7),
AHCI_CMD_RESET = (1 << 8),
AHCI_CMD_CLR_BUSY = (1 << 10),
AHCI_IRQ_ON_SG = BIT(31),
AHCI_CMD_ATAPI = BIT(5),
AHCI_CMD_WRITE = BIT(6),
AHCI_CMD_PREFETCH = BIT(7),
AHCI_CMD_RESET = BIT(8),
AHCI_CMD_CLR_BUSY = BIT(10),
RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */
RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
@ -77,37 +78,37 @@ enum {
HOST_CAP2 = 0x24, /* host capabilities, extended */
/* HOST_CTL bits */
HOST_RESET = (1 << 0), /* reset controller; self-clear */
HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
HOST_MRSM = (1 << 2), /* MSI Revert to Single Message */
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
HOST_RESET = BIT(0), /* reset controller; self-clear */
HOST_IRQ_EN = BIT(1), /* global IRQ enable */
HOST_MRSM = BIT(2), /* MSI Revert to Single Message */
HOST_AHCI_EN = BIT(31), /* AHCI enabled */
/* HOST_CAP bits */
HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
HOST_CAP_PART = (1 << 13), /* Partial state capable */
HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_LED = (1 << 25), /* Supports activity LED */
HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
HOST_CAP_SNTF = (1 << 29), /* SNotification register */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
HOST_CAP_SXS = BIT(5), /* Supports External SATA */
HOST_CAP_EMS = BIT(6), /* Enclosure Management support */
HOST_CAP_CCC = BIT(7), /* Command Completion Coalescing */
HOST_CAP_PART = BIT(13), /* Partial state capable */
HOST_CAP_SSC = BIT(14), /* Slumber state capable */
HOST_CAP_PIO_MULTI = BIT(15), /* PIO multiple DRQ support */
HOST_CAP_FBS = BIT(16), /* FIS-based switching support */
HOST_CAP_PMP = BIT(17), /* Port Multiplier support */
HOST_CAP_ONLY = BIT(18), /* Supports AHCI mode only */
HOST_CAP_CLO = BIT(24), /* Command List Override support */
HOST_CAP_LED = BIT(25), /* Supports activity LED */
HOST_CAP_ALPM = BIT(26), /* Aggressive Link PM support */
HOST_CAP_SSS = BIT(27), /* Staggered Spin-up */
HOST_CAP_MPS = BIT(28), /* Mechanical presence switch */
HOST_CAP_SNTF = BIT(29), /* SNotification register */
HOST_CAP_NCQ = BIT(30), /* Native Command Queueing */
HOST_CAP_64 = BIT(31), /* PCI DAC (64-bit DMA) support */
/* HOST_CAP2 bits */
HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
HOST_CAP2_SDS = (1 << 3), /* Support device sleep */
HOST_CAP2_SADM = (1 << 4), /* Support aggressive DevSlp */
HOST_CAP2_DESO = (1 << 5), /* DevSlp from slumber only */
HOST_CAP2_BOH = BIT(0), /* BIOS/OS handoff supported */
HOST_CAP2_NVMHCI = BIT(1), /* NVMHCI supported */
HOST_CAP2_APST = BIT(2), /* Automatic partial to slumber */
HOST_CAP2_SDS = BIT(3), /* Support device sleep */
HOST_CAP2_SADM = BIT(4), /* Support aggressive DevSlp */
HOST_CAP2_DESO = BIT(5), /* DevSlp from slumber only */
/* registers for each SATA port */
PORT_LST_ADDR = 0x00, /* command list DMA addr */
@ -129,24 +130,24 @@ enum {
PORT_DEVSLP = 0x44, /* device sleep */
/* PORT_IRQ_{STAT,MASK} bits */
PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
PORT_IRQ_COLD_PRES = BIT(31), /* cold presence detect */
PORT_IRQ_TF_ERR = BIT(30), /* task file error */
PORT_IRQ_HBUS_ERR = BIT(29), /* host bus fatal error */
PORT_IRQ_HBUS_DATA_ERR = BIT(28), /* host bus data error */
PORT_IRQ_IF_ERR = BIT(27), /* interface fatal error */
PORT_IRQ_IF_NONFATAL = BIT(26), /* interface non-fatal error */
PORT_IRQ_OVERFLOW = BIT(24), /* xfer exhausted available S/G */
PORT_IRQ_BAD_PMP = BIT(23), /* incorrect port multiplier */
PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
PORT_IRQ_PHYRDY = BIT(22), /* PhyRdy changed */
PORT_IRQ_DEV_ILCK = BIT(7), /* device interlock */
PORT_IRQ_CONNECT = BIT(6), /* port connect change status */
PORT_IRQ_SG_DONE = BIT(5), /* descriptor processed */
PORT_IRQ_UNK_FIS = BIT(4), /* unknown FIS rx'd */
PORT_IRQ_SDB_FIS = BIT(3), /* Set Device Bits FIS rx'd */
PORT_IRQ_DMAS_FIS = BIT(2), /* DMA Setup FIS rx'd */
PORT_IRQ_PIOS_FIS = BIT(1), /* PIO Setup FIS rx'd */
PORT_IRQ_D2H_REG_FIS = BIT(0), /* D2H Register FIS rx'd */
PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
PORT_IRQ_IF_ERR |
@ -162,34 +163,34 @@ enum {
PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
/* PORT_CMD bits */
PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
PORT_CMD_ESP = (1 << 21), /* External Sata Port */
PORT_CMD_HPCP = (1 << 18), /* HotPlug Capable Port */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
PORT_CMD_CLO = (1 << 3), /* Command list override */
PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
PORT_CMD_ASP = BIT(27), /* Aggressive Slumber/Partial */
PORT_CMD_ALPE = BIT(26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = BIT(24), /* Device is ATAPI */
PORT_CMD_FBSCP = BIT(22), /* FBS Capable Port */
PORT_CMD_ESP = BIT(21), /* External Sata Port */
PORT_CMD_HPCP = BIT(18), /* HotPlug Capable Port */
PORT_CMD_PMP = BIT(17), /* PMP attached */
PORT_CMD_LIST_ON = BIT(15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = BIT(14), /* FIS DMA engine running */
PORT_CMD_FIS_RX = BIT(4), /* Enable FIS receive DMA engine */
PORT_CMD_CLO = BIT(3), /* Command list override */
PORT_CMD_POWER_ON = BIT(2), /* Power up device */
PORT_CMD_SPIN_UP = BIT(1), /* Spin up device */
PORT_CMD_START = BIT(0), /* Enable port DMA engine */
PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
PORT_CMD_ICC_MASK = (0xfu << 28), /* i/f ICC state mask */
PORT_CMD_ICC_ACTIVE = (0x1u << 28), /* Put i/f in active state */
PORT_CMD_ICC_PARTIAL = (0x2u << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6u << 28), /* Put i/f in slumber state */
/* PORT_FBS bits */
PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
PORT_FBS_SDE = (1 << 2), /* FBS single device error */
PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
PORT_FBS_EN = (1 << 0), /* Enable FBS */
PORT_FBS_SDE = BIT(2), /* FBS single device error */
PORT_FBS_DEC = BIT(1), /* FBS device error clear */
PORT_FBS_EN = BIT(0), /* Enable FBS */
/* PORT_DEVSLP bits */
PORT_DEVSLP_DM_OFFSET = 25, /* DITO multiplier offset */
@ -197,52 +198,52 @@ enum {
PORT_DEVSLP_DITO_OFFSET = 15, /* DITO offset */
PORT_DEVSLP_MDAT_OFFSET = 10, /* Minimum assertion time */
PORT_DEVSLP_DETO_OFFSET = 2, /* DevSlp exit timeout */
PORT_DEVSLP_DSP = (1 << 1), /* DevSlp present */
PORT_DEVSLP_ADSE = (1 << 0), /* Aggressive DevSlp enable */
PORT_DEVSLP_DSP = BIT(1), /* DevSlp present */
PORT_DEVSLP_ADSE = BIT(0), /* Aggressive DevSlp enable */
/* hpriv->flags bits */
#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
AHCI_HFLAG_NO_NCQ = (1 << 0),
AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
link offline */
AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = (1 << 13), /* no FPDMA AA */
AHCI_HFLAG_YES_FBS = (1 << 14), /* force FBS cap on */
AHCI_HFLAG_DELAY_ENGINE = (1 << 15), /* do not start engine on
port start (wait until
error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
AHCI_HFLAG_NO_NCQ = BIT(0),
AHCI_HFLAG_IGN_IRQ_IF_ERR = BIT(1), /* ignore IRQ_IF_ERR */
AHCI_HFLAG_IGN_SERR_INTERNAL = BIT(2), /* ignore SERR_INTERNAL */
AHCI_HFLAG_32BIT_ONLY = BIT(3), /* force 32bit */
AHCI_HFLAG_MV_PATA = BIT(4), /* PATA port */
AHCI_HFLAG_NO_MSI = BIT(5), /* no PCI MSI */
AHCI_HFLAG_NO_PMP = BIT(6), /* no PMP */
AHCI_HFLAG_SECT255 = BIT(8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = BIT(9), /* force NCQ cap on */
AHCI_HFLAG_NO_SUSPEND = BIT(10), /* don't suspend */
AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = BIT(11), /* treat SRST timeout as
link offline */
AHCI_HFLAG_NO_SNTF = BIT(12), /* no sntf */
AHCI_HFLAG_NO_FPDMA_AA = BIT(13), /* no FPDMA AA */
AHCI_HFLAG_YES_FBS = BIT(14), /* force FBS cap on */
AHCI_HFLAG_DELAY_ENGINE = BIT(15), /* do not start engine on
port start (wait until
error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = BIT(17), /* no device sleep */
AHCI_HFLAG_NO_FBS = BIT(18), /* no FBS */
#ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
AHCI_HFLAG_MULTI_MSI = BIT(20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
AHCI_HFLAG_YES_ALPM = (1 << 23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = (1 << 24), /* don't write to read
only registers */
AHCI_HFLAG_IS_MOBILE = (1 << 25), /* mobile chipset, use
SATA_MOBILE_LPM_POLICY
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = (1 << 26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = (1 << 27), /* ignore -EOPNOTSUPP
from phy_power_on() */
AHCI_HFLAG_NO_SXS = (1 << 28), /* SXS not supported */
AHCI_HFLAG_WAKE_BEFORE_STOP = BIT(22), /* wake before DMA stop */
AHCI_HFLAG_YES_ALPM = BIT(23), /* force ALPM cap on */
AHCI_HFLAG_NO_WRITE_TO_RO = BIT(24), /* don't write to read
only registers */
AHCI_HFLAG_IS_MOBILE = BIT(25), /* mobile chipset, use
SATA_MOBILE_LPM_POLICY
as default lpm_policy */
AHCI_HFLAG_SUSPEND_PHYS = BIT(26), /* handle PHYs during
suspend/resume */
AHCI_HFLAG_IGN_NOTSUPP_POWER_ON = BIT(27), /* ignore -EOPNOTSUPP
from phy_power_on() */
AHCI_HFLAG_NO_SXS = BIT(28), /* SXS not supported */
/* ap->flags bits */
@ -258,22 +259,22 @@ enum {
EM_MAX_RETRY = 5,
/* em_ctl bits */
EM_CTL_RST = (1 << 9), /* Reset */
EM_CTL_TM = (1 << 8), /* Transmit Message */
EM_CTL_MR = (1 << 0), /* Message Received */
EM_CTL_ALHD = (1 << 26), /* Activity LED */
EM_CTL_XMT = (1 << 25), /* Transmit Only */
EM_CTL_SMB = (1 << 24), /* Single Message Buffer */
EM_CTL_SGPIO = (1 << 19), /* SGPIO messages supported */
EM_CTL_SES = (1 << 18), /* SES-2 messages supported */
EM_CTL_SAFTE = (1 << 17), /* SAF-TE messages supported */
EM_CTL_LED = (1 << 16), /* LED messages supported */
EM_CTL_RST = BIT(9), /* Reset */
EM_CTL_TM = BIT(8), /* Transmit Message */
EM_CTL_MR = BIT(0), /* Message Received */
EM_CTL_ALHD = BIT(26), /* Activity LED */
EM_CTL_XMT = BIT(25), /* Transmit Only */
EM_CTL_SMB = BIT(24), /* Single Message Buffer */
EM_CTL_SGPIO = BIT(19), /* SGPIO messages supported */
EM_CTL_SES = BIT(18), /* SES-2 messages supported */
EM_CTL_SAFTE = BIT(17), /* SAF-TE messages supported */
EM_CTL_LED = BIT(16), /* LED messages supported */
/* em message type */
EM_MSG_TYPE_LED = (1 << 0), /* LED */
EM_MSG_TYPE_SAFTE = (1 << 1), /* SAF-TE */
EM_MSG_TYPE_SES2 = (1 << 2), /* SES-2 */
EM_MSG_TYPE_SGPIO = (1 << 3), /* SGPIO */
EM_MSG_TYPE_LED = BIT(0), /* LED */
EM_MSG_TYPE_SAFTE = BIT(1), /* SAF-TE */
EM_MSG_TYPE_SES2 = BIT(2), /* SES-2 */
EM_MSG_TYPE_SGPIO = BIT(3), /* SGPIO */
};
struct ahci_cmd_hdr {

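The mechanical (1 << n) → BIT(n) conversion above is more than style: (1 << 31) overflows signed int, which gcc-13's stricter enumerator typing turns into an error, while BIT() shifts an unsigned long. A simplified equivalent of the macro from the newly included <linux/bits.h> (illustrative):

#define BIT(nr)		(1UL << (nr))

/* (1 << 31) -> int with the sign bit set, value -2147483648 (UB to form);
 * BIT(31)   -> unsigned long, value 2147483648, well-defined. */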

@ -293,15 +293,6 @@ config BLK_DEV_SKD
Use device /dev/skd$N amd /dev/skd$Np$M.
config BLK_DEV_SX8
tristate "Promise SATA SX8 support"
depends on PCI
help
Saying Y or M here will enable support for the
Promise SATA SX8 controllers.
Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
config BLK_DEV_RAM
tristate "RAM block device support"
help


@ -29,8 +29,6 @@ obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/


@ -1397,14 +1397,30 @@ static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
/*
* Must be called after rbd_obj_calc_img_extents().
*/
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
if (!obj_req->num_img_extents ||
(rbd_obj_is_entire(obj_req) &&
!obj_req->img_request->snapc->num_snaps))
return false;
rbd_assert(obj_req->img_request->snapc);
return true;
if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
dout("%s %p objno %llu discard\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (!obj_req->num_img_extents) {
dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
if (rbd_obj_is_entire(obj_req) &&
!obj_req->img_request->snapc->num_snaps) {
dout("%s %p objno %llu entire\n", __func__, obj_req,
obj_req->ex.oe_objno);
return;
}
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}
static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
@ -1505,6 +1521,7 @@ __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
rbd_assert(obj_req->img_request->snapc);
return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
num_ops);
}
@ -1641,15 +1658,18 @@ static void rbd_img_request_init(struct rbd_img_request *img_request,
mutex_init(&img_request->state_mutex);
}
/*
* Only snap_id is captured here, for reads. For writes, snapshot
* context is captured in rbd_img_object_requests() after exclusive
* lock is ensured to be held.
*/
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
lockdep_assert_held(&rbd_dev->header_rwsem);
if (rbd_img_is_write(img_req))
img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
else
if (!rbd_img_is_write(img_req))
img_req->snap_id = rbd_dev->spec->snap_id;
if (rbd_dev_parent_get(rbd_dev))
@ -2296,9 +2316,6 @@ static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
if (ret)
return ret;
if (rbd_obj_copyup_enabled(obj_req))
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
obj_req->write_state = RBD_OBJ_WRITE_START;
return 0;
}
@ -2404,8 +2421,6 @@ static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
if (ret)
return ret;
if (rbd_obj_copyup_enabled(obj_req))
obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
if (!obj_req->num_img_extents) {
obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
if (rbd_obj_is_entire(obj_req))
@ -3351,6 +3366,7 @@ static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
case RBD_OBJ_WRITE_START:
rbd_assert(!*result);
rbd_obj_set_copyup_enabled(obj_req);
if (rbd_obj_write_is_noop(obj_req))
return true;
@ -3537,9 +3553,19 @@ static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
static void rbd_img_object_requests(struct rbd_img_request *img_req)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
struct rbd_obj_request *obj_req;
rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
if (rbd_img_is_write(img_req)) {
rbd_assert(!img_req->snapc);
down_read(&rbd_dev->header_rwsem);
img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
up_read(&rbd_dev->header_rwsem);
}
for_each_obj_request(img_req, obj_req) {
int result = 0;
@ -3557,7 +3583,6 @@ static void rbd_img_object_requests(struct rbd_img_request *img_req)
static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
{
struct rbd_device *rbd_dev = img_req->rbd_dev;
int ret;
again:
@ -3578,9 +3603,6 @@ static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
if (*result)
return true;
rbd_assert(!need_exclusive_lock(img_req) ||
__rbd_is_lock_owner(rbd_dev));
rbd_img_object_requests(img_req);
if (!img_req->pending.num_pending) {
*result = img_req->pending.result;
@ -4038,6 +4060,10 @@ static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
ret = rbd_object_map_open(rbd_dev);
if (ret)

File diff suppressed because it is too large.


@ -78,7 +78,8 @@ enum qca_flags {
QCA_HW_ERROR_EVENT,
QCA_SSR_TRIGGERED,
QCA_BT_OFF,
QCA_ROM_FW
QCA_ROM_FW,
QCA_DEBUGFS_CREATED,
};
enum qca_capabilities {
@ -633,6 +634,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
if (!hdev->debugfs)
return;
if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
return;
ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
/* read only */
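The test_and_set_bit() guard added above is the standard once-only idiom; spelled out (names from the hunk, commentary mine):

/* test_and_set_bit() atomically sets the flag and returns its old
 * value, so of any racing or repeated callers exactly the first one
 * proceeds to create the debugfs entries; later calls return early
 * instead of registering duplicates. */
if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
	return;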


@ -329,8 +329,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
u32 reference_clock = adev->clock.spll.reference_freq;
u32 tmp;
if (adev->flags & AMD_IS_APU)
return reference_clock;
if (adev->flags & AMD_IS_APU) {
switch (adev->asic_type) {
case CHIP_STONEY:
/* vbios says 48Mhz, but the actual freq is 100Mhz */
return 10000;
default:
return reference_clock;
}
}
tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))


@ -75,15 +75,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
struct drm_property_blob *blob;
drm_mode_convert_to_umode(&umode, mode);
state->mode_blob =
drm_property_create_blob(state->crtc->dev,
sizeof(umode),
&umode);
if (IS_ERR(state->mode_blob))
return PTR_ERR(state->mode_blob);
blob = drm_property_create_blob(crtc->dev,
sizeof(umode), &umode);
if (IS_ERR(blob))
return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
state->mode_blob = blob;
state->enable = true;
DRM_DEBUG_ATOMIC("Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
mode->name, crtc->base.id, crtc->name, state);
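The reshuffle above follows the build-locally-then-publish rule for error pointers; in compact form (illustrative):

/* Validate into a local so that on failure state->mode_blob keeps its
 * previous value (NULL here) rather than an ERR_PTR, which later
 * cleanup paths would dereference as if it were a real blob. */
blob = drm_property_create_blob(crtc->dev, sizeof(umode), &umode);
if (IS_ERR(blob))
	return PTR_ERR(blob);
state->mode_blob = blob;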


@ -576,12 +576,14 @@ static int sprd_i2c_remove(struct platform_device *pdev)
struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
int ret;
ret = pm_runtime_resume_and_get(i2c_dev->dev);
ret = pm_runtime_get_sync(i2c_dev->dev);
if (ret < 0)
return ret;
dev_err(&pdev->dev, "Failed to resume device (%pe)\n", ERR_PTR(ret));
i2c_del_adapter(&i2c_dev->adap);
clk_disable_unprepare(i2c_dev->clk);
if (ret >= 0)
clk_disable_unprepare(i2c_dev->clk);
pm_runtime_put_noidle(i2c_dev->dev);
pm_runtime_disable(i2c_dev->dev);


@ -422,9 +422,8 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
bool ipv4,
u32 action);
int i40iw_manage_apbvt(struct i40iw_device *iwdev,
u16 accel_local_port,
bool add_port);
enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev,
u16 accel_local_port, bool add_port);
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);


@ -262,7 +262,6 @@ static const struct xpad_device {
{ 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },


@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
int id = ((packet[3] & 0xe0) >> 5) - 1;
int id;
int pres, traces;
if (id < 0)
id = ((packet[3] & 0xe0) >> 5) - 1;
if (id < 0 || id >= ETP_MAX_FINGERS)
return;
etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
int id, sid;
id = ((packet[0] & 0xe0) >> 5) - 1;
if (id < 0)
if (id < 0 || id >= ETP_MAX_FINGERS)
return;
sid = ((packet[3] & 0xe0) >> 5) - 1;
@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
if (sid >= 0) {
if (sid >= 0 && sid < ETP_MAX_FINGERS) {
etd->mt[sid].x += delta_x2 * weight;
etd->mt[sid].y -= delta_y2 * weight;
input_mt_slot(dev, sid);


@ -6,6 +6,7 @@ config EEPROM_AT24
depends on I2C && SYSFS
select NVMEM
select NVMEM_SYSFS
select REGMAP
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs


@ -1187,8 +1187,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
if (vid)
return -EOPNOTSUPP;
return lan9303_alr_add_port(chip, addr, port, false);
}
@ -1200,8 +1198,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
struct lan9303 *chip = ds->priv;
dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
if (vid)
return -EOPNOTSUPP;
lan9303_alr_del_port(chip, addr, port);
return 0;


@ -8337,6 +8337,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
rc = bnxt_setup_vnic(bp, 0);
if (rc)
goto err_out;
@ -12101,26 +12104,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
struct udp_tunnel_info ti;
unsigned int cmd;
udp_tunnel_nic_get_port(netdev, table, 0, &ti);
if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
if (ti.port)
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
}
static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int entry, struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(netdev);
unsigned int cmd;
if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
else
cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}
static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
.sync_table = bnxt_udp_tunnel_sync,
.set_port = bnxt_udp_tunnel_set_port,
.unset_port = bnxt_udp_tunnel_unset_port,
.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
.tables = {


@ -3440,7 +3440,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
}
}
if (req & BNXT_FW_RESET_AP) {
if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code >= 0x10803) {
if (!bnxt_firmware_reset_ap(dev)) {


@ -20,16 +20,11 @@ enum i40e_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
enum i40e_memory_type type,
u64 size, u32 alignment);
i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem);
i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem,
u32 size);
i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
struct i40e_virt_mem *mem);
int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
enum i40e_memory_type type, u64 size, u32 alignment);
int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem);
int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
u32 size);
int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */


@ -128,7 +128,7 @@ void ice_fltr_remove_all(struct ice_vsi *vsi)
* @mac: MAC address to add
* @action: filter action
*/
int
enum ice_status
ice_fltr_add_mac_to_list(struct ice_vsi *vsi, struct list_head *list,
const u8 *mac, enum ice_sw_fwd_act_type action)
{


@ -1902,7 +1902,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
u32 i;
if (!cdev) {
if (!cdev || cdev->recov_in_prog) {
memset(stats, 0, sizeof(*stats));
return;
}


@ -273,6 +273,10 @@ struct qede_dev {
#define QEDE_ERR_WARN 3
struct qede_dump_info dump_info;
struct delayed_work periodic_task;
unsigned long stats_coal_ticks;
u32 stats_coal_usecs;
spinlock_t stats_lock; /* lock for vport stats access */
};
enum QEDE_STATE {


@ -426,6 +426,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
}
}
spin_lock(&edev->stats_lock);
for (i = 0; i < QEDE_NUM_STATS; i++) {
if (qede_is_irrelevant_stat(edev, i))
continue;
@ -435,6 +437,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
buf++;
}
spin_unlock(&edev->stats_lock);
__qede_unlock(edev);
}
@ -815,6 +819,7 @@ static int qede_get_coalesce(struct net_device *dev,
coal->rx_coalesce_usecs = rx_coal;
coal->tx_coalesce_usecs = tx_coal;
coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
return rc;
}
@ -827,6 +832,19 @@ static int qede_set_coalesce(struct net_device *dev,
int i, rc = 0;
u16 rxc, txc;
if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
if (edev->stats_coal_usecs) {
edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
schedule_delayed_work(&edev->periodic_task, 0);
DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
edev->stats_coal_ticks);
} else {
cancel_delayed_work_sync(&edev->periodic_task);
}
}
if (!netif_running(dev)) {
DP_INFO(edev, "Interface is down\n");
return -EINVAL;
@ -2106,7 +2124,8 @@ static int qede_get_dump_data(struct net_device *dev,
}
static const struct ethtool_ops qede_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.set_link_ksettings = qede_set_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
@ -2155,7 +2174,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_STATS_BLOCK_USECS,
.get_link_ksettings = qede_get_link_ksettings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,


@ -313,6 +313,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->ops->get_vport_stats(edev->cdev, &stats);
spin_lock(&edev->stats_lock);
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
p_common->ttl0_discard = stats.common.ttl0_discard;
@ -410,6 +412,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
p_ah->tx_1519_to_max_byte_packets =
stats.ah.tx_1519_to_max_byte_packets;
}
spin_unlock(&edev->stats_lock);
}
static void qede_get_stats64(struct net_device *dev,
@ -418,9 +422,10 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
qede_fill_by_demand_stats(edev);
p_common = &edev->stats.common;
spin_lock(&edev->stats_lock);
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@ -440,6 +445,8 @@ static void qede_get_stats64(struct net_device *dev,
stats->collisions = edev->stats.bb.tx_total_collisions;
stats->rx_crc_errors = p_common->rx_crc_errors;
stats->rx_frame_errors = p_common->rx_align_errors;
spin_unlock(&edev->stats_lock);
}
#ifdef CONFIG_QED_SRIOV
@ -1001,6 +1008,23 @@ static void qede_unlock(struct qede_dev *edev)
rtnl_unlock();
}
static void qede_periodic_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
periodic_task.work);
qede_fill_by_demand_stats(edev);
schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}
static void qede_init_periodic_task(struct qede_dev *edev)
{
INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
spin_lock_init(&edev->stats_lock);
edev->stats_coal_usecs = USEC_PER_SEC;
edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}
static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
@ -1020,6 +1044,7 @@ static void qede_sp_task(struct work_struct *work)
*/
if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
/* SRIOV must be disabled outside the lock to avoid a deadlock.
* The recovery of the active VFs is currently not supported.
@ -1216,6 +1241,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
*/
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
qede_init_periodic_task(edev);
rc = register_netdev(edev->ndev);
if (rc) {
@ -1240,6 +1266,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->rx_copybreak = QEDE_RX_HDR_SIZE;
qede_log_probe(edev);
/* retain user config (for example - after recovery) */
if (edev->stats_coal_usecs)
schedule_delayed_work(&edev->periodic_task, 0);
return 0;
err4:
@ -1308,6 +1339,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
unregister_netdev(ndev);
cancel_delayed_work_sync(&edev->sp_task);
cancel_delayed_work_sync(&edev->periodic_task);
edev->ops->common->set_power_state(cdev, PCI_D0);


@ -333,7 +333,8 @@ void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event)
* Returns 0 on success, error code otherwise. In case of an error this
* function will free the SKB.
*/
int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
struct efx_nic *efx = tx_queue->efx;


@ -779,7 +779,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
poll_list);
spin_lock_bh(&dev->sta_poll_lock);
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;


@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
GPIO_GROUP(GPIOA_15),
GPIO_GROUP(GPIOA_16),
GPIO_GROUP(GPIOA_17),
GPIO_GROUP(GPIOA_18),
GPIO_GROUP(GPIOA_19),
GPIO_GROUP(GPIOA_20),


@ -505,10 +505,10 @@ static int __dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
spin_lock_irqsave(&block->queue_lock, flags);
spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
list_for_each(l, &base->ccw_queue)
dasd_info->chanq_len++;
spin_unlock_irqrestore(&block->queue_lock, flags);
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
return 0;
}


@ -1030,23 +1030,8 @@ static int spi_qup_probe(struct platform_device *pdev)
return -ENXIO;
}
ret = clk_prepare_enable(cclk);
if (ret) {
dev_err(dev, "cannot enable core clock\n");
return ret;
}
ret = clk_prepare_enable(iclk);
if (ret) {
clk_disable_unprepare(cclk);
dev_err(dev, "cannot enable iface clock\n");
return ret;
}
master = spi_alloc_master(dev, sizeof(struct spi_qup));
if (!master) {
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
dev_err(dev, "cannot allocate master\n");
return -ENOMEM;
}
@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
spin_lock_init(&controller->lock);
init_completion(&controller->done);
ret = clk_prepare_enable(cclk);
if (ret) {
dev_err(dev, "cannot enable core clock\n");
goto error_dma;
}
ret = clk_prepare_enable(iclk);
if (ret) {
clk_disable_unprepare(cclk);
dev_err(dev, "cannot enable iface clock\n");
goto error_dma;
}
iomode = readl_relaxed(base + QUP_IO_M_MODES);
size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = spi_qup_set_state(controller, QUP_STATE_RESET);
if (ret) {
dev_err(dev, "cannot set RESET state\n");
goto error_dma;
goto error_clk;
}
writel_relaxed(0, base + QUP_OPERATIONAL);
@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
IRQF_TRIGGER_HIGH, pdev->name, controller);
if (ret)
goto error_dma;
goto error_clk;
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
pm_runtime_use_autosuspend(dev);
@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
disable_pm:
pm_runtime_disable(&pdev->dev);
error_clk:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
error_dma:
spi_qup_release_dma(master);
error:
clk_disable_unprepare(cclk);
clk_disable_unprepare(iclk);
spi_master_put(master);
return ret;
}


@ -50,9 +50,9 @@ static const struct rtl819x_ops rtl819xp_ops = {
};
static struct pci_device_id rtl8192_pci_id_tbl[] = {
{PCI_DEVICE(0x10ec, 0x8192)},
{PCI_DEVICE(0x07aa, 0x0044)},
{PCI_DEVICE(0x07aa, 0x0047)},
{RTL_PCI_DEVICE(0x10ec, 0x8192, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0044, rtl819xp_ops)},
{RTL_PCI_DEVICE(0x07aa, 0x0047, rtl819xp_ops)},
{}
};


@ -55,6 +55,11 @@
#define IS_HARDWARE_TYPE_8192SE(_priv) \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192SE)
#define RTL_PCI_DEVICE(vend, dev, cfg) \
.vendor = (vend), .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \
.driver_data = (kernel_ulong_t)&(cfg)
#define TOTAL_CAM_ENTRY 32
#define CAM_CONTENT_COUNT 8


@ -147,12 +147,11 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
#define VCHIQ_INIT_RETRIES 10
enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
int vchiq_initialise(struct vchiq_instance **instance_out)
{
enum vchiq_status status = VCHIQ_ERROR;
struct vchiq_state *state;
struct vchiq_instance *instance = NULL;
int i;
int i, ret;
vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);
@ -169,6 +168,7 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
if (i == VCHIQ_INIT_RETRIES) {
vchiq_log_error(vchiq_core_log_level,
"%s: videocore not initialized\n", __func__);
ret = -ENOTCONN;
goto failed;
} else if (i > 0) {
vchiq_log_warning(vchiq_core_log_level,
@ -180,6 +180,7 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
if (!instance) {
vchiq_log_error(vchiq_core_log_level,
"%s: error allocating vchiq instance\n", __func__);
ret = -ENOMEM;
goto failed;
}
@ -190,13 +191,13 @@ enum vchiq_status vchiq_initialise(struct vchiq_instance **instance_out)
*instance_out = instance;
status = VCHIQ_SUCCESS;
ret = 0;
failed:
vchiq_log_trace(vchiq_core_log_level,
"%s(%p): returning %d", __func__, instance, status);
"%s(%p): returning %d", __func__, instance, ret);
return status;
return ret;
}
EXPORT_SYMBOL(vchiq_initialise);
@ -2223,6 +2224,7 @@ vchiq_keepalive_thread_func(void *v)
enum vchiq_status status;
struct vchiq_instance *instance;
unsigned int ka_handle;
int ret;
struct vchiq_service_params_kernel params = {
.fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
@ -2231,10 +2233,10 @@ vchiq_keepalive_thread_func(void *v)
.version_min = KEEPALIVE_VER_MIN
};
status = vchiq_initialise(&instance);
if (status != VCHIQ_SUCCESS) {
ret = vchiq_initialise(&instance);
if (ret) {
vchiq_log_error(vchiq_susp_log_level,
"%s vchiq_initialise failed %d", __func__, status);
"%s vchiq_initialise failed %d", __func__, ret);
goto exit;
}
@ -2313,7 +2315,7 @@ vchiq_arm_init_state(struct vchiq_state *state,
return VCHIQ_SUCCESS;
}
enum vchiq_status
int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
{
@ -2373,7 +2375,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
return ret;
}
enum vchiq_status
int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);


@ -118,16 +118,18 @@ struct tee_cmd_unmap_shared_mem {
/**
* struct tee_cmd_load_ta - load Trusted Application (TA) binary into TEE
* @low_addr: [in] bits [31:0] of the physical address of the TA binary
* @hi_addr: [in] bits [63:32] of the physical address of the TA binary
* @size: [in] size of TA binary in bytes
* @ta_handle: [out] return handle of the loaded TA
* @low_addr: [in] bits [31:0] of the physical address of the TA binary
* @hi_addr: [in] bits [63:32] of the physical address of the TA binary
* @size: [in] size of TA binary in bytes
* @ta_handle: [out] return handle of the loaded TA
* @return_origin: [out] origin of return code after TEE processing
*/
struct tee_cmd_load_ta {
u32 low_addr;
u32 hi_addr;
u32 size;
u32 ta_handle;
u32 return_origin;
};
/**


@ -423,19 +423,23 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
} else if (arg->ret == TEEC_SUCCESS) {
ret = get_ta_refcount(load_cmd.ta_handle);
if (!ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
} else {
arg->ret_origin = load_cmd.return_origin;
/* Unload the TA on error */
unload_cmd.ta_handle = load_cmd.ta_handle;
psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
(void *)&unload_cmd,
sizeof(unload_cmd), &ret);
} else {
set_session_id(load_cmd.ta_handle, 0, &arg->session);
if (arg->ret == TEEC_SUCCESS) {
ret = get_ta_refcount(load_cmd.ta_handle);
if (!ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
/* Unload the TA on error */
unload_cmd.ta_handle = load_cmd.ta_handle;
psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
(void *)&unload_cmd,
sizeof(unload_cmd), &ret);
} else {
set_session_id(load_cmd.ta_handle, 0, &arg->session);
}
}
}
mutex_unlock(&ta_refcount_mutex);


@ -170,3 +170,44 @@ void hcd_buffer_free(
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma)
{
if (size == 0)
return NULL;
if (hcd->localmem_pool)
return gen_pool_dma_alloc_align(hcd->localmem_pool,
size, dma, PAGE_SIZE);
/* some USB hosts just use PIO */
if (!hcd_uses_dma(hcd)) {
*dma = DMA_MAPPING_ERROR;
return (void *)__get_free_pages(mem_flags,
get_order(size));
}
return dma_alloc_coherent(hcd->self.sysdev,
size, dma, mem_flags);
}
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma)
{
if (!addr)
return;
if (hcd->localmem_pool) {
gen_pool_free(hcd->localmem_pool,
(unsigned long)addr, size);
return;
}
if (!hcd_uses_dma(hcd)) {
free_pages((unsigned long)addr, get_order(size));
return;
}
dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}


@ -173,6 +173,7 @@ static int connected(struct usb_dev_state *ps)
static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
{
struct usb_dev_state *ps = usbm->ps;
struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
unsigned long flags;
spin_lock_irqsave(&ps->lock, flags);
@ -181,8 +182,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
list_del(&usbm->memlist);
spin_unlock_irqrestore(&ps->lock, flags);
usb_free_coherent(ps->dev, usbm->size, usbm->mem,
usbm->dma_handle);
hcd_buffer_free_pages(hcd, usbm->size,
usbm->mem, usbm->dma_handle);
usbfs_decrease_memory_usage(
usbm->size + sizeof(struct usb_memory));
kfree(usbm);
@ -221,7 +222,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
size_t size = vma->vm_end - vma->vm_start;
void *mem;
unsigned long flags;
dma_addr_t dma_handle;
dma_addr_t dma_handle = DMA_MAPPING_ERROR;
int ret;
ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
@ -234,8 +235,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
goto error_decrease_mem;
}
mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
&dma_handle);
mem = hcd_buffer_alloc_pages(hcd,
size, GFP_USER | __GFP_NOWARN, &dma_handle);
if (!mem) {
ret = -ENOMEM;
goto error_free_usbm;
@ -251,7 +252,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
usbm->vma_use_count = 1;
INIT_LIST_HEAD(&usbm->memlist);
if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
/*
* In DMA-unavailable cases, hcd_buffer_alloc_pages allocates
* normal pages and assigns DMA_MAPPING_ERROR to dma_handle. Check
* whether we are in such cases, and then use remap_pfn_range (or
* dma_mmap_coherent) to map normal (or DMA) pages into the user
* space, respectively.
*/
if (dma_handle == DMA_MAPPING_ERROR) {
if (remap_pfn_range(vma, vma->vm_start,
virt_to_phys(usbm->mem) >> PAGE_SHIFT,
size, vma->vm_page_prot) < 0) {


@ -1621,17 +1621,25 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
r = -EFAULT;
break;
}
if (s.num > 0xffff) {
r = -EINVAL;
break;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
vq->last_avail_idx = s.num & 0xffff;
vq->last_used_idx = (s.num >> 16) & 0xffff;
} else {
if (s.num > 0xffff) {
r = -EINVAL;
break;
}
vq->last_avail_idx = s.num;
}
vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
s.num = vq->last_avail_idx;
if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED))
s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16);
else
s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
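For packed virtqueues both indices plus their wrap counters travel in the single 32-bit vring base, as the hunk above encodes inline; equivalent helpers (names are mine, not in the patch):

/* Each 16-bit half is a 15-bit ring position with the wrap counter in
 * bit 15: last_avail_idx in the low half, last_used_idx in the high. */
static inline u32 vring_base_pack(u16 last_avail_idx, u16 last_used_idx)
{
	return (u32)last_avail_idx | ((u32)last_used_idx << 16);
}

static inline void vring_base_unpack(u32 num, u16 *last_avail, u16 *last_used)
{
	*last_avail = num & 0xffff;
	*last_used = (num >> 16) & 0xffff;
}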


@ -87,13 +87,17 @@ struct vhost_virtqueue {
/* The routine to call when the Guest pings us, or timeout. */
vhost_work_fn_t handle_kick;
/* Last available index we saw. */
/* Last available index we saw.
* Values are limited to 0x7fff, and the high bit is used as
* a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_avail_idx;
/* Caches available index value from user. */
u16 avail_idx;
/* Last index we used. */
/* Last index we used.
* Values are limited to 0x7fff, and the high bit is used as
* a wrap counter when using VIRTIO_F_RING_PACKED. */
u16 last_used_idx;
/* Used flags */


@ -1313,6 +1313,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
op->dentry = dentry;
op->create.mode = S_IFDIR | mode;
op->create.reason = afs_edit_dir_for_mkdir;
op->mtime = current_time(dir);
op->ops = &afs_mkdir_operation;
return afs_do_sync_operation(op);
}
@ -1616,6 +1617,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
op->dentry = dentry;
op->create.mode = S_IFREG | mode;
op->create.reason = afs_edit_dir_for_create;
op->mtime = current_time(dir);
op->ops = &afs_create_operation;
return afs_do_sync_operation(op);
@ -1745,6 +1747,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
op->ops = &afs_symlink_operation;
op->create.reason = afs_edit_dir_for_symlink;
op->create.symlink = content;
op->mtime = current_time(dir);
return afs_do_sync_operation(op);
error:


@ -1895,7 +1895,7 @@ int prepare_to_merge(struct reloc_control *rc, int err)
list_splice(&reloc_roots, &rc->reloc_roots);
if (!err)
btrfs_commit_transaction(trans);
err = btrfs_commit_transaction(trans);
else
btrfs_end_transaction(trans);
return err;
@ -3270,8 +3270,12 @@ int prepare_to_relocate(struct reloc_control *rc)
*/
return PTR_ERR(trans);
}
btrfs_commit_transaction(trans);
return 0;
ret = btrfs_commit_transaction(trans);
if (ret)
unset_reloc_control(rc);
return ret;
}
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
@ -3443,7 +3447,9 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
err = PTR_ERR(trans);
goto out_free;
}
btrfs_commit_transaction(trans);
ret = btrfs_commit_transaction(trans);
if (ret && !err)
err = ret;
out_free:
ret = clean_dirty_subvols(rc);
if (ret < 0 && !err)


@ -1636,6 +1636,7 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
struct inode *inode = &ci->vfs_inode;
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_session *session = NULL;
bool need_put = false;
int mds;
dout("ceph_flush_snaps %p\n", inode);
@ -1687,8 +1688,13 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
}
/* we flushed them all; remove this inode from the queue */
spin_lock(&mdsc->snap_flush_lock);
if (!list_empty(&ci->i_snap_flush_item))
need_put = true;
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
if (need_put)
iput(inode);
}
/*


@ -647,8 +647,10 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
if (list_empty(&ci->i_snap_flush_item))
if (list_empty(&ci->i_snap_flush_item)) {
ihold(inode);
list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
}
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
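Read together with the flush-side hunk above, the two ceph changes make the snap-flush queue own an inode reference; the pairing, condensed (illustrative):

/* Queueing side: the first insertion takes a reference. */
if (list_empty(&ci->i_snap_flush_item)) {
	ihold(inode);
	list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
}

/* Flush side: removal drops that reference exactly once, after the
 * lock is released, so the inode can no longer be freed while a
 * capsnap flush still sits on the queue. */
if (!list_empty(&ci->i_snap_flush_item))
	need_put = true;
list_del_init(&ci->i_snap_flush_item);
spin_unlock(&mdsc->snap_flush_lock);
if (need_put)
	iput(inode);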


@ -5788,7 +5788,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
ext4_group_t g;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
int err = 0;
int enable_rw = 0;
#ifdef CONFIG_QUOTA
int enable_quota = 0;
int i, j;
@ -5981,7 +5980,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto restore_opts;
enable_rw = 1;
sb->s_flags &= ~SB_RDONLY;
if (ext4_has_feature_mmp(sb)) {
err = ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block));
@ -6028,9 +6027,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
ext4_release_system_zone(sb);
if (enable_rw)
sb->s_flags &= ~SB_RDONLY;
/*
* Reinitialize lazy itable initialization thread based on
* current settings


@ -1999,8 +1999,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
else {
u32 ref;
#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
#endif
/* The old block is released after updating
the inode. */
error = dquot_alloc_block(inode,
@ -2062,8 +2063,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
/* We need to allocate a new block */
ext4_fsblk_t goal, block;
#ifdef EXT4_XATTR_DEBUG
WARN_ON_ONCE(dquot_initialize_needed(inode));
#endif
goal = ext4_group_first_block_no(sb,
EXT4_I(inode)->i_block_group);
block = ext4_new_meta_blocks(handle, inode, goal, 0,


@ -503,9 +503,9 @@ static ssize_t __sbi_store(struct f2fs_attr *a,
if (!strcmp(a->attr.name, "iostat_period_ms")) {
if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
return -EINVAL;
spin_lock(&sbi->iostat_lock);
spin_lock_irq(&sbi->iostat_lock);
sbi->iostat_period_ms = (unsigned int)t;
spin_unlock(&sbi->iostat_lock);
spin_unlock_irq(&sbi->iostat_lock);
return count;
}


@ -924,6 +924,16 @@ xlog_recover_buf_commit_pass2(
if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
trace_xfs_log_recover_buf_skip(log, buf_f);
xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
/*
* We're skipping replay of this buffer log item due to the log
* item LSN being behind the ondisk buffer. Verify the buffer
* contents since we aren't going to run the write verifier.
*/
if (bp->b_ops) {
bp->b_ops->verify_read(bp);
error = bp->b_error;
}
goto out_release;
}


@ -751,8 +751,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
/* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
table->ents[index] = val;
/* The following WRITE_ONCE() is paired with the READ_ONCE()
* here, and another one in get_rps_cpu().
*/
if (READ_ONCE(table->ents[index]) != val)
WRITE_ONCE(table->ents[index], val);
}
}
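The same READ_ONCE()/WRITE_ONCE() discipline reappears in the sock.h hunks further down; the rule in one place (illustrative):

/* A word that is read and written locklessly from several CPUs must
 * use the ONCE macros on both sides: they stop the compiler from
 * tearing, fusing or re-reading the access, and tell KCSAN the data
 * race is intentional. */
u32 cur = READ_ONCE(table->ents[index]);	/* reader */
if (cur != val)
	WRITE_ONCE(table->ents[index], val);	/* writer */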


@ -513,6 +513,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
void hcd_buffer_free(struct usb_bus *bus, size_t size,
void *addr, dma_addr_t dma);
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
size_t size, gfp_t mem_flags, dma_addr_t *dma);
void hcd_buffer_free_pages(struct usb_hcd *hcd,
size_t size, void *addr, dma_addr_t dma);
/* generic bus glue, needed for host controllers that don't use PCI */
extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);


@@ -156,8 +156,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave);
 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link);
 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave);
-int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
-int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
+netdev_tx_t bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev);
 struct slave *bond_xmit_alb_slave_get(struct bonding *bond,
                                       struct sk_buff *skb);
 struct slave *bond_xmit_tlb_slave_get(struct bonding *bond,


@@ -180,7 +180,7 @@ struct pneigh_entry {
         struct net_device       *dev;
         u8                      flags;
         u8                      protocol;
-        u8                      key[];
+        u32                     key[];
 };
 
 /*


@@ -129,6 +129,8 @@ static inline void qdisc_run(struct Qdisc *q)
         }
 }
 
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */


@@ -23,9 +23,6 @@ static inline int rpl_init(void)
 static inline void rpl_exit(void) {}
 #endif
 
-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
-
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
                          unsigned char cmpre);


@@ -1092,8 +1092,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
          * OR   an additional socket flag
          * [1] : sk_state and sk_prot are in the same cache line.
          */
-                if (sk->sk_state == TCP_ESTABLISHED)
-                        sock_rps_record_flow_hash(sk->sk_rxhash);
+                if (sk->sk_state == TCP_ESTABLISHED) {
+                        /* This READ_ONCE() is paired with the WRITE_ONCE()
+                         * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+                         */
+                        sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+                }
         }
 #endif
 }
@@ -1102,15 +1106,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
                                         const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-        if (unlikely(sk->sk_rxhash != skb->hash))
-                sk->sk_rxhash = skb->hash;
+        /* The following WRITE_ONCE() is paired with the READ_ONCE()
+         * here, and another one in sock_rps_record_flow().
+         */
+        if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+                WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-        sk->sk_rxhash = 0;
+        /* Paired with READ_ONCE() in sock_rps_record_flow() */
+        WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }


@@ -1128,13 +1128,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 
 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+        struct path copy;
         long len;
         char *p;
 
         if (!sz)
                 return 0;
 
-        p = d_path(path, buf, sz);
+        /*
+         * The path pointer is verified as trusted and safe to use,
+         * but let's double check it's valid anyway to workaround
+         * potentially broken verifier.
+         */
+        len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+        if (len < 0)
+                return len;
+
+        p = d_path(&copy, buf, sz);
         if (IS_ERR(p)) {
                 len = PTR_ERR(p);
         } else {

@@ -268,8 +268,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
         struct irq_glue *glue =
             container_of(ref, struct irq_glue, notify.kref);
 
-        cpu_rmap_put(glue->rmap);
         glue->rmap->obj[glue->index] = NULL;
+        cpu_rmap_put(glue->rmap);
         kfree(glue);
 }
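
The two swapped lines matter because cpu_rmap_put() can drop the final reference and free glue->rmap, making the following store a use-after-free; the fix touches the object first and releases the reference last. A toy refcount sketch of the same rule, with invented names (rmap, rmap_put, release), not the kernel kref API:

    #include <stdio.h>
    #include <stdlib.h>

    struct rmap {
        int refs;
        void *obj[4];
    };

    static void rmap_put(struct rmap *r)
    {
        if (--r->refs == 0) {
            puts("last reference dropped, freeing");
            free(r); /* the object is gone after this point */
        }
    }

    static void release(struct rmap *r, int index)
    {
        /* Correct order, as in the fix: clear the back-pointer while our
         * reference still pins the object, then drop the reference.
         * Doing the put first risks writing into freed memory.
         */
        r->obj[index] = NULL;
        rmap_put(r);
    }

    int main(void)
    {
        struct rmap *r = calloc(1, sizeof(*r));

        if (!r)
            return 1;
        r->refs = 1;
        release(r, 0);
        return 0;
    }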


@@ -102,7 +102,6 @@ static void batadv_dat_purge(struct work_struct *work);
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 {
-        INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
         queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
                            msecs_to_jiffies(10000));
 }
@@ -822,6 +821,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
         if (!bat_priv->dat.hash)
                 return -ENOMEM;
 
+        INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
         batadv_dat_start_timer(bat_priv);
 
         batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,


@@ -2685,10 +2685,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
 {
-        struct smp_ltk *k;
+        struct smp_ltk *k, *tmp;
         int removed = 0;
 
-        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                         continue;
@@ -2704,9 +2704,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
 {
-        struct smp_irk *k;
+        struct smp_irk *k, *tmp;
 
-        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                         continue;
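
list_for_each_entry_safe() caches the next element before the loop body runs, so the body may unlink and free the current entry, which list_for_each_entry_rcu() does not allow a writer to do. A standalone sketch of the same idea over a toy singly linked list; the for_each_safe macro here is invented for illustration, not the kernel list API:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int key;
        struct node *next;
    };

    /* Cache n->next before the body runs, mirroring the extra cursor of
     * list_for_each_entry_safe(): the body may then free n.
     */
    #define for_each_safe(n, tmp, head) \
        for ((n) = (head); (n) && ((tmp) = (n)->next, 1); (n) = (tmp))

    int main(void)
    {
        struct node *head = NULL, *n, *tmp;
        int freed = 0;

        for (int i = 0; i < 5; i++) {
            n = malloc(sizeof(*n));
            n->key = i;
            n->next = head;
            head = n;
        }

        /* Tear the list down while walking it: freeing the current node
         * is safe only because its next pointer was saved first.
         */
        for_each_safe(n, tmp, head) {
            free(n);
            freed++;
        }
        printf("freed %d nodes\n", freed);
        return 0;
    }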


@@ -4303,6 +4303,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
         result = __le16_to_cpu(rsp->result);
         status = __le16_to_cpu(rsp->status);
 
+        if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
+                                           dcid > L2CAP_CID_DYN_END))
+                return -EPROTO;
+
         BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
                dcid, scid, result, status);
@@ -4334,6 +4338,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
         switch (result) {
         case L2CAP_CR_SUCCESS:
+                if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+                        err = -EBADSLT;
+                        break;
+                }
+
                 l2cap_state_change(chan, BT_CONFIG);
                 chan->ident = 0;
                 chan->dcid = dcid;
@@ -4659,7 +4668,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
         chan->ops->set_shutdown(chan);
 
+        l2cap_chan_unlock(chan);
         mutex_lock(&conn->chan_lock);
+        l2cap_chan_lock(chan);
         l2cap_chan_del(chan, ECONNRESET);
         mutex_unlock(&conn->chan_lock);
@@ -4698,7 +4709,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
                 return 0;
         }
 
+        l2cap_chan_unlock(chan);
         mutex_lock(&conn->chan_lock);
+        l2cap_chan_lock(chan);
         l2cap_chan_del(chan, 0);
         mutex_unlock(&conn->chan_lock);
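
Both disconnect paths previously took conn->chan_lock while already holding the channel lock, inverting the order used elsewhere and allowing an AB-BA deadlock; the fix drops the channel lock, takes the connection lock, then re-takes the channel lock before l2cap_chan_del(), which must run with it held. A runnable pthread sketch of the release-and-reacquire pattern under an assumed big-lock-before-item-lock order (toy locks and names, not the Bluetooth code):

    #include <pthread.h>
    #include <stdio.h>

    /* Assumed global order: big_lock (A) is always taken before item_lock (B). */
    static pthread_mutex_t big_lock  = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t item_lock = PTHREAD_MUTEX_INITIALIZER;

    static void delete_item_locked(void)
    {
        puts("item deleted"); /* caller must hold both locks */
    }

    static void disconnect(void)
    {
        pthread_mutex_lock(&item_lock); /* we arrive holding only B */

        /* Taking A while holding B inverts the order and can deadlock
         * against a thread doing A then B. So: drop B, take A, re-take
         * B, which is the shape of the fix above.
         */
        pthread_mutex_unlock(&item_lock);
        pthread_mutex_lock(&big_lock);
        pthread_mutex_lock(&item_lock);

        delete_item_locked();

        pthread_mutex_unlock(&item_lock);
        pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
        disconnect();
        return 0;
    }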


@@ -122,7 +122,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
 #define J1939_CAN_ID CAN_EFF_FLAG
 #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
 
-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);
 
 static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
 {
@@ -216,7 +216,7 @@ static void __j1939_rx_release(struct kref *kref)
         j1939_can_rx_unregister(priv);
         j1939_ecu_unmap_all(priv);
         j1939_priv_set(priv->ndev, NULL);
-        spin_unlock(&j1939_netdev_lock);
+        mutex_unlock(&j1939_netdev_lock);
 }
 
 /* get pointer to priv without increasing ref counter */
@@ -244,9 +244,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
 {
         struct j1939_priv *priv;
 
-        spin_lock(&j1939_netdev_lock);
+        mutex_lock(&j1939_netdev_lock);
         priv = j1939_priv_get_by_ndev_locked(ndev);
-        spin_unlock(&j1939_netdev_lock);
+        mutex_unlock(&j1939_netdev_lock);
 
         return priv;
 }
@@ -256,14 +256,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
         struct j1939_priv *priv, *priv_new;
         int ret;
 
-        spin_lock(&j1939_netdev_lock);
+        mutex_lock(&j1939_netdev_lock);
         priv = j1939_priv_get_by_ndev_locked(ndev);
         if (priv) {
                 kref_get(&priv->rx_kref);
-                spin_unlock(&j1939_netdev_lock);
+                mutex_unlock(&j1939_netdev_lock);
                 return priv;
         }
-        spin_unlock(&j1939_netdev_lock);
+        mutex_unlock(&j1939_netdev_lock);
 
         priv = j1939_priv_create(ndev);
         if (!priv)
@@ -273,29 +273,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
         spin_lock_init(&priv->j1939_socks_lock);
         INIT_LIST_HEAD(&priv->j1939_socks);
 
-        spin_lock(&j1939_netdev_lock);
+        mutex_lock(&j1939_netdev_lock);
         priv_new = j1939_priv_get_by_ndev_locked(ndev);
         if (priv_new) {
                 /* Someone was faster than us, use their priv and roll
                  * back our's.
                  */
                 kref_get(&priv_new->rx_kref);
-                spin_unlock(&j1939_netdev_lock);
+                mutex_unlock(&j1939_netdev_lock);
                 dev_put(ndev);
                 kfree(priv);
                 return priv_new;
         }
         j1939_priv_set(ndev, priv);
-        spin_unlock(&j1939_netdev_lock);
 
         ret = j1939_can_rx_register(priv);
         if (ret < 0)
                 goto out_priv_put;
 
+        mutex_unlock(&j1939_netdev_lock);
         return priv;
 
 out_priv_put:
         j1939_priv_set(ndev, NULL);
+        mutex_unlock(&j1939_netdev_lock);
+
         dev_put(ndev);
         kfree(priv);
@@ -304,7 +306,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 void j1939_netdev_stop(struct j1939_priv *priv)
 {
-        kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+        kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
         j1939_priv_put(priv);
 }
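
The lock becomes a mutex because the registration step that now sits under it can sleep, which a spinlock forbids, and kref_put_mutex() then guarantees that the 1-to-0 transition and __j1939_rx_release() run with the lock held. A simplified pthread sketch of releasing the last reference under a lock; the toy refcount below is not the kernel kref API, and the real kref_put_mutex() only takes the mutex when the count is about to hit zero:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

    struct priv {
        int refs; /* the 1 -> 0 transition is protected by registry_lock */
    };

    static void priv_release(struct priv *p)
    {
        puts("tearing down under the registry lock");
        free(p);
    }

    /* Simplified kref_put_mutex() shape: the release callback runs with
     * the lock held, so teardown cannot race with a concurrent lookup.
     */
    static void priv_put(struct priv *p)
    {
        pthread_mutex_lock(&registry_lock);
        if (--p->refs == 0)
            priv_release(p); /* still under registry_lock */
        pthread_mutex_unlock(&registry_lock);
    }

    int main(void)
    {
        struct priv *p = malloc(sizeof(*p));

        if (!p)
            return 1;
        p->refs = 1;
        priv_put(p);
        return 0;
    }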


@@ -1013,6 +1013,11 @@ void j1939_sk_errqueue(struct j1939_session *session,
 void j1939_sk_send_loop_abort(struct sock *sk, int err)
 {
+        struct j1939_sock *jsk = j1939_sk(sk);
+
+        if (jsk->state & J1939_SOCK_ERRQUEUE)
+                return;
+
         sk->sk_err = err;
 
         sk->sk_error_report(sk);


@@ -4390,8 +4390,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                 u32 next_cpu;
                 u32 ident;
 
-                /* First check into global flow table if there is a match */
-                ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+                /* First check into global flow table if there is a match.
+                 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+                 */
+                ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
                 if ((ident ^ hash) & ~rps_cpu_mask)
                         goto try_rps;

@@ -31,7 +31,6 @@
 static int two = 2;
 static int four = 4;
 static int thousand = 1000;
-static int gso_max_segs = GSO_MAX_SEGS;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -1197,7 +1196,6 @@ static struct ctl_table ipv4_net_table[] = {
                 .mode           = 0644,
                 .proc_handler   = proc_dointvec_minmax,
                 .extra1         = SYSCTL_ONE,
-                .extra2         = &gso_max_segs,
         },
         {
                 .procname       = "tcp_min_rtt_wlen",

View File

@@ -552,24 +552,6 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
                 return -1;
         }
 
-        if (skb_cloned(skb)) {
-                if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
-                                     GFP_ATOMIC)) {
-                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                        IPSTATS_MIB_OUTDISCARDS);
-                        kfree_skb(skb);
-                        return -1;
-                }
-        } else {
-                err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
-                if (unlikely(err)) {
-                        kfree_skb(skb);
-                        return -1;
-                }
-        }
-
-        hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
-
         if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
                                                   hdr->cmpre))) {
                 kfree_skb(skb);
@@ -615,6 +597,17 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb)
         skb_pull(skb, ((hdr->hdrlen + 1) << 3));
         skb_postpull_rcsum(skb, oldhdr,
                            sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
+        if (unlikely(!hdr->segments_left)) {
+                if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
+                                     GFP_ATOMIC)) {
+                        __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
+                        kfree_skb(skb);
+                        kfree(buf);
+                        return -1;
+                }
+
+                oldhdr = ipv6_hdr(skb);
+        }
 
 skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
 skb_reset_network_header(skb);
 skb_mac_header_rebuild(skb);


@@ -1704,6 +1704,14 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
         bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
         do {
+                if (retried) {
+                        __ip_set_get(set);
+                        nfnl_unlock(NFNL_SUBSYS_IPSET);
+                        cond_resched();
+                        nfnl_lock(NFNL_SUBSYS_IPSET);
+                        __ip_set_put(set);
+                }
+
                 ip_set_lock(set);
                 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
                 ip_set_unlock(set);
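
On each retry the set is pinned with __ip_set_get() so it cannot vanish, the big nfnl lock is dropped, and cond_resched() lets other work run before the lock is re-taken; without this, a long retry loop on a huge set could monopolize a CPU with the subsystem lock held. A minimal pthread sketch of the drop-yield-retake shape, with sched_yield() standing in for cond_resched() and invented names throughout:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    static pthread_mutex_t subsys_lock = PTHREAD_MUTEX_INITIALIZER;

    static int attempt_succeeds(int attempt)
    {
        return attempt >= 3; /* pretend the fourth attempt succeeds */
    }

    static void add_entries(void)
    {
        int attempt = 0;

        pthread_mutex_lock(&subsys_lock);
        while (!attempt_succeeds(attempt++)) {
            /* Mirror of the patched retry loop: give other threads a
             * chance instead of spinning with the big lock held.
             */
            pthread_mutex_unlock(&subsys_lock);
            sched_yield();
            pthread_mutex_lock(&subsys_lock);
        }
        pthread_mutex_unlock(&subsys_lock);
        printf("done after %d attempts\n", attempt);
    }

    int main(void)
    {
        add_entries();
        return 0;
    }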


@@ -2076,6 +2076,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
                 return 0;
 
         helper = rcu_dereference(help->helper);
         if (!helper)
                 return 0;
 
+        if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
+                return 0;
+


@@ -41,8 +41,6 @@
 #include <net/tc_act/tc_gate.h>
 #include <net/flow_offload.h>
 
-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
-
 /* The list of all installed classifier types */
 static LIST_HEAD(tcf_proto_base);
@@ -2774,6 +2772,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
                 return PTR_ERR(ops);
         if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
                 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+                module_put(ops->owner);
                 return -EOPNOTSUPP;
         }


@@ -201,6 +201,11 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
         return NET_XMIT_CN;
 }
 
+static struct netlink_range_validation fq_pie_q_range = {
+        .min = 1,
+        .max = 1 << 20,
+};
+
 static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
         [TCA_FQ_PIE_LIMIT]              = {.type = NLA_U32},
         [TCA_FQ_PIE_FLOWS]              = {.type = NLA_U32},
@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
         [TCA_FQ_PIE_TUPDATE]            = {.type = NLA_U32},
         [TCA_FQ_PIE_ALPHA]              = {.type = NLA_U32},
         [TCA_FQ_PIE_BETA]               = {.type = NLA_U32},
-        [TCA_FQ_PIE_QUANTUM]            = {.type = NLA_U32},
+        [TCA_FQ_PIE_QUANTUM]            =
+                NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
         [TCA_FQ_PIE_MEMORY_LIMIT]       = {.type = NLA_U32},
         [TCA_FQ_PIE_ECN_PROB]           = {.type = NLA_U32},
         [TCA_FQ_PIE_ECN]                = {.type = NLA_U32},
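
A quantum of zero previously passed the plain NLA_U32 type check and reached the scheduler's arithmetic; NLA_POLICY_FULL_RANGE attaches a min/max so the netlink policy core rejects bad values before the qdisc ever sees them. A standalone sketch of the same validate-at-the-boundary idea; u32_range and validate_u32 are invented stand-ins, not the netlink API:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for struct netlink_range_validation. */
    struct u32_range {
        uint32_t min, max;
    };

    static const struct u32_range quantum_range = { .min = 1, .max = 1 << 20 };

    /* What the policy core does for NLA_POLICY_FULL_RANGE(NLA_U32, ...):
     * reject out-of-range values before the handler ever sees them.
     */
    static int validate_u32(uint32_t val, const struct u32_range *r)
    {
        if (val < r->min || val > r->max)
            return -ERANGE;
        return 0;
    }

    int main(void)
    {
        printf("quantum 0    -> %d\n", validate_u32(0, &quantum_range));
        printf("quantum 1514 -> %d\n", validate_u32(1514, &quantum_range));
        return 0;
    }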


@@ -716,6 +716,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
         addc_llc->num_rkeys = *num_rkeys_todo;
         n = *num_rkeys_todo;
         for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
+                while (*buf_pos && !(*buf_pos)->used)
+                        *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
                 if (!*buf_pos) {
                         addc_llc->num_rkeys = addc_llc->num_rkeys -
                                               *num_rkeys_todo;
@@ -731,8 +733,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
                 (*num_rkeys_todo)--;
                 *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
-                while (*buf_pos && !(*buf_pos)->used)
-                        *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
         }
         addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
         addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);


@@ -108,7 +108,13 @@
 #include "varasm.h"
 #include "stor-layout.h"
 #include "internal-fn.h"
+#endif
+
+#include "gimple.h"
+
+#if BUILDING_GCC_VERSION >= 4009
 #include "gimple-expr.h"
+#include "gimple-iterator.h"
 #include "gimple-fold.h"
 #include "context.h"
 #include "tree-ssa-alias.h"
@@ -124,13 +130,10 @@
 #include "gimplify.h"
 #endif
 
-#include "gimple.h"
-
 #if BUILDING_GCC_VERSION >= 4009
 #include "tree-ssa-operands.h"
 #include "tree-phinodes.h"
 #include "tree-cfg.h"
-#include "gimple-iterator.h"
 #include "gimple-ssa.h"
 #include "ssa-iterators.h"
 #endif


@@ -11162,6 +11162,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
         SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
         SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
         SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+        SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2),
         SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
         SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
         SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
@@ -11183,6 +11184,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
         SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
         SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
         SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+        SND_PCI_QUIRK(0x17aa, 0x1064, "Lenovo P3 Tower", ALC897_FIXUP_HEADSET_MIC_PIN),
         SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
         SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
         SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),


@@ -646,7 +646,6 @@ static struct regmap_config wsa881x_regmap_config = {
         .readable_reg = wsa881x_readable_register,
         .reg_format_endian = REGMAP_ENDIAN_NATIVE,
         .val_format_endian = REGMAP_ENDIAN_NATIVE,
-        .can_multi_write = true,
 };
 
 enum {