This is the 5.10.222 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmaY9zYACgkQONu9yGCS
 aT6v5g//WMifSZz85CUFaqgs65rwVfhTMpYtUeL5LiDuy+SMou6ViV3A93FpTkmj
 FJBvrr2y0bn8Y5Dp/fwYj10XUz+THZte/yEVnPh/NkV107FZD3fKa6GTnJY7H/XY
 4SoOGfPB4yfx+MpN6ZpLsu4cAt6FW8P+QfKOxBEboGkJSGpjEbGYFMtyZAMjknia
 QE8cKQ3LnMrQzHIizil5dZVlYaiMgJtlKTtUeVI1ixmaGDb3rCsnCVvMRvZnW95V
 aSgyJNrNix7a5tRgYwZHZp4t3p9iT2lyIFM3/y7TKcglVCMPw4nbsDdLNNq11qrk
 RdTdScR+9eKyJsEGVYOhXZFUFzOgHW22xyx0CCZmDMeu08WPNl4vhGewnndQy3yd
 6jdTRYDrU6SQNQ0AjRZXcdmfopIQxetHE7ZEKvbgBW6+u9oySYU8phPCNkma2JWr
 O2eY5AOF8zgPAdAzvF9Bt/qTlwLNjP0zczoIRX7HSvV03Nh9cQvgzKdSCfuPDU4a
 FX7mlokgweYa7WoWGPkzOlgMaJZksqstDnhbuwONoMPrNFTUjgm429K87iPdwzqC
 Yv4uDrpFXgkhfD4Aoks4wDpE2LgBKWz5Wnpo+WW4fjcrXtcIV2tTD9FkMjBv3ECv
 A8TTWsXxQtm3V54R4h7fAXg9KnZBuIYYDnB2u1317ZdaDkZRuPQ=
 =X2/A
 -----END PGP SIGNATURE-----

Merge 5.10.222 into android12-5.10-lts

Changes in 5.10.222
	Compiler Attributes: Add __uninitialized macro
	drm/lima: fix shared irq handling on driver remove
	media: dvb: as102-fe: Fix as10x_register_addr packing
	media: dvb-usb: dib0700_devices: Add missing release_firmware()
	IB/core: Implement a limit on UMAD receive List
	scsi: qedf: Make qedf_execute_tmf() non-preemptible
	crypto: aead,cipher - zeroize key buffer after use
	drm/amdgpu: Initialize timestamp for some legacy SOCs
	drm/amd/display: Check index msg_id before read or write
	drm/amd/display: Check pipe offset before setting vblank
	drm/amd/display: Skip finding free audio for unknown engine_id
	media: dw2102: Don't translate i2c read into write
	sctp: prefer struct_size over open coded arithmetic
	firmware: dmi: Stop decoding on broken entry
	Input: ff-core - prefer struct_size over open coded arithmetic
	net: dsa: mv88e6xxx: Correct check for empty list
	media: dvb-frontends: tda18271c2dd: Remove casting during div
	media: s2255: Use refcount_t instead of atomic_t for num_channels
	media: dvb-frontends: tda10048: Fix integer overflow
	i2c: i801: Annotate apanel_addr as __ro_after_init
	powerpc/64: Set _IO_BASE to POISON_POINTER_DELTA not 0 for CONFIG_PCI=n
	orangefs: fix out-of-bounds fsid access
	kunit: Fix timeout message
	powerpc/xmon: Check cpu id in commands "c#", "dp#" and "dx#"
	bpf: Avoid uninitialized value in BPF_CORE_READ_BITFIELD
	jffs2: Fix potential illegal address access in jffs2_free_inode
	s390/pkey: Wipe sensitive data on failure
	UPSTREAM: tcp: fix DSACK undo in fast recovery to call tcp_try_to_open()
	tcp_metrics: validate source addr length
	wifi: wilc1000: fix ies_len type in connect path
	bonding: Fix out-of-bounds read in bond_option_arp_ip_targets_set()
	selftests: fix OOM in msg_zerocopy selftest
	selftests: make order checking verbose in msg_zerocopy selftest
	inet_diag: Initialize pad field in struct inet_diag_req_v2
	nilfs2: fix inode number range checks
	nilfs2: add missing check for inode numbers on directory entries
	mm: optimize the redundant loop of mm_update_owner_next()
	mm: avoid overflows in dirty throttling logic
	Bluetooth: qca: Fix BT enable failure again for QCA6390 after warm reboot
	can: kvaser_usb: Explicitly initialize family in leafimx driver_info struct
	fsnotify: Do not generate events for O_PATH file descriptors
	Revert "mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again"
	drm/nouveau: fix null pointer dereference in nouveau_connector_get_modes
	drm/amdgpu/atomfirmware: silence UBSAN warning
	mtd: rawnand: Bypass a couple of sanity checks during NAND identification
	bnx2x: Fix multiple UBSAN array-index-out-of-bounds
	bpf, sockmap: Fix sk->sk_forward_alloc warn_on in sk_stream_kill_queues
	ima: Avoid blocking in RCU read-side critical section
	media: dw2102: fix a potential buffer overflow
	i2c: pnx: Fix potential deadlock warning from del_timer_sync() call in isr
	ALSA: hda/realtek: Enable headset mic of JP-IK LEAP W502 with ALC897
	nvme-multipath: find NUMA path only for online numa-node
	nvme: adjust multiples of NVME_CTRL_PAGE_SIZE in offset
	platform/x86: touchscreen_dmi: Add info for GlobalSpace SolT IVW 11.6" tablet
	platform/x86: touchscreen_dmi: Add info for the EZpad 6s Pro
	nvmet: fix a possible leak when destroy a ctrl during qp establishment
	kbuild: fix short log for AS in link-vmlinux.sh
	nilfs2: fix incorrect inode allocation from reserved inodes
	mm: prevent derefencing NULL ptr in pfn_section_valid()
	filelock: fix potential use-after-free in posix_lock_inode
	fs/dcache: Re-use value stored to dentry->d_flags instead of re-reading
	vfs: don't mod negative dentry count when on shrinker list
	tcp: fix incorrect undo caused by DSACK of TLP retransmit
	octeontx2-af: Fix incorrect value output on error path in rvu_check_rsrc_availability()
	net: lantiq_etop: add blank line after declaration
	net: ethernet: lantiq_etop: fix double free in detach
	ppp: reject claimed-as-LCP but actually malformed packets
	ethtool: netlink: do not return SQI value if link is down
	udp: Set SOCK_RCU_FREE earlier in udp_lib_get_port().
	net/sched: Fix UAF when resolving a clash
	s390: Mark psw in __load_psw_mask() as __unitialized
	ARM: davinci: Convert comma to semicolon
	octeontx2-af: fix detection of IP layer
	tcp: use signed arithmetic in tcp_rtx_probe0_timed_out()
	tcp: avoid too many retransmit packets
	net: ks8851: Fix potential TX stall after interface reopen
	USB: serial: option: add Telit generic core-dump composition
	USB: serial: option: add Telit FN912 rmnet compositions
	USB: serial: option: add Fibocom FM350-GL
	USB: serial: option: add support for Foxconn T99W651
	USB: serial: option: add Netprisma LCUK54 series modules
	USB: serial: option: add Rolling RW350-GL variants
	USB: serial: mos7840: fix crash on resume
	USB: Add USB_QUIRK_NO_SET_INTF quirk for START BP-850k
	usb: gadget: configfs: Prevent OOB read/write in usb_string_copy()
	USB: core: Fix duplicate endpoint bug by clearing reserved bits in the descriptor
	hpet: Support 32-bit userspace
	nvmem: meson-efuse: Fix return value of nvmem callbacks
	ALSA: hda/realtek: Enable Mute LED on HP 250 G7
	ALSA: hda/realtek: Limit mic boost on VAIO PRO PX
	libceph: fix race between delayed_work() and ceph_monc_stop()
	wireguard: allowedips: avoid unaligned 64-bit memory accesses
	wireguard: queueing: annotate intentional data race in cpu round robin
	wireguard: send: annotate intentional data race in checking empty queue
	x86/retpoline: Move a NOENDBR annotation to the SRSO dummy return thunk
	efi: ia64: move IA64-only declarations to new asm/efi.h header
	ipv6: annotate data-races around cnf.disable_ipv6
	ipv6: prevent NULL dereference in ip6_output()
	bpf: Allow reads from uninit stack
	nilfs2: fix kernel bug on rename operation of broken directory
	i2c: rcar: bring hardware to known state when probing
	i2c: mark HostNotify target address as used
	i2c: rcar: Add R-Car Gen4 support
	i2c: rcar: reset controller is mandatory for Gen3+
	i2c: rcar: introduce Gen4 devices
	i2c: rcar: ensure Gen3+ reset does not disturb local targets
	i2c: rcar: clear NO_RXDMA flag after resetting
	i2c: rcar: fix error code in probe()
	Linux 5.10.222

Change-Id: I39dedaef039a49c1b8b53dd83b83d481593ffb95
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 875057880e
Author: Greg Kroah-Hartman <gregkh@google.com>
Date:   2024-07-20 13:33:30 +00:00

128 files changed, 1208 insertions(+), 446 deletions(-)


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 221
+SUBLEVEL = 222
 EXTRAVERSION =
 NAME = Dare mighty things


@@ -62,7 +62,7 @@ static void davinci_pm_suspend(void)
 	/* Configure sleep count in deep sleep register */
 	val = __raw_readl(pm_config.deepsleep_reg);
-	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
+	val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
 	val |= pm_config.sleepcount;
 	__raw_writel(val, pm_config.deepsleep_reg);


@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
+void *efi_get_pal_addr(void);
+void efi_map_pal_code(void);
+void efi_memmap_walk(efi_freemem_callback_t, void *);
+void efi_memmap_walk_uc(efi_freemem_callback_t, void *);
+void efi_gettimeofday(struct timespec64 *ts);
+#endif


@@ -34,6 +34,7 @@
 #include <linux/kexec.h>
 #include <linux/mm.h>
+#include <asm/efi.h>
 #include <asm/io.h>
 #include <asm/kregs.h>
 #include <asm/meminit.h>


@@ -16,6 +16,7 @@
 #include <linux/numa.h>
 #include <linux/mmzone.h>
+#include <asm/efi.h>
 #include <asm/numa.h>
 #include <asm/mmu_context.h>
 #include <asm/setup.h>


@@ -91,6 +91,7 @@
 #include <linux/gfp.h>
 #include <asm/delay.h>
+#include <asm/efi.h>
 #include <asm/meminit.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>


@@ -45,6 +45,7 @@
 #include <asm/cache.h>
 #include <asm/current.h>
 #include <asm/delay.h>
+#include <asm/efi.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/mca.h>


@@ -26,6 +26,7 @@
 #include <linux/sched/cputime.h>
 #include <asm/delay.h>
+#include <asm/efi.h>
 #include <asm/hw_irq.h>
 #include <asm/ptrace.h>
 #include <asm/sal.h>


@@ -20,14 +20,12 @@
 #include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/pgtable.h>
+#include <asm/efi.h>
 #include <asm/page.h>
 #include <asm/pal.h>
 #include <linux/atomic.h>
 #include <asm/tlbflush.h>
-extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
 struct uncached_pool {
 	struct gen_pool *pool;
 	struct mutex add_chunk_mutex;	/* serialize adding a converted chunk */


@@ -20,6 +20,7 @@
 #include <linux/nmi.h>
 #include <linux/swap.h>
+#include <asm/efi.h>
 #include <asm/meminit.h>
 #include <asm/sections.h>
 #include <asm/mca.h>


@@ -24,6 +24,7 @@
 #include <linux/efi.h>
 #include <linux/nodemask.h>
 #include <linux/slab.h>
+#include <asm/efi.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
 #include <asm/numa.h>


@@ -27,6 +27,7 @@
 #include <linux/swiotlb.h>
 #include <asm/dma.h>
+#include <asm/efi.h>
 #include <asm/io.h>
 #include <asm/numa.h>
 #include <asm/patch.h>


@@ -45,7 +45,7 @@ extern struct pci_dev *isa_bridge_pcidev;
  * define properly based on the platform
  */
 #ifndef CONFIG_PCI
-#define _IO_BASE	0
+#define _IO_BASE	POISON_POINTER_DELTA
 #define _ISA_MEM_BASE	0
 #define PCI_DRAM_OFFSET	0
 #elif defined(CONFIG_PPC32)


@@ -1249,7 +1249,7 @@ static int cpu_cmd(void)
 	unsigned long cpu, first_cpu, last_cpu;
 	int timeout;
-	if (!scanhex(&cpu)) {
+	if (!scanhex(&cpu) || cpu >= num_possible_cpus()) {
 		/* print cpus waiting or in xmon */
 		printf("cpus stopped:");
 		last_cpu = first_cpu = NR_CPUS;
@@ -2680,7 +2680,7 @@ static void dump_pacas(void)
 	termch = c;	/* Put c back, it wasn't 'a' */
-	if (scanhex(&num))
+	if (scanhex(&num) && num < num_possible_cpus())
 		dump_one_paca(num);
 	else
 		dump_one_paca(xmon_owner);
@@ -2777,7 +2777,7 @@ static void dump_xives(void)
 	termch = c;	/* Put c back, it wasn't 'a' */
-	if (scanhex(&num))
+	if (scanhex(&num) && num < num_possible_cpus())
 		dump_one_xive(num);
 	else
 		dump_one_xive(xmon_owner);


@@ -252,8 +252,8 @@ static inline void __load_psw(psw_t psw)
  */
 static __always_inline void __load_psw_mask(unsigned long mask)
 {
+	psw_t psw __uninitialized;
 	unsigned long addr;
-	psw_t psw;
 	psw.mask = mask;


@@ -105,6 +105,7 @@ __EXPORT_THUNK(srso_alias_untrain_ret)
 /* dummy definition for alternatives */
 SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_UNRET_SAFE
+	ANNOTATE_NOENDBR
 	ret
 	int3
 SYM_FUNC_END(srso_alias_untrain_ret)
@@ -258,7 +259,6 @@ SYM_CODE_START(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ANNOTATE_UNRET_SAFE
-	ANNOTATE_NOENDBR
 	ret
 	int3
 SYM_CODE_END(__x86_return_thunk)


@@ -35,8 +35,7 @@ static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
 	ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	kfree_sensitive(buffer);
 	return ret;
 }


@@ -34,8 +34,7 @@ static int setkey_unaligned(struct crypto_cipher *tfm, const u8 *key,
 	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
 	memcpy(alignbuffer, key, keylen);
 	ret = cia->cia_setkey(crypto_cipher_tfm(tfm), alignbuffer, keylen);
-	memset(alignbuffer, 0, keylen);
-	kfree(buffer);
+	kfree_sensitive(buffer);
 	return ret;
 }
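
Both crypto hunks replace a hand-rolled zeroize-then-free pair with kfree_sensitive(), which wipes the allocation before handing it back to the allocator. A minimal sketch of the same idiom, using hypothetical names that are not part of the patch:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: work on a temporary copy of a secret key.
 * kfree_sensitive() zeroizes the buffer before freeing it, so the key
 * does not linger in freed memory (the old code open-coded this with
 * memset() followed by kfree()).
 */
static int use_key_copy(const u8 *key, size_t keylen)
{
	u8 *tmp = kmemdup(key, keylen, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;

	/* ... hand tmp to the cipher or hardware here ... */

	kfree_sensitive(tmp);
	return 0;
}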


@@ -2076,15 +2076,27 @@ static void qca_serdev_shutdown(struct device *dev)
 	struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
 	struct hci_uart *hu = &qcadev->serdev_hu;
 	struct hci_dev *hdev = hu->hdev;
-	struct qca_data *qca = hu->priv;
 	const u8 ibs_wake_cmd[] = { 0xFD };
 	const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
 	if (qcadev->btsoc_type == QCA_QCA6390) {
-		if (test_bit(QCA_BT_OFF, &qca->flags) ||
-		    !test_bit(HCI_RUNNING, &hdev->flags))
+		/* The purpose of sending the VSC is to reset SOC into a initial
+		 * state and the state will ensure next hdev->setup() success.
+		 * if HCI_QUIRK_NON_PERSISTENT_SETUP is set, it means that
+		 * hdev->setup() can do its job regardless of SoC state, so
+		 * don't need to send the VSC.
+		 * if HCI_SETUP is set, it means that hdev->setup() was never
+		 * invoked and the SOC is already in the initial state, so
+		 * don't also need to send the VSC.
+		 */
+		if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks) ||
+		    hci_dev_test_flag(hdev, HCI_SETUP))
 			return;
+		/* The serdev must be in open state when conrol logic arrives
+		 * here, so also fix the use-after-free issue caused by that
+		 * the serdev is flushed or wrote after it is closed.
+		 */
 		serdev_device_write_flush(serdev);
 		ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
 					      sizeof(ibs_wake_cmd));


@@ -304,8 +304,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 	if (!devp->hd_ireqfreq)
 		return -EIO;
-	if (count < sizeof(unsigned long))
-		return -EINVAL;
+	if (in_compat_syscall()) {
+		if (count < sizeof(compat_ulong_t))
+			return -EINVAL;
+	} else {
+		if (count < sizeof(unsigned long))
+			return -EINVAL;
+	}
 	add_wait_queue(&devp->hd_waitqueue, &wait);
@@ -329,9 +334,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
 		schedule();
 	}
-	retval = put_user(data, (unsigned long __user *)buf);
-	if (!retval)
-		retval = sizeof(unsigned long);
+	if (in_compat_syscall()) {
+		retval = put_user(data, (compat_ulong_t __user *)buf);
+		if (!retval)
+			retval = sizeof(compat_ulong_t);
+	} else {
+		retval = put_user(data, (unsigned long __user *)buf);
+		if (!retval)
+			retval = sizeof(unsigned long);
+	}
 out:
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&devp->hd_waitqueue, &wait);
@@ -686,12 +698,24 @@ struct compat_hpet_info {
 	unsigned short hi_timer;
 };
+/* 32-bit types would lead to different command codes which should be
+ * translated into 64-bit ones before passed to hpet_ioctl_common
+ */
+#define COMPAT_HPET_INFO	_IOR('h', 0x03, struct compat_hpet_info)
+#define COMPAT_HPET_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)
 static long
 hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct hpet_info info;
 	int err;
+	if (cmd == COMPAT_HPET_INFO)
+		cmd = HPET_INFO;
+	if (cmd == COMPAT_HPET_IRQFREQ)
+		cmd = HPET_IRQFREQ;
 	mutex_lock(&hpet_mutex);
 	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
 	mutex_unlock(&hpet_mutex);


@@ -101,6 +101,17 @@ static void dmi_decode_table(u8 *buf,
 	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
 		const struct dmi_header *dm = (const struct dmi_header *)data;
+		/*
+		 * If a short entry is found (less than 4 bytes), not only it
+		 * is invalid, but we cannot reliably locate the next entry.
+		 */
+		if (dm->length < sizeof(struct dmi_header)) {
+			pr_warn(FW_BUG
+				"Corrupted DMI table, offset %zd (only %d entries processed)\n",
+				data - buf, i);
+			break;
+		}
+
 		/*
 		 * We want to know the total length (formatted area and
 		 * strings) before decoding to make sure we won't run off the


@@ -413,6 +413,14 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 	int r;
 	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+	/*
+	 * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
+	 * si and tonga), so initialize timestamp and timestamp_src to 0
+	 */
+	entry.timestamp = 0;
+	entry.timestamp_src = 0;
 	amdgpu_ih_decode_iv(adev, &entry);
 	trace_amdgpu_iv(ih - &adev->irq.ih, &entry);


@@ -1802,6 +1802,9 @@ static struct audio *find_first_free_audio(
 {
 	int i, available_audio_count;
+	if (id == ENGINE_ID_UNKNOWN)
+		return NULL;
 	available_audio_count = pool->audio_count;
 	for (i = 0; i < available_audio_count; i++) {


@@ -211,8 +211,12 @@ bool dce110_vblank_set(struct irq_service *irq_service,
 			info->ext_id);
 	uint8_t pipe_offset = dal_irq_src - IRQ_TYPE_VBLANK;
-	struct timing_generator *tg =
-			dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
+	struct timing_generator *tg;
+
+	if (pipe_offset >= MAX_PIPES)
+		return false;
+
+	tg = dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;
 	if (enable) {
 		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {


@@ -156,6 +156,10 @@ static enum mod_hdcp_status read(struct mod_hdcp *hdcp,
 	uint32_t cur_size = 0;
 	uint32_t data_offset = 0;
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+		return MOD_HDCP_STATUS_DDC_FAILURE;
+	}
 	if (is_dp_hdcp(hdcp)) {
 		while (buf_len > 0) {
 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);
@@ -215,6 +219,10 @@ static enum mod_hdcp_status write(struct mod_hdcp *hdcp,
 	uint32_t cur_size = 0;
 	uint32_t data_offset = 0;
+	if (msg_id == MOD_HDCP_MESSAGE_ID_INVALID) {
+		return MOD_HDCP_STATUS_DDC_FAILURE;
+	}
 	if (is_dp_hdcp(hdcp)) {
 		while (buf_len > 0) {
 			cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE);


@@ -690,7 +690,7 @@ struct atom_gpio_pin_lut_v2_1
 {
 	struct atom_common_table_header table_header;
 	/*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
-	struct atom_gpio_pin_assignment gpio_pin[8];
+	struct atom_gpio_pin_assignment gpio_pin[];
 };


@@ -324,7 +324,9 @@ int lima_gp_init(struct lima_ip *ip)
 void lima_gp_fini(struct lima_ip *ip)
 {
+	struct lima_device *dev = ip->dev;
+
+	devm_free_irq(dev->dev, ip->irq, ip);
 }
 int lima_gp_pipe_init(struct lima_device *dev)


@@ -118,7 +118,12 @@ int lima_mmu_init(struct lima_ip *ip)
 void lima_mmu_fini(struct lima_ip *ip)
 {
+	struct lima_device *dev = ip->dev;
+
+	if (ip->id == lima_ip_ppmmu_bcast)
+		return;
+
+	devm_free_irq(dev->dev, ip->irq, ip);
 }
 void lima_mmu_flush_tlb(struct lima_ip *ip)


@@ -266,7 +266,9 @@ int lima_pp_init(struct lima_ip *ip)
 void lima_pp_fini(struct lima_ip *ip)
 {
+	struct lima_device *dev = ip->dev;
+
+	devm_free_irq(dev->dev, ip->irq, ip);
 }
 int lima_pp_bcast_resume(struct lima_ip *ip)
@@ -299,7 +301,9 @@ int lima_pp_bcast_init(struct lima_ip *ip)
 void lima_pp_bcast_fini(struct lima_ip *ip)
 {
+	struct lima_device *dev = ip->dev;
+
+	devm_free_irq(dev->dev, ip->irq, ip);
 }
 static int lima_pp_task_validate(struct lima_sched_pipe *pipe,


@@ -960,6 +960,9 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 		struct drm_display_mode *mode;
 		mode = drm_mode_duplicate(dev, nv_connector->native_mode);
+		if (!mode)
+			return 0;
+
 		drm_mode_probed_add(connector, mode);
 		ret = 1;
 	}


@@ -1078,7 +1078,7 @@ static const struct pci_device_id i801_ids[] = {
 MODULE_DEVICE_TABLE(pci, i801_ids);
 #if defined CONFIG_X86 && defined CONFIG_DMI
-static unsigned char apanel_addr;
+static unsigned char apanel_addr __ro_after_init;
 /* Scan the system ROM for the signature "FJKEYINF" */
 static __init const void __iomem *bios_signature(const void __iomem *bios)


@@ -15,7 +15,6 @@
 #include <linux/ioport.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/timer.h>
 #include <linux/completion.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -32,7 +31,6 @@ struct i2c_pnx_mif {
 	int			ret;		/* Return value */
 	int			mode;		/* Interface mode */
 	struct completion	complete;	/* I/O completion */
-	struct timer_list	timer;		/* Timeout */
 	u8 *			buf;		/* Data buffer */
 	int			len;		/* Length of data buffer */
 	int			order;		/* RX Bytes to order via TX */
@@ -117,24 +115,6 @@ static inline int wait_reset(struct i2c_pnx_algo_data *data)
 	return (timeout <= 0);
 }
-static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
-{
-	struct timer_list *timer = &alg_data->mif.timer;
-	unsigned long expires = msecs_to_jiffies(alg_data->timeout);
-	if (expires <= 1)
-		expires = 2;
-	del_timer_sync(timer);
-	dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
-		jiffies, expires);
-	timer->expires = jiffies + expires;
-	add_timer(timer);
-}
 /**
  * i2c_pnx_start - start a device
  * @slave_addr:		slave address
@@ -259,8 +239,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
 			  ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
 			  I2C_REG_CTL(alg_data));
-		del_timer_sync(&alg_data->mif.timer);
 		dev_dbg(&alg_data->adapter.dev,
 			"%s(): Waking up xfer routine.\n",
 			__func__);
@@ -276,8 +254,6 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
 			  ~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
 			  I2C_REG_CTL(alg_data));
-		/* Stop timer. */
-		del_timer_sync(&alg_data->mif.timer);
 		dev_dbg(&alg_data->adapter.dev,
 			"%s(): Waking up xfer routine after zero-xfer.\n",
 			__func__);
@@ -364,8 +340,6 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
 				 mcntrl_drmie | mcntrl_daie);
 			iowrite32(ctl, I2C_REG_CTL(alg_data));
-			/* Kill timer. */
-			del_timer_sync(&alg_data->mif.timer);
 			complete(&alg_data->mif.complete);
 		}
 	}
@@ -400,8 +374,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
 				 mcntrl_drmie);
 			iowrite32(ctl, I2C_REG_CTL(alg_data));
-			/* Stop timer, to prevent timeout. */
-			del_timer_sync(&alg_data->mif.timer);
 			complete(&alg_data->mif.complete);
 		} else if (stat & mstatus_nai) {
 			/* Slave did not acknowledge, generate a STOP */
@@ -419,8 +391,6 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
 			/* Our return value. */
 			alg_data->mif.ret = -EIO;
-			/* Stop timer, to prevent timeout. */
-			del_timer_sync(&alg_data->mif.timer);
 			complete(&alg_data->mif.complete);
 		} else {
 			/*
@@ -453,9 +423,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
-static void i2c_pnx_timeout(struct timer_list *t)
+static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
 {
-	struct i2c_pnx_algo_data *alg_data = from_timer(alg_data, t, mif.timer);
 	u32 ctl;
 	dev_err(&alg_data->adapter.dev,
@@ -472,7 +441,6 @@ static void i2c_pnx_timeout(struct i2c_pnx_algo_data *alg_data)
 	iowrite32(ctl, I2C_REG_CTL(alg_data));
 	wait_reset(alg_data);
 	alg_data->mif.ret = -EIO;
-	complete(&alg_data->mif.complete);
 }
 static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
@@ -514,6 +482,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 	struct i2c_msg *pmsg;
 	int rc = 0, completed = 0, i;
 	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
+	unsigned long time_left;
 	u32 stat;
 	dev_dbg(&alg_data->adapter.dev,
@@ -548,7 +517,6 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 		dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
 			__func__, alg_data->mif.mode, alg_data->mif.len);
-		i2c_pnx_arm_timer(alg_data);
 		/* initialize the completion var */
 		init_completion(&alg_data->mif.complete);
@@ -564,7 +532,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
 			break;
 		/* Wait for completion */
-		wait_for_completion(&alg_data->mif.complete);
+		time_left = wait_for_completion_timeout(&alg_data->mif.complete,
+							alg_data->timeout);
+		if (time_left == 0)
+			i2c_pnx_timeout(alg_data);
 		if (!(rc = alg_data->mif.ret))
 			completed++;
@@ -657,7 +628,10 @@ static int i2c_pnx_probe(struct platform_device *pdev)
 	alg_data->adapter.algo_data = alg_data;
 	alg_data->adapter.nr = pdev->id;
-	alg_data->timeout = I2C_PNX_TIMEOUT_DEFAULT;
+	alg_data->timeout = msecs_to_jiffies(I2C_PNX_TIMEOUT_DEFAULT);
+	if (alg_data->timeout <= 1)
+		alg_data->timeout = 2;
 #ifdef CONFIG_OF
 	alg_data->adapter.dev.of_node = of_node_get(pdev->dev.of_node);
 	if (pdev->dev.of_node) {
@@ -677,8 +651,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
 	if (IS_ERR(alg_data->clk))
 		return PTR_ERR(alg_data->clk);
-	timer_setup(&alg_data->mif.timer, i2c_pnx_timeout, 0);
 	snprintf(alg_data->adapter.name, sizeof(alg_data->adapter.name),
 		 "%s", pdev->name);

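The i2c-pnx rework above drops the driver's private timeout timer and instead bounds the wait with wait_for_completion_timeout(), which returns 0 on timeout and the remaining jiffies otherwise. A stripped-down sketch of that wait pattern, with hypothetical names rather than the driver's own code:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Hypothetical transfer context: an interrupt handler fills in status
 * and calls complete(&ctx->done) when the transfer finishes.
 */
struct xfer_ctx {
	struct completion done;
	int status;
};

static int wait_for_xfer(struct xfer_ctx *ctx, unsigned int timeout_ms)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ctx->done,
						msecs_to_jiffies(timeout_ms));
	if (time_left == 0)
		return -ETIMEDOUT;	/* timed out: caller resets the controller */

	return ctx->status;
}
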

@@ -116,6 +116,7 @@ enum rcar_i2c_type {
 	I2C_RCAR_GEN1,
 	I2C_RCAR_GEN2,
 	I2C_RCAR_GEN3,
+	I2C_RCAR_GEN4,
 };
 struct rcar_i2c_priv {
@@ -220,6 +221,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
 }
+static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv)
+{
+	rcar_i2c_write(priv, ICSIER, 0);
+	rcar_i2c_write(priv, ICSSR, 0);
+	rcar_i2c_write(priv, ICSCR, SDBS);
+	rcar_i2c_write(priv, ICSAR, 0);	/* Gen2: must be 0 if not using slave */
+}
 static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
 {
 	int ret;
@@ -372,8 +381,8 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
 			 sg_dma_len(&priv->sg), priv->dma_direction);
-	/* Gen3 can only do one RXDMA per transfer and we just completed it */
-	if (priv->devtype == I2C_RCAR_GEN3 &&
+	/* Gen3+ can only do one RXDMA per transfer and we just completed it */
+	if (priv->devtype >= I2C_RCAR_GEN3 &&
 	    priv->dma_direction == DMA_FROM_DEVICE)
 		priv->flags |= ID_P_NO_RXDMA;
@@ -787,6 +796,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
 {
 	int ret;
+	/* Don't reset if a slave instance is currently running */
+	if (priv->slave)
+		return -EISCONN;
+
 	ret = reset_control_reset(priv->rstc);
 	if (ret)
 		return ret;
@@ -811,14 +824,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 	if (ret < 0)
 		goto out;
-	/* Gen3 needs a reset before allowing RXDMA once */
-	if (priv->devtype == I2C_RCAR_GEN3) {
-		priv->flags |= ID_P_NO_RXDMA;
-		if (!IS_ERR(priv->rstc)) {
-			ret = rcar_i2c_do_reset(priv);
-			if (ret == 0)
-				priv->flags &= ~ID_P_NO_RXDMA;
-		}
+	/* Gen3+ needs a reset. That also allows RXDMA once */
+	if (priv->devtype >= I2C_RCAR_GEN3) {
+		ret = rcar_i2c_do_reset(priv);
+		if (ret)
+			goto out;
+		priv->flags &= ~ID_P_NO_RXDMA;
 	}
 	rcar_i2c_init(priv);
@@ -888,11 +899,8 @@ static int rcar_unreg_slave(struct i2c_client *slave)
 	/* ensure no irq is running before clearing ptr */
 	disable_irq(priv->irq);
-	rcar_i2c_write(priv, ICSIER, 0);
-	rcar_i2c_write(priv, ICSSR, 0);
+	rcar_i2c_reset_slave(priv);
 	enable_irq(priv->irq);
-	rcar_i2c_write(priv, ICSCR, SDBS);
-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
 	priv->slave = NULL;
@@ -945,6 +953,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = {
 	{ .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 },
 	{ .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 },
 	{ .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 },
+	{ .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 },
 	{},
 };
 MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
@@ -1004,22 +1013,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 		goto out_pm_disable;
 	}
-	rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+	/* Bring hardware to known state */
+	rcar_i2c_init(priv);
+	rcar_i2c_reset_slave(priv);
 	if (priv->devtype < I2C_RCAR_GEN3) {
 		irqflags |= IRQF_NO_THREAD;
 		irqhandler = rcar_i2c_gen2_irq;
 	}
-	if (priv->devtype == I2C_RCAR_GEN3) {
-		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
-		if (!IS_ERR(priv->rstc)) {
-			ret = reset_control_status(priv->rstc);
-			if (ret < 0)
-				priv->rstc = ERR_PTR(-ENOTSUPP);
-		}
-	}
 	/* Stay always active when multi-master to keep arbitration working */
 	if (of_property_read_bool(dev->of_node, "multi-master"))
 		priv->flags |= ID_P_PM_BLOCKED;
@@ -1029,6 +1031,22 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	if (of_property_read_bool(dev->of_node, "smbus"))
 		priv->flags |= ID_P_HOST_NOTIFY;
+	/* R-Car Gen3+ needs a reset before every transfer */
+	if (priv->devtype >= I2C_RCAR_GEN3) {
+		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+		if (IS_ERR(priv->rstc)) {
+			ret = PTR_ERR(priv->rstc);
+			goto out_pm_put;
+		}
+		ret = reset_control_status(priv->rstc);
+		if (ret < 0)
+			goto out_pm_put;
+		/* hard reset disturbs HostNotify local target, so disable it */
+		priv->flags &= ~ID_P_HOST_NOTIFY;
+	}
 	ret = platform_get_irq(pdev, 0);
 	if (ret < 0)
 		goto out_pm_put;


@@ -969,6 +969,7 @@ EXPORT_SYMBOL_GPL(i2c_unregister_device);
 static const struct i2c_device_id dummy_id[] = {
 	{ "dummy", 0 },
+	{ "smbus_host_notify", 0 },
 	{ },
 };


@@ -63,6 +63,8 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
 MODULE_LICENSE("Dual BSD/GPL");
+#define MAX_UMAD_RECV_LIST_SIZE 200000
+
 enum {
 	IB_UMAD_MAX_PORTS  = RDMA_MAX_PORTS,
 	IB_UMAD_MAX_AGENTS = 32,
@@ -113,6 +115,7 @@ struct ib_umad_file {
 	struct mutex		mutex;
 	struct ib_umad_port    *port;
 	struct list_head	recv_list;
+	atomic_t		recv_list_size;
 	struct list_head	send_list;
 	struct list_head	port_list;
 	spinlock_t		send_lock;
@@ -180,24 +183,28 @@ static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
 	return file->agents_dead ? NULL : file->agent[id];
 }
-static int queue_packet(struct ib_umad_file *file,
-			struct ib_mad_agent *agent,
-			struct ib_umad_packet *packet)
+static int queue_packet(struct ib_umad_file *file, struct ib_mad_agent *agent,
+			struct ib_umad_packet *packet, bool is_recv_mad)
 {
 	int ret = 1;
 	mutex_lock(&file->mutex);
+	if (is_recv_mad &&
+	    atomic_read(&file->recv_list_size) > MAX_UMAD_RECV_LIST_SIZE)
+		goto unlock;
 	for (packet->mad.hdr.id = 0;
 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
 	     packet->mad.hdr.id++)
 		if (agent == __get_agent(file, packet->mad.hdr.id)) {
 			list_add_tail(&packet->list, &file->recv_list);
+			atomic_inc(&file->recv_list_size);
 			wake_up_interruptible(&file->recv_wait);
 			ret = 0;
 			break;
 		}
+unlock:
 	mutex_unlock(&file->mutex);
 	return ret;
@@ -224,7 +231,7 @@ static void send_handler(struct ib_mad_agent *agent,
 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
 		packet->length = IB_MGMT_MAD_HDR;
 		packet->mad.hdr.status = ETIMEDOUT;
-		if (!queue_packet(file, agent, packet))
+		if (!queue_packet(file, agent, packet, false))
 			return;
 	}
 	kfree(packet);
@@ -284,7 +291,7 @@ static void recv_handler(struct ib_mad_agent *agent,
 		rdma_destroy_ah_attr(&ah_attr);
 	}
-	if (queue_packet(file, agent, packet))
+	if (queue_packet(file, agent, packet, true))
 		goto err2;
 	return;
@@ -409,6 +416,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
 	list_del(&packet->list);
+	atomic_dec(&file->recv_list_size);
 	mutex_unlock(&file->mutex);
@@ -421,6 +429,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 		/* Requeue packet */
 		mutex_lock(&file->mutex);
 		list_add(&packet->list, &file->recv_list);
+		atomic_inc(&file->recv_list_size);
 		mutex_unlock(&file->mutex);
 	} else {
 		if (packet->recv_wc)


@@ -12,8 +12,10 @@
 /* #define DEBUG */
 #include <linux/input.h>
+#include <linux/limits.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/overflow.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -318,9 +320,8 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
 		return -EINVAL;
 	}
-	ff_dev_size = sizeof(struct ff_device) +
-		      max_effects * sizeof(struct file *);
-	if (ff_dev_size < max_effects) /* overflow */
+	ff_dev_size = struct_size(ff, effect_owners, max_effects);
+	if (ff_dev_size == SIZE_MAX) /* overflow */
 		return -EINVAL;
 	ff = kzalloc(ff_dev_size, GFP_KERNEL);
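
The ff-core hunk above swaps an open-coded size calculation for struct_size(), which evaluates to SIZE_MAX instead of silently wrapping when the element count is too large, so a single comparison catches the overflow. A small sketch of the same pattern with a made-up flexible-array struct (not the input layer's own types):

#include <linux/limits.h>
#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical structure ending in a flexible array member. */
struct item_table {
	unsigned int count;
	struct file *owners[];
};

static struct item_table *item_table_alloc(unsigned int count)
{
	struct item_table *t;
	/* sizeof(*t) + count * sizeof(t->owners[0]), saturating at SIZE_MAX */
	size_t bytes = struct_size(t, owners, count);

	if (bytes == SIZE_MAX)	/* overflow */
		return NULL;

	t = kzalloc(bytes, GFP_KERNEL);
	if (t)
		t->count = count;
	return t;
}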


@@ -174,6 +174,6 @@ struct as10x_register_addr {
 	uint32_t addr;
 	/* register mode access */
 	uint8_t mode;
-};
+} __packed;
 #endif


@@ -410,6 +410,7 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
 	struct tda10048_config *config = &state->config;
 	int i;
 	u32 if_freq_khz;
+	u64 sample_freq;
 	dprintk(1, "%s(bw = %d)\n", __func__, bw);
@@ -451,9 +452,11 @@ static int tda10048_set_if(struct dvb_frontend *fe, u32 bw)
 	dprintk(1, "- pll_pfactor = %d\n", state->pll_pfactor);
 	/* Calculate the sample frequency */
-	state->sample_freq = state->xtal_hz * (state->pll_mfactor + 45);
-	state->sample_freq /= (state->pll_nfactor + 1);
-	state->sample_freq /= (state->pll_pfactor + 4);
+	sample_freq = state->xtal_hz;
+	sample_freq *= state->pll_mfactor + 45;
+	do_div(sample_freq, state->pll_nfactor + 1);
+	do_div(sample_freq, state->pll_pfactor + 4);
+	state->sample_freq = sample_freq;
 	dprintk(1, "- sample_freq = %d\n", state->sample_freq);
 	/* Update the I/F */


@@ -328,7 +328,7 @@ static int CalcMainPLL(struct tda_state *state, u32 freq)
 	OscFreq = (u64) freq * (u64) Div;
 	OscFreq *= (u64) 16384;
-	do_div(OscFreq, (u64)16000000);
+	do_div(OscFreq, 16000000);
 	MainDiv = OscFreq;
 	state->m_Regs[MPD] = PostDiv & 0x77;
@@ -352,7 +352,7 @@ static int CalcCalPLL(struct tda_state *state, u32 freq)
 	OscFreq = (u64)freq * (u64)Div;
 	/* CalDiv = u32( OscFreq * 16384 / 16000000 ); */
 	OscFreq *= (u64)16384;
-	do_div(OscFreq, (u64)16000000);
+	do_div(OscFreq, 16000000);
 	CalDiv = OscFreq;
 	state->m_Regs[CPD] = PostDiv;
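
Both the tda10048 and tda18271c2dd hunks lean on do_div(), whose contract is easy to trip over: the first argument must be a 64-bit lvalue that is divided in place, the divisor is a 32-bit value (hence the removal of the u64 cast above), and the remainder is the return value. A small, hypothetical sketch of the pattern used in these tuner fixes (names invented):

#include <linux/types.h>
#include <asm/div64.h>

/* Hypothetical: scale a clock without overflowing 32-bit arithmetic. */
static u32 scaled_freq(u32 base_hz, u32 mul, u32 div1, u32 div2)
{
	u64 tmp = base_hz;	/* widen before multiplying so it cannot wrap */

	tmp *= mul;
	do_div(tmp, div1);	/* tmp /= div1; the remainder is returned and ignored */
	do_div(tmp, div2);

	return (u32)tmp;
}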


@@ -2419,7 +2419,12 @@ static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap)
 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config);
-	return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
+	if (!adap->fe_adap[0].fe) {
+		release_firmware(state->frontend_firmware);
+		return -ENODEV;
+	}
+
+	return 0;
 }
 static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
@@ -2492,8 +2497,10 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
 	dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80);
 	adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]);
-	if (adap->fe_adap[0].fe == NULL)
+	if (!adap->fe_adap[0].fe) {
+		release_firmware(state->frontend_firmware);
 		return -ENODEV;
+	}
 	i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0);
 	dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82);
@@ -2501,7 +2508,12 @@ static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap)
 	fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]);
 	dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave);
-	return fe_slave == NULL ?  -ENODEV : 0;
+	if (!fe_slave) {
+		release_firmware(state->frontend_firmware);
+		return -ENODEV;
+	}
+
+	return 0;
 }
 static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)


@@ -716,6 +716,7 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 {
 	struct dvb_usb_device *d = i2c_get_adapdata(adap);
 	struct dw2102_state *state;
+	int j;
 	if (!d)
 		return -ENODEV;
@@ -729,11 +730,11 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 		return -EAGAIN;
 	}
-	switch (num) {
-	case 1:
-		switch (msg[0].addr) {
+	j = 0;
+	while (j < num) {
+		switch (msg[j].addr) {
 		case SU3000_STREAM_CTRL:
-			state->data[0] = msg[0].buf[0] + 0x36;
+			state->data[0] = msg[j].buf[0] + 0x36;
 			state->data[1] = 3;
 			state->data[2] = 0;
 			if (dvb_usb_generic_rw(d, state->data, 3,
@@ -745,61 +746,86 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
 			if (dvb_usb_generic_rw(d, state->data, 1,
 					state->data, 2, 0) < 0)
 				err("i2c transfer failed.");
-			msg[0].buf[1] = state->data[0];
-			msg[0].buf[0] = state->data[1];
+			msg[j].buf[1] = state->data[0];
+			msg[j].buf[0] = state->data[1];
 			break;
 		default:
-			if (3 + msg[0].len > sizeof(state->data)) {
-				warn("i2c wr: len=%d is too big!\n",
-				     msg[0].len);
+			/* if the current write msg is followed by a another
+			 * read msg to/from the same address
+			 */
+			if ((j+1 < num) && (msg[j+1].flags & I2C_M_RD) &&
+			    (msg[j].addr == msg[j+1].addr)) {
+				/* join both i2c msgs to one usb read command */
+				if (4 + msg[j].len > sizeof(state->data)) {
+					warn("i2c combined wr/rd: write len=%d is too big!\n",
+					     msg[j].len);
+					num = -EOPNOTSUPP;
+					break;
+				}
+				if (1 + msg[j+1].len > sizeof(state->data)) {
+					warn("i2c combined wr/rd: read len=%d is too big!\n",
+					     msg[j+1].len);
+					num = -EOPNOTSUPP;
+					break;
+				}
+				state->data[0] = 0x09;
+				state->data[1] = msg[j].len;
+				state->data[2] = msg[j+1].len;
+				state->data[3] = msg[j].addr;
+				memcpy(&state->data[4], msg[j].buf, msg[j].len);
+				if (dvb_usb_generic_rw(d, state->data, msg[j].len + 4,
+						state->data, msg[j+1].len + 1, 0) < 0)
+					err("i2c transfer failed.");
+				memcpy(msg[j+1].buf, &state->data[1], msg[j+1].len);
+				j++;
+				break;
+			}
+			if (msg[j].flags & I2C_M_RD) {
+				/* single read */
+				if (4 + msg[j].len > sizeof(state->data)) {
+					warn("i2c rd: len=%d is too big!\n", msg[j].len);
+					num = -EOPNOTSUPP;
+					break;
+				}
+				state->data[0] = 0x09;
+				state->data[1] = 0;
+				state->data[2] = msg[j].len;
+				state->data[3] = msg[j].addr;
+				memcpy(&state->data[4], msg[j].buf, msg[j].len);
+				if (dvb_usb_generic_rw(d, state->data, 4,
+						state->data, msg[j].len + 1, 0) < 0)
+					err("i2c transfer failed.");
+				memcpy(msg[j].buf, &state->data[1], msg[j].len);
+				break;
+			}
+			/* single write */
+			if (3 + msg[j].len > sizeof(state->data)) {
+				warn("i2c wr: len=%d is too big!\n", msg[j].len);
 				num = -EOPNOTSUPP;
 				break;
 			}
-			/* always i2c write*/
 			state->data[0] = 0x08;
-			state->data[1] = msg[0].addr;
-			state->data[2] = msg[0].len;
-			memcpy(&state->data[3], msg[0].buf, msg[0].len);
-			if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+			state->data[1] = msg[j].addr;
+			state->data[2] = msg[j].len;
+			memcpy(&state->data[3], msg[j].buf, msg[j].len);
+			if (dvb_usb_generic_rw(d, state->data, msg[j].len + 3,
 					state->data, 1, 0) < 0)
 				err("i2c transfer failed.");
-		}
-		break;
-	case 2:
-		/* always i2c read */
-		if (4 + msg[0].len > sizeof(state->data)) {
-			warn("i2c rd: len=%d is too big!\n",
-			     msg[0].len);
-			num = -EOPNOTSUPP;
-			break;
-		}
-		if (1 + msg[1].len > sizeof(state->data)) {
-			warn("i2c rd: len=%d is too big!\n",
-			     msg[1].len);
-			num = -EOPNOTSUPP;
-			break;
-		}
-		state->data[0] = 0x09;
-		state->data[1] = msg[0].len;
-		state->data[2] = msg[1].len;
-		state->data[3] = msg[0].addr;
-		memcpy(&state->data[4], msg[0].buf, msg[0].len);
-		if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
-				state->data, msg[1].len + 1, 0) < 0)
-			err("i2c transfer failed.");
-		memcpy(msg[1].buf, &state->data[1], msg[1].len);
-		break;
-	default:
-		warn("more than 2 i2c messages at a time is not handled yet.");
-		break;
-	}
+		} // switch
+		j++;
+	} // while
 	mutex_unlock(&d->data_mutex);
 	mutex_unlock(&d->i2c_mutex);
 	return num;


@@ -247,7 +247,7 @@ struct s2255_vc {
 struct s2255_dev {
 	struct s2255_vc         vc[MAX_CHANNELS];
 	struct v4l2_device      v4l2_dev;
-	atomic_t                num_channels;
+	refcount_t              num_channels;
 	int                     frames;
 	struct mutex            lock;	/* channels[].vdev.lock */
 	struct mutex            cmdlock; /* protects cmdbuf */
@@ -1552,11 +1552,11 @@ static void s2255_video_device_release(struct video_device *vdev)
 		container_of(vdev, struct s2255_vc, vdev);
 	dprintk(dev, 4, "%s, chnls: %d\n", __func__,
-		atomic_read(&dev->num_channels));
+		refcount_read(&dev->num_channels));
 	v4l2_ctrl_handler_free(&vc->hdl);
-	if (atomic_dec_and_test(&dev->num_channels))
+	if (refcount_dec_and_test(&dev->num_channels))
 		s2255_destroy(dev);
 	return;
 }
@@ -1661,7 +1661,7 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
 				  "failed to register video device!\n");
 			break;
 		}
-		atomic_inc(&dev->num_channels);
+		refcount_inc(&dev->num_channels);
 		v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n",
 			  video_device_node_name(&vc->vdev));
@@ -1669,11 +1669,11 @@ static int s2255_probe_v4l(struct s2255_dev *dev)
 	pr_info("Sensoray 2255 V4L driver Revision: %s\n",
 		S2255_VERSION);
 	/* if no channels registered, return error and probe will fail*/
-	if (atomic_read(&dev->num_channels) == 0) {
+	if (refcount_read(&dev->num_channels) == 0) {
 		v4l2_device_unregister(&dev->v4l2_dev);
 		return ret;
 	}
-	if (atomic_read(&dev->num_channels) != MAX_CHANNELS)
+	if (refcount_read(&dev->num_channels) != MAX_CHANNELS)
 		pr_warn("s2255: Not all channels available.\n");
 	return 0;
 }
@@ -2222,7 +2222,7 @@ static int s2255_probe(struct usb_interface *interface,
 		goto errorFWDATA1;
 	}
-	atomic_set(&dev->num_channels, 0);
+	refcount_set(&dev->num_channels, 0);
 	dev->pid = id->idProduct;
 	dev->fw_data = kzalloc(sizeof(struct s2255_fw), GFP_KERNEL);
 	if (!dev->fw_data)
@@ -2342,12 +2342,12 @@ static void s2255_disconnect(struct usb_interface *interface)
 {
 	struct s2255_dev *dev = to_s2255_dev(usb_get_intfdata(interface));
 	int i;
-	int channels = atomic_read(&dev->num_channels);
+	int channels = refcount_read(&dev->num_channels);
 	mutex_lock(&dev->lock);
 	v4l2_device_disconnect(&dev->v4l2_dev);
 	mutex_unlock(&dev->lock);
 	/*see comments in the uvc_driver.c usb disconnect function */
-	atomic_inc(&dev->num_channels);
+	refcount_inc(&dev->num_channels);
 	/* unregister each video device. */
 	for (i = 0; i < channels; i++)
 		video_unregister_device(&dev->vc[i].vdev);
@@ -2360,7 +2360,7 @@ static void s2255_disconnect(struct usb_interface *interface)
 		dev->vc[i].vidstatus_ready = 1;
 		wake_up(&dev->vc[i].wait_vidstatus);
 	}
-	if (atomic_dec_and_test(&dev->num_channels))
+	if (refcount_dec_and_test(&dev->num_channels))
 		s2255_destroy(dev);
 	dev_info(&interface->dev, "%s\n", __func__);
 }


@@ -964,28 +964,32 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
 				   unsigned int offset_in_page)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
-	/* Make sure the offset is less than the actual page size. */
-	if (offset_in_page > mtd->writesize + mtd->oobsize)
-		return -EINVAL;
-	/*
-	 * On small page NANDs, there's a dedicated command to access the OOB
-	 * area, and the column address is relative to the start of the OOB
-	 * area, not the start of the page. Asjust the address accordingly.
-	 */
-	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
-		offset_in_page -= mtd->writesize;
-	/*
-	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
-	 * wide, then it must be divided by 2.
-	 */
-	if (chip->options & NAND_BUSWIDTH_16) {
-		if (WARN_ON(offset_in_page % 2))
-			return -EINVAL;
-		offset_in_page /= 2;
-	}
+	bool ident_stage = !mtd->writesize;
+
+	/* Bypass all checks during NAND identification */
+	if (likely(!ident_stage)) {
+		/* Make sure the offset is less than the actual page size. */
+		if (offset_in_page > mtd->writesize + mtd->oobsize)
+			return -EINVAL;
+		/*
+		 * On small page NANDs, there's a dedicated command to access the OOB
+		 * area, and the column address is relative to the start of the OOB
+		 * area, not the start of the page. Asjust the address accordingly.
+		 */
+		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+			offset_in_page -= mtd->writesize;
+		/*
+		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+		 * wide, then it must be divided by 2.
+		 */
+		if (chip->options & NAND_BUSWIDTH_16) {
+			if (WARN_ON(offset_in_page % 2))
+				return -EINVAL;
+			offset_in_page /= 2;
+		}
+	}
 	addrs[0] = offset_in_page;
@@ -994,7 +998,7 @@ static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
 	 * need 2
 	 */
-	if (mtd->writesize <= 512)
+	if (!ident_stage && mtd->writesize <= 512)
 		return 1;
 	addrs[1] = offset_in_page >> 8;
@@ -1189,16 +1193,19 @@ int nand_change_read_column_op(struct nand_chip *chip,
 			       unsigned int len, bool force_8bit)
 {
 	struct mtd_info *mtd = nand_to_mtd(chip);
+	bool ident_stage = !mtd->writesize;
 	if (len && !buf)
 		return -EINVAL;
-	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
-		return -EINVAL;
-	/* Small page NANDs do not support column change. */
-	if (mtd->writesize <= 512)
-		return -ENOTSUPP;
+	if (!ident_stage) {
+		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+			return -EINVAL;
+		/* Small page NANDs do not support column change. */
+		if (mtd->writesize <= 512)
+			return -ENOTSUPP;
+	}
 	if (nand_has_exec_op(chip)) {
 		const struct nand_sdr_timings *sdr =

@@ -1100,9 +1100,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
 	__be32 target;
 
 	if (newval->string) {
-		if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
-			netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
-				   &target);
+		if (strlen(newval->string) < 1 ||
+		    !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
+			netdev_err(bond->dev, "invalid ARP target specified\n");
 			return ret;
 		}
 		if (newval->string[0] == '+')

@@ -114,6 +114,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
 
 static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
 	.quirks = 0,
+	.family = KVASER_LEAF,
 	.ops = &kvaser_usb_leaf_dev_ops,
 };

@@ -116,8 +116,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
 {
 	struct mv88e6xxx_mdio_bus *mdio_bus;
 
-	mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
-				    list);
+	mdio_bus = list_first_entry_or_null(&chip->mdios,
+					    struct mv88e6xxx_mdio_bus, list);
 	if (!mdio_bus)
 		return NULL;

@@ -1262,7 +1262,7 @@ enum {
 
 struct bnx2x_fw_stats_req {
 	struct stats_query_header hdr;
-	struct stats_query_entry query[FP_SB_MAX_E1x+
+	struct stats_query_entry query[FP_SB_MAX_E2 +
 				       BNX2X_FIRST_QUEUE_QUERY_IDX];
 };

@@ -213,8 +213,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
 	if (ch->dma.irq)
 		free_irq(ch->dma.irq, priv);
 	if (IS_RX(ch->idx)) {
-		int desc;
-		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+		struct ltq_dma_channel *dma = &ch->dma;
+
+		for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
 	}
 }

@@ -54,8 +54,13 @@ enum npc_kpu_lb_ltype {
 	NPC_LT_LB_CUSTOM1 = 0xF,
 };
 
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
 enum npc_kpu_lc_ltype {
-	NPC_LT_LC_IP = 1,
+	NPC_LT_LC_PTP = 1,
+	NPC_LT_LC_IP,
 	NPC_LT_LC_IP_OPT,
 	NPC_LT_LC_IP6,
 	NPC_LT_LC_IP6_EXT,
@@ -63,7 +68,6 @@ enum npc_kpu_lc_ltype {
 	NPC_LT_LC_RARP,
 	NPC_LT_LC_MPLS,
 	NPC_LT_LC_NSH,
-	NPC_LT_LC_PTP,
 	NPC_LT_LC_FCOE,
 	NPC_LT_LC_CUSTOM0 = 0xE,
 	NPC_LT_LC_CUSTOM1 = 0xF,

@@ -1357,7 +1357,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
 	if (req->ssow > block->lf.max) {
 		dev_err(&rvu->pdev->dev,
 			"Func 0x%x: Invalid SSOW req, %d > max %d\n",
-			pcifunc, req->sso, block->lf.max);
+			pcifunc, req->ssow, block->lf.max);
 		return -EINVAL;
 	}
 	mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);

@@ -501,6 +501,7 @@ static int ks8851_net_open(struct net_device *dev)
 	ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
 
 	ks->queued_len = 0;
+	ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
 	netif_start_queue(ks->netdev);
 
 	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@@ -1057,7 +1058,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
 	int ret;
 
 	ks->netdev = netdev;
-	ks->tx_space = 6144;
 
 	gpio = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0, NULL);
 	if (gpio == -EPROBE_DEFER)

@@ -70,6 +70,7 @@
 #define MPHDRLEN_SSN	4	/* ditto with short sequence numbers */
 
 #define PPP_PROTO_LEN	2
+#define PPP_LCP_HDRLEN	4
 
 /*
  * An instance of /dev/ppp can be associated with either a ppp
@@ -489,6 +490,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
 	return ret;
 }
 
+static bool ppp_check_packet(struct sk_buff *skb, size_t count)
+{
+	/* LCP packets must include LCP header which 4 bytes long:
+	 * 1-byte code, 1-byte identifier, and 2-byte length.
+	 */
+	return get_unaligned_be16(skb->data) != PPP_LCP ||
+		count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
+}
+
 static ssize_t ppp_write(struct file *file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
@@ -511,6 +521,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
 		kfree_skb(skb);
 		goto out;
 	}
+	ret = -EINVAL;
+	if (unlikely(!ppp_check_packet(skb, count))) {
+		kfree_skb(skb);
+		goto out;
+	}
 
 	switch (pf->kind) {
 	case INTERFACE:

@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
 	if (bits == 32) {
 		*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
 	} else if (bits == 128) {
-		((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
-		((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+		((u64 *)dst)[0] = get_unaligned_be64(src);
+		((u64 *)dst)[1] = get_unaligned_be64(src + 8);
 	}
 }
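
The change above replaces direct __be64 loads with get_unaligned_be64(), because the 128-bit source key is only byte-aligned and dereferencing a u64 pointer at such an address is undefined on strict-alignment CPUs. A minimal user-space sketch of what a byte-wise helper of this kind does (a stand-in, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble a big-endian u64 one byte at a time, so the source pointer
     * needs no particular alignment. */
    static uint64_t load_be64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    int main(void)
    {
        /* 16-byte key starting at an odd (unaligned) offset */
        uint8_t buf[17] = { 0,
            0x20, 0x01, 0x0d, 0xb8, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0x01 };

        printf("%016llx %016llx\n",
               (unsigned long long)load_be64(buf + 1),
               (unsigned long long)load_be64(buf + 9));
        return 0;
    }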

@@ -126,10 +126,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
  */
 static inline int wg_cpumask_next_online(int *last_cpu)
 {
-	int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+	int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
 
 	if (cpu >= nr_cpu_ids)
 		cpu = cpumask_first(cpu_online_mask);
-	*last_cpu = cpu;
+	WRITE_ONCE(*last_cpu, cpu);
 	return cpu;
 }
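
Here the shared round-robin cursor *last_cpu is read and written without a lock from multiple contexts, so the patch marks those accesses with READ_ONCE()/WRITE_ONCE() to keep the compiler from tearing or re-reading them. A loose user-space analogue of the same cursor pattern, using relaxed C11 atomics and a fixed CPU count purely for illustration (the real helper also skips offline CPUs):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4

    /* Advance a shared cursor without a lock; each access is a single
     * atomic load or store, mirroring the role of READ_ONCE()/WRITE_ONCE(). */
    static int next_cpu(_Atomic int *last_cpu)
    {
        int cpu = atomic_load_explicit(last_cpu, memory_order_relaxed) + 1;

        if (cpu >= NR_CPUS)
            cpu = 0;
        atomic_store_explicit(last_cpu, cpu, memory_order_relaxed);
        return cpu;
    }

    int main(void)
    {
        _Atomic int last = -1;

        for (int i = 0; i < 6; i++)
            printf("packet %d -> cpu %d\n", i, next_cpu(&last));
        return 0;
    }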

@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
 {
 	struct sk_buff *skb;
 
-	if (skb_queue_empty(&peer->staged_packet_queue)) {
+	if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
 		skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
 				GFP_ATOMIC);
 		if (unlikely(!skb))

@@ -364,7 +364,8 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
 	struct ieee80211_p2p_noa_attr noa_attr;
 	const struct cfg80211_bss_ies *ies;
 	struct wilc_join_bss_param *param;
-	u8 rates_len = 0, ies_len;
+	u8 rates_len = 0;
+	int ies_len;
 	int ret;
 
 	param = kzalloc(sizeof(*param), GFP_KERNEL);

@@ -420,7 +420,7 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 	int node, srcu_idx;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
-	for_each_node(node)
+	for_each_online_node(node)
 		__nvme_find_path(head, node);
 	srcu_read_unlock(&head->srcu, srcu_idx);
 }

@@ -844,7 +844,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct bio_vec bv = req_bvec(req);
 
 		if (!is_pci_p2pdma_page(bv.bv_page)) {
-			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+			    bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);

@@ -795,6 +795,15 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
 	wait_for_completion(&sq->free_done);
 	percpu_ref_exit(&sq->ref);
 
+	/*
+	 * we must reference the ctrl again after waiting for inflight IO
+	 * to complete. Because admin connect may have sneaked in after we
+	 * store sq->ctrl locally, but before we killed the percpu_ref. the
+	 * admin connect allocates and assigns sq->ctrl, which now needs a
+	 * final ref put, as this ctrl is going away.
+	 */
+	ctrl = sq->ctrl;
+
 	if (ctrl) {
 		/*
 		 * The teardown flow may take some time, and the host may not

@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset,
 			    void *val, size_t bytes)
 {
 	struct meson_sm_firmware *fw = context;
+	int ret;
 
-	return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
-				  bytes, 0, 0, 0);
+	ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset,
+				 bytes, 0, 0, 0);
+
+	return ret < 0 ? ret : 0;
 }
 
 static int meson_efuse_write(void *context, unsigned int offset,
 			     void *val, size_t bytes)
 {
 	struct meson_sm_firmware *fw = context;
+	int ret;
 
-	return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
-				   bytes, 0, 0, 0);
+	ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset,
+				  bytes, 0, 0, 0);
+
+	return ret < 0 ? ret : 0;
 }
 
 static const struct of_device_id meson_efuse_match[] = {

@@ -857,6 +857,22 @@ static const struct ts_dmi_data schneider_sct101ctm_data = {
 	.properties = schneider_sct101ctm_props,
 };
 
+static const struct property_entry globalspace_solt_ivw116_props[] = {
+	PROPERTY_ENTRY_U32("touchscreen-min-x", 7),
+	PROPERTY_ENTRY_U32("touchscreen-min-y", 22),
+	PROPERTY_ENTRY_U32("touchscreen-size-x", 1723),
+	PROPERTY_ENTRY_U32("touchscreen-size-y", 1077),
+	PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-globalspace-solt-ivw116.fw"),
+	PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+	PROPERTY_ENTRY_BOOL("silead,home-button"),
+	{ }
+};
+
+static const struct ts_dmi_data globalspace_solt_ivw116_data = {
+	.acpi_name = "MSSL1680:00",
+	.properties = globalspace_solt_ivw116_props,
+};
+
 static const struct property_entry techbite_arc_11_6_props[] = {
 	PROPERTY_ENTRY_U32("touchscreen-min-x", 5),
 	PROPERTY_ENTRY_U32("touchscreen-min-y", 7),
@@ -1261,6 +1277,17 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
 			DMI_MATCH(DMI_BIOS_DATE, "04/24/2018"),
 		},
 	},
+	{
+		/* Jumper EZpad 6s Pro */
+		.driver_data = (void *)&jumper_ezpad_6_pro_b_data,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Jumper"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Ezpad"),
+			/* Above matches are too generic, add bios match */
+			DMI_MATCH(DMI_BIOS_VERSION, "E.WSA116_8.E1.042.bin"),
+			DMI_MATCH(DMI_BIOS_DATE, "01/08/2020"),
+		},
+	},
 	{
 		/* Jumper EZpad 6 m4 */
 		.driver_data = (void *)&jumper_ezpad_6_m4_data,
@@ -1490,6 +1517,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "SCT101CTM"),
 		},
 	},
+	{
+		/* GlobalSpace SoLT IVW 11.6" */
+		.driver_data = (void *)&globalspace_solt_ivw116_data,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Globalspace Tech Pvt Ltd"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "SolTIVW"),
+			DMI_MATCH(DMI_PRODUCT_SKU, "PN20170413488"),
+		},
+	},
 	{
 		/* Techbite Arc 11.6 */
 		.driver_data = (void *)&techbite_arc_11_6_data,

@@ -1155,7 +1155,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		if (rc)
 			break;
 		if (copy_to_user(ucs, &kcs, sizeof(kcs)))
-			return -EFAULT;
+			rc = -EFAULT;
 		memzero_explicit(&kcs, sizeof(kcs));
 		break;
 	}
@@ -1187,7 +1187,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		if (rc)
 			break;
 		if (copy_to_user(ucp, &kcp, sizeof(kcp)))
-			return -EFAULT;
+			rc = -EFAULT;
 		memzero_explicit(&kcp, sizeof(kcp));
 		break;
 	}

@@ -2351,9 +2351,6 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	io_req->fcport = fcport;
 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
 
-	/* Record which cpu this request is associated with */
-	io_req->cpu = smp_processor_id();
-
 	/* Set TM flags */
 	io_req->io_req_flags = QEDF_READ;
 	io_req->data_xfer_len = 0;
@@ -2375,6 +2372,9 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
+	/* Record which cpu this request is associated with */
+	io_req->cpu = smp_processor_id();
+
 	sqe_idx = qedf_get_sqe_idx(fcport);
 	sqe = &fcport->sq[sqe_idx];
 	memset(sqe, 0, sizeof(struct fcoe_wqe));

@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 	if (ifp->desc.bNumEndpoints >= num_ep)
 		goto skip_to_next_endpoint_or_interface_descriptor;
 
+	/* Save a copy of the descriptor and use it instead of the original */
+	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+	memcpy(&endpoint->desc, d, n);
+	d = &endpoint->desc;
+
+	/* Clear the reserved bits in bEndpointAddress */
+	i = d->bEndpointAddress &
+		(USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);
+	if (i != d->bEndpointAddress) {
+		dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n",
+			   cfgno, inum, asnum, d->bEndpointAddress, i);
+		endpoint->desc.bEndpointAddress = i;
+	}
+
 	/* Check for duplicate endpoint addresses */
 	if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
 		dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
 		}
 	}
 
-	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+	/* Accept this endpoint */
 	++ifp->desc.bNumEndpoints;
-
-	memcpy(&endpoint->desc, d, n);
 	INIT_LIST_HEAD(&endpoint->urb_list);
 
 	/*
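
The new block copies the endpoint descriptor and masks bEndpointAddress down to its direction bit and endpoint number, so descriptors from buggy devices that set the reserved bits 4-6 no longer slip past the duplicate-address check. The mask values come from the USB specification (0x80 for direction, 0x0f for the endpoint number); a tiny stand-alone illustration, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    #define USB_ENDPOINT_NUMBER_MASK 0x0f   /* bits 0..3: endpoint number */
    #define USB_ENDPOINT_DIR_MASK    0x80   /* bit 7: IN/OUT direction */

    int main(void)
    {
        uint8_t bad = 0x1f;   /* reserved bits set by a non-compliant device */
        uint8_t fixed = bad & (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK);

        printf("0x%02x -> 0x%02x\n", bad, fixed);   /* 0x1f -> 0x0f */
        return 0;
    }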

@@ -504,6 +504,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
 	  USB_QUIRK_DELAY_CTRL_MSG },
 
+	/* START BP-850k Printer */
+	{ USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF },
+
 	/* MIDI keyboard WORLDE MINI */
 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },

@@ -136,9 +136,12 @@ static int usb_string_copy(const char *s, char **s_copy)
 	int ret;
 	char *str;
 	char *copy = *s_copy;
 
 	ret = strlen(s);
 	if (ret > USB_MAX_STRING_LEN)
 		return -EOVERFLOW;
+
+	if (ret < 1)
+		return -EINVAL;
 
 	if (copy) {
 		str = copy;

@@ -1764,6 +1764,49 @@ static int mos7840_port_remove(struct usb_serial_port *port)
 	return 0;
 }
 
+static int mos7840_suspend(struct usb_serial *serial, pm_message_t message)
+{
+	struct moschip_port *mos7840_port;
+	struct usb_serial_port *port;
+	int i;
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		port = serial->port[i];
+		if (!tty_port_initialized(&port->port))
+			continue;
+
+		mos7840_port = usb_get_serial_port_data(port);
+
+		usb_kill_urb(mos7840_port->read_urb);
+		mos7840_port->read_urb_busy = false;
+	}
+
+	return 0;
+}
+
+static int mos7840_resume(struct usb_serial *serial)
+{
+	struct moschip_port *mos7840_port;
+	struct usb_serial_port *port;
+	int res;
+	int i;
+
+	for (i = 0; i < serial->num_ports; ++i) {
+		port = serial->port[i];
+		if (!tty_port_initialized(&port->port))
+			continue;
+
+		mos7840_port = usb_get_serial_port_data(port);
+
+		mos7840_port->read_urb_busy = true;
+		res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO);
+		if (res)
+			mos7840_port->read_urb_busy = false;
+	}
+
+	return 0;
+}
+
 static struct usb_serial_driver moschip7840_4port_device = {
 	.driver = {
 		.owner = THIS_MODULE,
@@ -1792,6 +1835,8 @@ static struct usb_serial_driver moschip7840_4port_device = {
 	.port_probe = mos7840_port_probe,
 	.port_remove = mos7840_port_remove,
 	.read_bulk_callback = mos7840_bulk_in_callback,
+	.suspend = mos7840_suspend,
+	.resume = mos7840_resume,
 };
 
 static struct usb_serial_driver * const serial_drivers[] = {

@@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(0) | RSVD(1) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff),	/* Telit LN940 (MBIM) */
 	  .driver_info = NCTRL(0) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff),	/* Telit FN912 */
+	  .driver_info = RSVD(0) | NCTRL(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff),	/* Telit FN912 */
+	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff),	/* Telit LE910-S1 (RNDIS) */
 	  .driver_info = NCTRL(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff),	/* Telit LE910-S1 (ECM) */
@@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(2) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff),	/* Telit LE910R1 (ECM) */
 	  .driver_info = NCTRL(2) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff),	/* Telit generic core-dump device */
+	  .driver_info = NCTRL(0) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010),				/* Telit SBL FN980 flashing device */
 	  .driver_info = NCTRL(0) | ZLP },
 	{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200),				/* Telit LE910S1 flashing device */
@@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00),
+	  .driver_info = NCTRL(2) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00),
+	  .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) },
 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200),
 	  .driver_info = RSVD(1) | RSVD(4) },
@@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff),	/* Foxconn T99W373 MBIM */
 	  .driver_info = RSVD(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),	/* Foxconn T99W651 RNDIS */
+	  .driver_info = RSVD(5) | RSVD(6) },
 	{ USB_DEVICE(0x1508, 0x1001),				/* Fibocom NL668 (IOT version) */
 	  .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
 	{ USB_DEVICE(0x1782, 0x4d10) },				/* Fibocom L610 (AT mode) */
@@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(4) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),	/* Rolling RW135-GL (laptop MBIM) */
 	  .driver_info = RSVD(5) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff),	/* Rolling RW350-GL (laptop MBIM) */
+	  .driver_info = RSVD(5) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Global */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for Global SKU */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WRD for China SKU */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for SA */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for EU */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for NA */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for China EDU */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) },	/* NetPrisma LCUK54-WWD for Golbal EDU */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },

@@ -329,7 +329,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 	WRITE_ONCE(dentry->d_flags, flags);
 	dentry->d_inode = NULL;
-	if (dentry->d_flags & DCACHE_LRU_LIST)
+	/*
+	 * The negative counter only tracks dentries on the LRU. Don't inc if
+	 * d_lru is on another list.
+	 */
+	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 		this_cpu_inc(nr_dentry_negative);
 }
 
@@ -1940,9 +1944,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 	spin_lock(&dentry->d_lock);
 	/*
-	 * Decrement negative dentry count if it was in the LRU list.
+	 * The negative counter only tracks dentries on the LRU. Don't dec if
+	 * d_lru is on another list.
 	 */
-	if (dentry->d_flags & DCACHE_LRU_LIST)
+	if ((dentry->d_flags &
+	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 		this_cpu_dec(nr_dentry_negative);
 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
 	raw_write_seqcount_begin(&dentry->d_seq);

@@ -58,6 +58,7 @@ static void jffs2_i_init_once(void *foo)
 	struct jffs2_inode_info *f = foo;
 
 	mutex_init(&f->sem);
+	f->target = NULL;
 	inode_init_once(&f->vfs_inode);
 }

@@ -1337,9 +1337,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		locks_wake_up_blocks(left);
 	}
 out:
+	trace_posix_lock_inode(inode, request, error);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	trace_posix_lock_inode(inode, request, error);
 	/*
 	 * Free any unused locks.
 	 */

@@ -377,11 +377,12 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
  * @target: offset number of an entry in the group (start point)
  * @bsize: size in bits
  * @lock: spin lock protecting @bitmap
+ * @wrap: whether to wrap around
  */
 static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
 					    unsigned long target,
 					    unsigned int bsize,
-					    spinlock_t *lock)
+					    spinlock_t *lock, bool wrap)
 {
 	int pos, end = bsize;
@@ -397,6 +398,8 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
 		end = target;
 	}
 
+	if (!wrap)
+		return -ENOSPC;
 	/* wrap around */
 	for (pos = 0; pos < end; pos++) {
@@ -495,9 +498,10 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
 * nilfs_palloc_prepare_alloc_entry - prepare to allocate a persistent object
 * @inode: inode of metadata file using this allocator
 * @req: nilfs_palloc_req structure exchanged for the allocation
+ * @wrap: whether to wrap around
 */
 int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
-				     struct nilfs_palloc_req *req)
+				     struct nilfs_palloc_req *req, bool wrap)
 {
 	struct buffer_head *desc_bh, *bitmap_bh;
 	struct nilfs_palloc_group_desc *desc;
@@ -516,7 +520,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 	entries_per_group = nilfs_palloc_entries_per_group(inode);
 
 	for (i = 0; i < ngroups; i += n) {
-		if (group >= ngroups) {
+		if (group >= ngroups && wrap) {
 			/* wrap around */
 			group = 0;
 			maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr,
@@ -541,7 +545,13 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
 			bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
 			pos = nilfs_palloc_find_available_slot(
 				bitmap, group_offset,
-				entries_per_group, lock);
+				entries_per_group, lock, wrap);
+			/*
+			 * Since the search for a free slot in the
+			 * second and subsequent bitmap blocks always
+			 * starts from the beginning, the wrap flag
+			 * only has an effect on the first search.
+			 */
 			if (pos >= 0) {
 				/* found a free entry */
 				nilfs_palloc_group_desc_add_entries(

@@ -50,8 +50,8 @@ struct nilfs_palloc_req {
 	struct buffer_head *pr_entry_bh;
 };
 
-int nilfs_palloc_prepare_alloc_entry(struct inode *,
-				     struct nilfs_palloc_req *);
+int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
+				     struct nilfs_palloc_req *req, bool wrap);
 void nilfs_palloc_commit_alloc_entry(struct inode *,
 				     struct nilfs_palloc_req *);
 void nilfs_palloc_abort_alloc_entry(struct inode *, struct nilfs_palloc_req *);

@@ -75,7 +75,7 @@ int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 {
 	int ret;
 
-	ret = nilfs_palloc_prepare_alloc_entry(dat, req);
+	ret = nilfs_palloc_prepare_alloc_entry(dat, req, true);
 	if (ret < 0)
 		return ret;

@@ -143,6 +143,9 @@ static bool nilfs_check_page(struct page *page)
 			goto Enamelen;
 		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
 			goto Espan;
+		if (unlikely(p->inode &&
+			     NILFS_PRIVATE_INODE(le64_to_cpu(p->inode))))
+			goto Einumber;
 	}
 	if (offs != limit)
 		goto Eend;
@@ -168,6 +171,9 @@ static bool nilfs_check_page(struct page *page)
 	goto bad_entry;
 Espan:
 	error = "directory entry across blocks";
+	goto bad_entry;
+Einumber:
+	error = "disallowed inode number";
 bad_entry:
 	nilfs_error(sb,
 		    "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
@@ -390,11 +396,39 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
 
 struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
 {
-	struct nilfs_dir_entry *de = nilfs_get_page(dir, 0, p);
+	struct page *page;
+	struct nilfs_dir_entry *de, *next_de;
+	size_t limit;
+	char *msg;
 
+	de = nilfs_get_page(dir, 0, &page);
 	if (IS_ERR(de))
 		return NULL;
-	return nilfs_next_entry(de);
+
+	limit = nilfs_last_byte(dir, 0);  /* is a multiple of chunk size */
+	if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino ||
+		     !nilfs_match(1, ".", de))) {
+		msg = "missing '.'";
+		goto fail;
+	}
+
+	next_de = nilfs_next_entry(de);
+	/*
+	 * If "next_de" has not reached the end of the chunk, there is
+	 * at least one more record.  Check whether it matches "..".
+	 */
+	if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) ||
+		     !nilfs_match(2, "..", next_de))) {
+		msg = "missing '..'";
+		goto fail;
+	}
+	*p = page;
+	return next_de;
+
+fail:
+	nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg);
+	nilfs_put_page(page);
+	return NULL;
 }
 
 ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr)

@@ -55,13 +55,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
 	struct nilfs_palloc_req req;
 	int ret;
 
-	req.pr_entry_nr = 0;  /*
-			       * 0 says find free inode from beginning
-			       * of a group. dull code!!
-			       */
+	req.pr_entry_nr = NILFS_FIRST_INO(ifile->i_sb);
 	req.pr_entry_bh = NULL;
 
-	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req);
+	ret = nilfs_palloc_prepare_alloc_entry(ifile, &req, false);
 	if (!ret) {
 		ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1,
 						   &req.pr_entry_bh);

@@ -116,9 +116,15 @@ enum {
 #define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
 
 #define NILFS_MDT_INODE(sb, ino) \
-	((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
+	((ino) < NILFS_USER_INO && (NILFS_MDT_INO_BITS & BIT(ino)))
 #define NILFS_VALID_INODE(sb, ino) \
-	((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
+	((ino) >= NILFS_FIRST_INO(sb) || \
+	 ((ino) < NILFS_USER_INO && (NILFS_SYS_INO_BITS & BIT(ino))))
+
+#define NILFS_PRIVATE_INODE(ino) ({ \
+	ino_t __ino = (ino); \
+	((__ino) < NILFS_USER_INO && (__ino) != NILFS_ROOT_INO && \
+	 (__ino) != NILFS_SKETCH_INO); })
 
 /**
  * struct nilfs_transaction_info: context information for synchronization

@@ -452,6 +452,12 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
 	}
 
 	nilfs->ns_first_ino = le32_to_cpu(sbp->s_first_ino);
+	if (nilfs->ns_first_ino < NILFS_USER_INO) {
+		nilfs_err(nilfs->ns_sb,
+			  "too small lower limit for non-reserved inode numbers: %u",
+			  nilfs->ns_first_ino);
+		return -EINVAL;
+	}
 
 	nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
 	if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {

@@ -182,7 +182,7 @@ struct the_nilfs {
 	unsigned long		ns_nrsvsegs;
 	unsigned long		ns_first_data_block;
 	int			ns_inode_size;
-	int			ns_first_ino;
+	unsigned int		ns_first_ino;
 	u32			ns_crc_seed;
 
 	/* /sys/fs/<nilfs>/<device> */

@@ -200,7 +200,8 @@ static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
 		     (long)new_op->downcall.resp.statfs.files_avail);
 
 	buf->f_type = sb->s_magic;
-	memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
+	buf->f_fsid.val[0] = ORANGEFS_SB(sb)->fs_id;
+	buf->f_fsid.val[1] = ORANGEFS_SB(sb)->id;
 	buf->f_bsize = new_op->downcall.resp.statfs.block_size;
 	buf->f_namelen = ORANGEFS_NAME_MAX;

@@ -1806,6 +1806,7 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
 int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
 void sock_map_unhash(struct sock *sk);
+void sock_map_destroy(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
 static inline int sock_map_prog_update(struct bpf_map *map,

@@ -269,6 +269,18 @@
  */
 #define __section(section) __attribute__((__section__(section)))
 
+/*
+ * Optional: only supported since gcc >= 12
+ *
+ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+ */
+#if __has_attribute(__uninitialized__)
+# define __uninitialized __attribute__((__uninitialized__))
+#else
+# define __uninitialized
+#endif
+
 /*
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
  * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute

@@ -171,8 +171,6 @@ int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
 			   size_t hdr_bytes);
 int __efi_capsule_setup_info(struct capsule_info *cap_info);
 
-typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
-
 /*
  * Types and defines for Time Services
  */
@@ -609,10 +607,6 @@ efi_guid_to_str(efi_guid_t *guid, char *out)
 }
 
 extern void efi_init (void);
-extern void *efi_get_pal_addr (void);
-extern void efi_map_pal_code (void);
-extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
-extern void efi_gettimeofday (struct timespec64 *ts);
 #ifdef CONFIG_EFI
 extern void efi_enter_virtual_mode (void);	/* switch EFI to virtual mode, if possible */
 #else

@@ -84,7 +84,13 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
 {
 	const struct path *path = &file->f_path;
 
-	if (file->f_mode & FMODE_NONOTIFY)
+	/*
+	 * FMODE_NONOTIFY are fds generated by fanotify itself which should not
+	 * generate new events. We also don't want to generate events for
+	 * FMODE_PATH fds (involves open & close events) as they are just
+	 * handle creation / destruction events and not "real" file events.
+	 */
+	if (file->f_mode & (FMODE_NONOTIFY | FMODE_PATH))
 		return 0;
 
 	return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);

@@ -372,7 +372,7 @@ LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
 
 #ifdef CONFIG_AUDIT
 LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
-	 void **lsmrule)
+	 void **lsmrule, gfp_t gfp)
 LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
 LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
 LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)

@@ -1380,8 +1380,9 @@ static inline int subsection_map_index(unsigned long pfn)
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
 {
 	int idx = subsection_map_index(pfn);
+	struct mem_section_usage *usage = READ_ONCE(ms->usage);
 
-	return test_bit(idx, READ_ONCE(ms->usage)->subsection_map);
+	return usage ? test_bit(idx, usage->subsection_map) : 0;
 }
 #else
 static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn)
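
The fix above loads ms->usage exactly once into a local and NULL-checks that copy, so a concurrent memory hot-remove that clears the pointer can no longer slip in between the load used for the check and a second load used for the dereference. A generic user-space sketch of the "read the shared pointer once, then use the local copy" pattern (types and names are illustrative, not the kernel data structures):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct usage { unsigned long map; };

    static _Atomic(struct usage *) shared_usage;

    /* Patched shape: a single load, every use goes through the local copy. */
    static int bit_set(int idx)
    {
        struct usage *u = atomic_load(&shared_usage);

        return u ? (int)((u->map >> idx) & 1) : 0;
    }

    int main(void)
    {
        static struct usage u = { .map = 0x5 };

        atomic_store(&shared_usage, &u);
        printf("%d %d\n", bit_set(0), bit_set(1));   /* 1 0 */

        atomic_store(&shared_usage, NULL);           /* simulated hot-remove */
        printf("%d\n", bit_set(0));                  /* safely 0 */
        return 0;
    }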

@@ -1864,7 +1864,8 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer)
 
 #ifdef CONFIG_AUDIT
 #ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+			     gfp_t gfp);
 int security_audit_rule_known(struct audit_krule *krule);
 int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
 void security_audit_rule_free(void *lsmrule);
@@ -1872,7 +1873,7 @@ void security_audit_rule_free(void *lsmrule);
 #else
 
 static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
-					   void **lsmrule)
+					   void **lsmrule, gfp_t gfp)
 {
 	return 0;
 }

@@ -98,6 +98,7 @@ struct sk_psock {
 	spinlock_t			link_lock;
 	refcount_t			refcnt;
 	void (*saved_unhash)(struct sock *sk);
+	void (*saved_destroy)(struct sock *sk);
 	void (*saved_close)(struct sock *sk, long timeout);
 	void (*saved_write_space)(struct sock *sk);
 	struct proto			*sk_proto;

@@ -521,7 +521,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 			entry->rule.buflen += f_val;
 			f->lsm_str = str;
 			err = security_audit_rule_init(f->type, f->op, str,
-						       (void **)&f->lsm_rule);
+						       (void **)&f->lsm_rule,
+						       GFP_KERNEL);
 			/* Keep currently invalid fields around in case they
 			 * become valid after a policy reload. */
 			if (err == -EINVAL) {
@@ -790,7 +791,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
 
 	/* our own (refreshed) copy of lsm_rule */
 	ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
-				       (void **)&df->lsm_rule);
+				       (void **)&df->lsm_rule, GFP_KERNEL);
 	/* Keep currently invalid fields around in case they
 	 * become valid after a policy reload. */
 	if (ret == -EINVAL) {

@@ -2807,6 +2807,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 				continue;
 			if (type == STACK_MISC)
 				continue;
+			if (type == STACK_INVALID && env->allow_uninit_stack)
+				continue;
 			verbose(env, "invalid read from stack off %d+%d size %d\n",
 				off, i, size);
 			return -EACCES;
@@ -2844,6 +2846,8 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
 			continue;
 		if (type == STACK_ZERO)
 			continue;
+		if (type == STACK_INVALID && env->allow_uninit_stack)
+			continue;
 		verbose(env, "invalid read from stack off %d+%d size %d\n",
 			off, i, size);
 		return -EACCES;
@@ -4300,7 +4304,8 @@ static int check_stack_range_initialized(
 		stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE];
 		if (*stype == STACK_MISC)
 			goto mark;
-		if (*stype == STACK_ZERO) {
+		if ((*stype == STACK_ZERO) ||
+		    (*stype == STACK_INVALID && env->allow_uninit_stack)) {
 			if (clobber) {
 				/* helper can write anything into the stack */
 				*stype = STACK_MISC;
@@ -9492,6 +9497,10 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
 
+		if (env->allow_uninit_stack &&
+		    old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC)
+			continue;
+
 		/* explored stack has more populated slots than current stack
 		 * and these slots were used
 		 */

@@ -434,6 +434,8 @@ void mm_update_next_owner(struct mm_struct *mm)
 	 * Search through everything else, we should not get here often.
 	 */
 	for_each_process(g) {
+		if (atomic_read(&mm->mm_users) <= 1)
+			break;
 		if (g->flags & PF_KTHREAD)
 			continue;
 		for_each_thread(g, c) {

@@ -76,7 +76,6 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 	time_remaining = wait_for_completion_timeout(&try_completion,
 						     kunit_test_timeout());
 	if (time_remaining == 0) {
-		kunit_err(test, "try timed out\n");
 		try_catch->try_result = -ETIMEDOUT;
 	}
 
@@ -89,6 +88,8 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 		try_catch->try_result = 0;
 	else if (exit_code == -EINTR)
 		kunit_err(test, "wake_up_process() was never called\n");
+	else if (exit_code == -ETIMEDOUT)
+		kunit_err(test, "try timed out\n");
 	else if (exit_code)
 		kunit_err(test, "Unknown error: %d\n", exit_code);

@@ -435,13 +435,20 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
 	else
 		bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
 
-	if (bg_thresh >= thresh)
-		bg_thresh = thresh / 2;
 	tsk = current;
 	if (rt_task(tsk)) {
 		bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
 		thresh += thresh / 4 + global_wb_domain.dirty_limit / 32;
 	}
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	if (thresh > UINT_MAX)
+		thresh = UINT_MAX;
+	/* This makes sure bg_thresh is within 32-bits as well */
+	if (bg_thresh >= thresh)
+		bg_thresh = thresh / 2;
 	dtc->thresh = thresh;
 	dtc->bg_thresh = bg_thresh;
@@ -491,7 +498,11 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
 	if (rt_task(tsk))
 		dirty += dirty / 4;
 
-	return dirty;
+	/*
+	 * Dirty throttling logic assumes the limits in page units fit into
+	 * 32-bits. This gives 16TB dirty limits max which is hopefully enough.
+	 */
+	return min_t(unsigned long, dirty, UINT_MAX);
 }
 
 /**
@@ -527,10 +538,17 @@ int dirty_background_bytes_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
+	unsigned long old_bytes = dirty_background_bytes;
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret == 0 && write)
+	if (ret == 0 && write) {
+		if (DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE) >
+		    UINT_MAX) {
+			dirty_background_bytes = old_bytes;
+			return -ERANGE;
+		}
 		dirty_background_ratio = 0;
+	}
 
 	return ret;
 }
@@ -556,6 +574,10 @@ int dirty_bytes_handler(struct ctl_table *table, int write,
 
 	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
+		if (DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) > UINT_MAX) {
+			vm_dirty_bytes = old_bytes;
+			return -ERANGE;
+		}
 		writeback_set_ratelimit();
 		vm_dirty_ratio = 0;
 	}
@@ -1527,7 +1549,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	 */
 	dtc->wb_thresh = __wb_calc_thresh(dtc);
 	dtc->wb_bg_thresh = dtc->thresh ?
-		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+		div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
 	/*
 	 * In order to avoid the stacked BDI deadlock we need
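
The "16TB" figure quoted in the new comments is just the clamp arithmetic: the limits are kept within 32 bits of page units, and with the usual 4 KiB page size 2^32 pages * 2^12 bytes/page = 2^44 bytes = 16 TiB. A one-line check of that arithmetic (assuming 4 KiB pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pages = 1ULL << 32;   /* limits clamped to ~UINT_MAX pages */
        unsigned long long page_size = 4096;     /* assumed 4 KiB page size */

        printf("%llu TiB\n", (pages * page_size) >> 40);   /* prints 16 */
        return 0;
    }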

@@ -1014,13 +1014,19 @@ static void delayed_work(struct work_struct *work)
 	struct ceph_mon_client *monc =
 		container_of(work, struct ceph_mon_client, delayed_work.work);
 
-	dout("monc delayed_work\n");
 	mutex_lock(&monc->mutex);
+	dout("%s mon%d\n", __func__, monc->cur_mon);
+	if (monc->cur_mon < 0) {
+		goto out;
+	}
+
 	if (monc->hunting) {
 		dout("%s continuing hunt\n", __func__);
 		reopen_session(monc);
 	} else {
 		int is_auth = ceph_auth_is_authenticated(monc->auth);
+
+		dout("%s is_authed %d\n", __func__, is_auth);
 		if (ceph_con_keepalive_expired(&monc->con,
 					       CEPH_MONC_PING_TIMEOUT)) {
 			dout("monc keepalive timeout\n");
@@ -1045,6 +1051,8 @@ static void delayed_work(struct work_struct *work)
 		}
 	}
 	__schedule_delayed(monc);
+
+out:
 	mutex_unlock(&monc->mutex);
 }
@@ -1157,13 +1165,15 @@ EXPORT_SYMBOL(ceph_monc_init);
 void ceph_monc_stop(struct ceph_mon_client *monc)
 {
 	dout("stop\n");
-	cancel_delayed_work_sync(&monc->delayed_work);
 
 	mutex_lock(&monc->mutex);
 	__close_session(monc);
+	monc->hunting = false;
 	monc->cur_mon = -1;
 	mutex_unlock(&monc->mutex);
 
+	cancel_delayed_work_sync(&monc->delayed_work);
+
 	/*
 	 * flush msgr queue before we destroy ourselves to ensure that:
 	 *  - any work that references our embedded con is finished.

@@ -599,6 +599,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
 	psock->eval = __SK_NONE;
 	psock->sk_proto = prot;
 	psock->saved_unhash = prot->unhash;
+	psock->saved_destroy = prot->destroy;
 	psock->saved_close = prot->close;
 	psock->saved_write_space = sk->sk_write_space;

@@ -1566,6 +1566,28 @@ void sock_map_unhash(struct sock *sk)
 	saved_unhash(sk);
 }
 
+void sock_map_destroy(struct sock *sk)
+{
+	void (*saved_destroy)(struct sock *sk);
+	struct sk_psock *psock;
+
+	rcu_read_lock();
+	psock = sk_psock_get(sk);
+	if (unlikely(!psock)) {
+		rcu_read_unlock();
+		if (sk->sk_prot->destroy)
+			sk->sk_prot->destroy(sk);
+		return;
+	}
+
+	saved_destroy = psock->saved_destroy;
+	sock_map_remove_links(sk, psock);
+	rcu_read_unlock();
+	sk_psock_put(sk, psock);
+	saved_destroy(sk);
+}
+EXPORT_SYMBOL_GPL(sock_map_destroy);
+
 void sock_map_close(struct sock *sk, long timeout)
 {
 	void (*saved_close)(struct sock *sk, long timeout);

@@ -36,6 +36,8 @@ static int linkstate_get_sqi(struct net_device *dev)
 	mutex_lock(&phydev->lock);
 	if (!phydev->drv || !phydev->drv->get_sqi)
 		ret = -EOPNOTSUPP;
+	else if (!phydev->link)
+		ret = -ENETDOWN;
 	else
 		ret = phydev->drv->get_sqi(phydev);
 	mutex_unlock(&phydev->lock);
@@ -54,6 +56,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
 	mutex_lock(&phydev->lock);
 	if (!phydev->drv || !phydev->drv->get_sqi_max)
 		ret = -EOPNOTSUPP;
+	else if (!phydev->link)
+		ret = -ENETDOWN;
 	else
 		ret = phydev->drv->get_sqi_max(phydev);
 	mutex_unlock(&phydev->lock);
@@ -61,6 +65,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
 	return ret;
 };
 
+static bool linkstate_sqi_critical_error(int sqi)
+{
+	return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
+}
+
+static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
+{
+	return data->sqi >= 0 && data->sqi_max >= 0 &&
+	       data->sqi <= data->sqi_max;
+}
+
 static int linkstate_get_link_ext_state(struct net_device *dev,
 					struct linkstate_reply_data *data)
 {
@@ -92,12 +107,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
 	data->link = __ethtool_get_link(dev);
 
 	ret = linkstate_get_sqi(dev);
-	if (ret < 0 && ret != -EOPNOTSUPP)
+	if (linkstate_sqi_critical_error(ret))
 		goto out;
 	data->sqi = ret;
 
 	ret = linkstate_get_sqi_max(dev);
-	if (ret < 0 && ret != -EOPNOTSUPP)
+	if (linkstate_sqi_critical_error(ret))
 		goto out;
 	data->sqi_max = ret;
 
@@ -122,11 +137,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
 	len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
 		+ 0;
 
-	if (data->sqi != -EOPNOTSUPP)
-		len += nla_total_size(sizeof(u32));
-
-	if (data->sqi_max != -EOPNOTSUPP)
-		len += nla_total_size(sizeof(u32));
+	if (linkstate_sqi_valid(data)) {
+		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
+		len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
+	}
 
 	if (data->link_ext_state_provided)
 		len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
@@ -147,13 +161,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
 	    nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
 		return -EMSGSIZE;
 
-	if (data->sqi != -EOPNOTSUPP &&
-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
-		return -EMSGSIZE;
+	if (linkstate_sqi_valid(data)) {
+		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+			return -EMSGSIZE;
 
-	if (data->sqi_max != -EOPNOTSUPP &&
-	    nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
-		return -EMSGSIZE;
+		if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
+				data->sqi_max))
+			return -EMSGSIZE;
+	}
 
 	if (data->link_ext_state_provided) {
 		if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,

@@ -1275,6 +1275,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb,
 	req.sdiag_family = AF_UNSPEC; /* compatibility */
 	req.sdiag_protocol = inet_diag_type2proto(cb->nlh->nlmsg_type);
 	req.idiag_ext = rc->idiag_ext;
+	req.pad = 0;
 	req.idiag_states = rc->idiag_states;
 	req.id = rc->id;
 
@@ -1290,6 +1291,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
 	req.sdiag_family = rc->idiag_family;
 	req.sdiag_protocol = inet_diag_type2proto(nlh->nlmsg_type);
 	req.idiag_ext = rc->idiag_ext;
+	req.pad = 0;
 	req.idiag_states = rc->idiag_states;
 	req.id = rc->id;

@@ -582,6 +582,7 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
 				   struct proto *base)
 {
 	prot[TCP_BPF_BASE]			= *base;
+	prot[TCP_BPF_BASE].destroy		= sock_map_destroy;
 	prot[TCP_BPF_BASE].close		= sock_map_close;
 	prot[TCP_BPF_BASE].recvmsg		= tcp_bpf_recvmsg;
 	prot[TCP_BPF_BASE].stream_memory_read	= tcp_bpf_stream_read;

@@ -2085,8 +2085,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
 static inline void tcp_init_undo(struct tcp_sock *tp)
 {
 	tp->undo_marker = tp->snd_una;
+
 	/* Retransmission still in flight may cause DSACKs later. */
-	tp->undo_retrans = tp->retrans_out ? : -1;
+	/* First, account for regular retransmits in flight: */
+	tp->undo_retrans = tp->retrans_out;
+	/* Next, account for TLP retransmits in flight: */
+	if (tp->tlp_high_seq && tp->tlp_retrans)
+		tp->undo_retrans++;
+	/* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
+	if (!tp->undo_retrans)
+		tp->undo_retrans = -1;
 }
 
 static bool tcp_is_rack(const struct sock *sk)
@@ -2165,6 +2173,7 @@ void tcp_enter_loss(struct sock *sk)
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 
 	tp->high_seq = tp->snd_nxt;
+	tp->tlp_high_seq = 0;
 	tcp_ecn_queue_cwr(tp);
 
 	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
@@ -2994,7 +3003,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 			return;
 
 		if (tcp_try_undo_dsack(sk))
-			tcp_try_keep_open(sk);
+			tcp_try_to_open(sk, flag);
 
 		tcp_identify_packet_loss(sk, ack_flag);
 		if (icsk->icsk_ca_state != TCP_CA_Recovery) {

(Not all changed files are shown in this diff; the remainder were omitted by the viewer because too many files changed.)