This is the 5.10.200 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmVLt8IACgkQONu9yGCS
 aT5SXw//ZJPwvVxnv21ldEE7vxdLSuSz/Ond0kV6A74ypBbZDsvqwvRbP0aCMoTx
 zJOARfQIyBMeMrbD2zKvJrhStMxJaFbzMMr2isMDdY0USRPTWkNu5Y18ZOnT3sgu
 099NOT2pEGNF5IlUxJPS64YTBelf8aD/Vv04AgbpPZ0Zb8n1Xmgg4jP0h5O7ERRU
 tevkdHg7xVrfV3dlI14sQbP4b/CHmS8sRFesUAf3qJ+p/7MwVeCk4u6GzlYnLEF6
 O/2K3hmUcvg0d1J9a7ESNohBrdHJXUpgjW+/hfDuMZ2XAW+2DQGT//aCdrilUbGU
 sWF8G/NKuDwd6bPmm8+u0ZJfwKHk3PAepQxCdQWlAkKeygx3RDjFYDwBjPiqSeJg
 fr1obUGLcwWf7CBrcI2d++oZX+R5Jw7W+c32xfRfHPlX5YFqwtayoKs/z7vhhqox
 VRLwCvgbj/WDSF9/H/4yR+pA7PmYLJiCTLWEDq3eEY9sMEYxWytfdU2D4UcHZ7gM
 iRxlDOkFLjjlYFPo6Nd004YAQW+K2tMJnaGHuv2Lk2Edt3iJLJcUE2+bRY+n2nsc
 DIZ+uZ97/kTRNomyG/XXMuWiJ+2HHgYBnYWDbEBggawhzQPJ/U6KIvMg44Rjn6e7
 89fv/HxuwcRvNSuxgAs5IL/w9KdwTbmcgCnAn/8wURE3/wH0ybA=
 =SpN0
 -----END PGP SIGNATURE-----

Merge 5.10.200 into android12-5.10-lts

Changes in 5.10.200
	selftests/ftrace: Add new test case which checks non unique symbol
	mcb: Return actual parsed size when reading chameleon table
	mcb-lpc: Reallocate memory region to avoid memory overlapping
	virtio_balloon: Fix endless deflation and inflation on arm64
	virtio-mmio: fix memory leak of vm_dev
	mm/page_alloc: correct start page when guard page debug is enabled
	mmc: renesas_sdhi: use custom mask for TMIO_MASK_ALL
	drm/dp_mst: Fix NULL deref in get_mst_branch_device_by_guid_helper()
	r8169: fix the KCSAN reported data-race in rtl_tx while reading TxDescArray[entry].opts1
	r8169: fix the KCSAN reported data race in rtl_rx while reading desc->opts1
	treewide: Spelling fix in comment
	igb: Fix potential memory leak in igb_add_ethtool_nfc_entry
	neighbour: fix various data-races
	igc: Fix ambiguity in the ethtool advertising
	net: ieee802154: adf7242: Fix some potential buffer overflow in adf7242_stats_show()
	net: usb: smsc95xx: Fix uninit-value access in smsc95xx_read_reg
	r8152: Increase USB control msg timeout to 5000ms as per spec
	r8152: Run the unload routine if we have errors during probe
	r8152: Cancel hw_phy_work if we have an error in probe
	r8152: Release firmware if we have an error in probe
	tcp: fix wrong RTO timeout when received SACK reneging
	gtp: uapi: fix GTPA_MAX
	gtp: fix fragmentation needed check with gso
	i40e: Fix wrong check for I40E_TXR_FLAGS_WB_ON_ITR
	kasan: print the original fault addr when access invalid shadow
	iio: exynos-adc: request second interrupt only when touchscreen mode is used
	i2c: muxes: i2c-mux-pinctrl: Use of_get_i2c_adapter_by_node()
	i2c: muxes: i2c-mux-gpmux: Use of_get_i2c_adapter_by_node()
	i2c: muxes: i2c-demux-pinctrl: Use of_get_i2c_adapter_by_node()
	i2c: stm32f7: Fix PEC handling in case of SMBUS transfers
	i2c: aspeed: Fix i2c bus hang in slave read
	tracing/kprobes: Fix the description of variable length arguments
	misc: fastrpc: Clean buffers on remote invocation failures
	nvmem: imx: correct nregs for i.MX6ULL
	nvmem: imx: correct nregs for i.MX6SLL
	nvmem: imx: correct nregs for i.MX6UL
	perf/core: Fix potential NULL deref
	sparc32: fix a braino in fault handling in csum_and_copy_..._user()
	clk: Sanitize possible_parent_show to Handle Return Value of of_clk_get_parent_name
	iio: adc: xilinx: use helper variable for &pdev->dev
	iio: adc: xilinx: use devm_krealloc() instead of kfree() + kcalloc()
	iio: adc: xilinx: use more devres helpers and remove remove()
	iio: adc: xilinx-xadc: Don't clobber preset voltage/temperature thresholds
	x86/i8259: Skip probing when ACPI/MADT advertises PCAT compatibility
	kobject: Fix slab-out-of-bounds in fill_kobj_path()
	smbdirect: missing rc checks while waiting for rdma events
	f2fs: fix to do sanity check on inode type during garbage collection
	x86/mm: Simplify RESERVE_BRK()
	x86/mm: Fix RESERVE_BRK() for older binutils
	ext4: add two helper functions extent_logical_end() and pa_logical_end()
	ext4: fix BUG in ext4_mb_new_inode_pa() due to overflow
	ext4: avoid overlapping preallocations due to overflow
	objtool/x86: add missing embedded_insn check
	driver: platform: Add helper for safer setting of driver_override
	rpmsg: Constify local variable in field store macro
	rpmsg: Fix kfree() of static memory on setting driver_override
	rpmsg: Fix calling device_lock() on non-initialized device
	rpmsg: glink: Release driver_override
	rpmsg: Fix possible refcount leak in rpmsg_register_device_override()
	x86: Fix .brk attribute in linker script
	net: sched: cls_u32: Fix allocation size in u32_init()
	irqchip/stm32-exti: add missing DT IRQ flag translation
	dmaengine: ste_dma40: Fix PM disable depth imbalance in d40_probe
	Input: synaptics-rmi4 - handle reset delay when using SMBus transport
	fbdev: atyfb: only use ioremap_uc() on i386 and ia64
	spi: npcm-fiu: Fix UMA reads when dummy.nbytes == 0
	netfilter: nfnetlink_log: silence bogus compiler warning
	ASoC: rt5650: fix the wrong result of key button
	fbdev: uvesafb: Call cn_del_callback() at the end of uvesafb_exit()
	scsi: mpt3sas: Fix in error path
	platform/mellanox: mlxbf-tmfifo: Fix a warning message
	net: chelsio: cxgb4: add an error code check in t4_load_phy_fw
	powerpc/mm: Fix boot crash with FLATMEM
	can: isotp: change error format from decimal to symbolic error names
	can: isotp: add symbolic error message to isotp_module_init()
	can: isotp: Add error message if txqueuelen is too small
	can: isotp: set max PDU size to 64 kByte
	can: isotp: isotp_bind(): return -EINVAL on incorrect CAN ID formatting
	can: isotp: check CAN address family in isotp_bind()
	can: isotp: handle wait_event_interruptible() return values
	can: isotp: add local echo tx processing and tx without FC
	can: isotp: isotp_bind(): do not validate unused address information
	can: isotp: isotp_sendmsg(): fix TX state detection and wait behavior
	PCI: Prevent xHCI driver from claiming AMD VanGogh USB3 DRD device
	usb: storage: set 1.50 as the lower bcdDevice for older "Super Top" compatibility
	usb: raw-gadget: properly handle interrupted requests
	tty: 8250: Remove UC-257 and UC-431
	tty: 8250: Add support for additional Brainboxes UC cards
	tty: 8250: Add support for Brainboxes UP cards
	tty: 8250: Add support for Intashield IS-100
	ALSA: hda: intel-dsp-config: Fix JSL Chromebook quirk detection
	Linux 5.10.200

Change-Id: I064cfd04d19db1e81d073b02c00258d3ebac2aa3
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2023-11-26 14:07:49 +00:00
commit 9cba6b5683
79 changed files with 1050 additions and 570 deletions


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 199
SUBLEVEL = 200
EXTRAVERSION =
NAME = Dare mighty things


@ -906,6 +906,8 @@ void __init setup_arch(char **cmdline_p)
/* Parse memory topology */
mem_topology_setup();
/* Set max_mapnr before paging_init() */
set_max_mapnr(max_pfn);
/*
* Release secondary cpus out of their spinloops at 0x60 now that


@ -293,7 +293,6 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
set_max_mapnr(max_pfn);
kasan_late_init();


@ -463,5 +463,5 @@ ccslow: cmp %g1, 0
* we only bother with faults on loads... */
cc_fault:
ret
retl
clr %o0


@ -67,6 +67,8 @@ struct legacy_pic {
void (*make_irq)(unsigned int irq);
};
void legacy_pic_pcat_compat(void);
extern struct legacy_pic *legacy_pic;
extern struct legacy_pic null_legacy_pic;


@ -108,27 +108,16 @@ extern unsigned long _brk_end;
void *extend_brk(size_t size, size_t align);
/*
* Reserve space in the brk section. The name must be unique within
* the file, and somewhat descriptive. The size is in bytes. Must be
* used at file scope.
* Reserve space in the .brk section, which is a block of memory from which the
* caller is allowed to allocate very early (before even memblock is available)
* by calling extend_brk(). All allocated memory will be eventually converted
* to memblock. Any leftover unallocated memory will be freed.
*
* (This uses a temp function to wrap the asm so we can pass it the
* size parameter; otherwise we wouldn't be able to. We can't use a
* "section" attribute on a normal variable because it always ends up
* being @progbits, which ends up allocating space in the vmlinux
* executable.)
* The size is in bytes.
*/
#define RESERVE_BRK(name,sz) \
static void __section(".discard.text") __used notrace \
__brk_reservation_fn_##name##__(void) { \
asm volatile ( \
".pushsection .brk_reservation,\"aw\",@nobits;" \
".brk." #name ":" \
" 1:.skip %c0;" \
" .size .brk." #name ", . - 1b;" \
" .popsection" \
: : "i" (sz)); \
}
#define RESERVE_BRK(name, size) \
__section(".bss..brk") __aligned(1) __used \
static char __brk_##name[size]
/* Helper for reserving space for arrays of things */
#define RESERVE_BRK_ARRAY(type, name, entries) \
@ -146,12 +135,19 @@ asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */
#endif /* _SETUP */
#else
#define RESERVE_BRK(name,sz) \
.pushsection .brk_reservation,"aw",@nobits; \
.brk.name: \
1: .skip sz; \
.size .brk.name,.-1b; \
#else /* __ASSEMBLY */
.macro __RESERVE_BRK name, size
.pushsection .bss..brk, "aw"
SYM_DATA_START(__brk_\name)
.skip \size
SYM_DATA_END(__brk_\name)
.popsection
.endm
#define RESERVE_BRK(name, size) __RESERVE_BRK name, size
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SETUP_H */
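For orientation, a minimal usage sketch of the reworked macro (illustrative only; the helper function name below is made up, while the dmi_alloc reservation mirrors the one in setup.c shown further down): RESERVE_BRK() now just places a static char array in the .bss..brk section, and early boot code carves allocations out of that pool with extend_brk().

	/* file scope: reserve 64 KiB of early .brk memory, as setup.c does for DMI */
	RESERVE_BRK(dmi_alloc, 65536);

	/* hypothetical early-boot helper, for illustration only */
	static void __init example_early_alloc(void)
	{
		/* carve out 4 KiB, page-aligned, before memblock is usable */
		void *buf = extend_brk(4096, PAGE_SIZE);

		/* ... fill buf; leftover .brk space is handed back later in boot ... */
	}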


@ -141,6 +141,9 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt->address);
}
if (madt->flags & ACPI_MADT_PCAT_COMPAT)
legacy_pic_pcat_compat();
default_acpi_madt_oem_check(madt->header.oem_id,
madt->header.oem_table_id);


@ -32,6 +32,7 @@
*/
static void init_8259A(int auto_eoi);
static bool pcat_compat __ro_after_init;
static int i8259A_auto_eoi;
DEFINE_RAW_SPINLOCK(i8259A_lock);
@ -301,15 +302,32 @@ static void unmask_8259A(void)
static int probe_8259A(void)
{
unsigned char new_val, probe_val = ~(1 << PIC_CASCADE_IR);
unsigned long flags;
unsigned char probe_val = ~(1 << PIC_CASCADE_IR);
unsigned char new_val;
/*
* Check to see if we have a PIC.
* Mask all except the cascade and read
* back the value we just wrote. If we don't
* have a PIC, we will read 0xff as opposed to the
* value we wrote.
* If MADT has the PCAT_COMPAT flag set, then do not bother probing
* for the PIC. Some BIOSes leave the PIC uninitialized and probing
* fails.
*
* Right now this causes problems as quite some code depends on
* nr_legacy_irqs() > 0 or has_legacy_pic() == true. This is silly
* when the system has an IO/APIC because then PIC is not required
* at all, except for really old machines where the timer interrupt
* must be routed through the PIC. So just pretend that the PIC is
* there and let legacy_pic->init() initialize it for nothing.
*
* Alternatively this could just try to initialize the PIC and
* repeat the probe, but for cases where there is no PIC that's
* just pointless.
*/
if (pcat_compat)
return nr_legacy_irqs();
/*
* Check to see if we have a PIC. Mask all except the cascade and
* read back the value we just wrote. If we don't have a PIC, we
* will read 0xff as opposed to the value we wrote.
*/
raw_spin_lock_irqsave(&i8259A_lock, flags);
@ -431,5 +449,9 @@ static int __init i8259A_init_ops(void)
return 0;
}
device_initcall(i8259A_init_ops);
void __init legacy_pic_pcat_compat(void)
{
pcat_compat = true;
}


@ -64,11 +64,6 @@ RESERVE_BRK(dmi_alloc, 65536);
#endif
/*
* Range of the BSS area. The size of the BSS area is determined
* at link time, with RESERVE_BRK*() facility reserving additional
* chunks.
*/
unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;


@ -414,7 +414,7 @@ SECTIONS
.brk : AT(ADDR(.brk) - LOAD_OFFSET) {
__brk_base = .;
. += 64 * 1024; /* 64k alignment slop space */
*(.brk_reservation) /* areas brk users have reserved */
*(.bss..brk) /* areas brk users have reserved */
__brk_limit = .;
}


@ -30,6 +30,75 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
/**
* driver_set_override() - Helper to set or clear driver override.
* @dev: Device to change
* @override: Address of string to change (e.g. &device->driver_override);
* The contents will be freed and hold newly allocated override.
* @s: NUL-terminated string, new driver name to force a match, pass empty
* string to clear it ("" or "\n", where the latter is only for sysfs
* interface).
* @len: length of @s
*
* Helper to set or clear driver override in a device, intended for the cases
* when the driver_override field is allocated by driver/bus code.
*
* Returns: 0 on success or a negative error code on failure.
*/
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len)
{
const char *new, *old;
char *cp;
if (!override || !s)
return -EINVAL;
/*
* The stored value will be used in sysfs show callback (sysfs_emit()),
* which has a length limit of PAGE_SIZE and adds a trailing newline.
* Thus we can store one character less to avoid truncation during sysfs
* show.
*/
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
old = *override;
*override = NULL;
device_unlock(dev);
kfree(old);
return 0;
}
cp = strnchr(s, len, '\n');
if (cp)
len = cp - s;
new = kstrndup(s, len, GFP_KERNEL);
if (!new)
return -ENOMEM;
device_lock(dev);
old = *override;
if (cp != s) {
*override = new;
} else {
/* "\n" passed - clear override */
kfree(new);
*override = NULL;
}
device_unlock(dev);
kfree(old);
return 0;
}
EXPORT_SYMBOL_GPL(driver_set_override);
/**
* driver_for_each_device - Iterator for devices bound to a driver.
* @drv: Driver we're iterating.


@ -1046,31 +1046,11 @@ static ssize_t driver_override_store(struct device *dev,
const char *buf, size_t count)
{
struct platform_device *pdev = to_platform_device(dev);
char *driver_override, *old, *cp;
int ret;
/* We need to keep extra room for a newline */
if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
if (!driver_override)
return -ENOMEM;
cp = strchr(driver_override, '\n');
if (cp)
*cp = '\0';
device_lock(dev);
old = pdev->driver_override;
if (strlen(driver_override)) {
pdev->driver_override = driver_override;
} else {
kfree(driver_override);
pdev->driver_override = NULL;
}
device_unlock(dev);
kfree(old);
ret = driver_set_override(dev, &pdev->driver_override, buf, count);
if (ret)
return ret;
return count;
}
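
For completeness, the matching sysfs show callback that the PAGE_SIZE - 1 length check in driver_set_override() is guarding typically looks like the sketch below (not part of this hunk; written with sysfs_emit(), which the kernel-doc above assumes):

	static ssize_t driver_override_show(struct device *dev,
					    struct device_attribute *attr, char *buf)
	{
		struct platform_device *pdev = to_platform_device(dev);
		ssize_t len;

		/* take the same lock driver_set_override() holds while swapping the string */
		device_lock(dev);
		len = sysfs_emit(buf, "%s\n", pdev->driver_override);
		device_unlock(dev);

		return len;
	}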


@ -3246,6 +3246,7 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
unsigned int i, char terminator)
{
struct clk_core *parent;
const char *name = NULL;
/*
* Go through the following options to fetch a parent's name.
@ -3260,18 +3261,20 @@ static void possible_parent_show(struct seq_file *s, struct clk_core *core,
* registered (yet).
*/
parent = clk_core_get_parent_by_index(core, i);
if (parent)
if (parent) {
seq_puts(s, parent->name);
else if (core->parents[i].name)
} else if (core->parents[i].name) {
seq_puts(s, core->parents[i].name);
else if (core->parents[i].fw_name)
} else if (core->parents[i].fw_name) {
seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
else if (core->parents[i].index >= 0)
seq_puts(s,
of_clk_get_parent_name(core->of_node,
core->parents[i].index));
else
seq_puts(s, "(missing)");
} else {
if (core->parents[i].index >= 0)
name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
if (!name)
name = "(missing)";
seq_puts(s, name);
}
seq_putc(s, terminator);
}


@ -3698,6 +3698,7 @@ static int __init d40_probe(struct platform_device *pdev)
regulator_disable(base->lcpa_regulator);
regulator_put(base->lcpa_regulator);
}
pm_runtime_disable(base->dev);
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);


@ -2612,14 +2612,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
struct drm_dp_mst_branch *found_mstb;
struct drm_dp_mst_port *port;
if (!mstb)
return NULL;
if (memcmp(mstb->guid, guid, 16) == 0)
return mstb;
list_for_each_entry(port, &mstb->ports, next) {
if (!port->mstb)
continue;
found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
if (found_mstb)


@ -740,6 +740,8 @@ static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
static int aspeed_i2c_reg_slave(struct i2c_client *client)
@ -756,7 +758,6 @@ static int aspeed_i2c_reg_slave(struct i2c_client *client)
__aspeed_i2c_reg_slave(bus, client->addr);
bus->slave = client;
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
spin_unlock_irqrestore(&bus->lock, flags);
return 0;


@ -1042,9 +1042,10 @@ static int stm32f7_i2c_smbus_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
/* Configure PEC */
if ((flags & I2C_CLIENT_PEC) && f7_msg->size != I2C_SMBUS_QUICK) {
cr1 |= STM32F7_I2C_CR1_PECEN;
cr2 |= STM32F7_I2C_CR2_PECBYTE;
if (!f7_msg->read_write)
if (!f7_msg->read_write) {
cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
}
} else {
cr1 &= ~STM32F7_I2C_CR1_PECEN;
cr2 &= ~STM32F7_I2C_CR2_PECBYTE;
@ -1132,8 +1133,10 @@ static void stm32f7_i2c_smbus_rep_start(struct stm32f7_i2c_dev *i2c_dev)
f7_msg->stop = true;
/* Add one byte for PEC if needed */
if (cr1 & STM32F7_I2C_CR1_PECEN)
if (cr1 & STM32F7_I2C_CR1_PECEN) {
cr2 |= STM32F7_I2C_CR2_PECBYTE;
f7_msg->count++;
}
/* Set number of bytes to be transferred */
cr2 &= ~(STM32F7_I2C_CR2_NBYTES_MASK);


@ -61,7 +61,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne
if (ret)
goto err;
adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
adap = of_get_i2c_adapter_by_node(priv->chan[new_chan].parent_np);
if (!adap) {
ret = -ENODEV;
goto err_with_revert;


@ -52,7 +52,7 @@ static struct i2c_adapter *mux_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
parent = of_find_i2c_adapter_by_node(parent_np);
parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);


@ -62,7 +62,7 @@ static struct i2c_adapter *i2c_mux_pinctrl_parent_adapter(struct device *dev)
dev_err(dev, "Cannot parse i2c-parent\n");
return ERR_PTR(-ENODEV);
}
parent = of_find_i2c_adapter_by_node(parent_np);
parent = of_get_i2c_adapter_by_node(parent_np);
of_node_put(parent_np);
if (!parent)
return ERR_PTR(-EPROBE_DEFER);


@ -821,16 +821,26 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
/* leave out any TS related code if unreachable */
if (IS_REACHABLE(CONFIG_INPUT)) {
has_ts = of_property_read_bool(pdev->dev.of_node,
"has-touchscreen") || pdata;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
irq = platform_get_irq(pdev, 1);
if (irq == -EPROBE_DEFER)
return irq;
if (has_ts) {
irq = platform_get_irq(pdev, 1);
if (irq == -EPROBE_DEFER)
return irq;
info->tsirq = irq;
info->tsirq = irq;
} else {
info->tsirq = -1;
}
info->dev = &pdev->dev;
@ -895,12 +905,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
/* leave out any TS related code if unreachable */
if (IS_REACHABLE(CONFIG_INPUT)) {
has_ts = of_property_read_bool(pdev->dev.of_node,
"has-touchscreen") || pdata;
}
if (pdata)
info->delay = pdata->delay;
else


@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@ -585,15 +586,22 @@ static int xadc_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *mask)
{
struct xadc *xadc = iio_priv(indio_dev);
unsigned int n;
size_t new_size, n;
void *data;
n = bitmap_weight(mask, indio_dev->masklength);
kfree(xadc->data);
xadc->data = kcalloc(n, sizeof(*xadc->data), GFP_KERNEL);
if (!xadc->data)
if (check_mul_overflow(n, sizeof(*xadc->data), &new_size))
return -ENOMEM;
data = devm_krealloc(indio_dev->dev.parent, xadc->data,
new_size, GFP_KERNEL);
if (!data)
return -ENOMEM;
memset(data, 0, new_size);
xadc->data = data;
return 0;
}
@ -705,11 +713,12 @@ static const struct iio_trigger_ops xadc_trigger_ops = {
static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
const char *name)
{
struct device *dev = indio_dev->dev.parent;
struct iio_trigger *trig;
int ret;
trig = iio_trigger_alloc("%s%d-%s", indio_dev->name,
indio_dev->id, name);
trig = devm_iio_trigger_alloc(dev, "%s%d-%s", indio_dev->name,
indio_dev->id, name);
if (trig == NULL)
return ERR_PTR(-ENOMEM);
@ -717,15 +726,11 @@ static struct iio_trigger *xadc_alloc_trigger(struct iio_dev *indio_dev,
trig->ops = &xadc_trigger_ops;
iio_trigger_set_drvdata(trig, iio_priv(indio_dev));
ret = iio_trigger_register(trig);
ret = devm_iio_trigger_register(dev, trig);
if (ret)
goto error_free_trig;
return ERR_PTR(ret);
return trig;
error_free_trig:
iio_trigger_free(trig);
return ERR_PTR(ret);
}
static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
@ -1184,8 +1189,23 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
return 0;
}
static void xadc_clk_disable_unprepare(void *data)
{
struct clk *clk = data;
clk_disable_unprepare(clk);
}
static void xadc_cancel_delayed_work(void *data)
{
struct delayed_work *work = data;
cancel_delayed_work_sync(work);
}
static int xadc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *id;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
@ -1195,10 +1215,10 @@ static int xadc_probe(struct platform_device *pdev)
int irq;
int i;
if (!pdev->dev.of_node)
if (!dev->of_node)
return -ENODEV;
id = of_match_node(xadc_of_match_table, pdev->dev.of_node);
id = of_match_node(xadc_of_match_table, dev->of_node);
if (!id)
return -EINVAL;
@ -1206,7 +1226,7 @@ static int xadc_probe(struct platform_device *pdev)
if (irq <= 0)
return -ENXIO;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*xadc));
indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
if (!indio_dev)
return -ENOMEM;
@ -1226,39 +1246,40 @@ static int xadc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
ret = xadc_parse_dt(indio_dev, pdev->dev.of_node, &conf0);
ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0);
if (ret)
return ret;
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = iio_triggered_buffer_setup(indio_dev,
&iio_pollfunc_store_time, &xadc_trigger_handler,
&xadc_buffer_ops);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
&xadc_trigger_handler,
&xadc_buffer_ops);
if (ret)
return ret;
xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
if (IS_ERR(xadc->convst_trigger)) {
ret = PTR_ERR(xadc->convst_trigger);
goto err_triggered_buffer_cleanup;
}
if (IS_ERR(xadc->convst_trigger))
return PTR_ERR(xadc->convst_trigger);
xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
"samplerate");
if (IS_ERR(xadc->samplerate_trigger)) {
ret = PTR_ERR(xadc->samplerate_trigger);
goto err_free_convst_trigger;
}
if (IS_ERR(xadc->samplerate_trigger))
return PTR_ERR(xadc->samplerate_trigger);
}
xadc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xadc->clk)) {
ret = PTR_ERR(xadc->clk);
goto err_free_samplerate_trigger;
}
xadc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(xadc->clk))
return PTR_ERR(xadc->clk);
ret = clk_prepare_enable(xadc->clk);
if (ret)
goto err_free_samplerate_trigger;
return ret;
ret = devm_add_action_or_reset(dev,
xadc_clk_disable_unprepare, xadc->clk);
if (ret)
return ret;
/*
* Make sure not to exceed the maximum samplerate since otherwise the
@ -1267,22 +1288,28 @@ static int xadc_probe(struct platform_device *pdev)
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
ret = xadc_read_samplerate(xadc);
if (ret < 0)
goto err_free_samplerate_trigger;
return ret;
if (ret > XADC_MAX_SAMPLERATE) {
ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
if (ret < 0)
goto err_free_samplerate_trigger;
return ret;
}
}
ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(&pdev->dev), indio_dev);
ret = devm_request_irq(dev, xadc->irq, xadc->ops->interrupt_handler, 0,
dev_name(dev), indio_dev);
if (ret)
goto err_clk_disable_unprepare;
return ret;
ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
&xadc->zynq_unmask_work);
if (ret)
return ret;
ret = xadc->ops->setup(pdev, indio_dev, xadc->irq);
if (ret)
goto err_free_irq;
return ret;
for (i = 0; i < 16; i++)
xadc_read_adc_reg(xadc, XADC_REG_THRESHOLD(i),
@ -1290,7 +1317,7 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_CONF0, conf0);
if (ret)
goto err_free_irq;
return ret;
bipolar_mask = 0;
for (i = 0; i < indio_dev->num_channels; i++) {
@ -1300,85 +1327,21 @@ static int xadc_probe(struct platform_device *pdev)
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(0), bipolar_mask);
if (ret)
goto err_free_irq;
return ret;
ret = xadc_write_adc_reg(xadc, XADC_REG_INPUT_MODE(1),
bipolar_mask >> 16);
if (ret)
goto err_free_irq;
/* Disable all alarms */
ret = xadc_update_adc_reg(xadc, XADC_REG_CONF1, XADC_CONF1_ALARM_MASK,
XADC_CONF1_ALARM_MASK);
if (ret)
goto err_free_irq;
/* Set thresholds to min/max */
for (i = 0; i < 16; i++) {
/*
* Set max voltage threshold and both temperature thresholds to
* 0xffff, min voltage threshold to 0.
*/
if (i % 8 < 4 || i == 7)
xadc->threshold[i] = 0xffff;
else
xadc->threshold[i] = 0;
ret = xadc_write_adc_reg(xadc, XADC_REG_THRESHOLD(i),
xadc->threshold[i]);
if (ret)
goto err_free_irq;
}
return ret;
/* Go to non-buffered mode */
xadc_postdisable(indio_dev);
ret = iio_device_register(indio_dev);
if (ret)
goto err_free_irq;
platform_set_drvdata(pdev, indio_dev);
return 0;
err_free_irq:
free_irq(xadc->irq, indio_dev);
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
err_clk_disable_unprepare:
clk_disable_unprepare(xadc->clk);
err_free_samplerate_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->samplerate_trigger);
err_free_convst_trigger:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_trigger_free(xadc->convst_trigger);
err_triggered_buffer_cleanup:
if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
iio_triggered_buffer_cleanup(indio_dev);
return ret;
}
static int xadc_remove(struct platform_device *pdev)
{
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct xadc *xadc = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
iio_trigger_free(xadc->samplerate_trigger);
iio_trigger_free(xadc->convst_trigger);
iio_triggered_buffer_cleanup(indio_dev);
}
free_irq(xadc->irq, indio_dev);
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
clk_disable_unprepare(xadc->clk);
kfree(xadc->data);
return 0;
return devm_iio_device_register(dev, indio_dev);
}
static struct platform_driver xadc_driver = {
.probe = xadc_probe,
.remove = xadc_remove,
.driver = {
.name = "xadc",
.of_match_table = xadc_of_match_table,


@ -1749,6 +1749,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
!SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
const struct rmi_device_platform_data pdata = {
.reset_delay_ms = 30,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,


@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
{
int retval;
struct i2c_client *client = rmi_smb->client;
int smbus_version;
/*
* psmouse driver resets the controller, we only need to wait
* to give the firmware chance to fully reinitialize.
*/
if (rmi_smb->xport.pdata.reset_delay_ms)
msleep(rmi_smb->xport.pdata.reset_delay_ms);
/* we need to get the smbus version to activate the touchpad */
retval = rmi_smb_get_version(rmi_smb);
if (retval < 0)
return retval;
smbus_version = rmi_smb_get_version(rmi_smb);
if (smbus_version < 0)
return smbus_version;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
}
return 0;
}
@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
rmi_smb_clear_state(rmi_smb);
/*
* we do not call the actual reset command, it has to be handled in
* PS/2 or there will be races between PS/2 and SMBus.
* PS/2 should ensure that a psmouse_reset is called before
* intializing the device and after it has been removed to be in a known
* state.
* We do not call the actual reset command, it has to be handled in
* PS/2 or there will be races between PS/2 and SMBus. PS/2 should
* ensure that a psmouse_reset is called before initializing the
* device and after it has been removed to be in a known state.
*/
return rmi_smb_enable_smbus_mode(rmi_smb);
}
@ -273,7 +289,6 @@ static int rmi_smb_probe(struct i2c_client *client,
{
struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
struct rmi_smb_xport *rmi_smb;
int smbus_version;
int error;
if (!pdata) {
@ -312,18 +327,9 @@ static int rmi_smb_probe(struct i2c_client *client,
rmi_smb->xport.proto_name = "smb";
rmi_smb->xport.ops = &rmi_smb_ops;
smbus_version = rmi_smb_get_version(rmi_smb);
if (smbus_version < 0)
return smbus_version;
rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
smbus_version);
if (smbus_version != 2 && smbus_version != 3) {
dev_err(&client->dev, "Unrecognized SMB version %d\n",
smbus_version);
return -ENODEV;
}
error = rmi_smb_enable_smbus_mode(rmi_smb);
if (error)
return error;
i2c_set_clientdata(client, rmi_smb);


@ -403,6 +403,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
.map = irq_map_generic_chip,
.alloc = stm32_exti_alloc,
.free = stm32_exti_free,
.xlate = irq_domain_xlate_twocell,
};
static void stm32_irq_ack(struct irq_data *d)


@ -23,7 +23,7 @@ static int mcb_lpc_probe(struct platform_device *pdev)
{
struct resource *res;
struct priv *priv;
int ret = 0;
int ret = 0, table_size;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@ -58,16 +58,43 @@ static int mcb_lpc_probe(struct platform_device *pdev)
ret = chameleon_parse_cells(priv->bus, priv->mem->start, priv->base);
if (ret < 0) {
mcb_release_bus(priv->bus);
return ret;
goto out_mcb_bus;
}
dev_dbg(&pdev->dev, "Found %d cells\n", ret);
table_size = ret;
if (table_size < CHAM_HEADER_SIZE) {
/* Release the previous resources */
devm_iounmap(&pdev->dev, priv->base);
devm_release_mem_region(&pdev->dev, priv->mem->start, resource_size(priv->mem));
/* Then, allocate it again with the actual chameleon table size */
res = devm_request_mem_region(&pdev->dev, priv->mem->start,
table_size,
KBUILD_MODNAME);
if (!res) {
dev_err(&pdev->dev, "Failed to request PCI memory\n");
ret = -EBUSY;
goto out_mcb_bus;
}
priv->base = devm_ioremap(&pdev->dev, priv->mem->start, table_size);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
goto out_mcb_bus;
}
platform_set_drvdata(pdev, priv);
}
mcb_bus_add_devices(priv->bus);
return 0;
out_mcb_bus:
mcb_release_bus(priv->bus);
return ret;
}
static int mcb_lpc_remove(struct platform_device *pdev)


@ -128,7 +128,7 @@ static void chameleon_parse_bar(void __iomem *base,
}
}
static int chameleon_get_bar(char __iomem **base, phys_addr_t mapbase,
static int chameleon_get_bar(void __iomem **base, phys_addr_t mapbase,
struct chameleon_bar **cb)
{
struct chameleon_bar *c;
@ -177,12 +177,13 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
{
struct chameleon_fpga_header *header;
struct chameleon_bar *cb;
char __iomem *p = base;
void __iomem *p = base;
int num_cells = 0;
uint32_t dtype;
int bar_count;
int ret;
u32 hsize;
u32 table_size;
hsize = sizeof(struct chameleon_fpga_header);
@ -237,12 +238,16 @@ int chameleon_parse_cells(struct mcb_bus *bus, phys_addr_t mapbase,
num_cells++;
}
if (num_cells == 0)
num_cells = -EINVAL;
if (num_cells == 0) {
ret = -EINVAL;
goto free_bar;
}
table_size = p - base;
pr_debug("%d cell(s) found. Chameleon table size: 0x%04x bytes\n", num_cells, table_size);
kfree(cb);
kfree(header);
return num_cells;
return table_size;
free_bar:
kfree(cb);


@ -990,11 +990,6 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
err = wait_for_completion_interruptible(&ctx->work);
}
if (err)
goto bail;
/* Check the response from remote dsp */
err = ctx->retval;
if (err)
goto bail;
@ -1007,6 +1002,11 @@ static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
goto bail;
}
/* Check the response from remote dsp */
err = ctx->retval;
if (err)
goto bail;
bail:
if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
/* We are done with this compute context */


@ -571,7 +571,7 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK,
TMIO_MASK_INIT_RCAR2);
TMIO_MASK_ALL_RCAR2);
}
#define SH_MOBILE_SDHI_MIN_TAP_ROW 3
@ -1012,6 +1012,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->ops.start_signal_voltage_switch =
renesas_sdhi_start_signal_voltage_switch;
host->sdcard_irq_setbit_mask = TMIO_STAT_ALWAYS_SET_27;
host->sdcard_irq_mask_all = TMIO_MASK_ALL_RCAR2;
host->reset = renesas_sdhi_reset;
} else {
host->sdcard_irq_mask_all = TMIO_MASK_ALL;


@ -97,8 +97,8 @@
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_INIT_RCAR2 0x8b7f031d /* Initial value for R-Car Gen2+ */
#define TMIO_MASK_ALL 0x837f031d
#define TMIO_MASK_ALL_RCAR2 0x8b7f031d
#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \


@ -3846,6 +3846,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
if (ret)
return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.


@ -2663,7 +2663,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
return budget;
}
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might


@ -2987,11 +2987,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
if (err)
goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
err_out_input_filter:
igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:


@ -1775,7 +1775,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
u32 advertising;
u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@ -1800,18 +1800,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
/* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
* We have to check this and convert it to ADVERTISE_2500_FULL
* (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
*/
if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
advertising |= ADVERTISE_2500_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
2500baseT_Full))
advertised |= ADVERTISE_2500_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
1000baseT_Full))
advertised |= ADVERTISE_1000_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
100baseT_Full))
advertised |= ADVERTISE_100_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
100baseT_Half))
advertised |= ADVERTISE_100_HALF;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
10baseT_Full))
advertised |= ADVERTISE_10_FULL;
if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
10baseT_Half))
advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
hw->phy.autoneg_advertised = advertising;
hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {


@ -4470,7 +4470,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
struct sk_buff *skb = tp->tx_skb[entry].skb;
u32 status;
status = le32_to_cpu(tp->TxDescArray[entry].opts1);
status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@ -4544,7 +4544,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
dma_addr_t addr;
u32 status;
status = le32_to_cpu(desc->opts1);
status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;


@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
/* reques to change default key index */
/* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;


@ -538,8 +538,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
if (iph->frag_off & htons(IP_DF) &&
((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));


@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
char debugfs_dir_name[DNAME_INLINE_LEN + 1];
strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
"adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);


@ -1042,7 +1042,7 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
value, index, tmp, size, 500);
value, index, tmp, size, USB_CTRL_GET_TIMEOUT);
if (ret < 0)
memset(data, 0xff, size);
else
@ -1065,7 +1065,7 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
value, index, tmp, size, 500);
value, index, tmp, size, USB_CTRL_SET_TIMEOUT);
kfree(tmp);
@ -6615,7 +6615,8 @@ static u8 rtl_get_version(struct usb_interface *intf)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
USB_CTRL_GET_TIMEOUT);
if (ret > 0)
ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
@ -6824,6 +6825,10 @@ static int rtl8152_probe(struct usb_interface *intf,
out1:
tasklet_kill(&tp->tx_tl);
cancel_delayed_work_sync(&tp->hw_phy_work);
if (tp->rtl_ops.unload)
tp->rtl_ops.unload(tp);
rtl8152_release_firmware(tp);
usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);


@ -86,7 +86,9 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
if (ret < 0) {
if (ret < 4) {
ret = ret < 0 ? ret : -ENODATA;
if (ret != -ENODEV)
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);


@ -467,7 +467,7 @@ static const struct ocotp_params imx6sl_params = {
};
static const struct ocotp_params imx6sll_params = {
.nregs = 128,
.nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
@ -481,14 +481,14 @@ static const struct ocotp_params imx6sx_params = {
};
static const struct ocotp_params imx6ul_params = {
.nregs = 128,
.nregs = 144,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,
};
static const struct ocotp_params imx6ull_params = {
.nregs = 64,
.nregs = 80,
.bank_address_words = 0,
.set_timing = imx_ocotp_set_imx6_timing,
.ctrl = IMX_OCOTP_BM_CTRL_DEFAULT,


@ -597,7 +597,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
/*
* In the AMD NL platform, this device ([1022:7912]) has a class code of
* PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will
* claim it.
* claim it. The same applies on the VanGogh platform device ([1022:163a]).
*
* But the dwc3 driver is a more specific driver for this device, and we'd
* prefer to use it instead of xhci. To prevent xhci from claiming the
@ -605,7 +605,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_
* defines as "USB device (not host controller)". The dwc3 driver can then
* claim it based on its Vendor and Device ID.
*/
static void quirk_amd_nl_class(struct pci_dev *pdev)
static void quirk_amd_dwc_class(struct pci_dev *pdev)
{
u32 class = pdev->class;
@ -615,7 +615,9 @@ static void quirk_amd_nl_class(struct pci_dev *pdev)
class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
quirk_amd_nl_class);
quirk_amd_dwc_class);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VANGOGH_USB,
quirk_amd_dwc_class);
/*
* Synopsys USB 3.x host HAPS platform has a class code of


@ -588,24 +588,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
if (is_rx) {
if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
else
memcpy(&data, addr + vring->cur_len,
sizeof(u64));
} else {
memcpy(&data, addr + vring->cur_len,
sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
if (!IS_VRING_DROP(vring)) {
if (is_rx)
if (is_rx) {
if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
else
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
} else {
data = 0;
memcpy(&data, addr + vring->cur_len,
len - vring->cur_len);
}
vring->cur_len = len;
}


@ -1379,6 +1379,7 @@ static void qcom_glink_rpdev_release(struct device *dev)
struct glink_channel *channel = to_glink_channel(rpdev->ept);
channel->rpdev = NULL;
kfree(rpdev->driver_override);
kfree(rpdev);
}
@ -1607,6 +1608,7 @@ static void qcom_glink_device_release(struct device *dev)
/* Release qcom_glink_alloc_channel() reference */
kref_put(&channel->refcount, qcom_glink_channel_release);
kfree(rpdev->driver_override);
kfree(rpdev);
}


@ -368,7 +368,8 @@ field##_store(struct device *dev, struct device_attribute *attr, \
const char *buf, size_t sz) \
{ \
struct rpmsg_device *rpdev = to_rpmsg_device(dev); \
char *new, *old; \
const char *old; \
char *new; \
\
new = kstrndup(buf, sz, GFP_KERNEL); \
if (!new) \
@ -565,24 +566,52 @@ static struct bus_type rpmsg_bus = {
.remove = rpmsg_dev_remove,
};
int rpmsg_register_device(struct rpmsg_device *rpdev)
/*
* A helper for registering rpmsg device with driver override and name.
* Drivers should not be using it, but instead rpmsg_register_device().
*/
int rpmsg_register_device_override(struct rpmsg_device *rpdev,
const char *driver_override)
{
struct device *dev = &rpdev->dev;
int ret;
if (driver_override)
strcpy(rpdev->id.name, driver_override);
dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
ret = device_register(&rpdev->dev);
device_initialize(dev);
if (driver_override) {
ret = driver_set_override(dev, &rpdev->driver_override,
driver_override,
strlen(driver_override));
if (ret) {
dev_err(dev, "device_set_override failed: %d\n", ret);
put_device(dev);
return ret;
}
}
ret = device_add(dev);
if (ret) {
dev_err(dev, "device_register failed: %d\n", ret);
dev_err(dev, "device_add failed: %d\n", ret);
kfree(rpdev->driver_override);
rpdev->driver_override = NULL;
put_device(&rpdev->dev);
}
return ret;
}
EXPORT_SYMBOL(rpmsg_register_device_override);
int rpmsg_register_device(struct rpmsg_device *rpdev)
{
return rpmsg_register_device_override(rpdev, NULL);
}
EXPORT_SYMBOL(rpmsg_register_device);
/*


@ -89,10 +89,7 @@ struct device *rpmsg_find_device(struct device *parent,
*/
static inline int rpmsg_chrdev_register_device(struct rpmsg_device *rpdev)
{
strcpy(rpdev->id.name, "rpmsg_chrdev");
rpdev->driver_override = "rpmsg_chrdev";
return rpmsg_register_device(rpdev);
return rpmsg_register_device_override(rpdev, "rpmsg_ctrl");
}
#endif


@ -11624,8 +11624,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
if (error)
if (error) {
mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
}
return error;
}


@ -334,8 +334,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
uma_cfg |= ilog2(op->cmd.buswidth);
uma_cfg |= ilog2(op->addr.buswidth)
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
uma_cfg |= ilog2(op->dummy.buswidth)
<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
if (op->dummy.nbytes)
uma_cfg |= ilog2(op->dummy.buswidth)
<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
uma_cfg |= ilog2(op->data.buswidth)
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;


@ -5151,6 +5151,12 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b1_bt_1_115200 },
/*
* IntaShield IS-100
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D60,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b2_1_115200 },
/*
* IntaShield IS-200
*/
@ -5178,10 +5184,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AA2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_1_115200 },
/*
* Brainboxes UC-257
* Brainboxes UC-253/UC-734
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0861,
{ PCI_VENDOR_ID_INTASHIELD, 0x0CA1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
@ -5196,6 +5206,66 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
pbn_b2_4_115200 },
/*
* Brainboxes UP-189
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0AC3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-200
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0B21,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B22,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B23,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-869
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0C01,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C03,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UP-880
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0C21,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C22,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0C23,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-268
*/
@ -5217,6 +5287,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08E2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08E3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-310
*/
@ -5227,6 +5305,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-313
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x08A1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x08A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5241,6 +5327,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
/*
* Brainboxes UC-346
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0B01,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0B02,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5252,6 +5342,10 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A82,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0A83,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
@ -5264,12 +5358,34 @@ static const struct pci_device_id serial_pci_tbl[] = {
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-420/431
* Brainboxes UC-420
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0921,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Brainboxes UC-607
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x09A1,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x09A2,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x09A3,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_2_115200 },
/*
* Brainboxes UC-836
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D41,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards
*/


@ -662,12 +662,12 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
if (WARN_ON(in && dev->ep0_out_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_unlock;
}
if (WARN_ON(!in && dev->ep0_in_pending)) {
ret = -ENODEV;
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_unlock;
}
dev->req->buf = data;
@ -682,7 +682,7 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&dev->ep0_done);
@ -691,13 +691,16 @@ static int raw_process_ep0_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(dev->gadget->ep0, dev->req);
wait_for_completion(&dev->ep0_done);
spin_lock_irqsave(&dev->lock, flags);
goto out_done;
if (dev->ep0_status == -ECONNRESET)
dev->ep0_status = -EINTR;
goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
ret = dev->ep0_status;
out_done:
out_interrupted:
ret = dev->ep0_status;
out_queue_failed:
dev->ep0_urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);
@ -1059,7 +1062,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
"fail, usb_ep_queue returned %d\n", ret);
spin_lock_irqsave(&dev->lock, flags);
dev->state = STATE_DEV_FAILED;
goto out_done;
goto out_queue_failed;
}
ret = wait_for_completion_interruptible(&done);
@ -1068,13 +1071,16 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
usb_ep_dequeue(ep->ep, ep->req);
wait_for_completion(&done);
spin_lock_irqsave(&dev->lock, flags);
goto out_done;
if (ep->status == -ECONNRESET)
ep->status = -EINTR;
goto out_interrupted;
}
spin_lock_irqsave(&dev->lock, flags);
ret = ep->status;
out_done:
out_interrupted:
ret = ep->status;
out_queue_failed:
ep->urb_queued = false;
out_unlock:
spin_unlock_irqrestore(&dev->lock, flags);


@ -19,7 +19,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
UNUSUAL_DEV( 0x14cd, 0x6116, 0x0150, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),


@ -3440,11 +3440,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
}
info->fix.mmio_start = raddr;
#if defined(__i386__) || defined(__ia64__)
/*
* By using strong UC we force the MTRR to never have an
* effect on the MMIO region on both non-PAT and PAT systems.
*/
par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
#else
par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
#endif
if (par->ati_regbase == NULL)
return -ENOMEM;


@ -1933,10 +1933,10 @@ static void uvesafb_exit(void)
}
}
cn_del_callback(&uvesafb_cn_id);
driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
platform_device_unregister(uvesafb_device);
platform_driver_unregister(&uvesafb_driver);
cn_del_callback(&uvesafb_cn_id);
}
module_exit(uvesafb_exit);


@ -402,7 +402,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
&num_pages);
target = num_pages;
/*
* Aligned up to guest page size to avoid inflating and deflating
* balloon endlessly.
*/
target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
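
As a worked example of the rounding described in the new comment (illustrative; it assumes the usual definition VIRTIO_BALLOON_PAGES_PER_PAGE = PAGE_SIZE >> 12, i.e. 16 when the guest runs with 64 KiB pages):

	/* host asks for 33 balloon pages (4 KiB units) on a 64 KiB-page arm64 guest */
	target = ALIGN(33, 16);	/* -> 48, a whole number of guest pages */

	/*
	 * Without the ALIGN, num_pages = 33 could never be hit exactly: the balloon
	 * only moves in whole guest pages (16 units at a time), so it would bounce
	 * between 32 and 48 and keep inflating/deflating forever.
	 */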


@ -596,14 +596,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vm_dev->base))
return PTR_ERR(vm_dev->base);
if (IS_ERR(vm_dev->base)) {
rc = PTR_ERR(vm_dev->base);
goto free_vm_dev;
}
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
return -ENODEV;
rc = -ENODEV;
goto free_vm_dev;
}
/* Check device version */
@ -611,7 +614,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
return -ENXIO;
rc = -ENXIO;
goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@ -620,7 +624,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
return -ENODEV;
rc = -ENODEV;
goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@ -650,6 +655,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
free_vm_dev:
kfree(vm_dev);
return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)


@ -571,8 +571,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
goto out;
}
wait_for_completion_interruptible_timeout(
rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
/* e.g. if interrupted returns -ERESTARTSYS */
if (rc < 0) {
log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
goto out;
}
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
@ -585,8 +590,13 @@ static struct rdma_cm_id *smbd_create_id(
log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
goto out;
}
wait_for_completion_interruptible_timeout(
rc = wait_for_completion_interruptible_timeout(
&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
/* e.g. if interrupted returns -ERESTARTSYS */
if (rc < 0) {
log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
goto out;
}
rc = info->ri_rc;
if (rc) {
log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
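
For reference, the return-value convention these new checks rely on (a sketch of the completion API, not taken from the patch; the hunks above only act on the interrupted, rc < 0 case): wait_for_completion_interruptible_timeout() returns a negative errno such as -ERESTARTSYS when a signal interrupts the wait, 0 when the timeout expires, and the number of jiffies left (> 0) when the completion fires.

	rc = wait_for_completion_interruptible_timeout(&done, timeout);
	if (rc < 0)		/* interrupted, e.g. -ERESTARTSYS */
		return rc;
	if (rc == 0)		/* timed out without completing */
		return -ETIMEDOUT;
	/* rc > 0: completed, with rc jiffies to spare */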


@ -3515,8 +3515,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
struct ext4_super_block *es = sbi->s_es;
int bsbits, max;
ext4_lblk_t end;
loff_t size, start_off;
loff_t size, start_off, end;
loff_t orig_size __maybe_unused;
ext4_lblk_t start;
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
@ -3545,7 +3544,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* first, let's learn actual file size
* given current request is allocated */
size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
size = extent_logical_end(sbi, &ac->ac_o_ex);
size = size << bsbits;
if (size < i_size_read(ac->ac_inode))
size = i_size_read(ac->ac_inode);
@ -3624,7 +3623,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* check we don't cross already preallocated blocks */
rcu_read_lock();
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
ext4_lblk_t pa_end;
loff_t pa_end;
if (pa->pa_deleted)
continue;
@ -3634,8 +3633,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
continue;
}
pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
pa->pa_len);
pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
/* PA must not overlap original request */
BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
@ -3664,12 +3662,11 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
/* XXX: extra loop to check we really don't overlap preallocations */
rcu_read_lock();
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
ext4_lblk_t pa_end;
loff_t pa_end;
spin_lock(&pa->pa_lock);
if (pa->pa_deleted == 0) {
pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb),
pa->pa_len);
pa_end = pa_logical_end(EXT4_SB(ac->ac_sb), pa);
BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
}
spin_unlock(&pa->pa_lock);
@ -3885,8 +3882,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* all fields in this condition don't change,
* so we can skip locking for them */
if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
ac->ac_o_ex.fe_logical >= (pa->pa_lstart +
EXT4_C2B(sbi, pa->pa_len)))
ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, pa))
continue;
/* non-extent files can't have physical blocks past 2^32 */
@ -4131,8 +4127,11 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa = ac->ac_pa;
if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
int new_bex_start;
int new_bex_end;
struct ext4_free_extent ex = {
.fe_logical = ac->ac_g_ex.fe_logical,
.fe_len = ac->ac_g_ex.fe_len,
};
loff_t orig_goal_end = extent_logical_end(sbi, &ex);
/* we can't allocate as much as normalizer wants.
* so, found space must get proper lstart
@ -4151,29 +4150,23 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
* still cover original start
* 3. Else, keep the best ex at start of original request.
*/
new_bex_end = ac->ac_g_ex.fe_logical +
EXT4_C2B(sbi, ac->ac_g_ex.fe_len);
new_bex_start = new_bex_end - EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (ac->ac_o_ex.fe_logical >= new_bex_start)
ex.fe_len = ac->ac_b_ex.fe_len;
ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len);
if (ac->ac_o_ex.fe_logical >= ex.fe_logical)
goto adjust_bex;
new_bex_start = ac->ac_g_ex.fe_logical;
new_bex_end =
new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
if (ac->ac_o_ex.fe_logical < new_bex_end)
ex.fe_logical = ac->ac_g_ex.fe_logical;
if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
goto adjust_bex;
new_bex_start = ac->ac_o_ex.fe_logical;
new_bex_end =
new_bex_start + EXT4_C2B(sbi, ac->ac_b_ex.fe_len);
ex.fe_logical = ac->ac_o_ex.fe_logical;
adjust_bex:
ac->ac_b_ex.fe_logical = new_bex_start;
ac->ac_b_ex.fe_logical = ex.fe_logical;
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
BUG_ON(new_bex_end > (ac->ac_g_ex.fe_logical +
EXT4_C2B(sbi, ac->ac_g_ex.fe_len)));
BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
/* preallocation can change ac_b_ex, thus we store actually
@ -4704,7 +4697,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
return;
size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len);
size = extent_logical_end(sbi, &ac->ac_o_ex);
isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1)
>> bsbits;

@ -199,6 +199,20 @@ static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
(fex->fe_start << EXT4_SB(sb)->s_cluster_bits);
}
static inline loff_t extent_logical_end(struct ext4_sb_info *sbi,
struct ext4_free_extent *fex)
{
/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
return (loff_t)fex->fe_logical + EXT4_C2B(sbi, fex->fe_len);
}
static inline loff_t pa_logical_end(struct ext4_sb_info *sbi,
struct ext4_prealloc_space *pa)
{
/* Use loff_t to avoid end exceeding ext4_lblk_t max. */
return (loff_t)pa->pa_lstart + EXT4_C2B(sbi, pa->pa_len);
}
typedef int (*ext4_mballoc_query_range_fn)(
struct super_block *sb,
ext4_group_t agno,
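
The two helpers exist because fe_logical/pa_lstart are 32-bit ext4_lblk_t values, so "start + length" can wrap for extents near the 2^32 block limit; doing the addition in loff_t keeps the end exact. A small demonstration of the wrap (types and values are illustrative):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t lblk_t;	/* stands in for the 32-bit ext4_lblk_t */

int main(void)
{
	lblk_t start = 0xfffff000u;	/* logical start near the 2^32 limit */
	lblk_t len = 0x2000u;		/* extent length in blocks */

	lblk_t end32 = start + len;		/* wraps to a tiny value */
	int64_t end64 = (int64_t)start + len;	/* what the loff_t helpers compute */

	printf("32-bit end: %u\n", end32);
	printf("64-bit end: %lld\n", (long long)end64);
	/* with the wrapped 32-bit end, "logical >= pa_end"-style checks in
	 * mballoc can give the wrong answer for extents near 2^32 */
	return 0;
}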

@ -1522,7 +1522,8 @@ static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
int err;
inode = f2fs_iget(sb, dni.ino);
if (IS_ERR(inode) || is_bad_inode(inode))
if (IS_ERR(inode) || is_bad_inode(inode) ||
special_file(inode->i_mode))
continue;
err = f2fs_gc_pinned_control(inode, gc_type, segno);
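
special_file() is true for character devices, block devices, FIFOs and sockets, which is why GC now skips such inodes. The same predicate can be reproduced with the standard mode macros (a userspace sketch, not the kernel helper itself):

#include <stdio.h>
#include <sys/stat.h>

static int is_special(mode_t mode)
{
	return S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) || S_ISSOCK(mode);
}

int main(void)
{
	printf("regular file: %d\n", is_special(S_IFREG | 0644));
	printf("char device : %d\n", is_special(S_IFCHR | 0600));
	printf("fifo        : %d\n", is_special(S_IFIFO | 0600));
	return 0;
}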

@ -155,6 +155,8 @@ extern int __must_check driver_create_file(struct device_driver *driver,
extern void driver_remove_file(struct device_driver *driver,
const struct driver_attribute *attr);
int driver_set_override(struct device *dev, const char **override,
const char *s, size_t len);
extern int __must_check driver_for_each_device(struct device_driver *drv,
struct device *start,
void *data,

@ -456,10 +456,10 @@ static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
#ifdef CONFIG_KASAN_INLINE
#ifdef CONFIG_KASAN
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
#else /* CONFIG_KASAN */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
#endif /* CONFIG_KASAN */
#endif /* LINUX_KASAN_H */

@ -555,6 +555,7 @@
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000

@ -31,7 +31,11 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
char *driver_override; /* Driver name to force a match */
/*
* Driver name to force a match. Do not set directly, because core
* frees it. Use driver_set_override() to set or clear it.
*/
const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
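
The comment documents the ownership rule behind the driver_override changes in this series: the core frees the stored string, so it must only be replaced through driver_set_override(), never by assigning a literal. A rough userspace analogue of such a setter (free old, duplicate new, NULL clears), with names and error handling chosen only for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int set_override(const char **slot, const char *s)
{
	char *new = NULL;

	if (s && *s) {
		new = strdup(s);
		if (!new)
			return -ENOMEM;
	}
	free((char *)*slot);	/* safe: the slot only ever holds heap memory */
	*slot = new;		/* NULL clears the override */
	return 0;
}

int main(void)
{
	const char *driver_override = NULL;

	set_override(&driver_override, "my-driver");
	printf("override = %s\n", driver_override);
	set_override(&driver_override, NULL);
	printf("override = %s\n", driver_override ? driver_override : "(none)");
	return 0;
}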

@ -42,7 +42,9 @@ struct rpmsg_channel_info {
* rpmsg_device - device that belong to the rpmsg bus
* @dev: the device struct
* @id: device id (used to match between rpmsg drivers and devices)
* @driver_override: driver name to force a match
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
* @src: local address
* @dst: destination address
* @ept: the rpmsg endpoint of this channel
@ -51,7 +53,7 @@ struct rpmsg_channel_info {
struct rpmsg_device {
struct device dev;
struct rpmsg_device_id id;
char *driver_override;
const char *driver_override;
u32 src;
u32 dst;
struct rpmsg_endpoint *ept;
@ -120,6 +122,8 @@ struct rpmsg_driver {
#if IS_ENABLED(CONFIG_RPMSG)
int rpmsg_register_device_override(struct rpmsg_device *rpdev,
const char *driver_override);
int register_rpmsg_device(struct rpmsg_device *dev);
void unregister_rpmsg_device(struct rpmsg_device *dev);
int __register_rpmsg_driver(struct rpmsg_driver *drv, struct module *owner);
@ -147,6 +151,12 @@ int rpmsg_set_signals(struct rpmsg_endpoint *ept, u32 set, u32 clear);
#else
static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
const char *driver_override)
{
return -ENXIO;
}
static inline int register_rpmsg_device(struct rpmsg_device *dev)
{
return -ENXIO;

@ -124,18 +124,19 @@ struct can_isotp_ll_options {
/* flags for isotp behaviour */
#define CAN_ISOTP_LISTEN_MODE 0x001 /* listen only (do not send FC) */
#define CAN_ISOTP_EXTEND_ADDR 0x002 /* enable extended addressing */
#define CAN_ISOTP_TX_PADDING 0x004 /* enable CAN frame padding tx path */
#define CAN_ISOTP_RX_PADDING 0x008 /* enable CAN frame padding rx path */
#define CAN_ISOTP_CHK_PAD_LEN 0x010 /* check received CAN frame padding */
#define CAN_ISOTP_CHK_PAD_DATA 0x020 /* check received CAN frame padding */
#define CAN_ISOTP_HALF_DUPLEX 0x040 /* half duplex error state handling */
#define CAN_ISOTP_FORCE_TXSTMIN 0x080 /* ignore stmin from received FC */
#define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */
#define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */
#define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */
#define CAN_ISOTP_LISTEN_MODE 0x0001 /* listen only (do not send FC) */
#define CAN_ISOTP_EXTEND_ADDR 0x0002 /* enable extended addressing */
#define CAN_ISOTP_TX_PADDING 0x0004 /* enable CAN frame padding tx path */
#define CAN_ISOTP_RX_PADDING 0x0008 /* enable CAN frame padding rx path */
#define CAN_ISOTP_CHK_PAD_LEN 0x0010 /* check received CAN frame padding */
#define CAN_ISOTP_CHK_PAD_DATA 0x0020 /* check received CAN frame padding */
#define CAN_ISOTP_HALF_DUPLEX 0x0040 /* half duplex error state handling */
#define CAN_ISOTP_FORCE_TXSTMIN 0x0080 /* ignore stmin from received FC */
#define CAN_ISOTP_FORCE_RXSTMIN 0x0100 /* ignore CFs depending on rx stmin */
#define CAN_ISOTP_RX_EXT_ADDR 0x0200 /* different rx extended addressing */
#define CAN_ISOTP_WAIT_TX_DONE 0x0400 /* wait for tx completion */
#define CAN_ISOTP_SF_BROADCAST 0x0800 /* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST 0x1000 /* 1-to-N transmission w/o FC */
/* protocol machine default values */

@ -32,6 +32,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
#define GTPA_MAX (__GTPA_MAX + 1)
#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
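
Netlink attribute enums reserve __FOO_MAX as a sentinel, so the highest valid attribute is __FOO_MAX - 1; the old "+ 1" definition overstated GTPA_MAX by two and oversized everything indexed by it. A hypothetical enum (not the real gtp.h attribute list) showing the counting:

#include <stdio.h>

enum demo_attrs {
	DEMO_UNSPEC = 0,
	DEMO_FOO,
	DEMO_BAR,
	DEMO_PAD,
	__DEMO_MAX,
};
#define DEMO_MAX (__DEMO_MAX - 1)

int main(void)
{
	/* attribute/policy arrays are conventionally sized MAX + 1 */
	printf("__DEMO_MAX = %d, DEMO_MAX = %d, array size = %d\n",
	       __DEMO_MAX, DEMO_MAX, DEMO_MAX + 1);
	return 0;
}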

@ -952,7 +952,7 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
* @name: The name of the kprobe event
* @loc: The location of the kprobe event
* @kretprobe: Is this a return probe?
* @args: Variable number of arg (pairs), one pair for each field
* @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_gen_cmd_start() wrapper, which automatically
@ -1025,7 +1025,7 @@ EXPORT_SYMBOL_GPL(__kprobe_event_gen_cmd_start);
/**
* __kprobe_event_add_fields - Add probe fields to a kprobe command from arg list
* @cmd: A pointer to the dynevent_cmd struct representing the new event
* @args: Variable number of arg (pairs), one pair for each field
* @...: Variable number of arg (pairs), one pair for each field
*
* NOTE: Users normally won't want to call this function directly, but
* rather use the kprobe_event_add_fields() wrapper, which

@ -144,7 +144,7 @@ static int get_kobj_path_length(struct kobject *kobj)
return length;
}
static void fill_kobj_path(struct kobject *kobj, char *path, int length)
static int fill_kobj_path(struct kobject *kobj, char *path, int length)
{
struct kobject *parent;
@ -153,12 +153,16 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
if (length <= 0)
return -EINVAL;
memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
kobj, __func__, path);
return 0;
}
/**
@ -173,13 +177,17 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
char *path;
int len;
retry:
len = get_kobj_path_length(kobj);
if (len == 0)
return NULL;
path = kzalloc(len, gfp_mask);
if (!path)
return NULL;
fill_kobj_path(kobj, path, len);
if (fill_kobj_path(kobj, path, len)) {
kfree(path);
goto retry;
}
return path;
}
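
The race this closes: the path length is computed first, the buffer is allocated, and only then is it filled; if a kobject is renamed to a longer name in between, the fill would overflow, so it now fails and the caller frees and retries. A single-level userspace toy of that compute/allocate/fill/retry loop (the rename is simulated, and the helper is a simplification of fill_kobj_path()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy fill: fails when the name no longer fits the buffer */
static int fill_path(const char *name, char *buf, int length)
{
	int cur = (int)strlen(name);

	--length;			/* reserve the terminating NUL */
	length -= cur;
	if (length <= 0)
		return -1;		/* name grew since the length was computed */
	memcpy(buf + length, name, cur);
	buf[--length] = '/';
	return 0;
}

int main(void)
{
	const char *name = "kobj";		/* name when the length is computed */
	const char *renamed = "kobject_long";	/* name after a concurrent rename */
	int raced = 1;				/* simulate the race exactly once */
	char *path;
	int len;

retry:
	len = (int)strlen(name) + 2;	/* '/' + name + NUL */
	path = calloc(1, len);
	if (!path)
		return 1;
	if (raced) {			/* the rename lands between the two steps */
		name = renamed;
		raced = 0;
	}
	if (fill_path(name, path, len)) {
		free(path);
		goto retry;		/* same pattern as the patched kobject_get_path() */
	}
	printf("path = '%s'\n", path);
	free(path);
	return 0;
}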

@ -434,9 +434,8 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write,
return ret;
}
#ifdef CONFIG_KASAN_INLINE
/*
* With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
* With CONFIG_KASAN, accesses to bogus pointers (outside the high
* canonical half of the address space) cause out-of-bounds shadow memory reads
* before the actual access. For addresses in the low canonical half of the
* address space, as well as most non-canonical addresses, that out-of-bounds
@ -472,4 +471,3 @@ void kasan_non_canonical_hook(unsigned long addr)
pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
}
#endif

@ -9210,6 +9210,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
next_page = page;
current_buddy = page + size;
}
page = next_page;
if (set_page_guard(zone, current_buddy, high, migratetype))
continue;
@ -9217,7 +9218,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
if (current_buddy != target) {
add_to_free_list(current_buddy, zone, high, migratetype);
set_buddy_order(current_buddy, high);
page = next_page;
}
}
}
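
The point of moving "page = next_page" is that the walk towards the target page must advance on every split, including iterations where the other half becomes a guard page and the rest of the loop body is skipped. A toy model of the halving walk with plain page indices (the numbers are arbitrary, and set_page_guard()/free-list handling is reduced to a printf):

#include <stdio.h>

/* split a 2^high block starting at 'page' until only the 2^low block
 * containing 'target' remains; the other half of each split is the
 * "buddy" that would be put back on a free list */
static void split(unsigned long page, unsigned long target, int high, int low)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		unsigned long next_page, buddy;

		high--;
		size >>= 1;

		if (target >= page + size) {	/* target is in the upper half */
			next_page = page + size;
			buddy = page;
		} else {			/* target is in the lower half */
			next_page = page;
			buddy = page + size;
		}
		page = next_page;	/* must happen unconditionally, or the walk
					 * gets stuck when the buddy is a guard page */
		printf("order %d: keep [%lu..%lu), buddy block at %lu\n",
		       high, page, page + size, buddy);
	}
}

int main(void)
{
	split(0, 5, 3, 0);	/* carve page 5 out of an order-3 block (pages 0..7) */
	return 0;
}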

@ -14,7 +14,6 @@
* - use CAN_ISOTP_WAIT_TX_DONE flag to block the caller until the PDU is sent
* - as we have static buffers the check whether the PDU fits into the buffer
* is done at FF reception time (no support for sending 'wait frames')
* - take care of the tx-queue-len as traffic shaping is still on the TODO list
*
* Copyright (c) 2020 Volkswagen Group Electronic Research
* All rights reserved.
@ -87,9 +86,9 @@ MODULE_ALIAS("can-proto-6");
/* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can
* take full 32 bit values (4 Gbyte). We would need some good concept to handle
* this between user space and kernel space. For now increase the static buffer
* to something about 8 kbyte to be able to test this new functionality.
* to something about 64 kbyte to be able to test this new functionality.
*/
#define MAX_MSG_LENGTH 8200
#define MAX_MSG_LENGTH 66000
/* N_PCI type values in bits 7-4 of N_PCI bytes */
#define N_PCI_SF 0x00 /* single frame */
@ -105,18 +104,23 @@ MODULE_ALIAS("can-proto-6");
#define FC_CONTENT_SZ 3 /* flow control content size in byte (FS/BS/STmin) */
#define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA)
#define ISOTP_ALL_BC_FLAGS (CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST)
/* Flow Status given in FC frame */
#define ISOTP_FC_CTS 0 /* clear to send */
#define ISOTP_FC_WT 1 /* wait */
#define ISOTP_FC_OVFLW 2 /* overflow */
#define ISOTP_FC_TIMEOUT 1 /* 1 sec */
#define ISOTP_ECHO_TIMEOUT 2 /* 2 secs */
enum {
ISOTP_IDLE = 0,
ISOTP_WAIT_FIRST_FC,
ISOTP_WAIT_FC,
ISOTP_WAIT_DATA,
ISOTP_SENDING
ISOTP_SENDING,
ISOTP_SHUTDOWN,
};
struct tpcon {
@ -137,13 +141,14 @@ struct isotp_sock {
canid_t rxid;
ktime_t tx_gap;
ktime_t lastrxcf_tstamp;
struct hrtimer rxtimer, txtimer;
struct hrtimer rxtimer, txtimer, txfrtimer;
struct can_isotp_options opt;
struct can_isotp_fc_options rxfc, txfc;
struct can_isotp_ll_options ll;
u32 frame_txtime;
u32 force_tx_stmin;
u32 force_rx_stmin;
u32 cfecho; /* consecutive frame echo tag */
struct tpcon rx, tx;
struct list_head notifier;
wait_queue_head_t wait;
@ -159,6 +164,17 @@ static inline struct isotp_sock *isotp_sk(const struct sock *sk)
return (struct isotp_sock *)sk;
}
static u32 isotp_bc_flags(struct isotp_sock *so)
{
return so->opt.flags & ISOTP_ALL_BC_FLAGS;
}
static bool isotp_register_rxid(struct isotp_sock *so)
{
/* no broadcast modes => register rx_id for FC frame reception */
return (isotp_bc_flags(so) == 0);
}
static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
@ -228,8 +244,8 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
can_send_ret = can_send(nskb, 1);
if (can_send_ret)
pr_notice_once("can-isotp: %s: can_send_ret %d\n",
__func__, can_send_ret);
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(can_send_ret));
dev_put(dev);
@ -240,7 +256,8 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
so->lastrxcf_tstamp = ktime_set(0, 0);
/* start rx timeout watchdog */
hrtimer_start(&so->rxtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
hrtimer_start(&so->rxtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return 0;
}
@ -326,6 +343,8 @@ static int check_pad(struct isotp_sock *so, struct canfd_frame *cf,
return 0;
}
static void isotp_send_cframe(struct isotp_sock *so);
static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
{
struct sock *sk = &so->sk;
@ -380,14 +399,15 @@ static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
case ISOTP_FC_CTS:
so->tx.bs = 0;
so->tx.state = ISOTP_SENDING;
/* start cyclic timer for sending CF frame */
hrtimer_start(&so->txtimer, so->tx_gap,
/* send CF frame and enable echo timeout handling */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
isotp_send_cframe(so);
break;
case ISOTP_FC_WT:
/* start timer to wait for next FC frame */
hrtimer_start(&so->txtimer, ktime_set(1, 0),
hrtimer_start(&so->txtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
break;
@ -582,7 +602,7 @@ static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
/* perform blocksize handling, if enabled */
if (!so->rxfc.bs || ++so->rx.bs < so->rxfc.bs) {
/* start rx timeout watchdog */
hrtimer_start(&so->rxtimer, ktime_set(1, 0),
hrtimer_start(&so->rxtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return 0;
}
@ -713,6 +733,63 @@ static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
cf->data[0] = so->opt.ext_address;
}
static void isotp_send_cframe(struct isotp_sock *so)
{
struct sock *sk = &so->sk;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
int can_send_ret;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev)
return;
skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), GFP_ATOMIC);
if (!skb) {
dev_put(dev);
return;
}
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
cf = (struct canfd_frame *)skb->data;
skb_put_zero(skb, so->ll.mtu);
/* create consecutive frame */
isotp_fill_dataframe(cf, so, ae, 0);
/* place consecutive frame N_PCI in appropriate index */
cf->data[ae] = N_PCI_CF | so->tx.sn++;
so->tx.sn %= 16;
so->tx.bs++;
cf->flags = so->ll.tx_flags;
skb->dev = dev;
can_skb_set_owner(skb, sk);
/* cfecho should have been zero'ed by init/isotp_rcv_echo() */
if (so->cfecho)
pr_notice_once("can-isotp: cfecho is %08X != 0\n", so->cfecho);
/* set consecutive frame echo tag */
so->cfecho = *(u32 *)cf->data;
/* send frame with local echo enabled */
can_send_ret = can_send(skb, 1);
if (can_send_ret) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(can_send_ret));
if (can_send_ret == -ENOBUFS)
pr_notice_once("can-isotp: tx queue is full\n");
}
dev_put(dev);
}
static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
int ae)
{
@ -746,7 +823,50 @@ static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
cf->data[i] = so->tx.buf[so->tx.idx++];
so->tx.sn = 1;
so->tx.state = ISOTP_WAIT_FIRST_FC;
}
static void isotp_rcv_echo(struct sk_buff *skb, void *data)
{
struct sock *sk = (struct sock *)data;
struct isotp_sock *so = isotp_sk(sk);
struct canfd_frame *cf = (struct canfd_frame *)skb->data;
/* only handle my own local echo CF/SF skb's (no FF!) */
if (skb->sk != sk || so->cfecho != *(u32 *)cf->data)
return;
/* cancel local echo timeout */
hrtimer_cancel(&so->txtimer);
/* local echo skb with consecutive frame has been consumed */
so->cfecho = 0;
if (so->tx.idx >= so->tx.len) {
/* we are done */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return;
}
if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
/* stop and wait for FC with timeout */
so->tx.state = ISOTP_WAIT_FC;
hrtimer_start(&so->txtimer, ktime_set(ISOTP_FC_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
return;
}
/* no gap between data frames needed => use burst mode */
if (!so->tx_gap) {
/* enable echo timeout handling */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
isotp_send_cframe(so);
return;
}
/* start timer to send next consecutive frame with correct delay */
hrtimer_start(&so->txfrtimer, so->tx_gap, HRTIMER_MODE_REL_SOFT);
}
static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
@ -754,135 +874,69 @@ static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txtimer);
struct sock *sk = &so->sk;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
enum hrtimer_restart restart = HRTIMER_NORESTART;
int can_send_ret;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
switch (so->tx.state) {
case ISOTP_WAIT_FC:
case ISOTP_WAIT_FIRST_FC:
/* don't handle timeouts in IDLE or SHUTDOWN state */
if (so->tx.state == ISOTP_IDLE || so->tx.state == ISOTP_SHUTDOWN)
return HRTIMER_NORESTART;
/* we did not get any flow control frame in time */
/* we did not get any flow control or echo frame in time */
/* report 'communication error on send' */
sk->sk_err = ECOMM;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
/* report 'communication error on send' */
sk->sk_err = ECOMM;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
/* reset tx state */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
break;
/* reset tx state */
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
case ISOTP_SENDING:
return HRTIMER_NORESTART;
}
/* push out the next segmented pdu */
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev)
break;
static enum hrtimer_restart isotp_txfr_timer_handler(struct hrtimer *hrtimer)
{
struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
txfrtimer);
isotp_tx_burst:
skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv),
GFP_ATOMIC);
if (!skb) {
dev_put(dev);
break;
}
/* start echo timeout handling and cover below protocol error */
hrtimer_start(&so->txtimer, ktime_set(ISOTP_ECHO_TIMEOUT, 0),
HRTIMER_MODE_REL_SOFT);
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
can_skb_prv(skb)->skbcnt = 0;
/* cfecho should be consumed by isotp_rcv_echo() here */
if (so->tx.state == ISOTP_SENDING && !so->cfecho)
isotp_send_cframe(so);
cf = (struct canfd_frame *)skb->data;
skb_put_zero(skb, so->ll.mtu);
/* create consecutive frame */
isotp_fill_dataframe(cf, so, ae, 0);
/* place consecutive frame N_PCI in appropriate index */
cf->data[ae] = N_PCI_CF | so->tx.sn++;
so->tx.sn %= 16;
so->tx.bs++;
cf->flags = so->ll.tx_flags;
skb->dev = dev;
can_skb_set_owner(skb, sk);
can_send_ret = can_send(skb, 1);
if (can_send_ret)
pr_notice_once("can-isotp: %s: can_send_ret %d\n",
__func__, can_send_ret);
if (so->tx.idx >= so->tx.len) {
/* we are done */
so->tx.state = ISOTP_IDLE;
dev_put(dev);
wake_up_interruptible(&so->wait);
break;
}
if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
/* stop and wait for FC */
so->tx.state = ISOTP_WAIT_FC;
dev_put(dev);
hrtimer_set_expires(&so->txtimer,
ktime_add(ktime_get(),
ktime_set(1, 0)));
restart = HRTIMER_RESTART;
break;
}
/* no gap between data frames needed => use burst mode */
if (!so->tx_gap)
goto isotp_tx_burst;
/* start timer to send next data frame with correct delay */
dev_put(dev);
hrtimer_set_expires(&so->txtimer,
ktime_add(ktime_get(), so->tx_gap));
restart = HRTIMER_RESTART;
break;
default:
WARN_ON_ONCE(1);
}
return restart;
return HRTIMER_NORESTART;
}
static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct isotp_sock *so = isotp_sk(sk);
u32 old_state = so->tx.state;
struct sk_buff *skb;
struct net_device *dev;
struct canfd_frame *cf;
int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
s64 hrtimer_sec = 0;
s64 hrtimer_sec = ISOTP_ECHO_TIMEOUT;
int off;
int err;
if (!so->bound)
if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
/* we do not support multiple buffers - for now */
if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE ||
wq_has_sleeper(&so->wait)) {
if (msg->msg_flags & MSG_DONTWAIT) {
err = -EAGAIN;
goto err_out;
}
while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
/* we do not support multiple buffers - for now */
if (msg->msg_flags & MSG_DONTWAIT)
return -EAGAIN;
if (so->tx.state == ISOTP_SHUTDOWN)
return -EADDRNOTAVAIL;
/* wait for complete transmission of current pdu */
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_out;
goto err_event_drop;
}
if (!size || size > MAX_MSG_LENGTH) {
@ -894,7 +948,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
/* does the given data fit into a single frame for SF_BROADCAST? */
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
if ((isotp_bc_flags(so) == CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
goto err_out_drop;
@ -927,6 +981,10 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
cf = (struct canfd_frame *)skb->data;
skb_put_zero(skb, so->ll.mtu);
/* cfecho should have been zero'ed by init / former isotp_rcv_echo() */
if (so->cfecho)
pr_notice_once("can-isotp: uninit cfecho %08X\n", so->cfecho);
/* check for single frame transmission depending on TX_DL */
if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
/* The message size generally fits into a SingleFrame - good.
@ -952,22 +1010,40 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
else
cf->data[ae] |= size;
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
/* don't enable wait queue for a single frame transmission */
wait_tx_done = 0;
/* set CF echo tag for isotp_rcv_echo() (SF-mode) */
so->cfecho = *(u32 *)cf->data;
} else {
/* send first frame and wait for FC */
/* send first frame */
isotp_create_fframe(cf, so, ae);
/* start timeout for FC */
hrtimer_sec = 1;
hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
HRTIMER_MODE_REL_SOFT);
if (isotp_bc_flags(so) == CAN_ISOTP_CF_BROADCAST) {
/* set timer for FC-less operation (STmin = 0) */
if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
so->tx_gap = ktime_set(0, so->force_tx_stmin);
else
so->tx_gap = ktime_set(0, so->frame_txtime);
/* disable wait for FCs due to activated block size */
so->txfc.bs = 0;
/* set CF echo tag for isotp_rcv_echo() (CF-mode) */
so->cfecho = *(u32 *)cf->data;
} else {
/* standard flow control check */
so->tx.state = ISOTP_WAIT_FIRST_FC;
/* start timeout for FC */
hrtimer_sec = ISOTP_FC_TIMEOUT;
/* no CF echo tag for isotp_rcv_echo() (FF-mode) */
so->cfecho = 0;
}
}
hrtimer_start(&so->txtimer, ktime_set(hrtimer_sec, 0),
HRTIMER_MODE_REL_SOFT);
/* send the first or only CAN frame */
cf->flags = so->ll.tx_flags;
@ -976,19 +1052,23 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
err = can_send(skb, 1);
dev_put(dev);
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %d\n",
__func__, err);
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
/* no transmission -> no timeout monitoring */
if (hrtimer_sec)
hrtimer_cancel(&so->txtimer);
hrtimer_cancel(&so->txtimer);
/* reset consecutive frame echo tag */
so->cfecho = 0;
goto err_out_drop;
}
if (wait_tx_done) {
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
if (err)
goto err_event_drop;
err = sock_error(sk);
if (err)
@ -997,13 +1077,15 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return size;
err_event_drop:
/* got signal: force tx state machine to be idle */
so->tx.state = ISOTP_IDLE;
hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
err_out_drop:
/* drop this PDU and unlock a potential wait queue */
old_state = ISOTP_IDLE;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
wake_up_interruptible(&so->wait);
so->tx.state = ISOTP_IDLE;
wake_up_interruptible(&so->wait);
return err;
}
@ -1067,7 +1149,13 @@ static int isotp_release(struct socket *sock)
net = sock_net(sk);
/* wait for complete transmission of current pdu */
wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
while (wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE) == 0 &&
cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SHUTDOWN) != ISOTP_IDLE)
;
/* force state machines to be idle also when a signal occurred */
so->tx.state = ISOTP_SHUTDOWN;
so->rx.state = ISOTP_IDLE;
spin_lock(&isotp_notifier_lock);
while (isotp_busy_notifier == so) {
@ -1081,21 +1169,27 @@ static int isotp_release(struct socket *sock)
lock_sock(sk);
/* remove current filters & unregister */
if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
if (so->bound) {
if (so->ifindex) {
struct net_device *dev;
dev = dev_get_by_index(net, so->ifindex);
if (dev) {
can_rx_unregister(net, dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
if (isotp_register_rxid(so))
can_rx_unregister(net, dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
can_rx_unregister(net, dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
dev_put(dev);
synchronize_rcu();
}
}
}
hrtimer_cancel(&so->txfrtimer);
hrtimer_cancel(&so->txtimer);
hrtimer_cancel(&so->rxtimer);
@ -1119,26 +1213,38 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
struct net *net = sock_net(sk);
int ifindex;
struct net_device *dev;
canid_t tx_id, rx_id;
canid_t tx_id = addr->can_addr.tp.tx_id;
canid_t rx_id = addr->can_addr.tp.rx_id;
int err = 0;
int notify_enetdown = 0;
int do_rx_reg = 1;
if (len < ISOTP_MIN_NAMELEN)
return -EINVAL;
/* sanitize tx/rx CAN identifiers */
tx_id = addr->can_addr.tp.tx_id;
if (addr->can_family != AF_CAN)
return -EINVAL;
/* sanitize tx CAN identifier */
if (tx_id & CAN_EFF_FLAG)
tx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
tx_id &= CAN_SFF_MASK;
rx_id = addr->can_addr.tp.rx_id;
if (rx_id & CAN_EFF_FLAG)
rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
rx_id &= CAN_SFF_MASK;
/* give feedback on wrong CAN-ID value */
if (tx_id != addr->can_addr.tp.tx_id)
return -EINVAL;
/* sanitize rx CAN identifier (if needed) */
if (isotp_register_rxid(so)) {
if (rx_id & CAN_EFF_FLAG)
rx_id &= (CAN_EFF_FLAG | CAN_EFF_MASK);
else
rx_id &= CAN_SFF_MASK;
/* give feedback on wrong CAN-ID value */
if (rx_id != addr->can_addr.tp.rx_id)
return -EINVAL;
}
if (!addr->can_ifindex)
return -ENODEV;
@ -1150,12 +1256,8 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
goto out;
}
/* do not register frame reception for functional addressing */
if (so->opt.flags & CAN_ISOTP_SF_BROADCAST)
do_rx_reg = 0;
/* do not validate rx address for functional addressing */
if (do_rx_reg && rx_id == tx_id) {
/* ensure different CAN IDs when the rx_id is to be registered */
if (isotp_register_rxid(so) && rx_id == tx_id) {
err = -EADDRNOTAVAIL;
goto out;
}
@ -1180,10 +1282,17 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
ifindex = dev->ifindex;
if (do_rx_reg)
if (isotp_register_rxid(so))
can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id),
isotp_rcv, sk, "isotp", sk);
/* no consecutive frame echo skb in flight */
so->cfecho = 0;
/* register for echo skb's */
can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id),
isotp_rcv_echo, sk, "isotpe", sk);
dev_put(dev);
/* switch to new settings */
@ -1244,6 +1353,15 @@ static int isotp_setsockopt_locked(struct socket *sock, int level, int optname,
if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
so->opt.rx_ext_address = so->opt.ext_address;
/* these broadcast flags are not allowed together */
if (isotp_bc_flags(so) == ISOTP_ALL_BC_FLAGS) {
/* CAN_ISOTP_SF_BROADCAST is prioritized */
so->opt.flags &= ~CAN_ISOTP_CF_BROADCAST;
/* give user feedback on wrong config attempt */
ret = -EINVAL;
}
/* check for frame_txtime changes (0 => no changes) */
if (so->opt.frame_txtime) {
if (so->opt.frame_txtime == CAN_ISOTP_FRAME_TXTIME_ZERO)
@ -1394,10 +1512,16 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg,
case NETDEV_UNREGISTER:
lock_sock(sk);
/* remove current filters & unregister */
if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST)))
can_rx_unregister(dev_net(dev), dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
if (so->bound) {
if (isotp_register_rxid(so))
can_rx_unregister(dev_net(dev), dev, so->rxid,
SINGLE_MASK(so->rxid),
isotp_rcv, sk);
can_rx_unregister(dev_net(dev), dev, so->txid,
SINGLE_MASK(so->txid),
isotp_rcv_echo, sk);
}
so->ifindex = 0;
so->bound = 0;
@ -1470,6 +1594,8 @@ static int isotp_init(struct sock *sk)
so->rxtimer.function = isotp_rx_timer_handler;
hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->txtimer.function = isotp_tx_timer_handler;
hrtimer_init(&so->txfrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
so->txfrtimer.function = isotp_txfr_timer_handler;
init_waitqueue_head(&so->wait);
spin_lock_init(&so->rx_lock);
@ -1550,7 +1676,7 @@ static __init int isotp_module_init(void)
err = can_proto_register(&isotp_can_proto);
if (err < 0)
pr_err("can: registration of isotp protocol failed\n");
pr_err("can: registration of isotp protocol failed %pe\n", ERR_PTR(err));
else
register_netdevice_notifier(&canisotp_notifier);
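
Two small building blocks of the rework above are easy to show in isolation: the two broadcast flags are mutually exclusive, and a socket only registers its rx_id for flow-control reception when neither is set. A sketch using the flag values from the header change earlier in this series (the helper names mirror isotp_bc_flags()/isotp_register_rxid(), but the code is a userspace illustration):

#include <stdio.h>
#include <stdint.h>

#define CAN_ISOTP_SF_BROADCAST	0x0800	/* 1-to-N functional addressing */
#define CAN_ISOTP_CF_BROADCAST	0x1000	/* 1-to-N transmission w/o FC */
#define ISOTP_ALL_BC_FLAGS (CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST)

static uint32_t bc_flags(uint32_t opt_flags)
{
	return opt_flags & ISOTP_ALL_BC_FLAGS;
}

static int register_rxid(uint32_t opt_flags)
{
	/* no broadcast mode => register rx_id for FC frame reception */
	return bc_flags(opt_flags) == 0;
}

int main(void)
{
	uint32_t both = CAN_ISOTP_SF_BROADCAST | CAN_ISOTP_CF_BROADCAST;

	if (bc_flags(both) == ISOTP_ALL_BC_FLAGS)
		printf("invalid: SF and CF broadcast are mutually exclusive\n");

	printf("plain socket registers rx_id: %d\n", register_rxid(0));
	printf("CF broadcast registers rx_id: %d\n",
	       register_rxid(CAN_ISOTP_CF_BROADCAST));
	return 0;
}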

@ -224,7 +224,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
int max_clean = atomic_read(&tbl->gc_entries) -
READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@ -253,7 +254,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
tbl->last_flush = jiffies;
WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@ -409,17 +410,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
entries >= tbl->gc_thresh3) {
gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
if (entries >= gc_thresh3 ||
(entries >= READ_ONCE(tbl->gc_thresh2) &&
time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@ -902,13 +903,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
tbl->last_rand = jiffies;
WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@ -2055,15 +2057,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
NDTA_PAD) ||
nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
long flush_delta = now - tbl->last_flush;
long rand_delta = now - tbl->last_rand;
long flush_delta = now - READ_ONCE(tbl->last_flush);
long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@ -2071,7 +2074,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
.ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock_bh();
@ -2094,17 +2097,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
ndst.ndts_allocs += st->allocs;
ndst.ndts_destroys += st->destroys;
ndst.ndts_hash_grows += st->hash_grows;
ndst.ndts_res_failed += st->res_failed;
ndst.ndts_lookups += st->lookups;
ndst.ndts_hits += st->hits;
ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
ndst.ndts_forced_gc_runs += st->forced_gc_runs;
ndst.ndts_table_fulls += st->table_fulls;
ndst.ndts_allocs += READ_ONCE(st->allocs);
ndst.ndts_destroys += READ_ONCE(st->destroys);
ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
ndst.ndts_res_failed += READ_ONCE(st->res_failed);
ndst.ndts_lookups += READ_ONCE(st->lookups);
ndst.ndts_hits += READ_ONCE(st->hits);
ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@ -2328,16 +2331,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
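
These annotations mark fields that are written under the table lock (or from netlink) but read locklessly elsewhere; READ_ONCE()/WRITE_ONCE() force single, untorn accesses so the races become benign and KCSAN stops flagging them. A userspace approximation using the usual volatile-cast definition (a simplification of the kernel macros, shown only to illustrate the reader/writer split):

#include <stdio.h>

#define READ_ONCE(x)	 (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

struct neigh_table_demo {
	unsigned long gc_thresh2;	/* updated via netlink, read locklessly */
	unsigned long last_flush;
};

static struct neigh_table_demo tbl = { .gc_thresh2 = 512 };

static long forced_gc_budget(unsigned long gc_entries)
{
	/* lockless reader: the annotation prevents torn or refetched loads */
	return (long)gc_entries - (long)READ_ONCE(tbl.gc_thresh2);
}

int main(void)
{
	WRITE_ONCE(tbl.gc_thresh2, 1024);	/* writer side, e.g. neightbl_set() */
	WRITE_ONCE(tbl.last_flush, 12345);
	printf("budget = %ld\n", forced_gc_budget(2000));
	return 0;
}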

@ -2186,16 +2186,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
static bool tcp_check_sack_reneging(struct sock *sk, int flag)
static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
if (flag & FLAG_SACK_RENEGING &&
flag & FLAG_SND_UNA_ADVANCED) {
if (*ack_flag & FLAG_SACK_RENEGING &&
*ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
*ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@ -2951,7 +2952,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
if (tcp_check_sack_reneging(sk, flag))
if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */

@ -683,8 +683,8 @@ nfulnl_log_packet(struct net *net,
unsigned int plen = 0;
struct nfnl_log_net *log = nfnl_log_pernet(net);
const struct nfnl_ct_hook *nfnl_ct = NULL;
enum ip_conntrack_info ctinfo = 0;
struct nf_conn *ct = NULL;
enum ip_conntrack_info ctinfo;
if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
li = li_user;

@ -364,7 +364,7 @@ static int u32_init(struct tcf_proto *tp)
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;

@ -329,6 +329,12 @@ static const struct config_entry config_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "Google"),
}
},
{
.ident = "Google firmware",
.matches = {
DMI_MATCH(DMI_BIOS_VERSION, "Google"),
}
},
{}
}
},

@ -3241,6 +3241,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);

@ -2192,7 +2192,7 @@ static bool is_special_call(struct instruction *insn)
if (!dest)
return false;
if (dest->fentry)
if (dest->fentry || dest->embedded_insn)
return true;
}

@ -0,0 +1,13 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Test failure of registering kprobe on non unique symbol
# requires: kprobe_events
SYMBOL='name_show'
# We skip this test on kernel where SYMBOL is unique or does not exist.
if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
exit_unsupported
fi
! echo "p:test_non_unique ${SYMBOL}" > kprobe_events