This is the 5.4.240 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmQtPbUACgkQONu9yGCS
aT462xAAhgh6J/KB4thj31ULLDPkX3zEuTLKIBlLK617NkKHF9k0XA6oAo9A2Fyy
t/MfXJvjmmL0kxsWqmoir0ZrPMifgdAK5hoxjXfvjWYtlYi3k0CXqXlg4YQ9Xalp
VU3O0RRli3KQxKK3u1PhnMMui7+l3pMELza3pUvyhCxRJx3K9loXkbrFZqdOvXEV
QuZ0ugKaxEwWnwStqIzIAUw+jt/13TwPrVQC6cBjkeOOItw2kNw1SPzrjptfHahG
M8fApzAKEgZPa49gDw95hZLawt4Acf5suITLgktBtzniFbj8c5A7jaYMFnaKVv3/
1zUhDu6VYZ5UfLzwYoLnmZ08vWVCTi8r28MJ/f1UdkPlhH9T6blos5RdGB9+4Al8
17KmOPSXLjzs36cSJFaj521earJSrcwvhsc/sc0ENk0U3CO1d0JkqZKClD2QRt82
z4yOlkd8j7SbpMgLdwwKbn0PqlK9YddCH7vXNCeMu9thA+Zjy7Z1zCWzENrh8btt
EcQls3VfHSue9avVhkb5THlhEjY8Pe4/x061YWCYqzamIg5/9xjmYTE8mJdXQVxs
zr2wgDikAfXHM440/yQgCiAYLT+gB7ewef+ubbhWVwMDviu8vTWlPAiLqnR7TUAp
CHvypmojDa6iLVnLGvPmIZTkChGCj0x3u7b5VDBJmlt/DLi8amw=
=Y+Jp
-----END PGP SIGNATURE-----

Merge 5.4.240 into android11-5.4-lts

Changes in 5.4.240
    net: tls: fix possible race condition between do_tls_getsockopt_conf() and do_tls_setsockopt_conf()
    power: supply: da9150: Fix use after free bug in da9150_charger_remove due to race condition
    iavf: fix inverted Rx hash condition leading to disabled hash
    iavf: fix non-tunneled IPv6 UDP packet type and hashing
    intel/igbvf: free irq on the error path in igbvf_request_msix()
    igbvf: Regard vf reset nack as success
    i2c: imx-lpi2c: check only for enabled interrupt flags
    scsi: scsi_dh_alua: Fix memleak for 'qdata' in alua_activate()
    net: usb: smsc95xx: Limit packet length to skb->len
    qed/qed_sriov: guard against NULL derefs from qed_iov_get_vf_info
    xirc2ps_cs: Fix use after free bug in xirc2ps_detach
    net: qcom/emac: Fix use after free bug in emac_remove due to race condition
    net/ps3_gelic_net: Fix RX sk_buff length
    net/ps3_gelic_net: Use dma_mapping_error
    keys: Do not cache key in task struct if key is requested from kernel thread
    bpf: Adjust insufficient default bpf_jit_limit
    net/mlx5: Read the TC mapping of all priorities on ETS query
    atm: idt77252: fix kmemleak when rmmod idt77252
    erspan: do not use skb_mac_header() in ndo_start_xmit()
    net/sonic: use dma_mapping_error() for error check
    nvme-tcp: fix nvme_tcp_term_pdu to match spec
    hvc/xen: prevent concurrent accesses to the shared ring
    net: mdio: thunder: Add missing fwnode_handle_put()
    Bluetooth: btqcomsmd: Fix command timeout after setting BD address
    Bluetooth: btsdio: fix use after free bug in btsdio_remove due to unfinished work
    platform/chrome: cros_ec_chardev: fix kernel data leak from ioctl
    hwmon (it87): Fix voltage scaling for chips with 10.9mV ADCs
    scsi: qla2xxx: Perform lockless command completion in abort path
    uas: Add US_FL_NO_REPORT_OPCODES for JMicron JMS583Gen 2
    thunderbolt: Use const qualifier for `ring_interrupt_index`
    riscv: Bump COMMAND_LINE_SIZE value to 1024
    ca8210: fix mac_len negative array access
    m68k: Only force 030 bus error if PC not in exception table
    selftests/bpf: check that modifier resolves after pointer
    scsi: target: iscsi: Fix an error message in iscsi_check_key()
    scsi: ufs: core: Add soft dependency on governor_simpleondemand
    scsi: lpfc: Avoid usage of list iterator variable after loop
    net: usb: cdc_mbim: avoid altsetting toggling for Telit FE990
    net: usb: qmi_wwan: add Telit 0x1080 composition
    sh: sanitize the flags on sigreturn
    cifs: empty interface list when server doesn't support query interfaces
    scsi: core: Add BLIST_SKIP_VPD_PAGES for SKhynix H28U74301AMR
    usb: gadget: u_audio: don't let userspace block driver unbind
    fsverity: Remove WQ_UNBOUND from fsverity read workqueue
    igb: revert rtnl_lock() that causes deadlock
    dm thin: fix deadlock when swapping to thin device
    usb: cdns3: Fix issue with using incorrect PCI device function
    usb: chipdea: core: fix return -EINVAL if request role is the same with current role
    usb: chipidea: core: fix possible concurrent when switch role
    wifi: mac80211: fix qos on mesh interfaces
    nilfs2: fix kernel-infoleak in nilfs_ioctl_wrap_copy()
    i2c: xgene-slimpro: Fix out-of-bounds bug in xgene_slimpro_i2c_xfer()
    dm stats: check for and propagate alloc_percpu failure
    dm crypt: add cond_resched() to dmcrypt_write()
    sched/fair: sanitize vruntime of entity being placed
    sched/fair: Sanitize vruntime of entity being migrated
    tun: avoid double free in tun_free_netdev
    ocfs2: fix data corruption after failed write
    fsverity: don't drop pagecache at end of FS_IOC_ENABLE_VERITY
    bus: imx-weim: fix branch condition evaluates to a garbage value
    md: avoid signed overflow in slot_store()
    ALSA: asihpi: check pao in control_message()
    ALSA: hda/ca0132: fixup buffer overrun at tuning_ctl_set()
    fbdev: tgafb: Fix potential divide by zero
    sched_getaffinity: don't assume 'cpumask_size()' is fully initialized
    fbdev: nvidia: Fix potential divide by zero
    fbdev: intelfb: Fix potential divide by zero
    fbdev: lxfb: Fix potential divide by zero
    fbdev: au1200fb: Fix potential divide by zero
    ca8210: Fix unsigned mac_len comparison with zero in ca8210_skb_tx()
    dma-mapping: drop the dev argument to arch_sync_dma_for_*
    mips: bmips: BCM6358: disable RAC flush for TP1
    mtd: rawnand: meson: invalidate cache on polling ECC bit
    scsi: megaraid_sas: Fix crash after a double completion
    ptp_qoriq: fix memory leak in probe()
    regulator: fix spelling mistake "Cant" -> "Can't"
    regulator: Handle deferred clk
    net/net_failover: fix txq exceeding warning
    can: bcm: bcm_tx_setup(): fix KMSAN uninit-value in vfs_write
    s390/vfio-ap: fix memory leak in vfio_ap device driver
    i40e: fix registers dump after run ethtool adapter self test
    bnxt_en: Fix typo in PCI id to device description string mapping
    net: dsa: mv88e6xxx: Enable IGMP snooping on user ports only
    net: mvneta: make tx buffer array agnostic
    pinctrl: ocelot: Fix alt mode for ocelot
    Input: alps - fix compatibility with -funsigned-char
    Input: focaltech - use explicitly signed char type
    cifs: prevent infinite recursion in CIFSGetDFSRefer()
    cifs: fix DFS traversal oops without CONFIG_CIFS_DFS_UPCALL
    Input: goodix - add Lenovo Yoga Book X90F to nine_bytes_report DMI table
    xen/netback: don't do grant copy across page boundary
    pinctrl: at91-pio4: fix domain name assignment
    NFSv4: Fix hangs when recovering open state after a server reboot
    ALSA: hda/conexant: Partial revert of a quirk for Lenovo
    ALSA: usb-audio: Fix regression on detection of Roland VS-100
    drm/etnaviv: fix reference leak when mmaping imported buffer
    s390/uaccess: add missing earlyclobber annotations to __clear_user()
    btrfs: scan device in non-exclusive mode
    ext4: fix kernel BUG in 'ext4_write_inline_data_end()'
    net_sched: add __rcu annotation to netdev->qdisc
    net: sched: fix race condition in qdisc_graft()
    firmware: arm_scmi: Fix device node validation for mailbox transport
    gfs2: Always check inode size of inline inodes
    Linux 5.4.240

Change-Id: Ibe603c6cdf434feacfd91e87ba359dc544223a21
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
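Most of the per-architecture hunks below come from the backport of "dma-mapping: drop the dev argument to arch_sync_dma_for_*". Each one repeats the same mechanical change, sketched here for orientation. This sketch is not part of the commit; the prototypes are copied from the hunks themselves, and the OLD_API guard is a hypothetical switch used only to show both shapes side by side.

    /* Illustrative sketch only -- not part of this commit. */
    #include <linux/dma-direction.h>
    #include <linux/types.h>

    struct device;

    #ifdef OLD_API
    /* 5.4.239 and earlier: each hook carried a struct device * it never used. */
    void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                                  size_t size, enum dma_data_direction dir);
    void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                               size_t size, enum dma_data_direction dir);
    #else
    /* 5.4.240: the unused dev argument is dropped from every implementation
     * and call site; callers pass only paddr, size and direction. */
    void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir);
    void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                               enum dma_data_direction dir);
    #endif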
This commit is contained in:
commit 974e2ad014

Makefile | 2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 239
+SUBLEVEL = 240
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -48,8 +48,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * upper layer functions (in include/linux/dma-mapping.h)
  */

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_TO_DEVICE:
@@ -69,8 +69,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
     }
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_TO_DEVICE:
@@ -2333,15 +2333,15 @@ void arch_teardown_dma_ops(struct device *dev)
 }

 #ifdef CONFIG_SWIOTLB
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                   size, dir);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
                   size, dir);
@@ -70,20 +70,20 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * pfn_valid returns true the pages is local and we can use the native
  * dma-direct functions, otherwise we call the Xen specific version.
  */
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-        phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     if (pfn_valid(PFN_DOWN(handle)))
-        arch_sync_dma_for_cpu(dev, paddr, size, dir);
+        arch_sync_dma_for_cpu(paddr, size, dir);
     else if (dir != DMA_TO_DEVICE)
         dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }

-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-        phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     if (pfn_valid(PFN_DOWN(handle)))
-        arch_sync_dma_for_device(dev, paddr, size, dir);
+        arch_sync_dma_for_device(paddr, size, dir);
     else if (dir == DMA_FROM_DEVICE)
         dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
     else
@@ -13,14 +13,14 @@

 #include <asm/cacheflush.h>

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_map_area(phys_to_virt(paddr), size, dir);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_unmap_area(phys_to_virt(paddr), size, dir);
 }
@@ -140,7 +140,7 @@ void __init coherent_mem_init(phys_addr_t start, u32 size)
                   sizeof(long));
 }

-static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void c6x_dma_sync(phys_addr_t paddr, size_t size,
         enum dma_data_direction dir)
 {
     BUG_ON(!valid_dma_direction(dir));
@@ -160,14 +160,14 @@ static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
     }
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
-    return c6x_dma_sync(dev, paddr, size, dir);
+    return c6x_dma_sync(paddr, size, dir);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
-    return c6x_dma_sync(dev, paddr, size, dir);
+    return c6x_dma_sync(paddr, size, dir);
 }
@@ -58,8 +58,8 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
     cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_TO_DEVICE:
@@ -74,8 +74,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
     }
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_TO_DEVICE:
@@ -55,8 +55,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
     gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     void *addr = phys_to_virt(paddr);

@@ -73,8 +73,8 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     unsigned long pfn = PHYS_PFN(paddr);

@@ -61,8 +61,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,

 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_BIDIRECTIONAL:
@@ -30,6 +30,7 @@
 #include <linux/init.h>
 #include <linux/ptrace.h>
 #include <linux/kallsyms.h>
+#include <linux/extable.h>

 #include <asm/setup.h>
 #include <asm/fpu.h>
@@ -550,7 +551,8 @@ static inline void bus_error030 (struct frame *fp)
             errorcode |= 2;

         if (mmusr & (MMU_I | MMU_WP)) {
-            if (ssw & 4) {
+            /* We might have an exception table for this PC */
+            if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
                 pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
                     ssw & RW ? "read" : "write",
                     fp->un.fmtb.daddr,
@@ -15,7 +15,7 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>

-static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
+static void __dma_sync(phys_addr_t paddr, size_t size,
         enum dma_data_direction direction)
 {
     switch (direction) {
@@ -31,14 +31,14 @@ static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
     }
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
-    __dma_sync(dev, paddr, size, dir);
+    __dma_sync(paddr, size, dir);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
-    __dma_sync(dev, paddr, size, dir);
+    __dma_sync(paddr, size, dir);
 }
@@ -64,7 +64,9 @@ phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
     return dma_addr;
 }

-void arch_sync_dma_for_cpu_all(struct device *dev)
+bool bmips_rac_flush_disable;
+
+void arch_sync_dma_for_cpu_all(void)
 {
     void __iomem *cbr = BMIPS_GET_CBR();
     u32 cfg;
@@ -74,6 +76,9 @@ void arch_sync_dma_for_cpu_all(struct device *dev)
         boot_cpu_type() != CPU_BMIPS4380)
         return;

+    if (unlikely(bmips_rac_flush_disable))
+        return;
+
     /* Flush stale data out of the readahead cache */
     cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
     __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
@@ -34,6 +34,8 @@
 #define REG_BCM6328_OTP        ((void __iomem *)CKSEG1ADDR(0x1000062c))
 #define BCM6328_TP1_DISABLED    BIT(9)

+extern bool bmips_rac_flush_disable;
+
 static const unsigned long kbase = VMLINUX_LOAD_ADDRESS & 0xfff00000;

 struct bmips_quirk {
@@ -103,6 +105,12 @@ static void bcm6358_quirks(void)
      * disable SMP for now
      */
     bmips_smp_enabled = 0;
+
+    /*
+     * RAC flush causes kernel panics on BCM6358 when booting from TP1
+     * because the bootloader is not initializing it properly.
+     */
+    bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
 }

 static void bcm6368_quirks(void)
@@ -592,7 +592,7 @@ static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
     phys_addr_t phys = page_to_phys(page) + offset;

     if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-        arch_sync_dma_for_device(dev, phys, size, dir);
+        arch_sync_dma_for_device(phys, size, dir);
     return vdma_alloc(phys, size);
 }

@@ -600,7 +600,7 @@ static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
         size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
     if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-        arch_sync_dma_for_cpu(dev, vdma_log2phys(dma_addr), size, dir);
+        arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
     vdma_free(dma_addr);
 }

@@ -612,7 +612,7 @@ static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,

     for_each_sg(sglist, sg, nents, i) {
         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-            arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+            arch_sync_dma_for_device(sg_phys(sg), sg->length,
                 dir);
         sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
         if (sg->dma_address == DMA_MAPPING_ERROR)
@@ -631,8 +631,7 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,

     for_each_sg(sglist, sg, nents, i) {
         if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-            arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length,
-                dir);
+            arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
         vdma_free(sg->dma_address);
     }
 }
@@ -640,13 +639,13 @@ static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 static void jazz_dma_sync_single_for_device(struct device *dev,
         dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-    arch_sync_dma_for_device(dev, vdma_log2phys(addr), size, dir);
+    arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
 }

 static void jazz_dma_sync_single_for_cpu(struct device *dev,
         dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
-    arch_sync_dma_for_cpu(dev, vdma_log2phys(addr), size, dir);
+    arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
 }

 static void jazz_dma_sync_sg_for_device(struct device *dev,
@@ -656,7 +655,7 @@ static void jazz_dma_sync_sg_for_device(struct device *dev,
     int i;

     for_each_sg(sgl, sg, nents, i)
-        arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }

 static void jazz_dma_sync_sg_for_cpu(struct device *dev,
@@ -666,7 +665,7 @@ static void jazz_dma_sync_sg_for_cpu(struct device *dev,
     int i;

     for_each_sg(sgl, sg, nents, i)
-        arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }

 const struct dma_map_ops jazz_dma_ops = {
@@ -27,7 +27,7 @@
  * R10000 and R12000 are used in such systems, the SGI IP28 Indigo² rsp.
  * SGI IP32 aka O2.
  */
-static inline bool cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(void)
 {
     switch (boot_cpu_type()) {
     case CPU_R10000:
@@ -118,17 +118,17 @@ static inline void dma_sync_phys(phys_addr_t paddr, size_t size,
     } while (left);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     dma_sync_phys(paddr, size, dir);
 }

 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
-    if (cpu_needs_post_dma_flush(dev))
+    if (cpu_needs_post_dma_flush())
         dma_sync_phys(paddr, size, dir);
 }
 #endif
@@ -46,8 +46,8 @@ static inline void cache_op(phys_addr_t paddr, size_t size,
     } while (left);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_FROM_DEVICE:
@@ -61,8 +61,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
     }
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_TO_DEVICE:
@@ -18,8 +18,8 @@
 #include <linux/cache.h>
 #include <asm/cacheflush.h>

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     void *vaddr = phys_to_virt(paddr);

@@ -42,8 +42,8 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
     }
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     void *vaddr = phys_to_virt(paddr);

@@ -125,7 +125,7 @@ arch_dma_free(struct device *dev, size_t size, void *vaddr,
     free_pages_exact(vaddr, size);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
+void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
         enum dma_data_direction dir)
 {
     unsigned long cl;
@@ -439,14 +439,14 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
     free_pages((unsigned long)__va(dma_handle), order);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
@@ -104,14 +104,14 @@ static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 #endif
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_sync_page(paddr, size, dir);
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     __dma_sync_page(paddr, size, dir);
 }
arch/riscv/include/uapi/asm/setup.h (new file)
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
@@ -339,7 +339,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
         "4: slgr  %0,%0\n"
         "5:\n"
         EX_TABLE(0b,2b) EX_TABLE(3b,5b)
-        : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+        : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2)
         : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
     return size;
 }
@@ -51,6 +51,7 @@
 #define SR_FD        0x00008000
 #define SR_MD        0x40000000

+#define SR_USER_MASK    0x00000303    // M, Q, S, T bits
 /*
  * DSP structure and data
  */
@@ -25,7 +25,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
      * Pages from the page allocator may have data present in
      * cache. So flush the cache before using uncached memory.
      */
-    arch_sync_dma_for_device(dev, virt_to_phys(ret), size,
+    arch_sync_dma_for_device(virt_to_phys(ret), size,
             DMA_BIDIRECTIONAL);

     ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
@@ -59,8 +59,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
     iounmap(vaddr);
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     void *addr = sh_cacheop_vaddr(phys_to_virt(paddr));

@@ -116,6 +116,7 @@ static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
 {
     unsigned int err = 0;
+    unsigned int sr = regs->sr & ~SR_USER_MASK;

 #define COPY(x)        err |= __get_user(regs->x, &sc->sc_##x)
             COPY(regs[1]);
@@ -131,6 +132,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
             COPY(sr);    COPY(pc);
 #undef COPY

+    regs->sr = (regs->sr & SR_USER_MASK) | sr;
+
 #ifdef CONFIG_SH_FPU
     if (boot_cpu_data.flags & CPU_HAS_FPU) {
         int owned_fp;

@@ -368,8 +368,8 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,

 /* IIep is write-through, not flushing on cpu to device transfer. */

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     if (dir != PCI_DMA_TODEVICE)
         dma_make_coherent(paddr, PAGE_ALIGN(size));
@@ -44,8 +44,8 @@ static void do_cache_op(phys_addr_t paddr, size_t size,
     }
 }

-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_BIDIRECTIONAL:
@@ -62,8 +62,8 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
     }
 }

-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-        size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+        enum dma_data_direction dir)
 {
     switch (dir) {
     case DMA_BIDIRECTIONAL:
@@ -2914,6 +2914,7 @@ close_card_oam(struct idt77252_dev *card)

                 recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
             }
+            kfree(vc);
         }
     }
 }
@@ -2957,6 +2958,15 @@ open_card_ubr0(struct idt77252_dev *card)
     return 0;
 }

+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+    struct vc_map *vc = card->vcs[0];
+
+    free_scq(card, vc->scq);
+    kfree(vc);
+}
+
 static int
 idt77252_dev_open(struct idt77252_dev *card)
 {
@@ -3006,6 +3016,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
     struct idt77252_dev *card = dev->dev_data;
     u32 conf;

+    close_card_ubr0(card);
     close_card_oam(card);

     conf = SAR_CFG_RXPTH |    /* enable receive path */
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
     return 0;
 }

+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+    int ret;
+
+    ret = qca_set_bdaddr_rome(hdev, bdaddr);
+    if (ret)
+        return ret;
+
+    /* The firmware stops responding for a while after setting the bdaddr,
+     * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+     */
+    usleep_range(1000, 10000);
+    return 0;
+}
+
 static int btqcomsmd_probe(struct platform_device *pdev)
 {
     struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
     hdev->close = btqcomsmd_close;
     hdev->send = btqcomsmd_send;
     hdev->setup = btqcomsmd_setup;
-    hdev->set_bdaddr = qca_set_bdaddr_rome;
+    hdev->set_bdaddr = btqcomsmd_set_bdaddr;

     ret = hci_register_dev(hdev);
     if (ret < 0)
@@ -343,6 +343,7 @@ static void btsdio_remove(struct sdio_func *func)

     BT_DBG("func %p", func);

+    cancel_work_sync(&data->work);
     if (!data)
         return;

@@ -192,8 +192,8 @@ static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
     const struct of_device_id *of_id = of_match_device(weim_id_table,
                                &pdev->dev);
     const struct imx_weim_devtype *devtype = of_id->data;
+    int ret = 0, have_child = 0;
     struct device_node *child;
-    int ret, have_child = 0;
     struct cs_timing_state ts = {};
     u32 reg;

@@ -737,6 +737,39 @@ static int scmi_mailbox_check(struct device_node *np, int idx)
                       idx, NULL);
 }

+static int scmi_mailbox_chan_validate(struct device *cdev)
+{
+    int num_mb, num_sh, ret = 0;
+    struct device_node *np = cdev->of_node;
+
+    num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+    num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+    /* Bail out if mboxes and shmem descriptors are inconsistent */
+    if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+        dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+             of_node_full_name(np));
+        return -EINVAL;
+    }
+
+    if (num_sh > 1) {
+        struct device_node *np_tx, *np_rx;
+
+        np_tx = of_parse_phandle(np, "shmem", 0);
+        np_rx = of_parse_phandle(np, "shmem", 1);
+        /* SCMI Tx and Rx shared mem areas have to be distinct */
+        if (!np_tx || !np_rx || np_tx == np_rx) {
+            dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+                 of_node_full_name(np));
+            ret = -EINVAL;
+        }
+
+        of_node_put(np_tx);
+        of_node_put(np_rx);
+    }
+
+    return ret;
+}
+
 static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
                 int prot_id, bool tx)
 {
@@ -760,6 +793,10 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
         goto idr_alloc;
     }

+    ret = scmi_mailbox_chan_validate(dev);
+    if (ret)
+        return ret;
+
     cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
     if (!cinfo)
         return -ENOMEM;
@@ -93,7 +93,15 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
 static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
         struct vm_area_struct *vma)
 {
-    return dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+    int ret;
+
+    ret = dma_buf_mmap(etnaviv_obj->base.dma_buf, vma, 0);
+    if (!ret) {
+        /* Drop the reference acquired by drm_gem_mmap_obj(). */
+        drm_gem_object_put(&etnaviv_obj->base);
+    }
+
+    return ret;
 }

 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
@@ -486,6 +486,8 @@ static const struct it87_devices it87_devices[] = {
 #define has_pwm_freq2(data)    ((data)->features & FEAT_PWM_FREQ2)
 #define has_six_temp(data)    ((data)->features & FEAT_SIX_TEMP)
 #define has_vin3_5v(data)    ((data)->features & FEAT_VIN3_5V)
+#define has_scaling(data)    ((data)->features & (FEAT_12MV_ADC | \
+                             FEAT_10_9MV_ADC))

 struct it87_sio_data {
     int sioaddr;
@@ -3098,7 +3100,7 @@ static int it87_probe(struct platform_device *pdev)
              "Detected broken BIOS defaults, disabling PWM interface\n");

     /* Starting with IT8721F, we handle scaling of internal voltages */
-    if (has_12mv_adc(data)) {
+    if (has_scaling(data)) {
         if (sio_data->internal & BIT(0))
             data->in_scaled |= BIT(3);    /* in3 is AVCC */
         if (sio_data->internal & BIT(1))
@@ -508,10 +508,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
 static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
 {
     struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+    unsigned int enabled;
     unsigned int temp;

+    enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
     lpi2c_imx_intctrl(lpi2c_imx, 0);
     temp = readl(lpi2c_imx->base + LPI2C_MSR);
+    temp &= enabled;

     if (temp & MSR_RDF)
         lpi2c_imx_read_rxfifo(lpi2c_imx);
@@ -308,6 +308,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
     u32 msg[3];
     int rc;

+    if (writelen > I2C_SMBUS_BLOCK_MAX)
+        return -EINVAL;
+
     memcpy(ctx->dma_buffer, data, writelen);
     paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
                    DMA_TO_DEVICE);
@@ -852,8 +852,8 @@ static void alps_process_packet_v6(struct psmouse *psmouse)
             x = y = z = 0;

         /* Divide 4 since trackpoint's speed is too fast */
-        input_report_rel(dev2, REL_X, (char)x / 4);
-        input_report_rel(dev2, REL_Y, -((char)y / 4));
+        input_report_rel(dev2, REL_X, (s8)x / 4);
+        input_report_rel(dev2, REL_Y, -((s8)y / 4));

         psmouse_report_standard_buttons(dev2, packet[3]);

@@ -1104,8 +1104,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
         ((packet[3] & 0x20) << 1);
     z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);

-    input_report_rel(dev2, REL_X, (char)x);
-    input_report_rel(dev2, REL_Y, -((char)y));
+    input_report_rel(dev2, REL_X, (s8)x);
+    input_report_rel(dev2, REL_Y, -((s8)y));
     input_report_abs(dev2, ABS_PRESSURE, z);

     psmouse_report_standard_buttons(dev2, packet[1]);
@@ -2294,20 +2294,20 @@ static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch)
     if (reg < 0)
         return reg;

-    x_pitch = (char)(reg << 4) >> 4;    /* sign extend lower 4 bits */
+    x_pitch = (s8)(reg << 4) >> 4;    /* sign extend lower 4 bits */
     x_pitch = 50 + 2 * x_pitch;    /* In 0.1 mm units */

-    y_pitch = (char)reg >> 4;    /* sign extend upper 4 bits */
+    y_pitch = (s8)reg >> 4;    /* sign extend upper 4 bits */
     y_pitch = 36 + 2 * y_pitch;    /* In 0.1 mm units */

     reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1);
     if (reg < 0)
         return reg;

-    x_electrode = (char)(reg << 4) >> 4;    /* sign extend lower 4 bits */
+    x_electrode = (s8)(reg << 4) >> 4;    /* sign extend lower 4 bits */
     x_electrode = 17 + x_electrode;

-    y_electrode = (char)reg >> 4;    /* sign extend upper 4 bits */
+    y_electrode = (s8)reg >> 4;    /* sign extend upper 4 bits */
     y_electrode = 13 + y_electrode;

     x_phys = x_pitch * (x_electrode - 1);    /* In 0.1 mm units */
@@ -202,8 +202,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
     state->pressed = packet[0] >> 7;
     finger1 = ((packet[0] >> 4) & 0x7) - 1;
     if (finger1 < FOC_MAX_FINGERS) {
-        state->fingers[finger1].x += (char)packet[1];
-        state->fingers[finger1].y += (char)packet[2];
+        state->fingers[finger1].x += (s8)packet[1];
+        state->fingers[finger1].y += (s8)packet[2];
     } else {
         psmouse_err(psmouse, "First finger in rel packet invalid: %d\n",
                 finger1);
@@ -218,8 +218,8 @@ static void focaltech_process_rel_packet(struct psmouse *psmouse,
      */
     finger2 = ((packet[3] >> 4) & 0x7) - 1;
     if (finger2 < FOC_MAX_FINGERS) {
-        state->fingers[finger2].x += (char)packet[4];
-        state->fingers[finger2].y += (char)packet[5];
+        state->fingers[finger2].x += (s8)packet[4];
+        state->fingers[finger2].y += (s8)packet[5];
     }
 }

@@ -170,10 +170,18 @@ static const struct dmi_system_id rotated_screen[] = {
 static const struct dmi_system_id nine_bytes_report[] = {
 #if defined(CONFIG_DMI) && defined(CONFIG_X86)
     {
-        .ident = "Lenovo YogaBook",
-        /* YB1-X91L/F and YB1-X90L/F */
+        /* Lenovo Yoga Book X90F / X90L */
         .matches = {
-            DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+            DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+            DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+            DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+        }
+    },
+    {
+        /* Lenovo Yoga Book X91F / X91L */
+        .matches = {
+            /* Non exact match to match F + L versions */
+            DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
         }
     },
 #endif
@@ -706,7 +706,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
         return;

     phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-    arch_sync_dma_for_cpu(dev, phys, size, dir);
+    arch_sync_dma_for_cpu(phys, size, dir);
 }

 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -718,7 +718,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
         return;

     phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-    arch_sync_dma_for_device(dev, phys, size, dir);
+    arch_sync_dma_for_device(phys, size, dir);
 }

 static void iommu_dma_sync_sg_for_cpu(struct device *dev,
@@ -732,7 +732,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
         return;

     for_each_sg(sgl, sg, nelems, i)
-        arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+        arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
 }

 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -746,7 +746,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
         return;

     for_each_sg(sgl, sg, nelems, i)
-        arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+        arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
 }

 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -761,7 +761,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
     dma_handle = __iommu_dma_map(dev, phys, size, prot);
     if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
         dma_handle != DMA_MAPPING_ERROR)
-        arch_sync_dma_for_device(dev, phys, size, dir);
+        arch_sync_dma_for_device(phys, size, dir);
     return dma_handle;
 }

@@ -1554,6 +1554,7 @@ static int dmcrypt_write(void *data)
             io = crypt_io_from_node(rb_first(&write_tree));
             rb_erase(&io->rb_node, &write_tree);
             kcryptd_io_write(io);
+            cond_resched();
         } while (!RB_EMPTY_ROOT(&write_tree));
         blk_finish_plug(&plug);
     }
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
            atomic_read(&shared->in_flight[WRITE]);
 }

-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
 {
     int cpu;
     struct dm_stats_last_position *last;
@@ -196,11 +196,16 @@ void dm_stats_init(struct dm_stats *stats)
     mutex_init(&stats->mutex);
     INIT_LIST_HEAD(&stats->list);
     stats->last = alloc_percpu(struct dm_stats_last_position);
+    if (!stats->last)
+        return -ENOMEM;
+
     for_each_possible_cpu(cpu) {
         last = per_cpu_ptr(stats->last, cpu);
         last->last_sector = (sector_t)ULLONG_MAX;
         last->last_rw = UINT_MAX;
     }
+
+    return 0;
 }

 void dm_stats_cleanup(struct dm_stats *stats)
@@ -22,7 +22,7 @@ struct dm_stats_aux {
     unsigned long long duration_ns;
 };

-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
 void dm_stats_cleanup(struct dm_stats *st);

 struct mapped_device;
@@ -3407,6 +3407,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
     pt->adjusted_pf = pt->requested_pf = pf;
     bio_init(&pt->flush_bio, NULL, 0);
     ti->num_flush_bios = 1;
+    ti->limit_swap_bios = true;

     /*
      * Only need to enable discards if the pool should pass
@@ -4292,6 +4293,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
         goto bad;

     ti->num_flush_bios = 1;
+    ti->limit_swap_bios = true;
     ti->flush_supported = true;
     ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

@@ -1979,7 +1979,9 @@ static struct mapped_device *alloc_dev(int minor)
     if (!md->bdev)
         goto bad;

-    dm_stats_init(&md->stats);
+    r = dm_stats_init(&md->stats);
+    if (r < 0)
+        goto bad;

     /* Populate the mapping, nobody knows we exist yet */
     spin_lock(&_minor_lock);
@@ -3082,6 +3082,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
         err = kstrtouint(buf, 10, (unsigned int *)&slot);
         if (err < 0)
             return err;
+        if (slot < 0)
+            /* overflow */
+            return -ENOSPC;
     }
     if (rdev->mddev->pers && slot == -1) {
         /* Setting 'slot' on an active array requires also
@@ -172,6 +172,7 @@ struct meson_nfc {

     dma_addr_t daddr;
     dma_addr_t iaddr;
+    u32 info_bytes;

     unsigned long assigned_cs;
 };
@@ -499,6 +500,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
                  nfc->daddr, datalen, dir);
         return ret;
     }
+    nfc->info_bytes = infolen;
     cmd = GENCMDIADDRL(NFC_CMD_AIL, nfc->iaddr);
     writel(cmd, nfc->reg_base + NFC_REG_CMD);

@@ -516,8 +518,10 @@ static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
     struct meson_nfc *nfc = nand_get_controller_data(nand);

     dma_unmap_single(nfc->dev, nfc->daddr, datalen, dir);
-    if (infolen)
+    if (infolen) {
         dma_unmap_single(nfc->dev, nfc->iaddr, infolen, dir);
+        nfc->info_bytes = 0;
+    }
 }

 static int meson_nfc_read_buf(struct nand_chip *nand, u8 *buf, int len)
@@ -706,6 +710,8 @@ static void meson_nfc_check_ecc_pages_valid(struct meson_nfc *nfc,
         usleep_range(10, 15);
         /* info is updated by nfc dma engine*/
         smp_rmb();
+        dma_sync_single_for_cpu(nfc->dev, nfc->iaddr, nfc->info_bytes,
+                    DMA_FROM_DEVICE);
         ret = *info & ECC_COMPLETE;
     } while (!ret);
 }
@@ -2433,9 +2433,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
      * If this is the upstream port for this switch, enable
      * forwarding of unknown unicasts and multicasts.
      */
-    reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP |
-        MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
+    reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP |
         MV88E6XXX_PORT_CTL0_STATE_FORWARDING;
+    /* Forward any IPv4 IGMP or IPv6 MLD frames received
+     * by a USER port to the CPU port to allow snooping.
+     */
+    if (dsa_is_user_port(ds, port))
+        reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP;
+
     err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg);
     if (err)
         return err;
@@ -221,12 +221,12 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
     { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
     { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
     { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
-    { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
+    { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
     { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
-    { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
-    { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
+    { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
+    { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
     { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
-    { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
+    { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
     { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
     { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
 #ifdef CONFIG_BNXT_SRIOV
@@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
     return 0;
 }

-struct i40e_diag_reg_test_info i40e_reg_list[] = {
+const struct i40e_diag_reg_test_info i40e_reg_list[] = {
     /* offset               mask         elements   stride */
     {I40E_QTX_CTL(0),       0x0000FFBF, 1,
         I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
@@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
 {
     i40e_status ret_code = 0;
     u32 reg, mask;
+    u32 elements;
     u32 i, j;

     for (i = 0; i40e_reg_list[i].offset != 0 &&
          !ret_code; i++) {

+        elements = i40e_reg_list[i].elements;
         /* set actual reg range for dynamically allocated resources */
         if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
             hw->func_caps.num_tx_qp != 0)
-            i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+            elements = hw->func_caps.num_tx_qp;
         if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
              i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
              i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
             i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
             i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
             hw->func_caps.num_msix_vectors != 0)
-            i40e_reg_list[i].elements =
-                hw->func_caps.num_msix_vectors - 1;
+            elements = hw->func_caps.num_msix_vectors - 1;

         /* test register access */
         mask = i40e_reg_list[i].mask;
-        for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
+        for (j = 0; j < elements && !ret_code; j++) {
             reg = i40e_reg_list[i].offset +
                   (j * i40e_reg_list[i].stride);
             ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
@@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info {
     u32 stride;    /* bytes between each element */
 };

-extern struct i40e_diag_reg_test_info i40e_reg_list[];
+extern const struct i40e_diag_reg_test_info i40e_reg_list[];

 i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
 i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
@@ -662,7 +662,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[] = {
     /* Non Tunneled IPv6 */
     IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
     IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
-    IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+    IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY4),
     IAVF_PTT_UNUSED_ENTRY(91),
     IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
     IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1061,7 +1061,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
         cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
                 IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

-    if (ring->netdev->features & NETIF_F_RXHASH)
+    if (!(ring->netdev->features & NETIF_F_RXHASH))
         return;

     if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -3674,9 +3674,7 @@ static void igb_remove(struct pci_dev *pdev)
     igb_release_hw_control(adapter);

 #ifdef CONFIG_PCI_IOV
-    rtnl_lock();
     igb_disable_sriov(pdev);
-    rtnl_unlock();
 #endif

     unregister_netdev(netdev);
@@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
               igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
               netdev);
     if (err)
-        goto out;
+        goto free_irq_tx;

     adapter->rx_ring->itr_register = E1000_EITR(vector);
     adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
     err = request_irq(adapter->msix_entries[vector].vector,
               igbvf_msix_other, 0, netdev->name, netdev);
     if (err)
-        goto out;
+        goto free_irq_rx;

     igbvf_configure_msix(adapter);
     return 0;
+free_irq_rx:
+    free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+    free_irq(adapter->msix_entries[--vector].vector, netdev);
 out:
     return err;
 }
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2009 - 2018 Intel Corporation. */

+#include <linux/etherdevice.h>
+
 #include "vf.h"

 static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
         /* set our "perm_addr" based on info provided by PF */
         ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
         if (!ret_val) {
-            if (msgbuf[0] == (E1000_VF_RESET |
-                      E1000_VT_MSGTYPE_ACK))
+            switch (msgbuf[0]) {
+            case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
                 memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
-            else
+                break;
+            case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
                 eth_zero_addr(hw->mac.perm_addr);
+                break;
+            default:
+                ret_val = -E1000_ERR_MAC_INIT;
+            }
         }
     }

@@ -549,6 +549,20 @@ struct mvneta_rx_desc {
 };
 #endif

+enum mvneta_tx_buf_type {
+    MVNETA_TYPE_SKB,
+    MVNETA_TYPE_XDP_TX,
+    MVNETA_TYPE_XDP_NDO,
+};
+
+struct mvneta_tx_buf {
+    enum mvneta_tx_buf_type type;
+    union {
+        struct xdp_frame *xdpf;
+        struct sk_buff *skb;
+    };
+};
+
 struct mvneta_tx_queue {
     /* Number of this TX queue, in the range 0-7 */
     u8 id;
@@ -564,8 +578,8 @@ struct mvneta_tx_queue {
     int tx_stop_threshold;
     int tx_wake_threshold;

-    /* Array of transmitted skb */
-    struct sk_buff **tx_skb;
+    /* Array of transmitted buffers */
+    struct mvneta_tx_buf *buf;

     /* Index of last TX DMA descriptor that was inserted */
     int txq_put_index;
@@ -1774,14 +1788,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
     int i;

     for (i = 0; i < num; i++) {
+        struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
         struct mvneta_tx_desc *tx_desc = txq->descs +
                          txq->txq_get_index;
-        struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
-
-        if (skb) {
-            bytes_compl += skb->len;
-            pkts_compl++;
-        }

         mvneta_txq_inc_get(txq);

@@ -1789,9 +1798,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
             dma_unmap_single(pp->dev->dev.parent,
                      tx_desc->buf_phys_addr,
                      tx_desc->data_size, DMA_TO_DEVICE);
-        if (!skb)
+        if (!buf->skb)
             continue;
-        dev_kfree_skb_any(skb);
+
+        bytes_compl += buf->skb->len;
+        pkts_compl++;
+        dev_kfree_skb_any(buf->skb);
     }

     netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -2242,16 +2254,19 @@ static inline void
 mvneta_tso_put_hdr(struct sk_buff *skb,
            struct mvneta_port *pp, struct mvneta_tx_queue *txq)
 {
-    struct mvneta_tx_desc *tx_desc;
     int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+    struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
+    struct mvneta_tx_desc *tx_desc;

-    txq->tx_skb[txq->txq_put_index] = NULL;
     tx_desc = mvneta_txq_next_desc_get(txq);
     tx_desc->data_size = hdr_len;
     tx_desc->command = mvneta_skb_tx_csum(pp, skb);
     tx_desc->command |= MVNETA_TXD_F_DESC;
     tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
                  txq->txq_put_index * TSO_HEADER_SIZE;
+    buf->type = MVNETA_TYPE_SKB;
+    buf->skb = NULL;
+
     mvneta_txq_inc_put(txq);
 }

@@ -2260,6 +2275,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
         struct sk_buff *skb, char *data, int size,
         bool last_tcp, bool is_last)
 {
+    struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
     struct mvneta_tx_desc *tx_desc;

     tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2273,7 +2289,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
     }

     tx_desc->command = 0;
-    txq->tx_skb[txq->txq_put_index] = NULL;
+    buf->type = MVNETA_TYPE_SKB;
+    buf->skb = NULL;

     if (last_tcp) {
         /* last descriptor in the TCP packet */
@@ -2281,7 +2298,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,

         /* last descriptor in SKB */
         if (is_last)
-            txq->tx_skb[txq->txq_put_index] = skb;
+            buf->skb = skb;
     }
     mvneta_txq_inc_put(txq);
     return 0;
@@ -2366,6 +2383,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
     int i, nr_frags = skb_shinfo(skb)->nr_frags;

     for (i = 0; i < nr_frags; i++) {
+        struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
         void *addr = skb_frag_address(frag);

@@ -2385,12 +2403,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
         if (i == nr_frags - 1) {
             /* Last descriptor */
             tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-            txq->tx_skb[txq->txq_put_index] = skb;
+            buf->skb = skb;
         } else {
             /* Descriptor in the middle: Not First, Not Last */
             tx_desc->command = 0;
-            txq->tx_skb[txq->txq_put_index] = NULL;
+            buf->skb = NULL;
         }
+        buf->type = MVNETA_TYPE_SKB;
         mvneta_txq_inc_put(txq);
     }

@@ -2418,6 +2437,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
     struct mvneta_port *pp = netdev_priv(dev);
     u16 txq_id = skb_get_queue_mapping(skb);
     struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+    struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
     struct mvneta_tx_desc *tx_desc;
     int len = skb->len;
     int frags = 0;
@@ -2450,16 +2470,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
         goto out;
     }

+    buf->type = MVNETA_TYPE_SKB;
     if (frags == 1) {
         /* First and Last descriptor */
         tx_cmd |= MVNETA_TXD_FLZ_DESC;
         tx_desc->command = tx_cmd;
-        txq->tx_skb[txq->txq_put_index] = skb;
+        buf->skb = skb;
         mvneta_txq_inc_put(txq);
     } else {
         /* First but not Last */
         tx_cmd |= MVNETA_TXD_F_DESC;
-        txq->tx_skb[txq->txq_put_index] = NULL;
+        buf->skb = NULL;
         mvneta_txq_inc_put(txq);
         tx_desc->command = tx_cmd;
         /* Continue with other skb fragments */
@@ -3005,9 +3026,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,

     txq->last_desc = txq->size - 1;

-    txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
-                    GFP_KERNEL);
-    if (!txq->tx_skb) {
+    txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
+    if (!txq->buf) {
         dma_free_coherent(pp->dev->dev.parent,
                   txq->size * MVNETA_DESC_ALIGNED_SIZE,
                   txq->descs, txq->descs_phys);
@@ -3019,7 +3039,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
                        txq->size * TSO_HEADER_SIZE,
                        &txq->tso_hdrs_phys, GFP_KERNEL);
     if (!txq->tso_hdrs) {
-        kfree(txq->tx_skb);
+        kfree(txq->buf);
         dma_free_coherent(pp->dev->dev.parent,
                   txq->size * MVNETA_DESC_ALIGNED_SIZE,
                   txq->descs, txq->descs_phys);
@@ -3074,7 +3094,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
 {
     struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

-    kfree(txq->tx_skb);
+    kfree(txq->buf);

     if (txq->tso_hdrs)
         dma_free_coherent(pp->dev->dev.parent,
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c

@@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return -EOPNOTSUPP;
 
-	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
-	for (i = 0; i < ets->ets_cap; i++) {
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
 		if (err)
 			return err;
 	}
 
+	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+	for (i = 0; i < ets->ets_cap; i++) {
 		err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
 		if (err)
 			return err;
drivers/net/ethernet/natsemi/sonic.c

@@ -256,7 +256,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
 	 */
 
 	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
-	if (!laddr) {
+	if (dma_mapping_error(lp->device, laddr)) {
 		pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -474,7 +474,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,
 
 	*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
 				   SONIC_RBSIZE, DMA_FROM_DEVICE);
-	if (!*new_addr) {
+	if (dma_mapping_error(lp->device, *new_addr)) {
 		dev_kfree_skb(*new_skb);
 		*new_skb = NULL;
 		return false;
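The two sonic hunks swap a NULL test for dma_mapping_error(): bus address 0
is perfectly valid on some platforms, so `if (!laddr)` can both miss real
mapping failures and reject good mappings. A minimal sketch of the required
pattern, with illustrative names:

	#include <linux/dma-mapping.h>

	/* Map a TX buffer and check the result the supported way. */
	static int example_map_tx(struct device *dev, void *buf, size_t len,
				  dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))	/* never compare with 0 */
			return -ENOMEM;

		*out = addr;
		return 0;
	}

The same helper-based check recurs in the ps3_gelic_net hunks further down.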
drivers/net/ethernet/qlogic/qed/qed_sriov.c

@@ -4404,6 +4404,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
 	}
 
 	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+	if (!vf)
+		return -EINVAL;
+
 	vport_id = vf->vport_id;
 
 	return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5150,7 +5153,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
 
 		/* Validate that the VF has a configured vport */
 		vf = qed_iov_get_vf_info(hwfn, i, true);
-		if (!vf->vport_instance)
+		if (!vf || !vf->vport_instance)
 			continue;
 
 		memset(&params, 0, sizeof(params));
drivers/net/ethernet/qualcomm/emac/emac.c

@@ -738,9 +738,15 @@ static int emac_remove(struct platform_device *pdev)
 	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
 	struct emac_adapter *adpt = netdev_priv(netdev);
 
+	netif_carrier_off(netdev);
+	netif_tx_disable(netdev);
+
 	unregister_netdev(netdev);
 	netif_napi_del(&adpt->rx_q.napi);
 
+	free_irq(adpt->irq.irq, &adpt->irq);
+	cancel_work_sync(&adpt->work_thread);
+
 	emac_clks_teardown(adpt);
 
 	put_device(&adpt->phydev->mdio.dev);
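The emac_remove() hunk reorders teardown so nothing can reach driver state
after it is freed: the TX path is quiesced before unregistering, and the IRQ
plus the deferred work item are torn down before the clocks and PHY
references they might touch. A condensed sketch of that shape with
hypothetical names (the call order is the point, not the details):

	#include <linux/netdevice.h>
	#include <linux/platform_device.h>

	static int example_remove(struct platform_device *pdev)
	{
		struct net_device *netdev = dev_get_drvdata(&pdev->dev);

		netif_carrier_off(netdev);	/* stop watchdog/TX activity */
		netif_tx_disable(netdev);

		unregister_netdev(netdev);	/* waits for users to drop off */

		/* Only now release things the IRQ handler or queued work
		 * could still dereference: free_irq(), cancel_work_sync(),
		 * then clocks and other resources, as in the hunk above.
		 */
		free_netdev(netdev);
		return 0;
	}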
drivers/net/ethernet/toshiba/ps3_gelic_net.c

@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,
 
 	/* set up the hardware pointers in each descriptor */
 	for (i = 0; i < no; i++, descr++) {
-		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
-		descr->bus_addr =
-			dma_map_single(ctodev(card), descr,
-				       GELIC_DESCR_SIZE,
-				       DMA_BIDIRECTIONAL);
+		dma_addr_t cpu_addr;
 
-		if (!descr->bus_addr)
+		gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
+
+		cpu_addr = dma_map_single(ctodev(card), descr,
+					  GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+		if (dma_mapping_error(ctodev(card), cpu_addr))
 			goto iommu_error;
+
+		descr->bus_addr = cpu_to_be32(cpu_addr);
 		descr->next = descr + 1;
 		descr->prev = descr - 1;
 	}
@@ -365,28 +367,30 @@ static int gelic_card_init_chain(struct gelic_card *card,
  *
  * allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
  * Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
  */
 static int gelic_descr_prepare_rx(struct gelic_card *card,
 				  struct gelic_descr *descr)
 {
+	static const unsigned int rx_skb_size =
+		ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+		GELIC_NET_RXBUF_ALIGN - 1;
+	dma_addr_t cpu_addr;
 	int offset;
-	unsigned int bufsize;
 
 	if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
 		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
-	/* we need to round up the buffer size to a multiple of 128 */
-	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
 
-	/* and we need to have it 128 byte aligned, therefore we allocate a
-	 * bit more */
-	descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+	descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
 	if (!descr->skb) {
 		descr->buf_addr = 0; /* tell DMAC don't touch memory */
 		dev_info(ctodev(card),
 			 "%s:allocate skb failed !!\n", __func__);
 		return -ENOMEM;
 	}
-	descr->buf_size = cpu_to_be32(bufsize);
+	descr->buf_size = cpu_to_be32(rx_skb_size);
 	descr->dmac_cmd_status = 0;
 	descr->result_size = 0;
 	descr->valid_size = 0;
@@ -397,11 +401,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
 	if (offset)
 		skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
 	/* io-mmu-map the skb */
-	descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
-						     descr->skb->data,
-						     GELIC_NET_MAX_MTU,
-						     DMA_FROM_DEVICE));
-	if (!descr->buf_addr) {
+	cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+				  GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+	descr->buf_addr = cpu_to_be32(cpu_addr);
+	if (dma_mapping_error(ctodev(card), cpu_addr)) {
 		dev_kfree_skb_any(descr->skb);
 		descr->skb = NULL;
 		dev_info(ctodev(card),
@@ -781,7 +784,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,
 
 	buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);
 
-	if (!buf) {
+	if (dma_mapping_error(ctodev(card), buf)) {
 		dev_err(ctodev(card),
 			"dma map 2 failed (%p, %i). Dropping packet\n",
 			skb->data, skb->len);
@@ -917,7 +920,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
 	data_error = be32_to_cpu(descr->data_error);
 	/* unmap skb buffer */
 	dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
-			 GELIC_NET_MAX_MTU,
+			 GELIC_NET_MAX_FRAME,
 			 DMA_FROM_DEVICE);
 
 	skb_put(skb, be32_to_cpu(descr->valid_size)?

drivers/net/ethernet/toshiba/ps3_gelic_net.h

@@ -19,8 +19,9 @@
 #define GELIC_NET_RX_DESCRIPTORS        128 /* num of descriptors */
 #define GELIC_NET_TX_DESCRIPTORS        128 /* num of descriptors */
 
-#define GELIC_NET_MAX_MTU               VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU               VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME             2312
+#define GELIC_NET_MAX_MTU               2294
+#define GELIC_NET_MIN_MTU               64
 #define GELIC_NET_RXBUF_ALIGN           128
 #define GELIC_CARD_RX_CSUM_DEFAULT      1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT      5*HZ
drivers/net/ethernet/xircom/xirc2ps_cs.c

@@ -503,6 +503,11 @@ static void
 xirc2ps_detach(struct pcmcia_device *link)
 {
 	struct net_device *dev = link->priv;
+	struct local_info *local = netdev_priv(dev);
+
+	netif_carrier_off(dev);
+	netif_tx_disable(dev);
+	cancel_work_sync(&local->tx_timeout_task);
 
 	dev_dbg(&link->dev, "detach\n");
 
drivers/net/ieee802154/ca8210.c

@@ -1944,10 +1944,9 @@ static int ca8210_skb_tx(
 	struct ca8210_priv  *priv
 )
 {
-	int status;
 	struct ieee802154_hdr header = { };
 	struct secspec secspec;
-	unsigned int mac_len;
+	int mac_len, status;
 
 	dev_dbg(&priv->spi->dev, "%s called\n", __func__);
 
@@ -1955,6 +1954,8 @@ static int ca8210_skb_tx(
 	 * packet
 	 */
 	mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+	if (mac_len < 0)
+		return mac_len;
 
 	secspec.security_level = header.sec.level;
 	secspec.key_id_mode = header.sec.key_id_mode;
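The ca8210 fix is a classic signedness bug: ieee802154_hdr_peek_addrs()
returns a negative errno on failure, but storing it in an `unsigned int`
converts that to a huge positive value, making a `mac_len < 0` check dead
code and letting the bogus value later index an array. A stand-alone
illustration with a hypothetical parser:

	#include <errno.h>
	#include <stdio.h>

	/* Returns a header length, or a negative errno on failure. */
	static int parse_header(unsigned int len)
	{
		return len ? (int)len : -EINVAL;
	}

	int main(void)
	{
		unsigned int bad = parse_header(0); /* -EINVAL wraps around */
		int good = parse_header(0);

		if (good < 0)
			printf("caught error %d\n", good);  /* reached */
		if (bad > 1000) /* "negative" result now looks like a length */
			printf("bogus length %u\n", bad);   /* also reached */
		return 0;
	}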
drivers/net/net_failover.c

@@ -130,14 +130,10 @@ static u16 net_failover_select_queue(struct net_device *dev,
 			txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
 		else
 			txq = netdev_pick_tx(primary_dev, skb, NULL);
-
-		qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
-
-		return txq;
+	} else {
+		txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
 	}
 
-	txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
-
 	/* Save the original txq to restore before passing to the driver */
 	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 
drivers/net/phy/mdio-thunder.c

@@ -104,6 +104,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
 		if (i >= ARRAY_SIZE(nexus->buses))
 			break;
 	}
+	fwnode_handle_put(fwn);
 	return 0;
 
 err_release_regions:
drivers/net/tun.c

@@ -250,6 +250,9 @@ struct tun_struct {
 	struct tun_prog __rcu *steering_prog;
 	struct tun_prog __rcu *filter_prog;
 	struct ethtool_link_ksettings link_ksettings;
+	/* init args */
+	struct file *file;
+	struct ifreq *ifr;
 };
 
 struct veth {
@@ -275,6 +278,9 @@ void *tun_ptr_to_xdp(void *ptr)
 }
 EXPORT_SYMBOL(tun_ptr_to_xdp);
 
+static void tun_flow_init(struct tun_struct *tun);
+static void tun_flow_uninit(struct tun_struct *tun);
+
 static int tun_napi_receive(struct napi_struct *napi, int budget)
 {
 	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
@@ -1027,6 +1033,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 
 static const struct ethtool_ops tun_ethtool_ops;
 
+static int tun_net_init(struct net_device *dev)
+{
+	struct tun_struct *tun = netdev_priv(dev);
+	struct ifreq *ifr = tun->ifr;
+	int err;
+
+	tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+	if (!tun->pcpu_stats)
+		return -ENOMEM;
+
+	spin_lock_init(&tun->lock);
+
+	err = security_tun_dev_alloc_security(&tun->security);
+	if (err < 0) {
+		free_percpu(tun->pcpu_stats);
+		return err;
+	}
+
+	tun_flow_init(tun);
+
+	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+			   NETIF_F_HW_VLAN_STAG_TX;
+	dev->features = dev->hw_features | NETIF_F_LLTX;
+	dev->vlan_features = dev->features &
+			     ~(NETIF_F_HW_VLAN_CTAG_TX |
+			       NETIF_F_HW_VLAN_STAG_TX);
+
+	tun->flags = (tun->flags & ~TUN_FEATURES) |
+		     (ifr->ifr_flags & TUN_FEATURES);
+
+	INIT_LIST_HEAD(&tun->disabled);
+	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
+			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
+	if (err < 0) {
+		tun_flow_uninit(tun);
+		security_tun_dev_free_security(tun->security);
+		free_percpu(tun->pcpu_stats);
+		return err;
+	}
+	return 0;
+}
+
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
@@ -1285,6 +1334,7 @@ static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
 }
 
 static const struct net_device_ops tun_netdev_ops = {
+	.ndo_init		= tun_net_init,
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
 	.ndo_stop		= tun_net_close,
@@ -1365,6 +1415,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 }
 
 static const struct net_device_ops tap_netdev_ops = {
+	.ndo_init		= tun_net_init,
 	.ndo_uninit		= tun_net_uninit,
 	.ndo_open		= tun_net_open,
 	.ndo_stop		= tun_net_close,
@@ -1405,7 +1456,7 @@ static void tun_flow_uninit(struct tun_struct *tun)
 #define MAX_MTU 65535
 
 /* Initialize net device. */
-static void tun_net_init(struct net_device *dev)
+static void tun_net_initialize(struct net_device *dev)
 {
 	struct tun_struct *tun = netdev_priv(dev);
 
@@ -2839,9 +2890,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 		if (!dev)
 			return -ENOMEM;
-		err = dev_get_valid_name(net, dev, name);
-		if (err < 0)
-			goto err_free_dev;
 
 		dev_net_set(dev, net);
 		dev->rtnl_link_ops = &tun_link_ops;
@@ -2860,41 +2908,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		tun->rx_batched = 0;
 		RCU_INIT_POINTER(tun->steering_prog, NULL);
 
-		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
-		if (!tun->pcpu_stats) {
-			err = -ENOMEM;
-			goto err_free_dev;
-		}
+		tun->ifr = ifr;
+		tun->file = file;
 
-		spin_lock_init(&tun->lock);
-
-		err = security_tun_dev_alloc_security(&tun->security);
-		if (err < 0)
-			goto err_free_stat;
-
-		tun_net_init(dev);
-		tun_flow_init(tun);
-
-		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
-				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
-				   NETIF_F_HW_VLAN_STAG_TX;
-		dev->features = dev->hw_features | NETIF_F_LLTX;
-		dev->vlan_features = dev->features &
-				     ~(NETIF_F_HW_VLAN_CTAG_TX |
-				       NETIF_F_HW_VLAN_STAG_TX);
-
-		tun->flags = (tun->flags & ~TUN_FEATURES) |
-			     (ifr->ifr_flags & TUN_FEATURES);
-
-		INIT_LIST_HEAD(&tun->disabled);
-		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
-				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
-		if (err < 0)
-			goto err_free_flow;
+		tun_net_initialize(dev);
 
 		err = register_netdevice(tun->dev);
-		if (err < 0)
-			goto err_detach;
+		if (err < 0) {
+			free_netdev(dev);
+			return err;
+		}
+		/* free_netdev() won't check refcnt, to aovid race
+		 * with dev_put() we need publish tun after registration.
+		 */
@@ -2913,20 +2936,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 
 	strcpy(ifr->ifr_name, tun->dev->name);
 	return 0;
 
-err_detach:
-	tun_detach_all(dev);
-	/* register_netdevice() already called tun_free_netdev() */
-	goto err_free_dev;
-
-err_free_flow:
-	tun_flow_uninit(tun);
-	security_tun_dev_free_security(tun->security);
-err_free_stat:
-	free_percpu(tun->pcpu_stats);
 err_free_dev:
 	free_netdev(dev);
 	return err;
 }
 
 static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
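The tun rework moves the per-device allocations out of tun_set_iff() and
into an .ndo_init hook. register_netdevice() calls .ndo_init itself and, if
anything later in registration fails, unwinds through .ndo_uninit and the
destructor, which is what lets the patch delete the hand-rolled
err_detach/err_free_* ladder. The shape of that contract as a skeletal
driver (names are illustrative, bodies trimmed to the essentials):

	#include <linux/netdevice.h>

	static int example_init(struct net_device *dev)
	{
		/* allocate per-device state here; a failure aborts
		 * register_netdevice() before the device becomes visible */
		return 0;
	}

	static void example_uninit(struct net_device *dev)
	{
		/* runs on unregister *and* when registration fails after
		 * init, so cleanup lives in exactly one place */
	}

	static const struct net_device_ops example_ops = {
		.ndo_init   = example_init,
		.ndo_uninit = example_uninit,
	};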
drivers/net/usb/cdc_mbim.c

@@ -663,6 +663,11 @@ static const struct usb_device_id mbim_devs[] = {
 	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
 	},
 
+	/* Telit FE990 */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+	},
+
 	/* default entry */
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
drivers/net/usb/qmi_wwan.c

@@ -1319,6 +1319,7 @@ static const struct usb_device_id products[] = {
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)},	/* Telit FN980 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)},	/* Telit LN920 */
 	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)},	/* Telit FN990 */
+	{QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)},	/* Telit FE990 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},	/* Telit ME910 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},	/* Telit ME910 dual modem */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
drivers/net/usb/smsc95xx.c

@@ -1935,6 +1935,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 		size = (u16)((header & RX_STS_FL_) >> 16);
 		align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
 
+		if (unlikely(size > skb->len)) {
+			netif_dbg(dev, rx_err, dev->net,
+				  "size err header=0x%08x\n", header);
+			return 0;
+		}
+
 		if (unlikely(header & RX_STS_ES_)) {
 			netif_dbg(dev, rx_err, dev->net,
 				  "Error header=0x%08x\n", header);
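The smsc95xx check treats the device-reported frame length as untrusted
input: `size` is parsed out of the RX status word supplied by the adapter,
so it must be bounded by the bytes the USB transfer actually delivered
(skb->len) before any pull/trim arithmetic uses it. Reduced to its core
(the status-word layout shown is illustrative):

	#include <linux/skbuff.h>

	static bool frame_len_ok(const struct sk_buff *skb, u32 status_word)
	{
		u16 size = (u16)((status_word & 0x3fff0000) >> 16);

		/* A confused or malicious device can claim more data than
		 * it sent; drop the frame instead of reading past the end. */
		return size <= skb->len;
	}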
drivers/net/xen-netback/common.h

@@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
 	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
 	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
 
-	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
+	struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS];
 	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
 	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
 	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
drivers/net/xen-netback/netback.c

@@ -327,6 +327,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
 struct xenvif_tx_cb {
 	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
 	u8 copy_count;
+	u32 split_mask;
 };
 
 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -354,6 +355,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
 	struct sk_buff *skb =
 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
 			  GFP_ATOMIC | __GFP_NOWARN);
+
+	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
 	if (unlikely(skb == NULL))
 		return NULL;
 
@@ -389,11 +392,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 	nr_slots = shinfo->nr_frags + 1;
 
 	copy_count(skb) = 0;
+	XENVIF_TX_CB(skb)->split_mask = 0;
 
 	/* Create copy ops for exactly data_len bytes into the skb head. */
 	__skb_put(skb, data_len);
 	while (data_len > 0) {
 		int amount = data_len > txp->size ? txp->size : data_len;
+		bool split = false;
 
 		cop->source.u.ref = txp->gref;
 		cop->source.domid = queue->vif->domid;
@@ -406,6 +411,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
 				               - data_len);
 
+		/* Don't cross local page boundary! */
+		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+			amount = XEN_PAGE_SIZE - cop->dest.offset;
+			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+			split = true;
+		}
+
 		cop->len = amount;
 		cop->flags = GNTCOPY_source_gref;
 
@@ -413,7 +425,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 		pending_idx = queue->pending_ring[index];
 		callback_param(queue, pending_idx).ctx = NULL;
 		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
-		copy_count(skb)++;
+		if (!split)
+			copy_count(skb)++;
 
 		cop++;
 		data_len -= amount;
@@ -434,7 +447,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
 			nr_slots--;
 		} else {
 			/* The copy op partially covered the tx_request.
-			 * The remainder will be mapped.
+			 * The remainder will be mapped or copied in the next
+			 * iteration.
 			 */
 			txp->offset += amount;
 			txp->size -= amount;
@@ -532,6 +546,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 		pending_idx = copy_pending_idx(skb, i);
 
 		newerr = (*gopp_copy)->status;
+
+		/* Split copies need to be handled together. */
+		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+			(*gopp_copy)++;
+			if (!newerr)
+				newerr = (*gopp_copy)->status;
+		}
 		if (likely(!newerr)) {
 			/* The first frag might still have this slot mapped */
 			if (i < copy_count(skb) - 1 || !sharedslot)
drivers/pinctrl/pinctrl-at91-pio4.c

@@ -1069,7 +1069,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
 		dev_err(dev, "can't add the irq domain\n");
 		return -ENODEV;
 	}
-	atmel_pioctrl->irq_domain->name = "atmel gpio";
 
 	for (i = 0; i < atmel_pioctrl->npins; i++) {
 		int irq = irq_create_mapping(atmel_pioctrl->irq_domain, i);
drivers/pinctrl/pinctrl-ocelot.c

@@ -420,7 +420,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
 	regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
 			   BIT(p), f << p);
 	regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
-			   BIT(p), f << (p - 1));
+			   BIT(p), (f >> 1) << p);
 
 	return 0;
 }
drivers/platform/chrome/cros_ec_chardev.c

@@ -285,7 +285,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
 	    u_cmd.insize > EC_MAX_MSG_BYTES)
 		return -EINVAL;
 
-	s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+	s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
 			GFP_KERNEL);
 	if (!s_cmd)
 		return -ENOMEM;
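The one-word cros_ec change (kmalloc to kzalloc) is an information-leak
fix: part of this buffer is later copied back to userspace, and whatever
the EC does not overwrite would otherwise hand out stale kernel heap
contents. The general rule, sketched with hypothetical names:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	/* Any buffer that flows back to userspace must start zeroed. */
	static long example_copyout(void __user *arg, size_t payload)
	{
		u8 *buf = kzalloc(payload, GFP_KERNEL);	/* not kmalloc() */
		long ret = 0;

		if (!buf)
			return -ENOMEM;
		/* ... device fills some, possibly not all, of buf ... */
		if (copy_to_user(arg, buf, payload))
			ret = -EFAULT;
		kfree(buf);
		return ret;
	}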
drivers/power/supply/da9150-charger.c

@@ -662,6 +662,7 @@ static int da9150_charger_remove(struct platform_device *pdev)
 
 	if (!IS_ERR_OR_NULL(charger->usb_phy))
 		usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+	cancel_work_sync(&charger->otg_work);
 
 	power_supply_unregister(charger->battery);
 	power_supply_unregister(charger->usb);
drivers/ptp/ptp_qoriq.c

@@ -604,7 +604,7 @@ static int ptp_qoriq_probe(struct platform_device *dev)
 	return 0;
 
 no_clock:
-	iounmap(ptp_qoriq->base);
+	iounmap(base);
 no_ioremap:
 	release_resource(ptp_qoriq->rsrc);
 no_resource:
drivers/regulator/fixed.c

@@ -181,8 +181,8 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
 
 		drvdata->enable_clock = devm_clk_get(dev, NULL);
 		if (IS_ERR(drvdata->enable_clock)) {
-			dev_err(dev, "Cant get enable-clock from devicetree\n");
-			return -ENOENT;
+			dev_err(dev, "Can't get enable-clock from devicetree\n");
+			return PTR_ERR(drvdata->enable_clock);
 		}
 	} else {
 		drvdata->desc.ops = &fixed_voltage_ops;
drivers/s390/crypto/vfio_ap_drv.c

@@ -82,8 +82,9 @@ static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
 
 static void vfio_ap_matrix_dev_release(struct device *dev)
 {
-	struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+	struct ap_matrix_dev *matrix_dev;
 
+	matrix_dev = container_of(dev, struct ap_matrix_dev, device);
 	kfree(matrix_dev);
 }
 
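The vfio_ap hunk avoids dev_get_drvdata() in a release() callback: drvdata
can already be cleared (or never set) by the time release runs, while
container_of() on the embedded struct device is valid by construction.
Minimal sketch of the idiom:

	#include <linux/device.h>
	#include <linux/slab.h>

	struct example_dev {
		int state;
		struct device device;	/* embedded, not a pointer */
	};

	static void example_release(struct device *dev)
	{
		/* needs no drvdata and cannot go stale */
		struct example_dev *ex =
			container_of(dev, struct example_dev, device);

		kfree(ex);
	}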
drivers/scsi/device_handler/scsi_dh_alua.c

@@ -1036,10 +1036,12 @@ static int alua_activate(struct scsi_device *sdev,
 	rcu_read_unlock();
 	mutex_unlock(&h->init_mutex);
 
-	if (alua_rtpg_queue(pg, sdev, qdata, true))
+	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
 		fn = NULL;
-	else
+	} else {
+		kfree(qdata);
 		err = SCSI_DH_DEV_OFFLINED;
+	}
 	kref_put(&pg->kref, release_port_group);
 out:
 	if (fn)
drivers/scsi/lpfc/lpfc_sli.c

@@ -20407,20 +20407,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
 static struct lpfc_io_buf *
 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
 {
-	struct lpfc_io_buf *lpfc_ncmd;
+	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
 	struct lpfc_io_buf *lpfc_ncmd_next;
 	unsigned long iflag;
 	struct lpfc_epd_pool *epd_pool;
 
 	epd_pool = &phba->epd_pool;
-	lpfc_ncmd = NULL;
 
 	spin_lock_irqsave(&epd_pool->lock, iflag);
 	if (epd_pool->count > 0) {
-		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+		list_for_each_entry_safe(iter, lpfc_ncmd_next,
 					 &epd_pool->list, list) {
-			list_del(&lpfc_ncmd->list);
+			list_del(&iter->list);
 			epd_pool->count--;
+			lpfc_ncmd = iter;
 			break;
 		}
 	}
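The lpfc hunk is an instance of the tree-wide "list iterator used after the
loop" cleanup: when list_for_each_entry_safe() terminates normally, the
cursor does not point at a valid element, so the element that matched is
handed out through a separate variable assigned only inside the loop. The
idiom, with illustrative types:

	#include <linux/list.h>

	struct item {
		struct list_head list;
	};

	/* Detach and return the first element, or NULL if the list is
	 * empty; only 'found', never the cursor, escapes the loop. */
	static struct item *pop_first(struct list_head *head)
	{
		struct item *found = NULL, *iter, *tmp;

		list_for_each_entry_safe(iter, tmp, head, list) {
			list_del(&iter->list);
			found = iter;
			break;
		}
		return found;
	}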
drivers/scsi/megaraid/megaraid_sas_fusion.c

@@ -4659,7 +4659,7 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
 	devhandle = megasas_get_tm_devhandle(scmd->device);
 
 	if (devhandle == (u16)ULONG_MAX) {
-		ret = SUCCESS;
+		ret = FAILED;
 		sdev_printk(KERN_INFO, scmd->device,
 			    "task abort issued for invalid devhandle\n");
 		mutex_unlock(&instance->reset_mutex);
@@ -4729,7 +4729,7 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
 	devhandle = megasas_get_tm_devhandle(scmd->device);
 
 	if (devhandle == (u16)ULONG_MAX) {
-		ret = SUCCESS;
+		ret = FAILED;
 		sdev_printk(KERN_INFO, scmd->device,
 			    "target reset issued for invalid devhandle\n");
 		mutex_unlock(&instance->reset_mutex);
drivers/scsi/qla2xxx/qla_os.c

@@ -1738,6 +1738,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
 	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
 		sp = req->outstanding_cmds[cnt];
 		if (sp) {
+			/*
+			 * perform lockless completion during driver unload
+			 */
+			if (qla2x00_chip_is_down(vha)) {
+				req->outstanding_cmds[cnt] = NULL;
+				spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+				sp->done(sp, res);
+				spin_lock_irqsave(qp->qp_lock_ptr, flags);
+				continue;
+			}
+
 			switch (sp->cmd_type) {
 			case TYPE_SRB:
 				qla2x00_abort_srb(qp, sp, res, &flags);
drivers/scsi/scsi_devinfo.c

@@ -232,6 +232,7 @@ static struct {
 	{"SGI", "RAID5", "*", BLIST_SPARSELUN},
 	{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
 	{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+	{"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
 	{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
 	{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
drivers/scsi/ufs/ufshcd.c

@@ -9324,5 +9324,6 @@ EXPORT_SYMBOL_GPL(ufshcd_init);
 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
drivers/target/iscsi/iscsi_target_parameters.c

@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
 		return param;
 
 	if (!(param->phase & phase)) {
-		pr_err("Key \"%s\" may not be negotiated during ",
-		       param->name);
+		char *phase_name;
+
 		switch (phase) {
 		case PHASE_SECURITY:
-			pr_debug("Security phase.\n");
+			phase_name = "Security";
 			break;
 		case PHASE_OPERATIONAL:
-			pr_debug("Operational phase.\n");
+			phase_name = "Operational";
 			break;
 		default:
-			pr_debug("Unknown phase.\n");
+			phase_name = "Unknown";
 		}
+		pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+		       param->name, phase_name);
 		return NULL;
 	}
 
drivers/thunderbolt/nhi.c

@@ -40,7 +40,7 @@
 
 #define NHI_MAILBOX_TIMEOUT	500 /* ms */
 
-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
 {
 	int bit = ring->hop;
 	if (!ring->is_tx)
drivers/tty/hvc/hvc_xen.c

@@ -43,6 +43,7 @@ struct xencons_info {
 	int irq;
 	int vtermno;
 	grant_ref_t gntref;
+	spinlock_t ring_lock;
 };
 
 static LIST_HEAD(xenconsoles);
@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
 	XENCONS_RING_IDX cons, prod;
 	struct xencons_interface *intf = xencons->intf;
 	int sent = 0;
+	unsigned long flags;
 
+	spin_lock_irqsave(&xencons->ring_lock, flags);
 	cons = intf->out_cons;
 	prod = intf->out_prod;
 	mb();			/* update queue values before going on */
 
 	if ((prod - cons) > sizeof(intf->out)) {
+		spin_unlock_irqrestore(&xencons->ring_lock, flags);
 		pr_err_once("xencons: Illegal ring page indices");
 		return -EINVAL;
 	}
@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,
 
 	wmb();			/* write ring before updating pointer */
 	intf->out_prod = prod;
+	spin_unlock_irqrestore(&xencons->ring_lock, flags);
 
 	if (sent)
 		notify_daemon(xencons);
@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
 	int recv = 0;
 	struct xencons_info *xencons = vtermno_to_xencons(vtermno);
 	unsigned int eoiflag = 0;
+	unsigned long flags;
 
 	if (xencons == NULL)
 		return -EINVAL;
 	intf = xencons->intf;
 
+	spin_lock_irqsave(&xencons->ring_lock, flags);
 	cons = intf->in_cons;
 	prod = intf->in_prod;
 	mb();			/* get pointers before reading ring */
 
 	if ((prod - cons) > sizeof(intf->in)) {
+		spin_unlock_irqrestore(&xencons->ring_lock, flags);
 		pr_err_once("xencons: Illegal ring page indices");
 		return -EINVAL;
 	}
@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
 		xencons->out_cons = intf->out_cons;
 		xencons->out_cons_same = 0;
 	}
+	if (!recv && xencons->out_cons_same++ > 1) {
+		eoiflag = XEN_EOI_FLAG_SPURIOUS;
+	}
+	spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
 	if (recv) {
 		notify_daemon(xencons);
-	} else if (xencons->out_cons_same++ > 1) {
-		eoiflag = XEN_EOI_FLAG_SPURIOUS;
 	}
 
 	xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
+		spin_lock_init(&info->ring_lock);
 	} else if (info->intf != NULL) {
 		/* already configured */
 		return 0;
@@ -275,6 +287,7 @@ static int xen_hvm_console_init(void)
 
 static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
 {
+	spin_lock_init(&info->ring_lock);
 	info->evtchn = xen_start_info->console.domU.evtchn;
 	/* GFN == MFN for PV guest */
 	info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
 		info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 		if (!info)
 			return -ENOMEM;
+		spin_lock_init(&info->ring_lock);
 	}
 
 	info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
 	info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
 	if (!info)
 		return -ENOMEM;
+	spin_lock_init(&info->ring_lock);
 	dev_set_drvdata(&dev->dev, info);
 	info->xbdev = dev;
 	info->vtermno = xenbus_devid_to_vtermno(devid);
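The hvc_xen series serializes all access to the console ring: each
xencons_info gains a spinlock (initialized wherever the structure is
allocated), both index reads and the prod/cons sanity check happen under
it, and the lock is dropped before notifying the other end. A reduced
sketch of the discipline on a schematic ring (sizes and layout are
illustrative, not the Xen console layout):

	#include <linux/errno.h>
	#include <linux/spinlock.h>

	struct ring {
		spinlock_t lock;	/* spin_lock_init() at alloc time */
		unsigned int cons, prod;
		unsigned char buf[256];
	};

	static int ring_put(struct ring *r, unsigned char c)
	{
		unsigned long flags;
		int ret = -ENOSPC;

		spin_lock_irqsave(&r->lock, flags);
		if (r->prod - r->cons > sizeof(r->buf)) {
			/* corrupt indices: bail out under the lock */
			spin_unlock_irqrestore(&r->lock, flags);
			return -EINVAL;
		}
		if (r->prod - r->cons < sizeof(r->buf)) {
			r->buf[r->prod++ & (sizeof(r->buf) - 1)] = c;
			ret = 0;
		}
		spin_unlock_irqrestore(&r->lock, flags);
		return ret;	/* notify the consumer only after unlocking */
	}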
drivers/usb/cdns3/cdns3-pci-wrap.c

@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
 			return NULL;
 	}
 
+	if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+	    func->devfn != PCI_DEV_FN_OTG) {
+		return NULL;
+	}
+
 	return func;
 }
 
drivers/usb/chipidea/ci.h

@@ -203,6 +203,7 @@ struct hw_bank {
  * @in_lpm: if the core in low power mode
  * @wakeup_int: if wakeup interrupt occur
  * @rev: The revision number for controller
+ * @mutex: protect code from concorrent running when doing role switch
  */
 struct ci_hdrc {
 	struct device			*dev;
@@ -256,6 +257,7 @@ struct ci_hdrc {
 	bool				in_lpm;
 	bool				wakeup_int;
 	enum ci_revision		rev;
+	struct mutex			mutex;
 };
 
 static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
drivers/usb/chipidea/core.c

@@ -960,9 +960,16 @@ static ssize_t role_store(struct device *dev,
 			     strlen(ci->roles[role]->name)))
 			break;
 
-	if (role == CI_ROLE_END || role == ci->role)
+	if (role == CI_ROLE_END)
 		return -EINVAL;
 
+	mutex_lock(&ci->mutex);
+
+	if (role == ci->role) {
+		mutex_unlock(&ci->mutex);
+		return n;
+	}
+
 	pm_runtime_get_sync(dev);
 	disable_irq(ci->irq);
 	ci_role_stop(ci);
@@ -971,6 +978,7 @@ static ssize_t role_store(struct device *dev,
 	ci_handle_vbus_change(ci);
 	enable_irq(ci->irq);
 	pm_runtime_put_sync(dev);
+	mutex_unlock(&ci->mutex);
 
 	return (ret == 0) ? n : ret;
 }
@@ -1006,6 +1014,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	spin_lock_init(&ci->lock);
+	mutex_init(&ci->mutex);
 	ci->dev = dev;
 	ci->platdata = dev_get_platdata(dev);
 	ci->imx28_write_fix = !!(ci->platdata->flags &
drivers/usb/chipidea/otg.c

@@ -164,8 +164,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
 
 static void ci_handle_id_switch(struct ci_hdrc *ci)
 {
-	enum ci_role role = ci_otg_role(ci);
+	enum ci_role role;
 
+	mutex_lock(&ci->mutex);
+	role = ci_otg_role(ci);
 	if (role != ci->role) {
 		dev_dbg(ci->dev, "switching from %s to %s\n",
 			ci_role(ci)->name, ci->roles[role]->name);
@@ -188,6 +190,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
 		if (role == CI_ROLE_GADGET)
 			ci_handle_vbus_change(ci);
 	}
+	mutex_unlock(&ci->mutex);
 }
 /**
  * ci_otg_work - perform otg (vbus/id) event handle
drivers/usb/gadget/function/u_audio.c

@@ -626,7 +626,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
 	uac = g_audio->uac;
 	card = uac->card;
 	if (card)
-		snd_card_free(card);
+		snd_card_free_when_closed(card);
 
 	kfree(uac->p_prm.ureq);
 	kfree(uac->c_prm.ureq);
drivers/usb/storage/unusual_uas.h

@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BROKEN_FUA),
 
+/* Reported by: Yaroslav Furman <yaro330@gmail.com> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+		"JMicron",
+		"JMS583Gen 2",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_REPORT_OPCODES),
+
 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
 UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
 		"PNY",
drivers/video/fbdev/au1200fb.c

@@ -1040,6 +1040,9 @@ static int au1200fb_fb_check_var(struct fb_var_screeninfo *var,
 	u32 pixclock;
 	int screen_size, plane;
 
+	if (!var->pixclock)
+		return -EINVAL;
+
 	plane = fbdev->plane;
 
 	/* Make sure that the mode respect all LCD controller and

drivers/video/fbdev/geode/lxfb_core.c

@@ -234,6 +234,9 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
 
 static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
+	if (!var->pixclock)
+		return -EINVAL;
+
 	if (var->xres > 1920 || var->yres > 1440)
 		return -EINVAL;
 

drivers/video/fbdev/intelfb/intelfbdrv.c

@@ -1214,6 +1214,9 @@ static int intelfb_check_var(struct fb_var_screeninfo *var,
 
 	dinfo = GET_DINFO(info);
 
+	if (!var->pixclock)
+		return -EINVAL;
+
 	/* update the pitch */
 	if (intelfbhw_validate_mode(dinfo, var) != 0)
 		return -EINVAL;

drivers/video/fbdev/nvidia/nvidia.c

@@ -766,6 +766,8 @@ static int nvidiafb_check_var(struct fb_var_screeninfo *var,
 	int pitch, err = 0;
 
 	NVTRACE_ENTER();
+	if (!var->pixclock)
+		return -EINVAL;
 
 	var->transp.offset = 0;
 	var->transp.length = 0;
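All four fbdev hunks above are the same divide-by-zero guard: the
FBIOPUT_VSCREENINFO ioctl lets userspace submit an arbitrary
fb_var_screeninfo, and these drivers later divide by var->pixclock (for
example via the PICOS2KHZ conversion), so a zero value has to be rejected
in fb_check_var before any mode arithmetic. The shared shape, with the
driver body elided:

	#include <linux/fb.h>

	static int example_check_var(struct fb_var_screeninfo *var,
				     struct fb_info *info)
	{
		if (!var->pixclock)	/* userspace-controlled divisor */
			return -EINVAL;

		/* ... driver-specific validation continues ... */
		return 0;
	}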
Some files were not shown because too many files have changed in this diff.