Linux 5.4.143
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEE4n5dijQDou9mhzu83qZv95d3LNwFAmEnj2AACgkQ3qZv95d3
LNxNEQ//auFOSmgsMtI8LDmKlP/f22+FmICk8+IHeBMRBMDY0WGEEdsRZgcf4R7M
hgyBn8ISmU5W0idpoxzVTiNxDJ0YVbVSIX12lZO6OHnwcv6hNW7iOW5TaGjd8EN+
fkh8MtAToBQrp4fFb1QkC11pYNMPiuvDNB2nW+F3ixfYLyC1EF4g2/qVUKy7s6rZ
dbqDfuI3Q7R2opsIkpmsV7ClKGbJzsP7oo0H5EOQMpmOowhg3oJy8oYqMMTgij1T
bJU8kujNElsK+/nbpVzJPrpprQH9eGP+hB5ZAv6s/FuJ6RmkoAczYQnX3HL6TfCS
ymoyJ01gsmDic9RnG6qei5LkCwf5Td2SKjRZdqGWKTluWD1ZAwzUX8Ww6K+t5uWk
PQPyCfU2wk2D3JjJWt0vTxl/GZGAkYbZpy5ISZFJhK7/j9/oTSrPWra7/BRu4K2I
2PK7XGjNyQxSguQqmG064Q0nYEOU03pR2H8tyG3iH0nBBd9p54D0Bg0D73I2h0az
PoGhBo71m9SYCPP1zSXl+xLFyWGZZDUYCaU9KPlwkYCCcRUSQbfCKwrYHfEcHZgL
4QtYlpUi+/C0Ga7gAK9ierqCKSNTOpoVna618j97uqCYVIU8estLBqX4mMAQquVF
R8+cy6L/aTBVw4Zwd0Jmt85GwBHlHahUGEq87+Qpw/laqjkBFcg=
=SIVq
-----END PGP SIGNATURE-----

Merge 5.4.143 into android11-5.4-lts

Changes in 5.4.143
    ext4: fix EXT4_MAX_LOGICAL_BLOCK macro
    x86/fpu: Make init_fpstate correct with optimized XSAVE
    ath: Use safer key clearing with key cache entries
    ath9k: Clear key cache explicitly on disabling hardware
    ath: Export ath_hw_keysetmac()
    ath: Modify ath_key_delete() to not need full key entry
    ath9k: Postpone key cache entry deletion for TXQ frames reference it
    mtd: cfi_cmdset_0002: fix crash when erasing/writing AMD cards
    media: zr364xx: propagate errors from zr364xx_start_readpipe()
    media: zr364xx: fix memory leaks in probe()
    media: drivers/media/usb: fix memory leak in zr364xx_probe
    USB: core: Avoid WARNings for 0-length descriptor requests
    dmaengine: xilinx_dma: Fix read-after-free bug when terminating transfers
    dmaengine: usb-dmac: Fix PM reference leak in usb_dmac_probe()
    ARM: dts: am43x-epos-evm: Reduce i2c0 bus speed for tps65218
    dmaengine: of-dma: router_xlate to return -EPROBE_DEFER if controller is not yet available
    scsi: megaraid_mm: Fix end of loop tests for list_for_each_entry()
    scsi: scsi_dh_rdac: Avoid crash during rdac_bus_attach()
    scsi: core: Avoid printing an error if target_alloc() returns -ENXIO
    scsi: core: Fix capacity set to zero after offlinining device
    ARM: dts: nomadik: Fix up interrupt controller node names
    net: usb: lan78xx: don't modify phy_device state concurrently
    drm/amd/display: Fix Dynamic bpp issue with 8K30 with Navi 1X
    Bluetooth: hidp: use correct wait queue when removing ctrl_wait
    iommu: Check if group is NULL before remove device
    cpufreq: armada-37xx: forbid cpufreq for 1.2 GHz variant
    dccp: add do-while-0 stubs for dccp_pr_debug macros
    virtio: Protect vqs list access
    vhost: Fix the calculation in vhost_overflow()
    bpf: Clear zext_dst of dead insns
    bnxt: don't lock the tx queue from napi poll
    bnxt: disable napi before canceling DIM
    net: 6pack: fix slab-out-of-bounds in decode_data
    ptp_pch: Restore dependency on PCI
    bnxt_en: Add missing DMA memory barriers
    vrf: Reset skb conntrack connection on VRF rcv
    virtio-net: support XDP when not more queues
    virtio-net: use NETIF_F_GRO_HW instead of NETIF_F_LRO
    net: qlcnic: add missed unlock in qlcnic_83xx_flash_read32
    net: mdio-mux: Don't ignore memory allocation errors
    net: mdio-mux: Handle -EPROBE_DEFER correctly
    ovs: clear skb->tstamp in forwarding path
    i40e: Fix ATR queue selection
    iavf: Fix ping is lost after untrusted VF had tried to change MAC
    ovl: add splice file read write helper
    mmc: dw_mmc: Fix hang on data CRC error
    ALSA: hda - fix the 'Capture Switch' value change notifications
    tracing / histogram: Fix NULL pointer dereference on strcmp() on NULL event name
    slimbus: messaging: start transaction ids from 1 instead of zero
    slimbus: messaging: check for valid transaction id
    slimbus: ngd: reset dma setup during runtime pm
    ipack: tpci200: fix many double free issues in tpci200_pci_probe
    ipack: tpci200: fix memory leak in the tpci200_register
    btrfs: prevent rename2 from exchanging a subvol with a directory from different parents
    PCI: Increase D3 delay for AMD Renoir/Cezanne XHCI
    ASoC: intel: atom: Fix breakage for PCM buffer address setup
    mm, memcg: avoid stale protection values when cgroup is above protection
    mm: memcontrol: fix occasional OOMs due to proportional memory.low reclaim
    fs: warn about impending deprecation of mandatory locks
    netfilter: nft_exthdr: fix endianness of tcp option cast
    Linux 5.4.143

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I5220b773c51448c4b92f8b51dc002afe144a8499
commit 874997f95f

Makefile | 2
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 142
+SUBLEVEL = 143
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -589,7 +589,7 @@
 	status = "okay";
 	pinctrl-names = "default";
 	pinctrl-0 = <&i2c0_pins>;
-	clock-frequency = <400000>;
+	clock-frequency = <100000>;

 	tps65218: tps65218@24 {
 		reg = <0x24>;
@@ -755,14 +755,14 @@
 		status = "disabled";
 	};

-	vica: intc@10140000 {
+	vica: interrupt-controller@10140000 {
 		compatible = "arm,versatile-vic";
 		interrupt-controller;
 		#interrupt-cells = <1>;
 		reg = <0x10140000 0x20>;
 	};

-	vicb: intc@10140020 {
+	vicb: interrupt-controller@10140020 {
 		compatible = "arm,versatile-vic";
 		interrupt-controller;
 		#interrupt-cells = <1>;
@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 }

+static inline void fxsave(struct fxregs_state *fx)
+{
+	if (IS_ENABLED(CONFIG_X86_32))
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+	else
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
 /* These macros all use (%edi)/(%rdi) as the single memory argument. */
 #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
 #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"

@@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
 		: "memory")

-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
-	u64 mask = -1;
-	u32 lmask = mask;
-	u32 hmask = mask >> 32;
-	int err;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
-
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
-	else
-		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
-	/* We should never fault when copying to a kernel buffer: */
-	WARN_ON_FPU(err);
-}
-
 /*
  * This function is called only during boot time when x86 caps are not set
  * up and alternative can not be used yet.
@@ -398,6 +398,24 @@ static void __init print_xstate_offset_size(void)
 	}
 }

+/*
+ * All supported features have either init state all zeros or are
+ * handled in setup_init_fpu() individually. This is an explicit
+ * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
+ * newly added supported features at build time and make people
+ * actually look at the init state for the new feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+	(XFEATURE_MASK_FP | \
+	 XFEATURE_MASK_SSE | \
+	 XFEATURE_MASK_YMM | \
+	 XFEATURE_MASK_OPMASK | \
+	 XFEATURE_MASK_ZMM_Hi256 | \
+	 XFEATURE_MASK_Hi16_ZMM | \
+	 XFEATURE_MASK_PKRU | \
+	 XFEATURE_MASK_BNDREGS | \
+	 XFEATURE_MASK_BNDCSR)
+
 /*
  * setup the xstate image representing the init state
  */

@@ -405,6 +423,8 @@ static void __init setup_init_fpu_buf(void)
 {
 	static int on_boot_cpu __initdata = 1;

+	BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED);
+
 	WARN_ON_FPU(!on_boot_cpu);
 	on_boot_cpu = 0;

@@ -423,10 +443,22 @@ static void __init setup_init_fpu_buf(void)
 	copy_kernel_to_xregs_booting(&init_fpstate.xsave);

 	/*
-	 * Dump the init state again. This is to identify the init state
-	 * of any feature which is not represented by all zero's.
+	 * All components are now in init state. Read the state back so
+	 * that init_fpstate contains all non-zero init state. This only
+	 * works with XSAVE, but not with XSAVEOPT and XSAVES because
+	 * those use the init optimization which skips writing data for
+	 * components in init state.
+	 *
+	 * XSAVE could be used, but that would require to reshuffle the
+	 * data when XSAVES is available because XSAVES uses xstate
+	 * compaction. But doing so is a pointless exercise because most
+	 * components have an all zeros init state except for the legacy
+	 * ones (FP and SSE). Those can be saved with FXSAVE into the
+	 * legacy area. Adding new features requires to ensure that init
+	 * state is all zeroes or if not to add the necessary handling
+	 * here.
 	 */
-	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+	fxsave(&init_fpstate.fxsave);
 }

 static int xfeature_uncompacted_offset(int xfeature_nr)
@@ -102,7 +102,11 @@ struct armada_37xx_dvfs {
 };

 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
-	{.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
+	/*
+	 * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
+	 * unstable because we do not know how to configure it properly.
+	 */
+	/* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
 	{.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
 	{.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
 	{.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
@@ -65,8 +65,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
 		return NULL;

 	ofdma_target = of_dma_find_controller(&dma_spec_target);
-	if (!ofdma_target)
-		return NULL;
+	if (!ofdma_target) {
+		ofdma->dma_router->route_free(ofdma->dma_router->dev,
+					      route_data);
+		chan = ERR_PTR(-EPROBE_DEFER);
+		goto err;
+	}

 	chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
 	if (IS_ERR_OR_NULL(chan)) {

@@ -77,6 +81,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
 		chan->route_data = route_data;
 	}

+err:
 	/*
 	 * Need to put the node back since the ofdma->of_dma_route_allocate
 	 * has taken it for generating the new, translated dma_spec
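The router_xlate change above distinguishes "controller not probed yet" from plain failure by returning ERR_PTR(-EPROBE_DEFER) instead of NULL, so the DMA core can retry the translation later. A minimal userspace rendition of the kernel's ERR_PTR convention (simplified from include/linux/err.h; the request_channel() helper here is invented for illustration):

	#include <stdio.h>

	#define MAX_ERRNO	4095
	#define EPROBE_DEFER	517	/* kernel-internal errno: retry probe later */

	/* Errors ride in the top page of the address space, so one pointer
	 * can carry either a valid object or a negative errno.
	 */
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	/* Hypothetical lookup: NULL would be ambiguous, an ERR_PTR is not. */
	static void *request_channel(int controller_ready)
	{
		if (!controller_ready)
			return ERR_PTR(-EPROBE_DEFER);
		return "dma0chan3";
	}

	int main(void)
	{
		void *chan = request_channel(0);

		if (IS_ERR(chan))
			printf("deferring, err=%ld\n", PTR_ERR(chan));
		return 0;
	}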
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)

 error:
 	of_dma_controller_free(pdev->dev.of_node);
-	pm_runtime_put(&pdev->dev);
 error_pm:
+	pm_runtime_put(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
@@ -333,6 +333,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync

@@ -370,6 +371,7 @@ struct xilinx_dma_chan {
 	bool genlock;
 	bool err;
 	bool idle;
+	bool terminating;
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;

@@ -844,6 +846,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
 		/* Run any dependencies, then free the descriptor */
 		dma_run_dependencies(&desc->async_tx);
 		xilinx_dma_free_tx_descriptor(chan, desc);
+
+		/*
+		 * While we ran a callback the user called a terminate function,
+		 * which takes care of cleaning up any remaining descriptors
+		 */
+		if (chan->terminating)
+			break;
 	}

 	spin_unlock_irqrestore(&chan->lock, flags);

@@ -1618,6 +1627,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	if (desc->cyclic)
 		chan->cyclic = true;

+	chan->terminating = false;
+
 	spin_unlock_irqrestore(&chan->lock, flags);

 	return cookie;

@@ -2074,6 +2085,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
 	}

 	/* Remove and free all of the descriptors in the lists */
+	chan->terminating = true;
 	xilinx_dma_free_descriptors(chan);
 	chan->idle = true;
@@ -362,7 +362,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)

 	REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
 		     MASTER_UPDATE_LOCK_DB_X,
-		     h_blank_start - 200 - 1,
+		     (h_blank_start - 200 - 1) / optc1->opp_count,
 		     MASTER_UPDATE_LOCK_DB_Y,
 		     v_blank_start - 1);
 }
@@ -792,6 +792,9 @@ void iommu_group_remove_device(struct device *dev)
 	struct iommu_group *group = dev->iommu_group;
 	struct group_device *tmp_device, *device = NULL;

+	if (!group)
+		return;
+
 	dev_info(dev, "Removing from iommu group %d\n", group->id);

 	/* Pre-notify listeners that a device is being removed. */
@@ -91,16 +91,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
 	free_irq(tpci200->info->pdev->irq, (void *) tpci200);

 	pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
-	pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);

 	pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
 	pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
 	pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
 	pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-	pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);

 	pci_disable_device(tpci200->info->pdev);
-	pci_dev_put(tpci200->info->pdev);
 }

 static void tpci200_enable_irq(struct tpci200_board *tpci200,

@@ -259,7 +256,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
-		goto out_disable_pci;
+		goto err_disable_device;
 	}

 	/* Request IO ID INT space (Bar 3) */

@@ -271,7 +268,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
-		goto out_release_ip_space;
+		goto err_ip_interface_bar;
 	}

 	/* Request MEM8 space (Bar 5) */

@@ -282,7 +279,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
-		goto out_release_ioid_int_space;
+		goto err_io_id_int_spaces_bar;
 	}

 	/* Request MEM16 space (Bar 4) */

@@ -293,7 +290,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			"(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
-		goto out_release_mem8_space;
+		goto err_mem8_space_bar;
 	}

 	/* Map internal tpci200 driver user space */

@@ -307,7 +304,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
 		res = -ENOMEM;
-		goto out_release_mem8_space;
+		goto err_mem16_space_bar;
 	}

 	/* Initialize lock that protects interface_regs */

@@ -346,18 +343,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
 			"(bn 0x%X, sn 0x%X) unable to register IRQ !",
 			tpci200->info->pdev->bus->number,
 			tpci200->info->pdev->devfn);
-		goto out_release_ioid_int_space;
+		goto err_interface_regs;
 	}

 	return 0;

-out_release_mem8_space:
+err_interface_regs:
+	pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
+err_mem16_space_bar:
+	pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
+err_mem8_space_bar:
 	pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
-out_release_ioid_int_space:
+err_io_id_int_spaces_bar:
 	pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
-out_release_ip_space:
+err_ip_interface_bar:
 	pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
-out_disable_pci:
+err_disable_device:
 	pci_disable_device(tpci200->info->pdev);
 	return res;
 }

@@ -529,7 +530,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 	tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
 	if (!tpci200->info) {
 		ret = -ENOMEM;
-		goto out_err_info;
+		goto err_tpci200;
 	}

 	pci_dev_get(pdev);

@@ -540,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
 		ret = -EBUSY;
-		goto out_err_pci_request;
+		goto err_tpci200_info;
 	}
 	tpci200->info->cfg_regs = ioremap_nocache(
 			pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),

@@ -548,7 +549,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 	if (!tpci200->info->cfg_regs) {
 		dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
 		ret = -EFAULT;
-		goto out_err_ioremap;
+		goto err_request_region;
 	}

 	/* Disable byte swapping for 16 bit IP module access. This will ensure

@@ -571,7 +572,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 	if (ret) {
 		dev_err(&pdev->dev, "error during tpci200 install\n");
 		ret = -ENODEV;
-		goto out_err_install;
+		goto err_cfg_regs;
 	}

 	/* Register the carrier in the industry pack bus driver */

@@ -583,7 +584,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 		dev_err(&pdev->dev,
 			"error registering the carrier on ipack driver\n");
 		ret = -EFAULT;
-		goto out_err_bus_register;
+		goto err_tpci200_install;
 	}

 	/* save the bus number given by ipack to logging purpose */

@@ -594,19 +595,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
 		tpci200_create_device(tpci200, i);
 	return 0;

-out_err_bus_register:
+err_tpci200_install:
 	tpci200_uninstall(tpci200);
-	/* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
-	tpci200->info->cfg_regs = NULL;
-out_err_install:
-	if (tpci200->info->cfg_regs)
-		iounmap(tpci200->info->cfg_regs);
-out_err_ioremap:
+err_cfg_regs:
+	pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+err_request_region:
 	pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
-out_err_pci_request:
-	pci_dev_put(pdev);
+err_tpci200_info:
 	kfree(tpci200->info);
-out_err_info:
+	pci_dev_put(pdev);
+err_tpci200:
 	kfree(tpci200);
 	return ret;
 }

@@ -616,6 +614,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
 	ipack_bus_unregister(tpci200->info->ipack_bus);
 	tpci200_uninstall(tpci200);

+	pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
+
+	pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
+
+	pci_dev_put(tpci200->info->pdev);
+
 	kfree(tpci200->info);
 	kfree(tpci200);
 }
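The tpci200 rework above is the canonical kernel goto-unwind ladder: every acquisition step gets a label, and a failure at step N jumps to the label that releases steps N-1..1 in reverse order, so nothing leaks and nothing is freed twice. The shape of the pattern in a standalone sketch (generic names, not the driver's code):

	#include <stdlib.h>

	struct ctx { void *regs; void *buf; };

	static int my_probe(struct ctx *c)
	{
		int ret;

		c->regs = malloc(64);		/* step 1: think ioremap() */
		if (!c->regs) {
			ret = -1;
			goto err_out;
		}
		c->buf = malloc(256);		/* step 2: think request_region() */
		if (!c->buf) {
			ret = -1;
			goto err_free_regs;	/* undo step 1 only */
		}
		return 0;

	err_free_regs:
		free(c->regs);
	err_out:
		return ret;
	}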
@@ -1187,15 +1187,11 @@ static int zr364xx_open(struct file *file)
 	return err;
 }

-static void zr364xx_release(struct v4l2_device *v4l2_dev)
+static void zr364xx_board_uninit(struct zr364xx_camera *cam)
 {
-	struct zr364xx_camera *cam =
-		container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
 	unsigned long i;

-	v4l2_device_unregister(&cam->v4l2_dev);
-
-	videobuf_mmap_free(&cam->vb_vidq);
+	zr364xx_stop_readpipe(cam);

 	/* release sys buffers */
 	for (i = 0; i < FRAMES; i++) {

@@ -1206,9 +1202,19 @@ static void zr364xx_release(struct v4l2_device *v4l2_dev)
 		cam->buffer.frame[i].lpvbits = NULL;
 	}

-	v4l2_ctrl_handler_free(&cam->ctrl_handler);
 	/* release transfer buffer */
 	kfree(cam->pipe->transfer_buffer);
+}
+
+static void zr364xx_release(struct v4l2_device *v4l2_dev)
+{
+	struct zr364xx_camera *cam =
+		container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
+
+	videobuf_mmap_free(&cam->vb_vidq);
+	v4l2_ctrl_handler_free(&cam->ctrl_handler);
+	zr364xx_board_uninit(cam);
+	v4l2_device_unregister(&cam->v4l2_dev);
 	kfree(cam);
 }

@@ -1331,6 +1337,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
 {
 	struct zr364xx_pipeinfo *pipe = cam->pipe;
 	unsigned long i;
+	int err;

 	DBG("board init: %p\n", cam);
 	memset(pipe, 0, sizeof(*pipe));

@@ -1363,9 +1370,8 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)

 	if (i == 0) {
 		printk(KERN_INFO KBUILD_MODNAME ": out of memory. Aborting\n");
-		kfree(cam->pipe->transfer_buffer);
-		cam->pipe->transfer_buffer = NULL;
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto err_free;
 	} else
 		cam->buffer.dwFrames = i;

@@ -1380,9 +1386,20 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
 	/*** end create system buffers ***/

 	/* start read pipe */
-	zr364xx_start_readpipe(cam);
+	err = zr364xx_start_readpipe(cam);
+	if (err)
+		goto err_free_frames;

 	DBG(": board initialized\n");
 	return 0;
+
+err_free_frames:
+	for (i = 0; i < FRAMES; i++)
+		vfree(cam->buffer.frame[i].lpvbits);
+err_free:
+	kfree(cam->pipe->transfer_buffer);
+	cam->pipe->transfer_buffer = NULL;
+	return err;
 }

 static int zr364xx_probe(struct usb_interface *intf,

@@ -1407,12 +1424,10 @@ static int zr364xx_probe(struct usb_interface *intf,
 	if (!cam)
 		return -ENOMEM;

-	cam->v4l2_dev.release = zr364xx_release;
 	err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
 	if (err < 0) {
 		dev_err(&udev->dev, "couldn't register v4l2_device\n");
-		kfree(cam);
-		return err;
+		goto free_cam;
 	}
 	hdl = &cam->ctrl_handler;
 	v4l2_ctrl_handler_init(hdl, 1);

@@ -1421,7 +1436,7 @@ static int zr364xx_probe(struct usb_interface *intf,
 	if (hdl->error) {
 		err = hdl->error;
 		dev_err(&udev->dev, "couldn't register control\n");
-		goto fail;
+		goto free_hdlr_and_unreg_dev;
 	}
 	/* save the init method used by this camera */
 	cam->method = id->driver_info;

@@ -1494,7 +1509,7 @@ static int zr364xx_probe(struct usb_interface *intf,
 	if (!cam->read_endpoint) {
 		err = -ENOMEM;
 		dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
-		goto fail;
+		goto free_hdlr_and_unreg_dev;
 	}

 	/* v4l */

@@ -1505,10 +1520,11 @@ static int zr364xx_probe(struct usb_interface *intf,

 	/* load zr364xx board specific */
 	err = zr364xx_board_init(cam);
-	if (!err)
+	if (err)
+		goto free_hdlr_and_unreg_dev;
 	err = v4l2_ctrl_handler_setup(hdl);
 	if (err)
-		goto fail;
+		goto board_uninit;

 	spin_lock_init(&cam->slock);

@@ -1523,16 +1539,20 @@ static int zr364xx_probe(struct usb_interface *intf,
 	err = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
 	if (err) {
 		dev_err(&udev->dev, "video_register_device failed\n");
-		goto fail;
+		goto board_uninit;
 	}
+	cam->v4l2_dev.release = zr364xx_release;

 	dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
 		 video_device_node_name(&cam->vdev));
 	return 0;

-fail:
+board_uninit:
+	zr364xx_board_uninit(cam);
+free_hdlr_and_unreg_dev:
 	v4l2_ctrl_handler_free(hdl);
 	v4l2_device_unregister(&cam->v4l2_dev);
+free_cam:
 	kfree(cam);
 	return err;
 }

@@ -1579,10 +1599,19 @@ static int zr364xx_resume(struct usb_interface *intf)
 	if (!cam->was_streaming)
 		return 0;

-	zr364xx_start_readpipe(cam);
+	res = zr364xx_start_readpipe(cam);
+	if (res)
+		return res;
+
 	res = zr364xx_prepare(cam);
-	if (!res)
+	if (res)
+		goto err_prepare;
+
 	zr364xx_start_acquire(cam);
+	return 0;
+
+err_prepare:
+	zr364xx_stop_readpipe(cam);
 	return res;
 }
 #endif
@@ -2017,8 +2017,8 @@ static void dw_mci_tasklet_func(unsigned long priv)
 				continue;
 			}

-			dw_mci_stop_dma(host);
 			send_stop_abort(host, data);
+			dw_mci_stop_dma(host);
 			state = STATE_SENDING_STOP;
 			break;
 		}

@@ -2042,10 +2042,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
 			 */
 			if (test_and_clear_bit(EVENT_DATA_ERROR,
 					       &host->pending_events)) {
-				dw_mci_stop_dma(host);
 				if (!(host->data_status & (SDMMC_INT_DRTO |
 							   SDMMC_INT_EBE)))
 					send_stop_abort(host, data);
+				dw_mci_stop_dma(host);
 				state = STATE_DATA_ERROR;
 				break;
 			}

@@ -2078,10 +2078,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
 			 */
 			if (test_and_clear_bit(EVENT_DATA_ERROR,
 					       &host->pending_events)) {
-				dw_mci_stop_dma(host);
 				if (!(host->data_status & (SDMMC_INT_DRTO |
 							   SDMMC_INT_EBE)))
 					send_stop_abort(host, data);
+				dw_mci_stop_dma(host);
 				state = STATE_DATA_ERROR;
 				break;
 			}
@@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
 	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
 	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

-	return extp->MinorVersion >= '5' &&
+	return extp && extp->MinorVersion >= '5' &&
 		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
 }
@@ -360,6 +360,26 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
 	return md_dst->u.port_info.port_id;
 }

+static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
+					  struct bnxt_tx_ring_info *txr,
+					  struct netdev_queue *txq)
+{
+	netif_tx_stop_queue(txq);
+
+	/* netif_tx_stop_queue() must be done before checking
+	 * tx index in bnxt_tx_avail() below, because in
+	 * bnxt_tx_int(), we update tx index before checking for
+	 * netif_tx_queue_stopped().
+	 */
+	smp_mb();
+	if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+		netif_tx_wake_queue(txq);
+		return false;
+	}
+
+	return true;
+}
+
 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);

@@ -387,7 +407,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	free_size = bnxt_tx_avail(bp, txr);
 	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
-		netif_tx_stop_queue(txq);
+		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
 			return NETDEV_TX_BUSY;
 	}

@@ -597,16 +617,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (netdev_xmit_more() && !tx_buf->is_push)
 			bnxt_db_write(bp, &txr->tx_db, prod);

-		netif_tx_stop_queue(txq);
-
-		/* netif_tx_stop_queue() must be done before checking
-		 * tx index in bnxt_tx_avail() below, because in
-		 * bnxt_tx_int(), we update tx index before checking for
-		 * netif_tx_queue_stopped().
-		 */
-		smp_mb();
-		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
-			netif_tx_wake_queue(txq);
+		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
 	}
 	return NETDEV_TX_OK;

@@ -690,14 +701,9 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	smp_mb();

 	if (unlikely(netif_tx_queue_stopped(txq)) &&
-	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
-		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) &&
-		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
-		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
-			netif_tx_wake_queue(txq);
-		__netif_tx_unlock(txq);
-	}
+	    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
+		netif_tx_wake_queue(txq);
 }

 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,

@@ -1718,6 +1724,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
 		return -EBUSY;

+	/* The valid test of the entry must be done first before
+	 * reading any further.
+	 */
+	dma_rmb();
 	prod = rxr->rx_prod;

 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {

@@ -1912,6 +1922,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
 		return -EBUSY;

+	/* The valid test of the entry must be done first before
+	 * reading any further.
+	 */
+	dma_rmb();
 	cmp_type = RX_CMP_TYPE(rxcmp);
 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=

@@ -2308,6 +2322,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
 		if (!TX_CMP_VALID(txcmp, raw_cons))
 			break;

+		/* The valid test of the entry must be done first before
+		 * reading any further.
+		 */
+		dma_rmb();
 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
 			cp_cons = RING_CMP(tmp_raw_cons);

@@ -8340,10 +8358,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;

+		napi_disable(&bp->bnapi[i]->napi);
 		if (bp->bnapi[i]->rx_ring)
 			cancel_work_sync(&cpr->dim.work);
-
-		napi_disable(&bp->bnapi[i]->napi);
 	}
 }

@@ -8371,9 +8388,11 @@ void bnxt_tx_disable(struct bnxt *bp)
 	if (bp->tx_ring) {
 		for (i = 0; i < bp->tx_nr_rings; i++) {
 			txr = &bp->tx_ring[i];
-			txr->dev_state = BNXT_DEV_STATE_CLOSING;
+			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
 		}
 	}
+	/* Make sure napi polls see @dev_state change */
+	synchronize_net();
 	/* Drop carrier first to prevent TX timeout */
 	netif_carrier_off(bp->dev);
 	/* Stop all TX queues */

@@ -8387,8 +8406,10 @@ void bnxt_tx_enable(struct bnxt *bp)

 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		txr = &bp->tx_ring[i];
-		txr->dev_state = 0;
+		WRITE_ONCE(txr->dev_state, 0);
 	}
+	/* Make sure napi polls see @dev_state change */
+	synchronize_net();
 	netif_tx_wake_all_queues(bp->dev);
 	if (bp->link_info.link_up)
 		netif_carrier_on(bp->dev);
@@ -3553,8 +3553,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,

 	/* is DCB enabled at all? */
 	if (vsi->tc_config.numtc == 1)
-		return i40e_swdcb_skb_tx_hash(netdev, skb,
-					      netdev->real_num_tx_queues);
+		return netdev_pick_tx(netdev, skb, sb_dev);

 	prio = skb->priority;
 	hw = &vsi->back->hw;
@@ -134,6 +134,7 @@ struct iavf_q_vector {
 struct iavf_mac_filter {
 	struct list_head list;
 	u8 macaddr[ETH_ALEN];
+	bool is_new_mac; /* filter is new, wait for PF decision */
 	bool remove;     /* filter needs to be removed */
 	bool add;        /* filter needs to be added */
 };
@@ -761,6 +761,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,

 		list_add_tail(&f->list, &adapter->mac_filter_list);
 		f->add = true;
+		f->is_new_mac = true;
 		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
 	} else {
 		f->remove = false;
@@ -564,6 +564,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
 	kfree(veal);
 }

+/**
+ * iavf_mac_add_ok
+ * @adapter: adapter structure
+ *
+ * Submit list of filters based on PF response.
+ **/
+static void iavf_mac_add_ok(struct iavf_adapter *adapter)
+{
+	struct iavf_mac_filter *f, *ftmp;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		f->is_new_mac = false;
+	}
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_mac_add_reject
+ * @adapter: adapter structure
+ *
+ * Remove filters from list based on PF response.
+ **/
+static void iavf_mac_add_reject(struct iavf_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct iavf_mac_filter *f, *ftmp;
+
+	spin_lock_bh(&adapter->mac_vlan_list_lock);
+	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+		if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
+			f->remove = false;
+
+		if (f->is_new_mac) {
+			list_del(&f->list);
+			kfree(f);
+		}
+	}
+	spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
 /**
  * iavf_add_vlans
  * @adapter: adapter structure

@@ -1316,6 +1357,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 		case VIRTCHNL_OP_ADD_ETH_ADDR:
 			dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
 				iavf_stat_str(&adapter->hw, v_retval));
+			iavf_mac_add_reject(adapter);
 			/* restore administratively set MAC address */
 			ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
 			break;

@@ -1385,10 +1427,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
 		}
 	}
 	switch (v_opcode) {
-	case VIRTCHNL_OP_ADD_ETH_ADDR: {
+	case VIRTCHNL_OP_ADD_ETH_ADDR:
+		if (!v_retval)
+			iavf_mac_add_ok(adapter);
 		if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
 			ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
-		}
 		break;
 	case VIRTCHNL_OP_GET_STATS: {
 		struct iavf_eth_stats *stats =
@@ -3158,8 +3158,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,

 		indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
 		ret = QLCRD32(adapter, indirect_addr, &err);
-		if (err == -EIO)
+		if (err == -EIO) {
+			qlcnic_83xx_unlock_flash(adapter);
 			return err;
+		}

 		word = ret;
 		*(u32 *)p_data = word;
@@ -839,6 +839,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
 		return;
 	}

+	if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
+		pr_err("6pack: cooked buffer overrun, data loss\n");
+		sp->rx_count = 0;
+		return;
+	}
+
 	buf = sp->raw_buf;
 	sp->cooked_buf[sp->rx_count_cooked++] =
 		buf[0] | ((buf[1] << 2) & 0xc0);
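The decode_data() fix refuses to append a three-byte cooked chunk unless all of it fits, instead of writing first and overflowing. The same guard in a minimal sketch (hypothetical buffer size and names, not the driver code):

	#include <stdio.h>
	#include <string.h>

	#define COOKED_LEN 400	/* stand-in for sizeof(sp->cooked_buf) */

	static int push_cooked(unsigned char *buf, size_t *pos,
			       const unsigned char chunk[3])
	{
		/* Same shape as the patch: index pos + 2 must stay in bounds. */
		if (*pos + 2 >= COOKED_LEN) {
			fprintf(stderr, "cooked buffer overrun, dropping\n");
			return -1;
		}
		memcpy(buf + *pos, chunk, 3);
		*pos += 3;
		return 0;
	}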
@@ -82,6 +82,17 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,

 static int parent_count;

+static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
+{
+	struct mdio_mux_child_bus *cb = pb->children;
+
+	while (cb) {
+		mdiobus_unregister(cb->mii_bus);
+		mdiobus_free(cb->mii_bus);
+		cb = cb->next;
+	}
+}
+
 int mdio_mux_init(struct device *dev,
 		  struct device_node *mux_node,
 		  int (*switch_fn)(int cur, int desired, void *data),

@@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
 		cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
 		if (!cb) {
 			ret_val = -ENOMEM;
-			continue;
+			goto err_loop;
 		}
 		cb->bus_number = v;
 		cb->parent = pb;

@@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
 		cb->mii_bus = mdiobus_alloc();
 		if (!cb->mii_bus) {
 			ret_val = -ENOMEM;
-			devm_kfree(dev, cb);
-			continue;
+			goto err_loop;
 		}
 		cb->mii_bus->priv = cb;

@@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
 		cb->mii_bus->write = mdio_mux_write;
 		r = of_mdiobus_register(cb->mii_bus, child_bus_node);
 		if (r) {
-			mdiobus_free(cb->mii_bus);
+			if (r == -EPROBE_DEFER) {
+				ret_val = r;
+				goto err_loop;
+			}
+			devm_kfree(dev, cb);
 			dev_err(dev,
 				"Error: Failed to register MDIO bus for child %pOF\n",
 				child_bus_node);
-			devm_kfree(dev, cb);
+			mdiobus_free(cb->mii_bus);
 		} else {
 			cb->next = pb->children;
 			pb->children = cb;

@@ -182,6 +196,10 @@ int mdio_mux_init(struct device *dev,

 	dev_err(dev, "Error: No acceptable child buses found\n");
 	devm_kfree(dev, pb);

+err_loop:
+	mdio_mux_uninit_children(pb);
+	of_node_put(child_bus_node);
 err_pb_kz:
 	put_device(&parent_bus->dev);
 err_parent_bus:

@@ -193,14 +211,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
 void mdio_mux_uninit(void *mux_handle)
 {
 	struct mdio_mux_parent_bus *pb = mux_handle;
-	struct mdio_mux_child_bus *cb = pb->children;
-
-	while (cb) {
-		mdiobus_unregister(cb->mii_bus);
-		mdiobus_free(cb->mii_bus);
-		cb = cb->next;
-	}

+	mdio_mux_uninit_children(pb);
 	put_device(&pb->mii_bus->dev);
 }
 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
@@ -1159,7 +1159,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
 	struct phy_device *phydev = dev->net->phydev;
 	struct ethtool_link_ksettings ecmd;
-	int ladv, radv, ret;
+	int ladv, radv, ret, link;
 	u32 buf;

 	/* clear LAN78xx interrupt status */

@@ -1167,9 +1167,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 	if (unlikely(ret < 0))
 		return -EIO;

+	mutex_lock(&phydev->lock);
 	phy_read_status(phydev);
+	link = phydev->link;
+	mutex_unlock(&phydev->lock);

-	if (!phydev->link && dev->link_on) {
+	if (!link && dev->link_on) {
 		dev->link_on = false;

 		/* reset MAC */

@@ -1182,7 +1185,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 			return -EIO;

 		del_timer(&dev->stat_monitor);
-	} else if (phydev->link && !dev->link_on) {
+	} else if (link && !dev->link_on) {
 		dev->link_on = true;

 		phy_ethtool_ksettings_get(phydev, &ecmd);

@@ -1471,9 +1474,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)

 static u32 lan78xx_get_link(struct net_device *net)
 {
-	phy_read_status(net->phydev);
+	u32 link;
+
+	mutex_lock(&net->phydev->lock);
+	phy_read_status(net->phydev);
+	link = net->phydev->link;
+	mutex_unlock(&net->phydev->lock);

-	return net->phydev->link;
+	return link;
 }

 static void lan78xx_get_drvinfo(struct net_device *net,
@@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
 	VIRTIO_NET_F_GUEST_CSUM
 };

-#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
 				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
 				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
 				(1ULL << VIRTIO_NET_F_GUEST_UFO))

@@ -195,6 +195,9 @@ struct virtnet_info {
 	/* # of XDP queue pairs currently used by the driver */
 	u16 xdp_queue_pairs;

+	/* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
+	bool xdp_enabled;
+
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;

@@ -485,12 +488,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 	return 0;
 }

-static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
-{
-	unsigned int qp;
-
-	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
-	return &vi->sq[qp];
-}
+/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
+ * the current cpu, so it does not need to be locked.
+ *
+ * Here we use marco instead of inline functions because we have to deal with
+ * three issues at the same time: 1. the choice of sq. 2. judge and execute the
+ * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
+ * functions to perfectly solve these three problems at the same time.
+ */
+#define virtnet_xdp_get_sq(vi) ({ \
+	struct netdev_queue *txq; \
+	typeof(vi) v = (vi); \
+	unsigned int qp; \
+	\
+	if (v->curr_queue_pairs > nr_cpu_ids) { \
+		qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
+		qp += smp_processor_id(); \
+		txq = netdev_get_tx_queue(v->dev, qp); \
+		__netif_tx_acquire(txq); \
+	} else { \
+		qp = smp_processor_id() % v->curr_queue_pairs; \
+		txq = netdev_get_tx_queue(v->dev, qp); \
+		__netif_tx_lock(txq, raw_smp_processor_id()); \
+	} \
+	v->sq + qp; \
+})
+
+#define virtnet_xdp_put_sq(vi, q) { \
+	struct netdev_queue *txq; \
+	typeof(vi) v = (vi); \
+	\
+	txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
+	if (v->curr_queue_pairs > nr_cpu_ids) \
+		__netif_tx_release(txq); \
+	else \
+		__netif_tx_unlock(txq); \
+}

 static int virtnet_xdp_xmit(struct net_device *dev,

@@ -516,7 +548,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	if (!xdp_prog)
 		return -ENXIO;

-	sq = virtnet_xdp_sq(vi);
+	sq = virtnet_xdp_get_sq(vi);

 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 		ret = -EINVAL;

@@ -564,12 +596,13 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	sq->stats.kicks += kicks;
 	u64_stats_update_end(&sq->stats.syncp);

+	virtnet_xdp_put_sq(vi, sq);
 	return ret;
 }

 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
 {
-	return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+	return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
 }

 /* We copy the packet for XDP in the following cases:

@@ -1458,12 +1491,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 		xdp_do_flush_map();

 	if (xdp_xmit & VIRTIO_XDP_TX) {
-		sq = virtnet_xdp_sq(vi);
+		sq = virtnet_xdp_get_sq(vi);
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
 			u64_stats_update_begin(&sq->stats.syncp);
 			sq->stats.kicks++;
 			u64_stats_update_end(&sq->stats.syncp);
 		}
+		virtnet_xdp_put_sq(vi, sq);
 	}

 	return received;

@@ -2459,7 +2493,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
-		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
+		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
 		return -EOPNOTSUPP;
 	}

@@ -2480,10 +2514,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,

 	/* XDP requires extra queues for XDP_TX */
 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
-		NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
-		netdev_warn(dev, "request %i queues but max is %i\n",
+		netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
 			    curr_qp + xdp_qp, vi->max_queue_pairs);
-		return -ENOMEM;
+		xdp_qp = 0;
 	}

 	old_prog = rtnl_dereference(vi->rq[0].xdp_prog);

@@ -2520,11 +2553,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	vi->xdp_queue_pairs = xdp_qp;

 	if (prog) {
+		vi->xdp_enabled = true;
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
 			if (i == 0 && !old_prog)
 				virtnet_clear_guest_offloads(vi);
 		}
+	} else {
+		vi->xdp_enabled = false;
 	}

 	for (i = 0; i < vi->max_queue_pairs; i++) {

@@ -2611,15 +2647,15 @@ static int virtnet_set_features(struct net_device *dev,
 	if (!vi->has_cvq)
 		return 0;

-	if ((dev->features ^ features) & NETIF_F_LRO) {
-		if (vi->xdp_queue_pairs)
+	if ((dev->features ^ features) & NETIF_F_GRO_HW) {
+		if (vi->xdp_enabled)
 			return -EBUSY;

-		if (features & NETIF_F_LRO)
+		if (features & NETIF_F_GRO_HW)
 			offloads = vi->guest_offloads_capable;
 		else
 			offloads = vi->guest_offloads_capable &
-				   ~GUEST_OFFLOAD_LRO_MASK;
+				   ~GUEST_OFFLOAD_GRO_HW_MASK;

 		err = virtnet_set_guest_offloads(vi, offloads);
 		if (err)

@@ -3095,9 +3131,9 @@ static int virtnet_probe(struct virtio_device *vdev)
 		dev->features |= NETIF_F_RXCSUM;
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
-		dev->features |= NETIF_F_LRO;
+		dev->features |= NETIF_F_GRO_HW;
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
-		dev->hw_features |= NETIF_F_LRO;
+		dev->hw_features |= NETIF_F_GRO_HW;

 	dev->vlan_features = dev->features;
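virtnet_xdp_get_sq() above is a GNU C statement expression: the ({ ... }) block is an expression whose value is its last statement, which is what lets one macro both lock the chosen txq and "return" the selected send queue. A toy version of the construct (names invented for illustration; GCC/Clang extension, not standard C):

	#include <stdio.h>

	#define pick_slot(arr, n) ({			\
		unsigned int idx = (n) % 4;		\
		/* a lock would be taken here */	\
		&(arr)[idx];				\
	})

	int main(void)
	{
		int slots[4] = { 10, 20, 30, 40 };
		int *s = pick_slot(slots, 6);	/* 6 % 4 == 2 -> &slots[2] */

		printf("%d\n", *s);		/* prints 30 */
		return 0;
	}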
@@ -1036,6 +1036,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
 	bool is_ndisc = ipv6_ndisc_frame(skb);

+	nf_reset_ct(skb);
+
 	/* loopback, multicast & non-ND link-local traffic; do not push through
 	 * packet taps again. Reset pkt_type for upper layers to process skb.
 	 * For strict packets with a source LLA, determine the dst using the

@@ -1092,6 +1094,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
 	skb->skb_iif = vrf_dev->ifindex;
 	IPCB(skb)->flags |= IPSKB_L3SLAVE;

+	nf_reset_ct(skb);
+
 	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
 		goto out;
@@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
 bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);

 void ath_hw_setbssidmask(struct ath_common *common);
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
 int ath_key_config(struct ath_common *common,
 		   struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta,
 		   struct ieee80211_key_conf *key);
 bool ath_hw_keyreset(struct ath_common *common, u16 entry);
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
 void ath_hw_cycle_counters_update(struct ath_common *common);
 int32_t ath_hw_get_listen_time(struct ath_common *common);
@@ -521,7 +521,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 		}
 		break;
 	case DISABLE_KEY:
-		ath_key_delete(common, key);
+		ath_key_delete(common, key->hw_key_idx);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1460,7 +1460,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
 		}
 		break;
 	case DISABLE_KEY:
-		ath_key_delete(common, key);
+		ath_key_delete(common, key->hw_key_idx);
 		break;
 	default:
 		ret = -EINVAL;
@@ -819,6 +819,7 @@ struct ath_hw {
 	struct ath9k_pacal_info pacal_info;
 	struct ar5416Stats stats;
 	struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
+	DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);

 	enum ath9k_int imask;
 	u32 imrs2_reg;
@@ -823,12 +823,80 @@ static void ath9k_tx(struct ieee80211_hw *hw,
 	ieee80211_free_txskb(hw, skb);
 }

+static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
+{
+	struct ath_buf *bf;
+	struct ieee80211_tx_info *txinfo;
+	struct ath_frame_info *fi;
+
+	list_for_each_entry(bf, txq_list, list) {
+		if (bf->bf_state.stale || !bf->bf_mpdu)
+			continue;
+
+		txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
+		fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+		if (fi->keyix == keyix)
+			return true;
+	}
+
+	return false;
+}
+
+static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	int i;
+	struct ath_txq *txq;
+	bool key_in_use = false;
+
+	for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
+		if (!ATH_TXQ_SETUP(sc, i))
+			continue;
+		txq = &sc->tx.txq[i];
+		if (!txq->axq_depth)
+			continue;
+		if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
+			continue;
+
+		ath_txq_lock(sc, txq);
+		key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
+		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+			int idx = txq->txq_tailidx;
+
+			while (!key_in_use &&
+			       !list_empty(&txq->txq_fifo[idx])) {
+				key_in_use = ath9k_txq_list_has_key(
+					&txq->txq_fifo[idx], keyix);
+				INCR(idx, ATH_TXFIFO_DEPTH);
+			}
+		}
+		ath_txq_unlock(sc, txq);
+	}
+
+	return key_in_use;
+}
+
+static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+
+	if (!test_bit(keyix, ah->pending_del_keymap) ||
+	    ath9k_txq_has_key(sc, keyix))
+		return;
+
+	/* No more TXQ frames point to this key cache entry, so delete it. */
+	clear_bit(keyix, ah->pending_del_keymap);
+	ath_key_delete(common, keyix);
+}
+
 static void ath9k_stop(struct ieee80211_hw *hw)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	bool prev_idle;
+	int i;

 	ath9k_deinit_channel_context(sc);

@@ -896,6 +964,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)

 	spin_unlock_bh(&sc->sc_pcu_lock);

+	for (i = 0; i < ATH_KEYMAX; i++)
+		ath9k_pending_key_del(sc, i);
+
+	/* Clear key cache entries explicitly to get rid of any potentially
+	 * remaining keys.
+	 */
+	ath9k_cmn_init_crypto(sc->sc_ah);
+
 	ath9k_ps_restore(sc);

 	sc->ps_idle = prev_idle;

@@ -1541,12 +1617,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_node *an = (struct ath_node *) sta->drv_priv;
-	struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };

 	if (!an->ps_key)
 		return;

-	ath_key_delete(common, &ps_key);
+	ath_key_delete(common, an->ps_key);
 	an->ps_key = 0;
 	an->key_idx[0] = 0;
 }

@@ -1708,6 +1783,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 	if (sta)
 		an = (struct ath_node *)sta->drv_priv;

+	/* Delete pending key cache entries if no more frames are pointing to
+	 * them in TXQs.
+	 */
+	for (i = 0; i < ATH_KEYMAX; i++)
+		ath9k_pending_key_del(sc, i);
+
 	switch (cmd) {
 	case SET_KEY:
 		if (sta)

@@ -1737,7 +1818,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
 		}
 		break;
 	case DISABLE_KEY:
-		ath_key_delete(common, key);
+		if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
+			/* Delay key cache entry deletion until there are no
+			 * remaining TXQ frames pointing to this entry.
+			 */
+			set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
+			ath_hw_keysetmac(common, key->hw_key_idx, NULL);
+		} else {
+			ath_key_delete(common, key->hw_key_idx);
+		}
 		if (an) {
 			for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
 				if (an->key_idx[i] != key->hw_key_idx)
@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
|
||||
}
|
||||
EXPORT_SYMBOL(ath_hw_keyreset);
|
||||
|
||||
static bool ath_hw_keysetmac(struct ath_common *common,
|
||||
u16 entry, const u8 *mac)
|
||||
bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
|
||||
{
|
||||
u32 macHi, macLo;
|
||||
u32 unicast_flag = AR_KEYTABLE_VALID;
|
||||
@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,
|
||||
|
||||
return true;
|
||||
}
|
||||
EXPORT_SYMBOL(ath_hw_keysetmac);
|
||||
|
||||
static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
|
||||
const struct ath_keyval *k,
|
||||
@@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
 /*
  * Delete Key.
  */
-void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
 {
-	ath_hw_keyreset(common, key->hw_key_idx);
-	if (key->hw_key_idx < IEEE80211_WEP_NKID)
+	/* Leave CCMP and TKIP (main key) configured to avoid disabling
+	 * encryption for potentially pending frames already in a TXQ with the
+	 * keyix pointing to this key entry. Instead, only clear the MAC address
+	 * to prevent RX processing from using this key cache entry.
+	 */
+	if (test_bit(hw_key_idx, common->ccmp_keymap) ||
+	    test_bit(hw_key_idx, common->tkip_keymap))
+		ath_hw_keysetmac(common, hw_key_idx, NULL);
+	else
+		ath_hw_keyreset(common, hw_key_idx);
+	if (hw_key_idx < IEEE80211_WEP_NKID)
 		return;
 
-	clear_bit(key->hw_key_idx, common->keymap);
-	clear_bit(key->hw_key_idx, common->ccmp_keymap);
-	if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
+	clear_bit(hw_key_idx, common->keymap);
+	clear_bit(hw_key_idx, common->ccmp_keymap);
+	if (!test_bit(hw_key_idx, common->tkip_keymap))
 		return;
 
-	clear_bit(key->hw_key_idx + 64, common->keymap);
+	clear_bit(hw_key_idx + 64, common->keymap);
 
-	clear_bit(key->hw_key_idx, common->tkip_keymap);
-	clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
+	clear_bit(hw_key_idx, common->tkip_keymap);
+	clear_bit(hw_key_idx + 64, common->tkip_keymap);
 
 	if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
-		ath_hw_keyreset(common, key->hw_key_idx + 32);
-		clear_bit(key->hw_key_idx + 32, common->keymap);
-		clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
+		ath_hw_keyreset(common, hw_key_idx + 32);
+		clear_bit(hw_key_idx + 32, common->keymap);
+		clear_bit(hw_key_idx + 64 + 32, common->keymap);
 
-		clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
-		clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
+		clear_bit(hw_key_idx + 32, common->tkip_keymap);
+		clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
 	}
 }
 EXPORT_SYMBOL(ath_key_delete);
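The key.c hunks above work together with the ath9k change: a key slot still referenced by queued TX frames is only unlinked from RX (MAC address cleared) and flagged in pending_del_keymap; the full key-cache reset happens later, once the TXQ drains. A minimal standalone sketch of that deferred-delete idea, using hypothetical helper names rather than the driver's:

        /* Minimal standalone sketch (hypothetical helpers, not the driver
         * code): a key slot still referenced by in-flight TX frames is only
         * flagged; the hardware entry is cleared from the TX-completion
         * path once nothing points at it any more. */
        #include <stdbool.h>

        #define NKEYS 128

        static bool pending_del[NKEYS];
        static int  txq_refs[NKEYS];        /* frames in flight per key slot */

        static void keycache_reset(int idx) /* stands in for ath_hw_keyreset() */
        {
                pending_del[idx] = false;
                /* ... clear the hardware key cache entry ... */
        }

        static void key_delete(int idx)
        {
                if (txq_refs[idx] > 0)
                        pending_del[idx] = true; /* defer until TX drains */
                else
                        keycache_reset(idx);     /* safe to clear now */
        }

        static void tx_frame_done(int idx)       /* TX-completion hook */
        {
                if (--txq_refs[idx] == 0 && pending_del[idx])
                        keycache_reset(idx);
        }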
@@ -1905,6 +1905,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
 
 #ifdef CONFIG_X86_IO_APIC
 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
@@ -92,7 +92,8 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
 	tristate "Intel PCH EG20T as PTP clock"
 	depends on X86_32 || COMPILE_TEST
-	depends on HAS_IOMEM && NET
+	depends on HAS_IOMEM && PCI
+	depends on NET
 	imply PTP_1588_CLOCK
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
@@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
 		if (!h->ctlr)
 			err = SCSI_DH_RES_TEMP_UNAVAIL;
 		else {
-			list_add_rcu(&h->node, &h->ctlr->dh_list);
 			h->sdev = sdev;
+			list_add_rcu(&h->node, &h->ctlr->dh_list);
 		}
 		spin_unlock(&list_lock);
 		err = SCSI_DH_OK;
@@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
 	spin_lock(&list_lock);
 	if (h->ctlr) {
 		list_del_rcu(&h->node);
-		h->sdev = NULL;
 		kref_put(&h->ctlr->kref, release_controller);
 	}
 	spin_unlock(&list_lock);
 	sdev->handler_data = NULL;
+	synchronize_rcu();
 	kfree(h);
 }
 
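The detach fix follows the standard RCU removal rule: after list_del_rcu() a concurrent reader may still be walking the unlinked node, so its fields (here h->sdev) must stay valid until a grace period has passed, and only then may the node be freed. A kernel-style sketch of that rule (assumes <linux/rculist.h>; illustrative, not the exact driver code):

        spin_lock(&list_lock);
        list_del_rcu(&h->node);  /* unlink: existing readers may still see h */
        spin_unlock(&list_lock);

        /* h and h->sdev must remain valid here; readers walking the list
         * under rcu_read_lock() may still dereference them. */
        synchronize_rcu();       /* wait for all pre-existing readers */
        kfree(h);                /* now nothing can hold a reference */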
@@ -246,7 +246,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
 	mimd_t		mimd;
 	uint32_t	adapno;
 	int		iterator;
-
+	bool		is_found;
 
 	if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
 		*rval = -EFAULT;
@@ -262,12 +262,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
 
 	adapter	= NULL;
 	iterator	= 0;
+	is_found	= false;
 
 	list_for_each_entry(adapter, &adapters_list_g, list) {
-		if (iterator++ == adapno) break;
+		if (iterator++ == adapno) {
+			is_found = true;
+			break;
+		}
 	}
 
-	if (!adapter) {
+	if (!is_found) {
 		*rval = -ENODEV;
 		return NULL;
 	}
@@ -733,6 +737,7 @@ ioctl_done(uioc_t *kioc)
 	uint32_t	adapno;
 	int		iterator;
 	mraid_mmadp_t*	adapter;
+	bool		is_found;
 
 	/*
 	 * When the kioc returns from driver, make sure it still doesn't
@@ -755,19 +760,23 @@ ioctl_done(uioc_t *kioc)
 		iterator	= 0;
 		adapter		= NULL;
 		adapno		= kioc->adapno;
+		is_found	= false;
 
 		con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
 					"ioctl that was timedout before\n"));
 
 		list_for_each_entry(adapter, &adapters_list_g, list) {
-			if (iterator++ == adapno) break;
+			if (iterator++ == adapno) {
+				is_found = true;
+				break;
+			}
 		}
 
 		kioc->timedout = 0;
 
-		if (adapter) {
+		if (is_found)
 			mraid_mm_dealloc_kioc( adapter, kioc );
-		}
 
 	}
 	else {
 		wake_up(&wait_q);
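The underlying pitfall: when list_for_each_entry() runs to completion without a break, its cursor is not NULL; it ends up as a bogus pointer computed from the list head, so a check like `if (!adapter)` can never detect the not-found case. A kernel-style sketch (assumes <linux/list.h>; the match field is hypothetical):

        struct mraid_mmadp *adp;
        bool found = false;

        list_for_each_entry(adp, &adapters_list_g, list) {
                if (adp->unique_id == adapno) { /* hypothetical match field */
                        found = true;
                        break;
                }
        }
        /* `if (!adp)` here would never fire: without a break, adp is
         * container_of(&adapters_list_g, ...), a non-NULL bogus pointer. */
        if (!found)
                return NULL;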
@@ -454,7 +454,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
 		error = shost->hostt->target_alloc(starget);
 
 		if(error) {
-			dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
+			if (error != -ENXIO)
+				dev_err(dev, "target allocation failed, error %d\n", error);
 			/* don't want scsi_target_reap to do the final
 			 * put because it will be under the host lock */
 			scsi_target_destroy(starget);
@@ -787,11 +787,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 	mutex_lock(&sdev->state_mutex);
 	ret = scsi_device_set_state(sdev, state);
 	/*
-	 * If the device state changes to SDEV_RUNNING, we need to run
-	 * the queue to avoid I/O hang.
+	 * If the device state changes to SDEV_RUNNING, we need to
+	 * rescan the device to revalidate it, and run the queue to
+	 * avoid I/O hang.
 	 */
-	if (ret == 0 && state == SDEV_RUNNING)
+	if (ret == 0 && state == SDEV_RUNNING) {
+		scsi_rescan_device(dev);
 		blk_mq_run_hw_queues(sdev->request_queue, true);
+	}
 	mutex_unlock(&sdev->state_mutex);
 
 	return ret == 0 ? count : -EINVAL;
@@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
 	int ret = 0;
 
 	spin_lock_irqsave(&ctrl->txn_lock, flags);
-	ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
+	ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
 			SLIM_MAX_TIDS, GFP_ATOMIC);
 	if (ret < 0) {
 		spin_unlock_irqrestore(&ctrl->txn_lock, flags);
@@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
 			goto slim_xfer_err;
 		}
 	}
-
+	/* Initialize tid to invalid value */
+	txn->tid = 0;
 	need_tid = slim_tid_txn(txn->mt, txn->mc);
 
 	if (need_tid) {
@@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
 			txn->mt, txn->mc, txn->la, ret);
 
 slim_xfer_err:
-	if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
+	if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
 		/*
 		 * remove runtime-pm vote if this was TX only, or
 		 * if there was error during this transaction
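The two slimbus hunks cooperate: allocating TIDs from 1 upward means a TID of 0 can never be handed out, so txn->tid == 0 becomes a reliable "no transaction ID was allocated" sentinel for the error path above. A kernel-style sketch of the allocation contract (idr API as declared in <linux/idr.h>):

        int tid;

        spin_lock_irqsave(&ctrl->txn_lock, flags);
        tid = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1, SLIM_MAX_TIDS,
                               GFP_ATOMIC);
        if (tid < 0) {
                spin_unlock_irqrestore(&ctrl->txn_lock, flags);
                return tid;   /* allocation failed */
        }
        txn->tid = tid;       /* guaranteed non-zero; 0 means "unallocated" */
        spin_unlock_irqrestore(&ctrl->txn_lock, flags);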
@@ -1061,7 +1061,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
 {
 	u32 cfg = readl_relaxed(ctrl->ngd->base);
 
-	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
+	if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
+	    ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
 		qcom_slim_ngd_init_dma(ctrl);
 
 	/* By default enable message queues */
@@ -1112,6 +1113,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
 		dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
 		return 0;
 	}
+	qcom_slim_ngd_setup(ctrl);
 	return 0;
 }
 
@@ -1500,6 +1502,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
 	struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
 	int ret = 0;
 
+	qcom_slim_ngd_exit_dma(ctrl);
 	if (!ctrl->qmi.handle)
 		return 0;
 
@@ -647,6 +647,9 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
 	int i;
 	int result;
 
+	if (size <= 0)		/* No point in asking for no data */
+		return -EINVAL;
+
 	memset(buf, 0, size);	/* Make sure we parse really received data */
 
 	for (i = 0; i < 3; ++i) {
@@ -695,6 +698,9 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
 	int i;
 	int result;
 
+	if (size <= 0)		/* No point in asking for no data */
+		return -EINVAL;
+
 	for (i = 0; i < 3; ++i) {
 		/* retry on length 0 or stall; some devices are flakey */
 		result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
@@ -708,10 +708,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
 }
 
+/* Make sure 64 bit math will not overflow. */
 static bool vhost_overflow(u64 uaddr, u64 size)
 {
-	/* Make sure 64 bit math will not overflow. */
-	return uaddr > ULONG_MAX || size > ULONG_MAX ||
-		uaddr > ULONG_MAX - size;
+	if (uaddr > ULONG_MAX || size > ULONG_MAX)
+		return true;
+
+	if (!size)
+		return false;
+
+	return uaddr > ULONG_MAX - size + 1;
 }
 
 /* Caller should have vq mutex and device mutex. */
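The old expression was off by one and mishandled empty ranges: a size-byte range starting at uaddr is valid iff its last byte, uaddr + size - 1, still fits in an unsigned long, which (for size >= 1, rearranged so the test itself cannot overflow) is uaddr <= ULONG_MAX - size + 1. For instance uaddr = 1 with size = ULONG_MAX is a valid range, yet the old `uaddr > ULONG_MAX - size` form rejected it. A standalone model of the corrected check, compilable as-is:

        #include <limits.h>
        #include <stdbool.h>
        #include <stdint.h>

        static bool vhost_overflow_model(uint64_t uaddr, uint64_t size)
        {
                if (uaddr > ULONG_MAX || size > ULONG_MAX)
                        return true;
                if (!size)
                        return false;   /* an empty range cannot wrap */
                /* valid iff uaddr + size - 1 <= ULONG_MAX, i.e.
                 * uaddr <= ULONG_MAX - size + 1 */
                return uaddr > ULONG_MAX - size + 1;
        }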
@@ -342,6 +342,7 @@ int register_virtio_device(struct virtio_device *dev)
 	virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
 	INIT_LIST_HEAD(&dev->vqs);
+	spin_lock_init(&dev->vqs_list_lock);
 
 	/*
 	 * device_add() causes the bus infrastructure to look for a matching
@@ -1668,7 +1668,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
 			cpu_to_le16(vq->packed.event_flags_shadow);
 	}
 
+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 
 err_desc_extra:
@@ -2126,7 +2128,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
 		memset(vq->split.desc_state, 0, vring.num *
 				sizeof(struct vring_desc_state_split));
 
+	spin_lock(&vdev->vqs_list_lock);
 	list_add_tail(&vq->vq.list, &vdev->vqs);
+	spin_unlock(&vdev->vqs_list_lock);
 	return &vq->vq;
 }
 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
@@ -2210,7 +2214,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
 	}
 	if (!vq->packed_ring)
 		kfree(vq->split.desc_state);
+	spin_lock(&vq->vq.vdev->vqs_list_lock);
 	list_del(&_vq->list);
+	spin_unlock(&vq->vq.vdev->vqs_list_lock);
 	kfree(vq);
 }
 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
@@ -2274,10 +2280,12 @@ void virtio_break_device(struct virtio_device *dev)
 {
 	struct virtqueue *_vq;
 
+	spin_lock(&dev->vqs_list_lock);
 	list_for_each_entry(_vq, &dev->vqs, list) {
 		struct vring_virtqueue *vq = to_vvq(_vq);
 		vq->broken = true;
 	}
+	spin_unlock(&dev->vqs_list_lock);
 }
 EXPORT_SYMBOL_GPL(virtio_break_device);
 
@@ -9702,8 +9702,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	bool root_log_pinned = false;
 	bool dest_log_pinned = false;
 
-	/* we only allow rename subvolume link between subvolumes */
-	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+	/*
+	 * For non-subvolumes allow exchange only within one subvolume, in the
+	 * same inode namespace. Two subvolumes (represented as directory) can
+	 * be exchanged as they're a logical link and have a fixed inode number.
+	 */
+	if (root != dest &&
+	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
+	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
 		return -EXDEV;
 
 	/* close the race window with snapshot create/destroy ioctl */
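The old condition only looked at old_ino, so a cross-subvolume exchange slipped through whenever the first inode happened to be a subvolume root, regardless of the second. A standalone model of the tightened predicate (BTRFS_FIRST_FREE_OBJECTID, 256 in current formats, is the fixed objectid of a subvolume root):

        #include <stdbool.h>

        #define FIRST_FREE_OBJECTID 256ULL

        static bool exchange_allowed(bool same_root,
                                     unsigned long long old_ino,
                                     unsigned long long new_ino)
        {
                if (same_root)
                        return true;    /* same subvolume: always permitted */
                /* across subvolumes, both inodes must be subvolume roots */
                return old_ino == FIRST_FREE_OBJECTID &&
                       new_ino == FIRST_FREE_OBJECTID;
        }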
@@ -719,7 +719,7 @@ enum {
 #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
 
 /* Max logical block we can support */
-#define EXT4_MAX_LOGICAL_BLOCK		0xFFFFFFFF
+#define EXT4_MAX_LOGICAL_BLOCK		0xFFFFFFFE
 
 /*
  * Structure of an inode on the disk
@@ -1669,8 +1669,12 @@ static inline bool may_mount(void)
 }
 
 #ifdef CONFIG_MANDATORY_FILE_LOCKING
-static inline bool may_mandlock(void)
+static bool may_mandlock(void)
 {
+	pr_warn_once("======================================================\n"
+		     "WARNING: the mand mount option is being deprecated and\n"
+		     "         will be removed in v5.15!\n"
+		     "======================================================\n");
 	return capable(CAP_SYS_ADMIN);
 }
 #else
@@ -9,6 +9,9 @@
 #include <linux/xattr.h>
 #include <linux/uio.h>
 #include <linux/uaccess.h>
+#include <linux/splice.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
 #include "overlayfs.h"
 
 static char ovl_whatisit(struct inode *inode, struct inode *realinode)
@@ -293,6 +296,48 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 	return ret;
 }
 
+static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
+			       struct pipe_inode_info *pipe, size_t len,
+			       unsigned int flags)
+{
+	ssize_t ret;
+	struct fd real;
+	const struct cred *old_cred;
+
+	ret = ovl_real_fdget(in, &real);
+	if (ret)
+		return ret;
+
+	old_cred = ovl_override_creds(file_inode(in)->i_sb);
+	ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
+	revert_creds(old_cred);
+
+	ovl_file_accessed(in);
+	fdput(real);
+	return ret;
+}
+
+static ssize_t
+ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
+		 loff_t *ppos, size_t len, unsigned int flags)
+{
+	struct fd real;
+	const struct cred *old_cred;
+	ssize_t ret;
+
+	ret = ovl_real_fdget(out, &real);
+	if (ret)
+		return ret;
+
+	old_cred = ovl_override_creds(file_inode(out)->i_sb);
+	ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
+	revert_creds(old_cred);
+
+	ovl_file_accessed(out);
+	fdput(real);
+	return ret;
+}
+
 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
 	struct fd real;
@@ -649,6 +694,8 @@ const struct file_operations ovl_file_operations = {
 	.fadvise	= ovl_fadvise,
 	.unlocked_ioctl	= ovl_ioctl,
 	.compat_ioctl	= ovl_compat_ioctl,
+	.splice_read    = ovl_splice_read,
+	.splice_write   = ovl_splice_write,
 
 	.copy_file_range	= ovl_copy_file_range,
 	.remap_file_range	= ovl_remap_file_range,
@@ -356,17 +356,54 @@ static inline bool mem_cgroup_disabled(void)
 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
-						  bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+					 struct mem_cgroup *memcg,
+					 unsigned long *min,
+					 unsigned long *low)
 {
+	*min = *low = 0;
+
 	if (mem_cgroup_disabled())
-		return 0;
+		return;
 
-	if (in_low_reclaim)
-		return READ_ONCE(memcg->memory.emin);
+	/*
+	 * There is no reclaim protection applied to a targeted reclaim.
+	 * We are special casing this specific case here because
+	 * mem_cgroup_protected calculation is not robust enough to keep
+	 * the protection invariant for calculated effective values for
+	 * parallel reclaimers with different reclaim target. This is
+	 * especially a problem for tail memcgs (as they have pages on LRU)
+	 * which would want to have effective values 0 for targeted reclaim
+	 * but a different value for external reclaim.
+	 *
+	 * Example
+	 * Let's have global and A's reclaim in parallel:
+	 *  |
+	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+	 *  |\
+	 *  | C (low = 1G, usage = 2.5G)
+	 *  B (low = 1G, usage = 0.5G)
+	 *
+	 * For the global reclaim
+	 * A.elow = A.low
+	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+	 * C.elow = min(C.usage, C.low)
+	 *
+	 * With the effective values resetting we have A reclaim
+	 * A.elow = 0
+	 * B.elow = B.low
+	 * C.elow = C.low
+	 *
+	 * If the global reclaim races with A's reclaim then
+	 * B.elow = C.elow = 0 because children_low_usage > A.elow)
+	 * is possible and reclaiming B would be violating the protection.
+	 *
+	 */
+	if (root == memcg)
+		return;
 
-	return max(READ_ONCE(memcg->memory.emin),
-		   READ_ONCE(memcg->memory.elow));
+	*min = READ_ONCE(memcg->memory.emin);
+	*low = READ_ONCE(memcg->memory.elow);
 }
 
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
@@ -847,10 +884,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
-static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
-						  bool in_low_reclaim)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+					 struct mem_cgroup *memcg,
+					 unsigned long *min,
+					 unsigned long *low)
 {
-	return 0;
+	*min = *low = 0;
 }
 
 static inline enum mem_cgroup_protection mem_cgroup_protected(
@@ -111,6 +111,7 @@ struct virtio_device {
 	bool config_enabled;
 	bool config_change_pending;
 	spinlock_t config_lock;
+	spinlock_t vqs_list_lock; /* Protects VQs list access */
 	struct device dev;
 	struct virtio_device_id id;
 	const struct virtio_config_ops *config;
@@ -8586,6 +8586,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
 		if (aux_data[i].seen)
 			continue;
 		memcpy(insn + i, &trap, sizeof(trap));
+		aux_data[i].zext_dst = false;
 	}
 }
 
@@ -4291,6 +4291,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
 			event = data->match_data.event;
 		}
 
+		if (!event)
+			goto free;
 		/*
 		 * At this point, we're looking at a field on another
 		 * event.  Because we can't modify a hist trigger on
@@ -6446,6 +6446,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 
 	if (!root)
 		root = root_mem_cgroup;
+
+	/*
+	 * Effective values of the reclaim targets are ignored so they
+	 * can be stale. Have a look at mem_cgroup_protection for more
+	 * details.
+	 * TODO: calculation should be more robust so that we do not need
+	 * that special casing.
+	 */
 	if (memcg == root)
 		return MEMCG_PROT_NONE;
 
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -89,9 +89,12 @@ struct scan_control {
 	unsigned int may_swap:1;
 
 	/*
-	 * Cgroups are not reclaimed below their configured memory.low,
-	 * unless we threaten to OOM. If any cgroups are skipped due to
-	 * memory.low and nothing was reclaimed, go back for memory.low.
+	 * Cgroup memory below memory.low is protected as long as we
+	 * don't threaten to OOM. If any cgroup is reclaimed at
+	 * reduced force or passed over entirely due to its memory.low
+	 * setting (memcg_low_skipped), and nothing is reclaimed as a
+	 * result, then go back for one more cycle that reclaims the protected
+	 * memory (memcg_low_reclaim) to avert OOM.
 	 */
 	unsigned int memcg_low_reclaim:1;
 	unsigned int memcg_low_skipped:1;
@@ -2458,14 +2461,14 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 	for_each_evictable_lru(lru) {
 		int file = is_file_lru(lru);
 		unsigned long lruvec_size;
+		unsigned long low, min;
 		unsigned long scan;
-		unsigned long protection;
 
 		lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-		protection = mem_cgroup_protection(memcg,
-						   sc->memcg_low_reclaim);
+		mem_cgroup_protection(sc->target_mem_cgroup, memcg,
+				      &min, &low);
 
-		if (protection) {
+		if (min || low) {
 			/*
 			 * Scale a cgroup's reclaim pressure by proportioning
 			 * its current usage to its memory.low or memory.min
@@ -2496,6 +2499,15 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			 * hard protection.
 			 */
 			unsigned long cgroup_size = mem_cgroup_size(memcg);
+			unsigned long protection;
+
+			/* memory.low scaling, make sure we retry before OOM */
+			if (!sc->memcg_low_reclaim && low > min) {
+				protection = low;
+				sc->memcg_low_skipped = 1;
+			} else {
+				protection = min;
+			}
+
+			/* Avoid TOCTOU with earlier protection check */
+			cgroup_size = max(cgroup_size, protection);
@@ -1290,7 +1290,7 @@ static int hidp_session_thread(void *arg)
 
 	/* cleanup runtime environment */
 	remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
-	remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
+	remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
 	wake_up_interruptible(&session->report_queue);
 	hidp_del_timer(session);
 
@@ -41,9 +41,9 @@ extern bool dccp_debug;
 #define dccp_pr_debug_cat(format, a...)   DCCP_PRINTK(dccp_debug, format, ##a)
 #define dccp_debug(fmt, a...)		  dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
 #else
-#define dccp_pr_debug(format, a...)
-#define dccp_pr_debug_cat(format, a...)
-#define dccp_debug(format, a...)
+#define dccp_pr_debug(format, a...)	  do {} while (0)
+#define dccp_pr_debug_cat(format, a...)	  do {} while (0)
+#define dccp_debug(format, a...)	  do {} while (0)
 #endif
 
 extern struct inet_hashinfo dccp_hashinfo;
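With debugging compiled out, a macro that expands to nothing leaves a bare `;` at each call site, which trips -Wempty-body ("suggest braces around empty body in an 'if' statement") in brace-less if statements; `do {} while (0)` keeps every invocation a single well-formed statement with unchanged semantics. A small standalone demonstration (building with -Wempty-body warns on the first form only):

        #include <stdio.h>

        #define dbg_empty(fmt, ...)                    /* expands to nothing */
        #define dbg_safe(fmt, ...)  do { } while (0)   /* a real statement */

        int main(void)
        {
                int err = 1;

                if (err)
                        dbg_empty("failed\n"); /* `if (err) ;` -- warning */

                if (err)
                        dbg_safe("failed\n");  /* empty but well-formed */

                return 0;
        }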
@@ -231,7 +231,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 	unsigned int i, optl, tcphdr_len, offset;
 	struct tcphdr *tcph;
 	u8 *opt;
-	u32 src;
 
 	tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
 	if (!tcph)
@@ -240,7 +239,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 	opt = (u8 *)tcph;
 	for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
 		union {
-			u8 octet;
 			__be16 v16;
 			__be32 v32;
 		} old, new;
@@ -262,13 +260,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 		if (!tcph)
 			return;
 
-		src = regs->data[priv->sreg];
 		offset = i + priv->offset;
 
 		switch (priv->len) {
 		case 2:
 			old.v16 = get_unaligned((u16 *)(opt + offset));
-			new.v16 = src;
+			new.v16 = (__force __be16)nft_reg_load16(
+					&regs->data[priv->sreg]);
 
 			switch (priv->type) {
 			case TCPOPT_MSS:
@@ -286,7 +284,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
 					  old.v16, new.v16, false);
 			break;
 		case 4:
-			new.v32 = src;
+			new.v32 = regs->data[priv->sreg];
 			old.v32 = get_unaligned((u32 *)(opt + offset));
 
 			if (old.v32 == new.v32)
@@ -499,6 +499,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
 	}
 
 	skb->dev = vport->dev;
+	skb->tstamp = 0;
 	vport->ops->send(skb);
 	return;
 
@@ -3458,7 +3458,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
 	struct hda_gen_spec *spec = codec->spec;
 	const struct hda_input_mux *imux;
 	struct nid_path *path;
-	int i, adc_idx, err = 0;
+	int i, adc_idx, ret, err = 0;
 
 	imux = &spec->input_mux;
 	adc_idx = kcontrol->id.index;
@@ -3468,10 +3468,14 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
 		if (!path || !path->ctls[type])
 			continue;
 		kcontrol->private_value = path->ctls[type];
-		err = func(kcontrol, ucontrol);
-		if (err < 0)
+		ret = func(kcontrol, ucontrol);
+		if (ret < 0) {
+			err = ret;
 			break;
+		}
+		if (ret > 0)
+			err = 1;
 	}
 	mutex_unlock(&codec->control_mutex);
 	if (err >= 0 && spec->cap_sync_hook)
 		spec->cap_sync_hook(codec, kcontrol, ucontrol);
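ALSA put() callbacks return 1 when the control value changed and 0 when it did not. The old loop reused err for every path, so a final path returning 0 overwrote an earlier 1 and the change notification was silently dropped; the fix keeps errors in err, makes "changed" sticky, and still stops at the first failure. A standalone model of that aggregation (put_path() is a hypothetical stand-in for the per-path callback):

        #include <stdio.h>

        static int put_path(int i)
        {
                return i == 0 ? 1 : 0;  /* assume only path 0 changes */
        }

        int main(void)
        {
                int i, ret, err = 0;

                for (i = 0; i < 3; i++) {
                        ret = put_path(i);
                        if (ret < 0) {  /* first error wins */
                                err = ret;
                                break;
                        }
                        if (ret > 0)    /* remember that something changed */
                                err = 1;
                }
                printf("aggregate: %d\n", err); /* 1 -> emit notification */
                return 0;
        }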
@@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
 	snd_pcm_uframes_t period_size;
 	ssize_t periodbytes;
 	ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
-	u32 buffer_addr = substream->runtime->dma_addr;
+	u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
 
 	channels = substream->runtime->channels;
 	period_size = substream->runtime->period_size;