This is the 5.4.232 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmP2AZoACgkQONu9yGCS
aT4Kog//cMOCPvc+9yam5NCZj76k9jzIfteKMZzvSyxjV/ShPGynLIwcR26vE4j1
CtEB0aknuxgpqthfCahjjf51POhYLJD62H62UtTfxgIkWxnETd8F6y2xvuVXsds5
mC0LUzQ9md6slgTTIobQF9ilIGAt/yKPOg89fUXNYzNsO2us46XZCmZOXg5MVwlI
hXYQuVBze1VhWt40J8TYDFckjoQLUgH6lBawWHC8/r2MBBydzX1cZEyL2TXhDfFS
7t9gWXKteAFE6GWfgAY6MrtqGx+X25Xe7qds4V8v6FgxR2MFeo94+k3DbhXRnjjY
gA6czJBurGzhiXWo2E4laYlEMfsY0qkl17M49C/LwkJhZCSjF60b0Vo0dNfLLogZ
cWsG6qcrfV8/js5h97kFSluWZ4VM7xTgcJQ/qtU/O8IprRQioCERjvm4Dl+/emXI
ycFaiZOP3RvYdHxADIsItm46C7WzpzqZpjjs+9jHEaACrnOQfepGGFmgImMd9P8r
kkU5KUtPQoAgSFfPz1tvJgyQiazONRAKtg1UprPOnLN0PsBQrE8ekCOk9lDoW60l
t+G2lC0dJBYkcKC+4jHa9y18U7wz/eYdYE+K/u8kUENYFLSBfYxIqbXPxQZcq6aO
TnyVr1n+Dd3HXtLX58+vDE2RUjosvCXctBGrE6Q56d8AKXh6FvM=
=rk4j
-----END PGP SIGNATURE-----

Merge 5.4.232 into android11-5.4-lts

Changes in 5.4.232
    firewire: fix memory leak for payload of request subaction to IEC 61883-1 FCP region
    bus: sunxi-rsb: Fix error handling in sunxi_rsb_init()
    ASoC: Intel: bytcr_rt5651: Drop reference count of ACPI device after use
    ALSA: hda/via: Avoid potential array out-of-bound in add_secret_dac_path()
    arm64: dts: imx8mm: Fix pad control for UART1_DTE_RX
    scsi: Revert "scsi: core: map PQ=1, PDT=other values to SCSI_SCAN_TARGET_PRESENT"
    WRITE is "data source", not destination...
    fix iov_iter_bvec() "direction" argument
    fix "direction" argument of iov_iter_kvec()
    netrom: Fix use-after-free caused by accept on already connected socket
    netfilter: br_netfilter: disable sabotage_in hook after first suppression
    squashfs: harden sanity check in squashfs_read_xattr_id_table
    net: phy: meson-gxl: Add generic dummy stubs for MMD register access
    can: j1939: fix errant WARN_ON_ONCE in j1939_session_deactivate
    ata: libata: Fix sata_down_spd_limit() when no link speed is reported
    selftests: net: udpgso_bench_rx: Fix 'used uninitialized' compiler warning
    selftests: net: udpgso_bench_rx/tx: Stop when wrong CLI args are provided
    selftests: net: udpgso_bench: Fix racing bug between the rx/tx programs
    selftests: net: udpgso_bench_tx: Cater for pending datagrams zerocopy benchmarking
    virtio-net: Keep stop() to follow mirror sequence of open()
    net: openvswitch: fix flow memory leak in ovs_flow_cmd_new
    efi: fix potential NULL deref in efi_mem_reserve_persistent
    scsi: target: core: Fix warning on RT kernels
    scsi: iscsi_tcp: Fix UAF during login when accessing the shost ipaddress
    i2c: rk3x: fix a bunch of kernel-doc warnings
    net/x25: Fix to not accept on connected socket
    iio: adc: stm32-dfsdm: fill module aliases
    usb: dwc3: dwc3-qcom: Fix typo in the dwc3 vbus override API
    usb: dwc3: qcom: enable vbus override when in OTG dr-mode
    usb: gadget: f_fs: Fix unbalanced spinlock in __ffs_ep0_queue_wait
    vc_screen: move load of struct vc_data pointer in vcs_read() to avoid UAF
    Input: i8042 - move __initconst to fix code styling warning
    Input: i8042 - merge quirk tables
    Input: i8042 - add TUXEDO devices to i8042 quirk tables
    Input: i8042 - add Clevo PCX0DX to i8042 quirk table
    fbcon: Check font dimension limits
    watchdog: diag288_wdt: do not use stack buffers for hardware data
    watchdog: diag288_wdt: fix __diag288() inline assembly
    efi: Accept version 2 of memory attributes table
    iio: hid: fix the retval in accel_3d_capture_sample
    iio: adc: berlin2-adc: Add missing of_node_put() in error path
    iio:adc:twl6030: Enable measurements of VUSB, VBAT and others
    parisc: Fix return code of pdc_iodc_print()
    parisc: Wire up PTRACE_GETREGS/PTRACE_SETREGS for compat case
    riscv: disable generation of unwind tables
    mm: hugetlb: proc: check for hugetlb shared PMD in /proc/PID/smaps
    fpga: stratix10-soc: Fix return value check in s10_ops_write_init()
    mm/swapfile: add cond_resched() in get_swap_pages()
    Squashfs: fix handling and sanity checking of xattr_ids count
    nvmem: core: fix cell removal on error
    mm: swap: properly update readahead statistics in unuse_pte_range()
    xprtrdma: Fix regbuf data not freed in rpcrdma_req_create()
    serial: 8250_dma: Fix DMA Rx completion race
    serial: 8250_dma: Fix DMA Rx rearm race
    powerpc/imc-pmu: Revert nest_init_lock to being a mutex
    fbdev: smscufx: fix error handling code in ufx_usb_probe
    f2fs: fix to do sanity check on i_extra_isize in is_alive()
    wifi: brcmfmac: Check the count value of channel spec to prevent out-of-bounds reads
    iio:adc:twl6030: Enable measurement of VAC
    btrfs: limit device extents to the device size
    btrfs: zlib: zero-initialize zlib workspace
    ALSA: emux: Avoid potential array out-of-bound in snd_emux_xg_control()
    tracing: Fix poll() and select() do not work on per_cpu trace_pipe and trace_pipe_raw
    can: j1939: do not wait 250 ms if the same addr was already claimed
    IB/hfi1: Restore allocated resources on failed copyout
    IB/IPoIB: Fix legacy IPoIB due to wrong number of queues
    iommu: Add gfp parameter to iommu_ops::map
    RDMA/usnic: use iommu_map_atomic() under spin_lock()
    xfrm: fix bug with DSCP copy to v6 from v4 tunnel
    bonding: fix error checking in bond_debug_reregister()
    net: phy: meson-gxl: use MMD access dummy stubs for GXL, internal PHY
    ionic: clean interrupt before enabling queue to avoid credit race
    ice: Do not use WQ_MEM_RECLAIM flag for workqueue
    rds: rds_rm_zerocopy_callback() use list_first_entry()
    selftests: forwarding: lib: quote the sysctl values
    ALSA: pci: lx6464es: fix a debug loop
    pinctrl: aspeed: Fix confusing types in return value
    pinctrl: single: fix potential NULL dereference
    pinctrl: intel: Restore the pins that used to be in Direct IRQ mode
    net: USB: Fix wrong-direction WARNING in plusb.c
    usb: core: add quirk for Alcor Link AK9563 smartcard reader
    usb: typec: altmodes/displayport: Fix probe pin assign check
    ceph: flush cap releases when the session is flushed
    riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte
    arm64: dts: meson-gx: Make mmc host controller interrupts level-sensitive
    arm64: dts: meson-g12-common: Make mmc host controller interrupts level-sensitive
    arm64: dts: meson-axg: Make mmc host controller interrupts level-sensitive
    nvme-pci: Move enumeration by class to be last in the table
    bpf: Always return target ifindex in bpf_fib_lookup
    migrate: hugetlb: check for hugetlb shared PMD in node migration
    selftests/bpf: Verify copy_register_state() preserves parent/live fields
    ASoC: cs42l56: fix DT probe
    tools/virtio: fix the vringh test for virtio ring changes
    net/rose: Fix to not accept on connected socket
    net: stmmac: do not stop RX_CLK in Rx LPI state for qcs404 SoC
    net: sched: sch: Bounds check priority
    s390/decompressor: specify __decompress() buf len to avoid overflow
    nvme-fc: fix a missing queue put in nvmet_fc_ls_create_association
    aio: fix mremap after fork null-deref
    btrfs: free device in btrfs_close_devices for a single device filesystem
    netfilter: nft_tproxy: restrict to prerouting hook
    xfs: remove the xfs_efi_log_item_t typedef
    xfs: remove the xfs_efd_log_item_t typedef
    xfs: remove the xfs_inode_log_item_t typedef
    xfs: factor out a xfs_defer_create_intent helper
    xfs: merge the ->log_item defer op into ->create_intent
    xfs: merge the ->diff_items defer op into ->create_intent
    xfs: turn dfp_intent into a xfs_log_item
    xfs: refactor xfs_defer_finish_noroll
    xfs: log new intent items created as part of finishing recovered intent items
    xfs: fix finobt btree block recovery ordering
    xfs: proper replay of deferred ops queued during log recovery
    xfs: xfs_defer_capture should absorb remaining block reservations
    xfs: xfs_defer_capture should absorb remaining transaction reservation
    xfs: clean up bmap intent item recovery checking
    xfs: clean up xfs_bui_item_recover iget/trans_alloc/ilock ordering
    xfs: fix an incore inode UAF in xfs_bui_recover
    xfs: change the order in which child and parent defer ops are finished
    xfs: periodically relog deferred intent items
    xfs: expose the log push threshold
    xfs: only relog deferred intent items if free space in the log gets low
    xfs: fix missing CoW blocks writeback conversion retry
    xfs: ensure inobt record walks always make forward progress
    xfs: fix the forward progress assertion in xfs_iwalk_run_callbacks
    xfs: prevent UAF in xfs_log_item_in_current_chkpt
    xfs: sync lazy sb accounting on quiesce of read-only mounts
    Revert "ipv4: Fix incorrect route flushing when source address is deleted"
    ipv4: Fix incorrect route flushing when source address is deleted
    mmc: sdio: fix possible resource leaks in some error paths
    mmc: mmc_spi: fix error handling in mmc_spi_probe()
    ALSA: hda/conexant: add a new hda codec SN6180
    ALSA: hda/realtek - fixed wrong gpio assigned
    sched/psi: Fix use-after-free in ep_remove_wait_queue()
    hugetlb: check for undefined shift on 32 bit architectures
    Revert "mm: Always release pages to the buddy allocator in memblock_free_late()."
    net: Fix unwanted sign extension in netdev_stats_to_stats64()
    revert "squashfs: harden sanity check in squashfs_read_xattr_id_table"
    ixgbe: allow to increase MTU to 3K with XDP enabled
    i40e: add double of VLAN header when computing the max MTU
    net: bgmac: fix BCM5358 support by setting correct flags
    sctp: sctp_sock_filter(): avoid list_entry() on possibly empty list
    dccp/tcp: Avoid negative sk_forward_alloc by ipv6_pinfo.pktoptions.
    net/usb: kalmia: Don't pass act_len in usb_bulk_msg error path
    net: stmmac: fix order of dwmac5 FlexPPS parametrization sequence
    bnxt_en: Fix mqprio and XDP ring checking logic
    net: stmmac: Restrict warning on disabling DMA store and fwd mode
    net: mpls: fix stale pointer if allocation fails during device rename
    ixgbe: add double of VLAN header when computing the max MTU
    ipv6: Fix datagram socket connection with DSCP.
    ipv6: Fix tcp socket connection with DSCP.
    i40e: Add checking for null for nlmsg_find_attr()
    kvm: initialize all of the kvm_debugregs structure before sending it to userspace
    nilfs2: fix underflow in second superblock position calculations
    ASoC: SOF: Intel: hda-dai: fix possible stream_tag leak
    net: sched: sch: Fix off by one in htb_activate_prios()
    iommu/amd: Pass gfp flags to iommu_map_page() in amd_iommu_map()
    Linux 5.4.232

Change-Id: I607aaac0f8477eb9a0f059e0a9d2f5c037fb19fc
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit abc4ede193

--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 231
+SUBLEVEL = 232
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

@@ -1705,7 +1705,7 @@
 sd_emmc_b: sd@5000 {
     compatible = "amlogic,meson-axg-mmc";
     reg = <0x0 0x5000 0x0 0x800>;
-    interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
     clocks = <&clkc CLKID_SD_EMMC_B>,
              <&clkc CLKID_SD_EMMC_B_CLK0>,

@@ -1717,7 +1717,7 @@
 sd_emmc_c: mmc@7000 {
     compatible = "amlogic,meson-axg-mmc";
     reg = <0x0 0x7000 0x0 0x800>;
-    interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
     clocks = <&clkc CLKID_SD_EMMC_C>,
              <&clkc CLKID_SD_EMMC_C_CLK0>,

@@ -2317,7 +2317,7 @@
 sd_emmc_a: sd@ffe03000 {
     compatible = "amlogic,meson-axg-mmc";
     reg = <0x0 0xffe03000 0x0 0x800>;
-    interrupts = <GIC_SPI 189 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
     clocks = <&clkc CLKID_SD_EMMC_A>,
              <&clkc CLKID_SD_EMMC_A_CLK0>,

@@ -2329,7 +2329,7 @@
 sd_emmc_b: sd@ffe05000 {
     compatible = "amlogic,meson-axg-mmc";
     reg = <0x0 0xffe05000 0x0 0x800>;
-    interrupts = <GIC_SPI 190 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
     clocks = <&clkc CLKID_SD_EMMC_B>,
              <&clkc CLKID_SD_EMMC_B_CLK0>,

@@ -2341,7 +2341,7 @@
 sd_emmc_c: mmc@ffe07000 {
     compatible = "amlogic,meson-axg-mmc";
     reg = <0x0 0xffe07000 0x0 0x800>;
-    interrupts = <GIC_SPI 191 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
     clocks = <&clkc CLKID_SD_EMMC_C>,
              <&clkc CLKID_SD_EMMC_C_CLK0>,

@@ -528,21 +528,21 @@
 sd_emmc_a: mmc@70000 {
     compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
     reg = <0x0 0x70000 0x0 0x800>;
-    interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
 };

 sd_emmc_b: mmc@72000 {
     compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
     reg = <0x0 0x72000 0x0 0x800>;
-    interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
 };

 sd_emmc_c: mmc@74000 {
     compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
     reg = <0x0 0x74000 0x0 0x800>;
-    interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
+    interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>;
     status = "disabled";
 };
 };

@@ -585,7 +585,7 @@
 #define MX8MM_IOMUXC_UART1_RXD_GPIO5_IO22 0x234 0x49C 0x000 0x5 0x0
 #define MX8MM_IOMUXC_UART1_RXD_TPSMP_HDATA24 0x234 0x49C 0x000 0x7 0x0
 #define MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x238 0x4A0 0x000 0x0 0x0
-#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x0
+#define MX8MM_IOMUXC_UART1_TXD_UART1_DTE_RX 0x238 0x4A0 0x4F4 0x0 0x1
 #define MX8MM_IOMUXC_UART1_TXD_ECSPI3_MOSI 0x238 0x4A0 0x000 0x1 0x0
 #define MX8MM_IOMUXC_UART1_TXD_GPIO5_IO23 0x238 0x4A0 0x000 0x5 0x0
 #define MX8MM_IOMUXC_UART1_TXD_TPSMP_HDATA25 0x238 0x4A0 0x000 0x7 0x0

@@ -1229,7 +1229,7 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
  */
 int pdc_iodc_print(const unsigned char *str, unsigned count)
 {
-    unsigned int i;
+    unsigned int i, found = 0;
     unsigned long flags;

     for (i = 0; i < count;) {

@@ -1238,6 +1238,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
             iodc_dbuf[i+0] = '\r';
             iodc_dbuf[i+1] = '\n';
             i += 2;
+            found = 1;
             goto print;
         default:
             iodc_dbuf[i] = str[i];

@@ -1254,7 +1255,7 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
             __pa(iodc_retbuf), 0, __pa(iodc_dbuf), i, 0);
     spin_unlock_irqrestore(&pdc_lock, flags);

-    return i;
+    return i - found;
 }

 #if !defined(BOOTLOADER)
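The pdc_iodc_print() change makes the function return the number of input characters consumed rather than the output index: expanding '\n' to "\r\n" writes one extra byte, so the output index overcounts the input by exactly the value of "found". A minimal user-space sketch of the same accounting, with illustrative names (dst must hold count + 1 bytes):

    #include <stddef.h>

    /* Copy src into dst, expanding '\n' to "\r\n" and flushing at the
     * newline. Returns the number of *input* characters consumed. */
    static size_t copy_expand_crlf(char *dst, const char *src, size_t count)
    {
        size_t i, found = 0;

        for (i = 0; i < count;) {
            if (src[i] == '\n') {
                dst[i + 0] = '\r';
                dst[i + 1] = '\n';
                i += 2;
                found = 1;
                break;              /* flush up to the newline */
            }
            dst[i] = src[i];
            i++;
        }
        return i - found;           /* input characters consumed */
    }

For "ab\n" this writes four bytes but reports three characters consumed, which is what a printk-style caller needs.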
@@ -128,6 +128,12 @@ long arch_ptrace(struct task_struct *child, long request,
     unsigned long tmp;
     long ret = -EIO;

+    unsigned long user_regs_struct_size = sizeof(struct user_regs_struct);
+#ifdef CONFIG_64BIT
+    if (is_compat_task())
+        user_regs_struct_size /= 2;
+#endif
+
     switch (request) {

     /* Read the word at location addr in the USER area.  For ptraced

@@ -183,14 +189,14 @@ long arch_ptrace(struct task_struct *child, long request,
         return copy_regset_to_user(child,
                        task_user_regset_view(current),
                        REGSET_GENERAL,
-                       0, sizeof(struct user_regs_struct),
+                       0, user_regs_struct_size,
                        datap);

     case PTRACE_SETREGS:    /* Set all gp regs in the child. */
         return copy_regset_from_user(child,
                          task_user_regset_view(current),
                          REGSET_GENERAL,
-                         0, sizeof(struct user_regs_struct),
+                         0, user_regs_struct_size,
                          datap);

     case PTRACE_GETFPREGS:  /* Get the child FPU state. */

@@ -304,6 +310,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
             }
         }
         break;
+    case PTRACE_GETREGS:
+    case PTRACE_SETREGS:
+    case PTRACE_GETFPREGS:
+    case PTRACE_SETFPREGS:
+        return arch_ptrace(child, request, addr, data);

     default:
         ret = compat_ptrace_request(child, request, addr, data);

@@ -21,7 +21,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_SPINLOCK(nest_init_lock);
+static DEFINE_MUTEX(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;

@@ -1605,7 +1605,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
     if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-        spin_lock(&nest_init_lock);
+        mutex_lock(&nest_init_lock);
         if (nest_pmus == 1) {
             cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
             kfree(nest_imc_refc);

@@ -1615,7 +1615,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)

         if (nest_pmus > 0)
             nest_pmus--;
-        spin_unlock(&nest_init_lock);
+        mutex_unlock(&nest_init_lock);
     }

     /* Free core_imc memory */

@@ -1772,11 +1772,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
          * rest. To handle the cpuhotplug callback unregister, we track
          * the number of nest pmus in "nest_pmus".
          */
-        spin_lock(&nest_init_lock);
+        mutex_lock(&nest_init_lock);
         if (nest_pmus == 0) {
             ret = init_nest_pmu_ref();
             if (ret) {
-                spin_unlock(&nest_init_lock);
+                mutex_unlock(&nest_init_lock);
                 kfree(per_nest_pmu_arr);
                 per_nest_pmu_arr = NULL;
                 goto err_free_mem;

@@ -1784,7 +1784,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
             /* Register for cpu hotplug notification. */
             ret = nest_pmu_cpumask_init();
             if (ret) {
-                spin_unlock(&nest_init_lock);
+                mutex_unlock(&nest_init_lock);
                 kfree(nest_imc_refc);
                 kfree(per_nest_pmu_arr);
                 per_nest_pmu_arr = NULL;

@@ -1792,7 +1792,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
         }
     }
     nest_pmus++;
-    spin_unlock(&nest_init_lock);
+    mutex_unlock(&nest_init_lock);
     break;
 case IMC_DOMAIN_CORE:
     ret = core_imc_pmu_cpumask_init();
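The imc-pmu revert restores a mutex because the guarded section calls functions that can sleep (cpuhp_remove_state(), allocations), which is illegal under a spinlock. A hedged sketch of the pattern, with illustrative names:

    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(init_lock);
    static int refcount;
    static void *shared_state;

    /* First caller initialises shared state; the allocation may sleep,
     * so the section must be guarded by a mutex, not a spinlock. */
    static int subsystem_get(void)
    {
        int ret = 0;

        mutex_lock(&init_lock);
        if (refcount == 0) {
            shared_state = kzalloc(PAGE_SIZE, GFP_KERNEL);  /* may sleep */
            if (!shared_state) {
                ret = -ENOMEM;
                goto out;
            }
        }
        refcount++;
    out:
        mutex_unlock(&init_lock);
        return ret;
    }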
@@ -75,6 +75,9 @@ ifeq ($(CONFIG_PERF_EVENTS),y)
         KBUILD_CFLAGS += -fno-omit-frame-pointer
 endif

+# Avoid generating .eh_frame sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
 KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)

@@ -71,6 +71,8 @@ void flush_icache_pte(pte_t pte)
 {
     struct page *page = pte_page(pte);

-    if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+    if (!test_bit(PG_dcache_clean, &page->flags)) {
         flush_icache_all();
+        set_bit(PG_dcache_clean, &page->flags);
+    }
 }
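The flush_icache_pte() race: test_and_set_bit() publishes PG_dcache_clean before the flush runs, so a concurrent reader can see "clean" while the icache is still stale. The fix flushes first and sets the bit after. A loose user-space analogue of the corrected ordering (illustrative, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool cache_clean;

    /* Test the flag, do the work, then publish. An atomic
     * test-and-set *before* the flush lets a concurrent observer
     * see "clean" while the flush is still in flight. */
    static void sync_icache(void (*flush_all)(void))
    {
        if (!atomic_load(&cache_clean)) {
            flush_all();                        /* perform the flush */
            atomic_store(&cache_clean, true);   /* then advertise it */
        }
    }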
@@ -80,6 +80,6 @@ void *decompress_kernel(void)
     void *output = (void *)decompress_offset;

     __decompress(_compressed_start, _compressed_end - _compressed_start,
-             NULL, NULL, output, 0, NULL, error);
+             NULL, NULL, output, vmlinux.image_size, NULL, error);
     return output;
 }

@@ -3948,12 +3948,11 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
 {
     unsigned long val;

+    memset(dbgregs, 0, sizeof(*dbgregs));
     memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
     kvm_get_dr(vcpu, 6, &val);
     dbgregs->dr6 = val;
     dbgregs->dr7 = vcpu->arch.dr7;
-    dbgregs->flags = 0;
-    memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }

 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
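The kvm_debugregs fix clears the whole structure once before filling named fields, so padding and any members not explicitly assigned cannot leak stack data to userspace. A minimal sketch of the hardening pattern; the struct and names are illustrative:

    #include <string.h>

    struct dbg_snapshot {
        unsigned long long db[4];
        unsigned long long dr6, dr7;
        unsigned long long flags;
        unsigned long long reserved[9];
    };

    /* Zero the entire structure (including padding) up front instead
     * of zeroing selected members and hoping nothing else reaches the
     * user copy. */
    static void fill_snapshot(struct dbg_snapshot *s,
                              const unsigned long long db[4],
                              unsigned long long dr6, unsigned long long dr7)
    {
        memset(s, 0, sizeof(*s));   /* no uninitialized bytes escape */
        memcpy(s->db, db, sizeof(s->db));
        s->dr6 = dr6;
        s->dr7 = dr7;
    }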
@@ -3096,7 +3096,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
      */
     if (spd > 1)
         mask &= (1 << (spd - 1)) - 1;
-    else
+    else if (link->sata_spd)
         return -EINVAL;

     /* were we already at the bottom? */

@@ -781,7 +781,13 @@ static int __init sunxi_rsb_init(void)
         return ret;
     }

-    return platform_driver_register(&sunxi_rsb_driver);
+    ret = platform_driver_register(&sunxi_rsb_driver);
+    if (ret) {
+        bus_unregister(&sunxi_rsb_bus);
+        return ret;
+    }
+
+    return 0;
 }
 module_init(sunxi_rsb_init);
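sunxi_rsb_init() illustrates the standard init unwind rule: when a later registration fails, undo the earlier one so no stale bus is left behind. A hedged sketch; register_step_a()/register_step_b() stand in for bus_register()/platform_driver_register() and are illustrative only:

    #include <linux/init.h>

    int register_step_a(void);
    void unregister_step_a(void);
    int register_step_b(void);

    static int __init example_init(void)
    {
        int ret;

        ret = register_step_a();
        if (ret)
            return ret;

        ret = register_step_b();
        if (ret) {
            unregister_step_a();    /* undo what already succeeded */
            return ret;
        }

        return 0;
    }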
@@ -818,8 +818,10 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)

     r = container_of(resource, struct inbound_transaction_resource,
              resource);
-    if (is_fcp_request(r->request))
+    if (is_fcp_request(r->request)) {
+        kfree(r->data);
         goto out;
+    }

     if (a->length != fw_get_response_length(r->request)) {
         ret = -EINVAL;

@@ -1022,6 +1022,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
     /* first try to find a slot in an existing linked list entry */
     for (prsv = efi_memreserve_root->next; prsv; ) {
         rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+        if (!rsv)
+            return -ENOMEM;
         index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
         if (index < rsv->size) {
             rsv->entry[index].base = addr;
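The efi fix adds the missing check: memremap() reports failure by returning NULL (not an ERR_PTR), so the result must be tested before the first dereference. A small sketch; read_first_byte() is an illustrative helper, not kernel API:

    #include <linux/io.h>

    static int read_first_byte(phys_addr_t phys, size_t size, u8 *out)
    {
        u8 *va = memremap(phys, size, MEMREMAP_WB);

        if (!va)        /* memremap() returns NULL, never an ERR_PTR */
            return -ENOMEM;
        *out = va[0];
        memunmap(va);
        return 0;
    }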
@@ -32,7 +32,7 @@ int __init efi_memattr_init(void)
         return -ENOMEM;
     }

-    if (tbl->version > 1) {
+    if (tbl->version > 2) {
         pr_warn("Unexpected EFI Memory Attributes table version %d\n",
             tbl->version);
         goto unmap;

@@ -218,9 +218,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
     /* Allocate buffers from the service layer's pool. */
     for (i = 0; i < NUM_SVC_BUFS; i++) {
         kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
-        if (!kbuf) {
+        if (IS_ERR(kbuf)) {
             s10_free_buffers(mgr);
-            ret = -ENOMEM;
+            ret = PTR_ERR(kbuf);
             goto init_done;
         }

@@ -640,7 +640,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
     }
     ffdc_iov.iov_base = ffdc;
     ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
-    iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+    iov_iter_kvec(&ffdc_iter, READ, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
     cmd[0] = cpu_to_be32(2);
     cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
     rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);

@@ -737,7 +737,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
     rbytes = (*resp_len) * sizeof(__be32);
     resp_iov.iov_base = response;
     resp_iov.iov_len = rbytes;
-    iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
+    iov_iter_kvec(&resp_iter, READ, &resp_iov, 1, rbytes);

     /* Perform the command */
     mutex_lock(&sbefifo->lock);

@@ -817,7 +817,7 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
     /* Prepare iov iterator */
     resp_iov.iov_base = buf;
     resp_iov.iov_len = len;
-    iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
+    iov_iter_init(&resp_iter, READ, &resp_iov, 1, len);

     /* Perform the command */
     mutex_lock(&sbefifo->lock);
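The sbefifo hunks implement the "WRITE is data source, not destination" rule from the changelog: an iov_iter's direction names the role of its buffers, matching read(2)/write(2). Buffers the device fills are a READ iterator; buffers the device consumes are a WRITE one. A hedged sketch of the convention:

    #include <linux/uio.h>

    /* rx buffers receive device data (destination -> READ);
     * tx buffers feed the device (source -> WRITE). */
    static void setup_iters(struct iov_iter *rx, struct kvec *rx_kv,
                            struct iov_iter *tx, struct kvec *tx_kv)
    {
        iov_iter_kvec(rx, READ, rx_kv, 1, rx_kv->iov_len);   /* dest   */
        iov_iter_kvec(tx, WRITE, tx_kv, 1, tx_kv->iov_len);  /* source */
    }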
@@ -79,7 +79,7 @@ enum {
 #define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */

 /**
- * struct i2c_spec_values:
+ * struct i2c_spec_values - I2C specification values for various modes
  * @min_hold_start_ns: min hold time (repeated) START condition
  * @min_low_ns: min LOW period of the SCL clock
  * @min_high_ns: min HIGH period of the SCL clock

@@ -135,7 +135,7 @@ static const struct i2c_spec_values fast_mode_plus_spec = {
 };

 /**
- * struct rk3x_i2c_calced_timings:
+ * struct rk3x_i2c_calced_timings - calculated V1 timings
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  * @tuning: Used to adjust setup/hold data time,

@@ -158,7 +158,7 @@ enum rk3x_i2c_state {
 };

 /**
- * struct rk3x_i2c_soc_data:
+ * struct rk3x_i2c_soc_data - SOC-specific data
  * @grf_offset: offset inside the grf regmap for setting the i2c type
  * @calc_timings: Callback function for i2c timing information calculated
  */

@@ -238,7 +238,8 @@ static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
 }

 /**
- * Generate a START condition, which triggers a REG_INT_START interrupt.
+ * rk3x_i2c_start - Generate a START condition, which triggers a REG_INT_START interrupt.
+ * @i2c: target controller data
  */
 static void rk3x_i2c_start(struct rk3x_i2c *i2c)
 {

@@ -257,8 +258,8 @@ static void rk3x_i2c_start(struct rk3x_i2c *i2c)
 }

 /**
- * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
- *
+ * rk3x_i2c_stop - Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ * @i2c: target controller data
  * @error: Error code to return in rk3x_i2c_xfer
  */
 static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)

@@ -297,7 +298,8 @@ static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
 }

 /**
- * Setup a read according to i2c->msg
+ * rk3x_i2c_prepare_read - Setup a read according to i2c->msg
+ * @i2c: target controller data
  */
 static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
 {

@@ -328,7 +330,8 @@ static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
 }

 /**
- * Fill the transmit buffer with data from i2c->msg
+ * rk3x_i2c_fill_transmit_buf - Fill the transmit buffer with data from i2c->msg
+ * @i2c: target controller data
  */
 static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
 {

@@ -531,11 +534,10 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
 }

 /**
- * Get timing values of I2C specification
- *
+ * rk3x_i2c_get_spec - Get timing values of I2C specification
  * @speed: Desired SCL frequency
  *
- * Returns: Matched i2c spec values.
+ * Return: Matched i2c_spec_values.
  */
 static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
 {

@@ -548,13 +550,12 @@ static const struct i2c_spec_values *rk3x_i2c_get_spec(unsigned int speed)
 }

 /**
- * Calculate divider values for desired SCL frequency
- *
+ * rk3x_i2c_v0_calc_timings - Calculate divider values for desired SCL frequency
  * @clk_rate: I2C input clock rate
  * @t: Known I2C timing information
  * @t_calc: Caculated rk3x private timings that would be written into regs
  *
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
  * a best-effort divider value is returned in divs. If the target rate is
  * too high, we silently use the highest possible rate.
  */

@@ -709,13 +710,12 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
 }

 /**
- * Calculate timing values for desired SCL frequency
- *
+ * rk3x_i2c_v1_calc_timings - Calculate timing values for desired SCL frequency
  * @clk_rate: I2C input clock rate
  * @t: Known I2C timing information
  * @t_calc: Caculated rk3x private timings that would be written into regs
  *
- * Returns: 0 on success, -EINVAL if the goal SCL rate is too slow. In that case
+ * Return: %0 on success, -%EINVAL if the goal SCL rate is too slow. In that case
  * a best-effort divider value is returned in divs. If the target rate is
  * too high, we silently use the highest possible rate.
  * The following formulas are v1's method to calculate timings.

@@ -959,14 +959,14 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
 }

 /**
- * Setup I2C registers for an I2C operation specified by msgs, num.
- *
- * Must be called with i2c->lock held.
- *
+ * rk3x_i2c_setup - Setup I2C registers for an I2C operation specified by msgs, num.
+ * @i2c: target controller data
  * @msgs: I2C msgs to process
  * @num: Number of msgs
  *
- * returns: Number of I2C msgs processed or negative in case of error
+ * Must be called with i2c->lock held.
+ *
+ * Return: Number of I2C msgs processed or negative in case of error
  */
 static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
 {
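These rk3x hunks silence scripts/kernel-doc warnings: a kernel-doc block must open with "name - summary", document every parameter with "@param:", and spell the return section "Return:". A minimal, correctly formed example (example_clamp is illustrative):

    /**
     * example_clamp - clamp a value into an inclusive range
     * @val: value to clamp
     * @lo: lower bound
     * @hi: upper bound
     *
     * Return: @val limited to the range [@lo, @hi].
     */
    static int example_clamp(int val, int lo, int hi)
    {
        if (val < lo)
            return lo;
        if (val > hi)
            return hi;
        return val;
    }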
@@ -279,6 +279,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
             hid_sensor_convert_timestamp(
                     &accel_state->common_attributes,
                     *(int64_t *)raw_data);
+        ret = 0;
         break;
     default:
         break;

@@ -289,8 +289,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
     int ret;

     indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
-    if (!indio_dev)
+    if (!indio_dev) {
+        of_node_put(parent_np);
         return -ENOMEM;
+    }

     priv = iio_priv(indio_dev);
     platform_set_drvdata(pdev, indio_dev);

@@ -1544,6 +1544,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
     },
     {}
 };
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);

 static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
 {

@@ -57,6 +57,18 @@
 #define TWL6030_GPADCS              BIT(1)
 #define TWL6030_GPADCR              BIT(0)

+#define USB_VBUS_CTRL_SET           0x04
+#define USB_ID_CTRL_SET             0x06
+
+#define TWL6030_MISC1               0xE4
+#define VBUS_MEAS                   0x01
+#define ID_MEAS                     0x01
+
+#define VAC_MEAS                    0x04
+#define VBAT_MEAS                   0x02
+#define BB_MEAS                     0x01
+
+
 /**
  * struct twl6030_chnl_calib - channel calibration
  * @gain: slope coefficient for ideal curve

@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
         return ret;
     }

+    ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
+    ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
+    ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+                   VBAT_MEAS | BB_MEAS | VAC_MEAS,
+                   TWL6030_MISC1);
+    if (ret < 0) {
+        dev_err(dev, "failed to wire up inputs\n");
+        return ret;
+    }
+
     indio_dev->name = DRIVER_NAME;
     indio_dev->dev.parent = dev;
     indio_dev->info = &twl6030_gpadc_iio_info;

@@ -1363,12 +1363,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
         addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
         if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
                  sizeof(tinfo.tidcnt)))
-            return -EFAULT;
+            ret = -EFAULT;

         addr = arg + offsetof(struct hfi1_tid_info, length);
-        if (copy_to_user((void __user *)addr, &tinfo.length,
+        if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
                  sizeof(tinfo.length)))
             ret = -EFAULT;
+
+        if (ret)
+            hfi1_user_exp_rcv_invalid(fd, &tinfo);
     }

     return ret;

@@ -281,8 +281,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
             size = pa_end - pa_start + PAGE_SIZE;
             usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
                 va_start, &pa_start, size, flags);
-            err = iommu_map(pd->domain, va_start, pa_start,
-                        size, flags);
+            err = iommu_map_atomic(pd->domain, va_start,
+                           pa_start, size, flags);
             if (err) {
                 usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                     va_start, &pa_start, size, err);

@@ -298,8 +298,8 @@ static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
             size = pa - pa_start + PAGE_SIZE;
             usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
                 va_start, &pa_start, size, flags);
-            err = iommu_map(pd->domain, va_start, pa_start,
-                    size, flags);
+            err = iommu_map_atomic(pd->domain, va_start,
+                           pa_start, size, flags);
             if (err) {
                 usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
                     va_start, &pa_start, size, err);

@@ -2171,6 +2171,14 @@ int ipoib_intf_init(struct ib_device *hca, u8 port, const char *name,
         rn->attach_mcast = ipoib_mcast_attach;
         rn->detach_mcast = ipoib_mcast_detach;
         rn->hca = hca;
+
+        rc = netif_set_real_num_tx_queues(dev, 1);
+        if (rc)
+            goto out;
+
+        rc = netif_set_real_num_rx_queues(dev, 1);
+        if (rc)
+            goto out;
     }

     priv->rn_ops = dev->netdev_ops;
(One file's diff was omitted by the viewer because it was too large.)
@@ -3098,7 +3098,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }

 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-             phys_addr_t paddr, size_t page_size, int iommu_prot)
+             phys_addr_t paddr, size_t page_size, int iommu_prot,
+             gfp_t gfp)
 {
     struct protection_domain *domain = to_pdomain(dom);
     int prot = 0;

@@ -3113,7 +3114,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
         prot |= IOMMU_PROT_IW;

     mutex_lock(&domain->api_lock);
-    ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
+    ret = iommu_map_page(domain, iova, paddr, page_size, prot, gfp);
     mutex_unlock(&domain->api_lock);

     domain_flush_np_cache(domain, iova, page_size);

@@ -2446,7 +2446,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 }

 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-            phys_addr_t paddr, size_t size, int prot)
+            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;

@@ -1201,7 +1201,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 }

 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
-            phys_addr_t paddr, size_t size, int prot)
+            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
     struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;

@@ -523,7 +523,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
     if (!iova)
         return DMA_MAPPING_ERROR;

-    if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+    if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
         iommu_dma_free_iova(cookie, iova, size);
         return DMA_MAPPING_ERROR;
     }

@@ -658,7 +658,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
             arch_dma_prep_coherent(sg_page(sg), sg->length);
     }

-    if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+    if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
             < size)
         goto out_free_sg;

@@ -918,7 +918,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
      * We'll leave any physical concatenation to the IOMMU driver's
      * implementation - it knows better than we do.
      */
-    if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+    if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
         goto out_free_iova;

     return __finalise_sg(dev, sg, nents, iova);

@@ -1077,7 +1077,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
  */
 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                 unsigned long l_iova, phys_addr_t paddr, size_t size,
-                int prot)
+                int prot, gfp_t gfp)
 {
     struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
     sysmmu_pte_t *entry;

@@ -5458,7 +5458,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,

 static int intel_iommu_map(struct iommu_domain *domain,
                unsigned long iova, phys_addr_t hpa,
-               size_t size, int iommu_prot)
+               size_t size, int iommu_prot, gfp_t gfp)
 {
     struct dmar_domain *dmar_domain = to_dmar_domain(domain);
     u64 max_addr;

@@ -1885,8 +1885,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
     return pgsize;
 }

-int iommu_map(struct iommu_domain *domain, unsigned long iova,
-          phys_addr_t paddr, size_t size, int prot)
+int __iommu_map(struct iommu_domain *domain, unsigned long iova,
+        phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     const struct iommu_ops *ops = domain->ops;
     unsigned long orig_iova = iova;

@@ -1923,8 +1923,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,

         pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
              iova, &paddr, pgsize);
+        ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);

-        ret = ops->map(domain, iova, paddr, pgsize, prot);
         if (ret)
             break;

@@ -1944,8 +1944,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,

     return ret;
 }
+
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+          phys_addr_t paddr, size_t size, int prot)
+{
+    might_sleep();
+    return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
+}
 EXPORT_SYMBOL_GPL(iommu_map);

+int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+             phys_addr_t paddr, size_t size, int prot)
+{
+    return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_atomic);
+
 static size_t __iommu_unmap(struct iommu_domain *domain,
                 unsigned long iova, size_t size,
                 struct iommu_iotlb_gather *iotlb_gather)

@@ -2022,8 +2036,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_unmap_fast);

-size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-            struct scatterlist *sg, unsigned int nents, int prot)
+size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+              struct scatterlist *sg, unsigned int nents, int prot,
+              gfp_t gfp)
 {
     size_t len = 0, mapped = 0;
     phys_addr_t start;

@@ -2034,7 +2049,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
         phys_addr_t s_phys = sg_phys(sg);

         if (len && s_phys != start + len) {
-            ret = iommu_map(domain, iova + mapped, start, len, prot);
+            ret = __iommu_map(domain, iova + mapped, start,
+                      len, prot, gfp);
+
             if (ret)
                 goto out_err;

@@ -2062,8 +2079,22 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
     return 0;

 }
+
+size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+            struct scatterlist *sg, unsigned int nents, int prot)
+{
+    might_sleep();
+    return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
+}
 EXPORT_SYMBOL_GPL(iommu_map_sg);

+size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
+               struct scatterlist *sg, unsigned int nents, int prot)
+{
+    return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+}
+EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
+
 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                    phys_addr_t paddr, u64 size, int prot)
 {
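The iommu core backport follows a common kernel pattern: one worker takes the allocation mode explicitly, and thin sleeping/atomic entry points fix the policy, with might_sleep() catching GFP_KERNEL use from atomic context. A hedged sketch with illustrative example_* names (not kernel API):

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    static int __example_map(unsigned long iova, size_t size, gfp_t gfp)
    {
        void *tbl = kmalloc(size, gfp); /* page-table-ish allocation */

        if (!tbl)
            return -ENOMEM;
        /* ... install the mapping (elided) ... */
        kfree(tbl);
        return 0;
    }

    int example_map(unsigned long iova, size_t size)
    {
        might_sleep();  /* flags GFP_KERNEL callers in atomic context */
        return __example_map(iova, size, GFP_KERNEL);
    }

    int example_map_atomic(unsigned long iova, size_t size)
    {
        return __example_map(iova, size, GFP_ATOMIC);
    }

This is why RDMA/usnic above switches to iommu_map_atomic() under its spin_lock().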
@@ -724,7 +724,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
 }

 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
+             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

@@ -504,7 +504,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
 }

 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t pa, size_t len, int prot)
+             phys_addr_t pa, size_t len, int prot, gfp_t gfp)
 {
     struct msm_priv *priv = to_msm_priv(domain);
     unsigned long flags;

@@ -427,7 +427,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
 }

 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
+             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct mtk_iommu_domain *dom = to_mtk_domain(domain);
     struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
 }

 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
-             phys_addr_t paddr, size_t size, int prot)
+             phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct mtk_iommu_domain *dom = to_mtk_domain(domain);
     unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;

@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 }

 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-              phys_addr_t pa, size_t bytes, int prot)
+              phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
 {
     struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
     struct device *dev = omap_domain->dev;

@@ -419,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
 }

 static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t paddr, size_t size, int prot)
+              phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     int ret;
     unsigned long flags;

@@ -758,7 +758,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
 }

 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
-            phys_addr_t paddr, size_t size, int prot)
+            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
     unsigned long flags;

@@ -265,7 +265,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
 }

 static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t paddr, size_t size, int prot)
+              phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct s390_domain *s390_domain = to_s390_domain(domain);
     int flags = ZPCI_PTE_VALID, rc = 0;

@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
 }

 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t pa, size_t bytes, int prot)
+              phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
 {
     struct gart_device *gart = gart_handle;
     int ret;

@@ -651,7 +651,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
 }

 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t paddr, size_t size, int prot)
+              phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     struct tegra_smmu_as *as = to_smmu_as(domain);
     dma_addr_t pte_dma;

@@ -715,7 +715,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 }

 static int viommu_map(struct iommu_domain *domain, unsigned long iova,
-              phys_addr_t paddr, size_t size, int prot)
+              phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
     int ret;
     u32 flags;
@@ -269,6 +269,12 @@ static void sdio_release_func(struct device *dev)
     if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
         sdio_free_func_cis(func);

+    /*
+     * We have now removed the link to the tuples in the
+     * card structure, so remove the reference.
+     */
+    put_device(&func->card->dev);
+
     kfree(func->info);
     kfree(func->tmpbuf);
     kfree(func);

@@ -299,6 +305,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)

     device_initialize(&func->dev);

+    /*
+     * We may link to tuples in the card structure,
+     * we need make sure we have a reference to it.
+     */
+    get_device(&func->card->dev);
+
     func->dev.parent = &card->dev;
     func->dev.bus = &sdio_bus_type;
     func->dev.release = sdio_release_func;

@@ -352,10 +364,9 @@ int sdio_add_func(struct sdio_func *func)
  */
 void sdio_remove_func(struct sdio_func *func)
 {
-    if (!sdio_func_present(func))
-        return;
+    if (sdio_func_present(func))
+        device_del(&func->dev);

-    device_del(&func->dev);
     of_node_put(func->dev.of_node);
     put_device(&func->dev);
 }

@@ -383,12 +383,6 @@ int sdio_read_func_cis(struct sdio_func *func)
     if (ret)
         return ret;

-    /*
-     * Since we've linked to tuples in the card structure,
-     * we must make sure we have a reference to it.
-     */
-    get_device(&func->card->dev);
-
     /*
      * Vendor/device id is optional for function CIS, so
      * copy it from the card structure as needed.

@@ -414,11 +408,5 @@ void sdio_free_func_cis(struct sdio_func *func)
     }

     func->tuples = NULL;
-
-    /*
-     * We have now removed the link to the tuples in the
-     * card structure, so remove the reference.
-     */
-    put_device(&func->card->dev);
 }

@@ -1420,7 +1420,7 @@ static int mmc_spi_probe(struct spi_device *spi)

     status = mmc_add_host(mmc);
     if (status != 0)
-        goto fail_add_host;
+        goto fail_glue_init;

     /*
      * Index 0 is card detect

@@ -1428,7 +1428,7 @@ static int mmc_spi_probe(struct spi_device *spi)
      */
     status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1, NULL);
     if (status == -EPROBE_DEFER)
-        goto fail_add_host;
+        goto fail_gpiod_request;
     if (!status) {
         /*
          * The platform has a CD GPIO signal that may support

@@ -1443,7 +1443,7 @@ static int mmc_spi_probe(struct spi_device *spi)
     /* Index 1 is write protect/read only */
     status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL);
     if (status == -EPROBE_DEFER)
-        goto fail_add_host;
+        goto fail_gpiod_request;
     if (!status)
         has_ro = true;

@@ -1457,7 +1457,7 @@ static int mmc_spi_probe(struct spi_device *spi)
             ?  ", cd polling" : "");
     return 0;

-fail_add_host:
+fail_gpiod_request:
     mmc_remove_host(mmc);
 fail_glue_init:
     if (host->dma_dev)
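The mmc_spi fix restores the probe-unwind idiom: each failure jumps to a label that tears down exactly what has been set up so far, so failures after mmc_add_host() also remove the host. A hedged sketch; the step names are illustrative, not the driver's:

    int setup_glue(void);
    void teardown_glue(void);
    int add_host(void);
    void remove_host(void);
    int request_gpios(void);

    static int example_probe(void)
    {
        int err;

        err = setup_glue();
        if (err)
            return err;

        err = add_host();
        if (err)
            goto fail_glue;

        err = request_gpios();
        if (err)
            goto fail_host;

        return 0;

    fail_host:
        remove_host();      /* undo add_host() */
    fail_glue:
        teardown_glue();    /* undo setup_glue() */
        return err;
    }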
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)

     d = debugfs_rename(bonding_debug_root, bond->debug_dir,
                bonding_debug_root, bond->dev->name);
-    if (d) {
+    if (!IS_ERR(d)) {
         bond->debug_dir = d;
     } else {
         netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
|
||||
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
|
||||
if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
|
||||
ci->pkg == BCMA_PKG_ID_BCM47186) {
|
||||
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
|
||||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
|
||||
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
|
||||
}
|
||||
if (ci->pkg == BCMA_PKG_ID_BCM5358)
|
||||
if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
|
||||
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
|
||||
break;
|
||||
case BCMA_CHIP_ID_BCM53573:
|
||||
|
@ -8205,10 +8205,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
|
||||
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
|
||||
if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
|
||||
bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
|
||||
netdev_err(bp->dev, "tx ring reservation failure\n");
|
||||
netdev_reset_tc(bp->dev);
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
|
||||
if (bp->tx_nr_rings_xdp)
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
|
||||
else
|
||||
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
|
@ -2702,7 +2702,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
struct i40e_pf *pf = vsi->back;
|
||||
|
||||
if (i40e_enabled_xdp_vsi(vsi)) {
|
||||
int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
|
||||
int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
|
||||
|
||||
if (frame_size > i40e_max_xdp_frame_size(vsi))
|
||||
return -EINVAL;
|
||||
@ -12535,6 +12535,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
|
||||
}
|
||||
|
||||
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
|
||||
if (!br_spec)
|
||||
return -EINVAL;
|
||||
|
||||
nla_for_each_nested(attr, br_spec, rem) {
|
||||
__u16 mode;
|
||||
|
@ -3211,7 +3211,7 @@ static int __init ice_module_init(void)
|
||||
pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
|
||||
pr_info("%s\n", ice_copyright);
|
||||
|
||||
ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
|
||||
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
|
||||
if (!ice_wq) {
|
||||
pr_err("Failed to create workqueue\n");
|
||||
return -ENOMEM;
|
||||
|
@ -67,6 +67,8 @@
|
||||
#define IXGBE_RXBUFFER_4K 4096
|
||||
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
|
||||
|
||||
#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
|
||||
|
||||
/* Attempt to maximize the headroom available for incoming frames. We
|
||||
* use a 2K buffer for receives and need 1536/1534 to store the data for
|
||||
* the frame. This leaves us with 512 bytes of room. From that we need
|
||||
|
@ -6721,6 +6721,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
|
||||
ixgbe_free_rx_resources(adapter->rx_ring[i]);
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
|
||||
* @adapter: device handle, pointer to adapter
|
||||
*/
|
||||
static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
|
||||
{
|
||||
if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
|
||||
return IXGBE_RXBUFFER_2K;
|
||||
else
|
||||
return IXGBE_RXBUFFER_3K;
|
||||
}
|
||||
|
||||
/**
|
||||
* ixgbe_change_mtu - Change the Maximum Transfer Unit
|
||||
* @netdev: network interface device structure
|
||||
@ -6732,18 +6744,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
{
|
||||
struct ixgbe_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
if (adapter->xdp_prog) {
|
||||
int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
|
||||
VLAN_HLEN;
|
||||
int i;
|
||||
if (ixgbe_enabled_xdp_adapter(adapter)) {
|
||||
int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
|
||||
|
||||
for (i = 0; i < adapter->num_rx_queues; i++) {
|
||||
struct ixgbe_ring *ring = adapter->rx_ring[i];
|
||||
|
||||
if (new_frame_size > ixgbe_rx_bufsz(ring)) {
|
||||
e_warn(probe, "Requested MTU size is not supported with XDP\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
|
||||
e_warn(probe, "Requested MTU size is not supported with XDP\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
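The i40e/ixgbe MTU checks reserve worst-case L2 overhead: Ethernet header + FCS + two stacked (QinQ) VLAN tags, i.e. 14 + 4 + 2*4 = 26 bytes. A frame must fit one Rx buffer, so with a 3K buffer the largest safe MTU is 3072 - 26 = 3046. A runnable sketch of the arithmetic (constants mirror the kernel's values; names are ours):

    #include <stdio.h>

    #define ETH_HLEN    14  /* Ethernet header */
    #define ETH_FCS_LEN  4  /* frame check sequence */
    #define VLAN_HLEN    4  /* one 802.1Q tag */

    /* Worst case: header + FCS + two VLAN tags = 26 bytes. */
    #define PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))

    int main(void)
    {
        int rx_buf = 3072;  /* 3K Rx buffer */

        printf("max XDP MTU for %d-byte buffer: %d\n",
               rx_buf, rx_buf - PKT_HDR_PAD);   /* prints 3046 */
        return 0;
    }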
@ -190,6 +190,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
|
||||
.oper = IONIC_Q_ENABLE,
|
||||
},
|
||||
};
|
||||
int ret;
|
||||
|
||||
idev = &lif->ionic->idev;
|
||||
dev = lif->ionic->dev;
|
||||
@ -197,16 +198,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
|
||||
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
|
||||
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
|
||||
|
||||
if (qcq->flags & IONIC_QCQ_F_INTR)
|
||||
ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
|
||||
|
||||
ret = ionic_adminq_post_wait(lif, &ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (qcq->napi.poll)
|
||||
napi_enable(&qcq->napi);
|
||||
|
||||
if (qcq->flags & IONIC_QCQ_F_INTR) {
|
||||
irq_set_affinity_hint(qcq->intr.vector,
|
||||
&qcq->intr.affinity_mask);
|
||||
napi_enable(&qcq->napi);
|
||||
ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
|
||||
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
|
||||
IONIC_INTR_MASK_CLEAR);
|
||||
}
|
||||
|
||||
return ionic_adminq_post_wait(lif, &ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ionic_qcq_disable(struct ionic_qcq *qcq)
|
||||
|
@ -505,6 +505,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
|
||||
plat_dat->has_gmac4 = 1;
|
||||
plat_dat->pmt = 1;
|
||||
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
|
||||
if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
|
||||
plat_dat->rx_clk_runs_in_lpi = 1;
|
||||
|
||||
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
|
||||
if (ret)
|
||||
|
@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
|
||||
return 0;
|
||||
}
|
||||
|
||||
val |= PPSCMDx(index, 0x2);
|
||||
val |= TRGTMODSELx(index, 0x2);
|
||||
val |= PPSEN0;
|
||||
writel(val, ioaddr + MAC_PPS_CONTROL);
|
||||
|
||||
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
|
||||
|
||||
@ -547,6 +547,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
|
||||
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
|
||||
|
||||
/* Finally, activate it */
|
||||
val |= PPSCMDx(index, 0x2);
|
||||
writel(val, ioaddr + MAC_PPS_CONTROL);
|
||||
return 0;
|
||||
}
|
||||
|
@ -932,7 +932,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
|
||||
|
||||
stmmac_mac_set(priv, priv->ioaddr, true);
|
||||
if (phy && priv->dma_cap.eee) {
|
||||
priv->eee_active = phy_init_eee(phy, 1) >= 0;
|
||||
priv->eee_active =
|
||||
phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
|
||||
priv->eee_enabled = stmmac_eee_init(priv);
|
||||
stmmac_set_eee_pls(priv, priv->hw, true);
|
||||
}
|
||||
|
@ -554,7 +554,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
|
||||
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
|
||||
|
||||
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
|
||||
if (plat->force_thresh_dma_mode) {
|
||||
if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
|
||||
plat->force_sf_dma_mode = 0;
|
||||
dev_warn(&pdev->dev,
|
||||
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
|
||||
|
@@ -235,6 +235,8 @@ static struct phy_driver meson_gxl_phy[] = {
 		.config_intr	= meson_gxl_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_mmd	= genphy_read_mmd_unsupported,
+		.write_mmd	= genphy_write_mmd_unsupported,
 	}, {
 		PHY_ID_MATCH_EXACT(0x01803301),
 		.name		= "Meson G12A Internal PHY",
@@ -245,6 +247,8 @@ static struct phy_driver meson_gxl_phy[] = {
 		.config_intr	= meson_gxl_config_intr,
 		.suspend	= genphy_suspend,
 		.resume		= genphy_resume,
+		.read_mmd	= genphy_read_mmd_unsupported,
+		.write_mmd	= genphy_write_mmd_unsupported,
 	},
 };
@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
 		init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
 	if (status != 0) {
 		netdev_err(dev->net,
-			"Error sending init packet. Status %i, length %i\n",
-			status, act_len);
+			"Error sending init packet. Status %i\n",
+			status);
 		return status;
 	}
 	else if (act_len != init_msg_len) {
@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
 
 	if (status != 0)
 		netdev_err(dev->net,
-			"Error receiving init result. Status %i, length %i\n",
-			status, act_len);
+			"Error receiving init result. Status %i\n",
+			status);
 	else if (act_len != expected_len)
 		netdev_err(dev->net, "Unexpected init result length: %i\n",
 			act_len);
@@ -57,9 +57,7 @@
 static inline int
 pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
 {
-	return usbnet_read_cmd(dev, req,
-				USB_DIR_IN | USB_TYPE_VENDOR |
-				USB_RECIP_DEVICE,
+	return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 				val, index, NULL, 0);
 }
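The plusb hunk above is the whole fix: a control message that carries no data stage was being declared as an IN (device-to-host) transfer. A hedged sketch of the rule, using the real usbnet helpers; the request names PL_GET_STATUS and PL_SET_REG are invented here purely for contrast:

/* Illustrative only -- not part of the patch. The direction flag in the
 * setup packet must match the data stage: an IN request needs a buffer
 * for the device's answer, while a write with no payload is an OUT
 * request and must not carry USB_DIR_IN, or usb_submit_urb() warns
 * about a bogus control direction.
 */
u8 status;

/* device-to-host: USB_DIR_IN plus a destination buffer */
usbnet_read_cmd(dev, PL_GET_STATUS,	/* hypothetical request */
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		0, 0, &status, 1);

/* host-to-device, no data stage: write helper, no direction flag */
usbnet_write_cmd(dev, PL_SET_REG,	/* hypothetical request */
		 USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		 val, index, NULL, 0);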
@@ -1910,8 +1910,8 @@ static int virtnet_close(struct net_device *dev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	for (i = 0; i < vi->max_queue_pairs; i++) {
-		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		napi_disable(&vi->rq[i].napi);
+		xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
 		virtnet_napi_tx_disable(&vi->sq[i].napi);
 	}
@@ -87,6 +87,9 @@
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
 	(sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
+#define BRCMF_MAX_CHANSPEC_LIST \
+	(BRCMF_DCMD_MEDLEN / sizeof(__le32) - 1)
+
 static bool check_vif_up(struct brcmf_cfg80211_vif *vif)
 {
 	if (!test_bit(BRCMF_VIF_STATUS_READY, &vif->sme_state)) {
@@ -6067,6 +6070,13 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
 			band->channels[i].flags = IEEE80211_CHAN_DISABLED;
 
 	total = le32_to_cpu(list->count);
+	if (total > BRCMF_MAX_CHANSPEC_LIST) {
+		bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+			 total);
+		err = -EINVAL;
+		goto fail_pbuf;
+	}
+
 	for (i = 0; i < total; i++) {
 		ch.chspec = (u16)le32_to_cpu(list->element[i]);
 		cfg->d11inf.decchspec(&ch);
@@ -6212,6 +6222,13 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 		band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
 		list = (struct brcmf_chanspec_list *)pbuf;
 		num_chan = le32_to_cpu(list->count);
+		if (num_chan > BRCMF_MAX_CHANSPEC_LIST) {
+			bphy_err(drvr, "Invalid count of channel Spec. (%u)\n",
+				 num_chan);
+			kfree(pbuf);
+			return -EINVAL;
+		}
+
 		for (i = 0; i < num_chan; i++) {
 			ch.chspec = (u16)le32_to_cpu(list->element[i]);
 			cfg->d11inf.decchspec(&ch);
@@ -3199,7 +3199,6 @@ static const struct pci_device_id nvme_id_table[] = {
 		NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1c5c, 0x1504),	/* SK Hynix PC400 */
 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
-	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD  */
 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
@@ -3209,6 +3208,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
 				NVME_QUIRK_128_BYTES_SQES |
 				NVME_QUIRK_SHARED_TAGS },
+
+	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, nvme_id_table);
@@ -1362,8 +1362,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
 	else {
 		queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
 				be16_to_cpu(rqst->assoc_cmd.sqsize));
-		if (!queue)
+		if (!queue) {
 			ret = VERR_QUEUE_ALLOC_FAIL;
+			nvmet_fc_tgt_a_put(iod->assoc);
+		}
 	}
 }
@@ -439,7 +439,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 	if (config->cells) {
 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
 		if (rval)
-			goto err_teardown_compat;
+			goto err_remove_cells;
 	}
 
 	rval = nvmem_add_cells_from_table(nvmem);
@@ -456,7 +456,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
 
 err_remove_cells:
 	nvmem_device_remove_all_cells(nvmem);
-err_teardown_compat:
 	if (config->compat)
 		nvmem_sysfs_remove_compat(nvmem, config);
 err_device_del:
@@ -115,7 +115,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
 	int ret = 0;
 
 	if (!exprs)
-		return true;
+		return -EINVAL;
 
 	while (*exprs && !ret) {
 		ret = aspeed_sig_expr_disable(ctx, *exprs);
@@ -1510,6 +1510,12 @@ int intel_pinctrl_probe_by_uid(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(intel_pinctrl_probe_by_uid);
 
 #ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+	return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+	       (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
 static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
 {
 	const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1543,8 +1549,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
 	 * See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
 	 */
 	value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
-	if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
-	    (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+	if (__intel_gpio_is_direct_irq(value))
 		return true;
 
 	return false;
@@ -1656,7 +1661,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
 		void __iomem *padcfg;
 		u32 val;
 
-		if (!intel_pinctrl_should_save(pctrl, desc->number))
+		if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+		      /*
+		       * If the firmware mangled the register contents too much,
+		       * check the saved value for the Direct IRQ mode.
+		       */
+		      __intel_gpio_is_direct_irq(pads[i].padcfg0)))
 			continue;
 
 		padcfg = intel_get_padcfg(pctrl, desc->number, PADCFG0);
@@ -345,6 +345,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
 	if (!pcs->fmask)
 		return 0;
 	function = pinmux_generic_get_function(pctldev, fselector);
+	if (!function)
+		return -EINVAL;
 	func = function->data;
 	if (!func)
 		return -EINVAL;
@@ -770,7 +770,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 				       enum iscsi_host_param param, char *buf)
 {
 	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
-	struct iscsi_session *session = tcp_sw_host->session;
+	struct iscsi_session *session;
 	struct iscsi_conn *conn;
 	struct iscsi_tcp_conn *tcp_conn;
 	struct iscsi_sw_tcp_conn *tcp_sw_conn;
@@ -779,6 +779,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
 
 	switch (param) {
 	case ISCSI_HOST_PARAM_IPADDRESS:
+		session = tcp_sw_host->session;
 		if (!session)
 			return -ENOTCONN;
 
@@ -867,12 +868,14 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	if (!cls_session)
 		goto remove_host;
 	session = cls_session->dd_data;
-	tcp_sw_host = iscsi_host_priv(shost);
-	tcp_sw_host->session = session;
 
 	shost->can_queue = session->scsi_cmds_max;
 	if (iscsi_tcp_r2tpool_alloc(session))
 		goto remove_session;
+
+	/* We are now fully setup so expose the session to sysfs. */
+	tcp_sw_host = iscsi_host_priv(shost);
+	tcp_sw_host->session = session;
 	return cls_session;
 
 remove_session:
@@ -1130,8 +1130,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	 * that no LUN is present, so don't add sdev in these cases.
 	 * Two specific examples are:
 	 * 1) NetApp targets: return PQ=1, PDT=0x1f
-	 * 2) IBM/2145 targets: return PQ=1, PDT=0
-	 * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
 	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
 	 *
 	 * References:
@@ -1145,8 +1144,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
 	 *   PDT=00h Direct-access device (floppy)
 	 *   PDT=1Fh none (no FDD connected to the requested logical unit)
 	 */
-	if (((result[0] >> 5) == 1 ||
-	     (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
+	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+	    (result[0] & 0x1f) == 0x1f &&
 	    !scsi_is_wlun(lun)) {
 		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
 					"scsi scan: peripheral device type"
@@ -340,7 +340,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
 		len += sg->length;
 	}
 
-	iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
+	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
 	if (is_write)
 		ret = vfs_iter_write(fd, &iter, &pos, 0);
 	else
@@ -477,7 +477,7 @@ fd_execute_write_same(struct se_cmd *cmd)
 		len += se_dev->dev_attrib.block_size;
 	}
 
-	iov_iter_bvec(&iter, READ, bvec, nolb, len);
+	iov_iter_bvec(&iter, WRITE, bvec, nolb, len);
 	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
 
 	kfree(bvec);
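Both hunks above apply the rule from the iov_iter "direction" fixes named in this release's changelog: the direction argument describes the I/O operation the iterator will feed, not which side allocated the buffer. A hedged sketch (the ternary is illustrative; the patch passes is_write directly, which works on this kernel because WRITE is 1 and READ is 0):

/* Illustrative sketch, not the patch itself: pair the iterator
 * direction with the vfs helper that consumes it. */
iov_iter_bvec(&iter, is_write ? WRITE : READ, bvec, sgl_nents, len);
if (is_write)
	ret = vfs_iter_write(fd, &iter, &pos, 0);	/* data read out of bvec */
else
	ret = vfs_iter_read(fd, &iter, &pos, 0);	/* data written into bvec */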
@@ -82,8 +82,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd,
 {
 	struct se_session *sess = se_cmd->se_sess;
 
-	assert_spin_locked(&sess->sess_cmd_lock);
-	WARN_ON_ONCE(!irqs_disabled());
+	lockdep_assert_held(&sess->sess_cmd_lock);
 
 	/*
 	 * If command already reached CMD_T_COMPLETE state within
 	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
@@ -46,19 +46,39 @@ static void __dma_rx_complete(void *param)
 	struct uart_8250_dma	*dma = p->dma;
 	struct tty_port		*tty_port = &p->port.state->port;
 	struct dma_tx_state	state;
+	enum dma_status		dma_status;
 	int			count;
 
-	dma->rx_running = 0;
-	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+	/*
+	 * New DMA Rx can be started during the completion handler before it
+	 * could acquire port's lock and it might still be ongoing. Don't to
+	 * anything in such case.
+	 */
+	dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+	if (dma_status == DMA_IN_PROGRESS)
+		return;
 
 	count = dma->rx_size - state.residue;
 
 	tty_insert_flip_string(tty_port, dma->rx_buf, count);
 	p->port.icount.rx += count;
+	dma->rx_running = 0;
 
 	tty_flip_buffer_push(tty_port);
 }
 
+static void dma_rx_complete(void *param)
+{
+	struct uart_8250_port *p = param;
+	struct uart_8250_dma *dma = p->dma;
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->port.lock, flags);
+	if (dma->rx_running)
+		__dma_rx_complete(p);
+	spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
 int serial8250_tx_dma(struct uart_8250_port *p)
 {
 	struct uart_8250_dma		*dma = p->dma;
@@ -121,7 +141,7 @@ int serial8250_rx_dma(struct uart_8250_port *p)
 		return -EBUSY;
 
 	dma->rx_running = 1;
-	desc->callback = __dma_rx_complete;
+	desc->callback = dma_rx_complete;
 	desc->callback_param = p;
 
 	dma->rx_cookie = dmaengine_submit(desc);
@@ -265,10 +265,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
 	uni_mode = use_unicode(inode);
 	attr = use_attributes(inode);
-	ret = -ENXIO;
-	vc = vcs_vc(inode, &viewed);
-	if (!vc)
-		goto unlock_out;
 
 	ret = -EINVAL;
 	if (pos < 0)
@@ -288,6 +284,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		ssize_t orig_count;
 		long p = pos;
 
+		ret = -ENXIO;
+		vc = vcs_vc(inode, &viewed);
+		if (!vc)
+			goto unlock_out;
+
 		/* Check whether we are above size each round,
 		 * as copy_to_user at the end of this loop
 		 * could sleep.
@@ -527,6 +527,9 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* DJI CineSSD */
 	{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
+	/* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+	{ USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
 	/* DELL USB GEN2 */
 	{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
 
@@ -102,7 +102,7 @@ static inline void dwc3_qcom_clrbits(void __iomem *base, u32 offset, u32 val)
 	readl(base + offset);
 }
 
-static void dwc3_qcom_vbus_overrride_enable(struct dwc3_qcom *qcom, bool enable)
+static void dwc3_qcom_vbus_override_enable(struct dwc3_qcom *qcom, bool enable)
 {
 	if (enable) {
 		dwc3_qcom_setbits(qcom->qscratch_base, QSCRATCH_SS_PHY_CTRL,
@@ -123,7 +123,7 @@ static int dwc3_qcom_vbus_notifier(struct notifier_block *nb,
 	struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, vbus_nb);
 
 	/* enable vbus override for device mode */
-	dwc3_qcom_vbus_overrride_enable(qcom, event);
+	dwc3_qcom_vbus_override_enable(qcom, event);
 	qcom->mode = event ? USB_DR_MODE_PERIPHERAL : USB_DR_MODE_HOST;
 
 	return NOTIFY_DONE;
@@ -135,7 +135,7 @@ static int dwc3_qcom_host_notifier(struct notifier_block *nb,
 	struct dwc3_qcom *qcom = container_of(nb, struct dwc3_qcom, host_nb);
 
 	/* disable vbus override in host mode */
-	dwc3_qcom_vbus_overrride_enable(qcom, !event);
+	dwc3_qcom_vbus_override_enable(qcom, !event);
 	qcom->mode = event ? USB_DR_MODE_HOST : USB_DR_MODE_PERIPHERAL;
 
 	return NOTIFY_DONE;
@@ -669,8 +669,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
 	qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
 
 	/* enable vbus override for device mode */
-	if (qcom->mode == USB_DR_MODE_PERIPHERAL)
-		dwc3_qcom_vbus_overrride_enable(qcom, true);
+	if (qcom->mode != USB_DR_MODE_HOST)
+		dwc3_qcom_vbus_override_enable(qcom, true);
 
 	/* register extcon to override sw_vbus on Vbus change later */
 	ret = dwc3_qcom_register_extcon(qcom);
@@ -278,8 +278,10 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
 	struct usb_request *req = ffs->ep0req;
 	int ret;
 
-	if (!req)
+	if (!req) {
+		spin_unlock_irq(&ffs->ev.waitq.lock);
 		return -EINVAL;
+	}
 
 	req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
@@ -522,10 +522,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
 	/* FIXME: Port can only be DFP_U. */
 
 	/* Make sure we have compatiple pin configurations */
-	if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
-	      DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
-	    !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
-	      DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+	if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+	      DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+	    !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+	      DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
 		return -ENODEV;
 
 	ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
@@ -2497,9 +2497,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
 	    h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
 		return -EINVAL;
 
+	if (font->width > 32 || font->height > 32)
+		return -EINVAL;
+
 	/* Make sure drawing engine can handle the font */
-	if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
-	    !(info->pixmap.blit_y & (1 << (font->height - 1))))
+	if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
+	    !(info->pixmap.blit_y & BIT(font->height - 1)))
 		return -EINVAL;
 
 	/* Make sure driver can handle the font length */
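The new bounds check matters because blit_x and blit_y are 32-bit masks: for a font width of 33 or more, the shift hidden inside BIT(font->width - 1) is undefined behaviour before the mask is ever consulted. A self-contained sketch of the failure mode (plain userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's BIT() for a 32-bit mask. */
#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t blit_x = 0xffffffffu;	/* engine claims widths 1..32 */
	unsigned int width = 33;	/* hostile font header */

	/* Without the "width > 32" guard the shift count would be 32,
	 * which is undefined behaviour in C; on x86 it typically wraps
	 * to BIT(0) and the bogus font is accepted. */
	if (width > 32 || !(blit_x & BIT(width - 1)))
		puts("font rejected");
	else
		puts("font accepted");
	return 0;
}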
@@ -1622,7 +1622,7 @@ static int ufx_usb_probe(struct usb_interface *interface,
 	struct usb_device *usbdev;
 	struct ufx_data *dev;
 	struct fb_info *info;
-	int retval;
+	int retval = -ENOMEM;
 	u32 id_rev, fpga_rev;
 
 	/* usb initialization */
@@ -1654,15 +1654,17 @@ static int ufx_usb_probe(struct usb_interface *interface,
 
 	if (!ufx_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
 		dev_err(dev->gdev, "ufx_alloc_urb_list failed\n");
-		goto e_nomem;
+		goto put_ref;
 	}
 
 	/* We don't register a new USB class. Our client interface is fbdev */
 
 	/* allocates framebuffer driver structure, not framebuffer memory */
 	info = framebuffer_alloc(0, &usbdev->dev);
-	if (!info)
-		goto e_nomem;
+	if (!info) {
+		dev_err(dev->gdev, "framebuffer_alloc failed\n");
+		goto free_urb_list;
+	}
 
 	dev->info = info;
 	info->par = dev;
@@ -1705,22 +1707,34 @@ static int ufx_usb_probe(struct usb_interface *interface,
 	check_warn_goto_error(retval, "unable to find common mode for display and adapter");
 
 	retval = ufx_reg_set_bits(dev, 0x4000, 0x00000001);
-	check_warn_goto_error(retval, "error %d enabling graphics engine", retval);
+	if (retval < 0) {
+		dev_err(dev->gdev, "error %d enabling graphics engine", retval);
+		goto setup_modes;
+	}
 
 	/* ready to begin using device */
 	atomic_set(&dev->usb_active, 1);
 
 	dev_dbg(dev->gdev, "checking var");
 	retval = ufx_ops_check_var(&info->var, info);
-	check_warn_goto_error(retval, "error %d ufx_ops_check_var", retval);
+	if (retval < 0) {
+		dev_err(dev->gdev, "error %d ufx_ops_check_var", retval);
+		goto reset_active;
+	}
 
 	dev_dbg(dev->gdev, "setting par");
 	retval = ufx_ops_set_par(info);
-	check_warn_goto_error(retval, "error %d ufx_ops_set_par", retval);
+	if (retval < 0) {
+		dev_err(dev->gdev, "error %d ufx_ops_set_par", retval);
		goto reset_active;
+	}
 
 	dev_dbg(dev->gdev, "registering framebuffer");
 	retval = register_framebuffer(info);
-	check_warn_goto_error(retval, "error %d register_framebuffer", retval);
+	if (retval < 0) {
+		dev_err(dev->gdev, "error %d register_framebuffer", retval);
+		goto reset_active;
+	}
 
 	dev_info(dev->gdev, "SMSC UDX USB device /dev/fb%d attached. %dx%d resolution."
		" Using %dK framebuffer memory\n", info->node,
@@ -1728,21 +1742,23 @@ static int ufx_usb_probe(struct usb_interface *interface,
 
 	return 0;
 
-error:
-	fb_dealloc_cmap(&info->cmap);
-destroy_modedb:
+reset_active:
+	atomic_set(&dev->usb_active, 0);
+setup_modes:
 	fb_destroy_modedb(info->monspecs.modedb);
 	vfree(info->screen_base);
 	fb_destroy_modelist(&info->modelist);
+error:
+	fb_dealloc_cmap(&info->cmap);
+destroy_modedb:
 	framebuffer_release(info);
+free_urb_list:
+	if (dev->urbs.count > 0)
+		ufx_free_urb_list(dev);
+put_ref:
 	kref_put(&dev->kref, ufx_free); /* ref for framebuffer */
 	kref_put(&dev->kref, ufx_free); /* last ref from kref_init */
 	return retval;
-
-e_nomem:
-	retval = -ENOMEM;
-	goto put_ref;
 }
 
 static void ufx_usb_disconnect(struct usb_interface *interface)
@@ -86,7 +86,7 @@ static int __diag288(unsigned int func, unsigned int timeout,
 		"1:\n"
 		EX_TABLE(0b, 1b)
 		: "+d" (err) : "d"(__func), "d"(__timeout),
-		  "d"(__action), "d"(__len) : "1", "cc");
+		  "d"(__action), "d"(__len) : "1", "cc", "memory");
 	return err;
 }
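Adding "memory" to the clobber list tells the compiler that the asm may read or write memory it cannot see (here, the command buffer handed to the diagnose instruction), so pending stores must be flushed before the asm and nothing may be cached across it. A minimal self-contained illustration of that same compiler contract, using generic extended asm rather than the s390 diagnose itself:

#include <stdio.h>
#include <string.h>

static char cmd[16];

int main(void)
{
	strcpy(cmd, "BEGIN");

	/*
	 * Pretend this asm hands &cmd to hardware. Without the "memory"
	 * clobber the compiler may assume cmd is untouched, keep an older
	 * value in a register, or reorder the strcpy() past the asm. The
	 * clobber forces the store to be visible before the instruction
	 * and forbids caching memory values across it.
	 */
	asm volatile("" : : "r"(cmd) : "memory");

	printf("%s\n", cmd);
	return 0;
}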
@@ -272,12 +272,21 @@ static int __init diag288_init(void)
 	char ebc_begin[] = {
 		194, 197, 199, 201, 213
 	};
+	char *ebc_cmd;
 
 	watchdog_set_nowayout(&wdt_dev, nowayout_info);
 
 	if (MACHINE_IS_VM) {
-		if (__diag288_vm(WDT_FUNC_INIT, 15,
-				 ebc_begin, sizeof(ebc_begin)) != 0) {
+		ebc_cmd = kmalloc(sizeof(ebc_begin), GFP_KERNEL);
+		if (!ebc_cmd) {
+			pr_err("The watchdog cannot be initialized\n");
+			return -ENOMEM;
+		}
+		memcpy(ebc_cmd, ebc_begin, sizeof(ebc_begin));
+		ret = __diag288_vm(WDT_FUNC_INIT, 15,
+				   ebc_cmd, sizeof(ebc_begin));
+		kfree(ebc_cmd);
+		if (ret != 0) {
 			pr_err("The watchdog cannot be initialized\n");
 			return -EINVAL;
 		}
@@ -129,13 +129,13 @@ static bool pvcalls_conn_back_read(void *opaque)
 	if (masked_prod < masked_cons) {
 		vec[0].iov_base = data->in + masked_prod;
 		vec[0].iov_len = wanted;
-		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
+		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, wanted);
 	} else {
 		vec[0].iov_base = data->in + masked_prod;
 		vec[0].iov_len = array_size - masked_prod;
 		vec[1].iov_base = data->in;
 		vec[1].iov_len = wanted - vec[0].iov_len;
-		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
+		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, wanted);
 	}
 
 	atomic_set(&map->read, 0);
@@ -188,13 +188,13 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map)
 	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
 		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
 		vec[0].iov_len = size;
-		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
+		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, size);
 	} else {
 		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
 		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
 		vec[1].iov_base = data->out;
 		vec[1].iov_len = size - vec[0].iov_len;
-		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
+		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, size);
 	}
 
 	atomic_set(&map->write, 0);
fs/aio.c
@@ -336,6 +336,9 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
 	table = rcu_dereference(mm->ioctx_table);
+	if (!table)
+		goto out_unlock;
+
 	for (i = 0; i < table->nr; i++) {
 		struct kioctx *ctx;
 
@@ -349,6 +352,7 @@ static int aio_ring_mremap(struct vm_area_struct *vma)
 		}
 	}
 
+out_unlock:
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
 	return res;
@@ -354,6 +354,7 @@ void btrfs_free_device(struct btrfs_device *device)
 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
 {
 	struct btrfs_device *device;
 
+	WARN_ON(fs_devices->opened);
 	while (!list_empty(&fs_devices->devices)) {
 		device = list_entry(fs_devices->devices.next,
@@ -1401,6 +1402,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 	if (!fs_devices->opened) {
 		seed_devices = fs_devices->seed;
 		fs_devices->seed = NULL;
+
+		/*
+		 * If the struct btrfs_fs_devices is not assembled with any
+		 * other device, it can be re-initialized during the next mount
+		 * without the needing device-scan step. Therefore, it can be
+		 * fully freed.
+		 */
+		if (fs_devices->num_devices == 1) {
+			list_del(&fs_devices->fs_list);
+			free_fs_devices(fs_devices);
+		}
 	}
 	mutex_unlock(&uuid_mutex);
 
@@ -1701,7 +1713,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 		goto out;
 	}
 
-	while (1) {
+	while (search_start < search_end) {
 		l = path->nodes[0];
 		slot = path->slots[0];
 		if (slot >= btrfs_header_nritems(l)) {
@@ -1724,6 +1736,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 		if (key.type != BTRFS_DEV_EXTENT_KEY)
 			goto next;
 
+		if (key.offset > search_end)
+			break;
+
 		if (key.offset > search_start) {
 			hole_size = key.offset - search_start;
 
@@ -1794,6 +1809,7 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 	else
 		ret = 0;
 
+	ASSERT(max_hole_start + max_hole_size <= search_end);
 out:
 	btrfs_free_path(path);
 	*start = max_hole_start;
@@ -74,7 +74,7 @@ static struct list_head *zlib_alloc_workspace(unsigned int level)
 
 	workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL),
 			zlib_inflate_workspacesize());
-	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
+	workspace->strm.workspace = kvzalloc(workspacesize, GFP_KERNEL);
 	workspace->level = level;
 	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!workspace->strm.workspace || !workspace->buf)
@@ -3151,6 +3151,12 @@ static void handle_session(struct ceph_mds_session *session,
 		break;
 
 	case CEPH_SESSION_FLUSHMSG:
+		/* flush cap releases */
+		spin_lock(&session->s_cap_lock);
+		if (session->s_num_cap_releases)
+			ceph_flush_cap_releases(mdsc, session);
+		spin_unlock(&session->s_cap_lock);
+
 		send_flushmsg_ack(mdsc, session, seq);
 		break;
 
fs/f2fs/gc.c
@@ -620,7 +620,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 {
 	struct page *node_page;
 	nid_t nid;
-	unsigned int ofs_in_node, max_addrs;
+	unsigned int ofs_in_node, max_addrs, base;
 	block_t source_blkaddr;
 
 	nid = le32_to_cpu(sum->nid);
@@ -646,11 +646,17 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
 		return false;
 	}
 
-	max_addrs = IS_INODE(node_page) ? DEF_ADDRS_PER_INODE :
-						DEF_ADDRS_PER_BLOCK;
-	if (ofs_in_node >= max_addrs) {
-		f2fs_err(sbi, "Inconsistent ofs_in_node:%u in summary, ino:%u, nid:%u, max:%u",
-			ofs_in_node, dni->ino, dni->nid, max_addrs);
+	if (IS_INODE(node_page)) {
+		base = offset_in_addr(F2FS_INODE(node_page));
+		max_addrs = DEF_ADDRS_PER_INODE;
+	} else {
+		base = 0;
+		max_addrs = DEF_ADDRS_PER_BLOCK;
+	}
+
+	if (base + ofs_in_node >= max_addrs) {
+		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
+			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
 		f2fs_put_page(node_page, 1);
 		return false;
 	}
@@ -1130,7 +1130,14 @@ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
 
 	minseg = range[0] + segbytes - 1;
 	do_div(minseg, segbytes);
+
+	if (range[1] < 4096)
+		goto out;
+
 	maxseg = NILFS_SB2_OFFSET_BYTES(range[1]);
+	if (maxseg < segbytes)
+		goto out;
+
 	do_div(maxseg, segbytes);
 	maxseg--;
 
@@ -403,6 +403,15 @@ int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
 	if (newsize > devsize)
 		goto out;
 
+	/*
+	 * Prevent underflow in second superblock position calculation.
+	 * The exact minimum size check is done in nilfs_sufile_resize().
+	 */
+	if (newsize < 4096) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
 	/*
 	 * Write lock is required to protect some functions depending
 	 * on the number of segments, the number of reserved segments,
@@ -517,9 +517,15 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
 {
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
 	struct buffer_head **sbh = nilfs->ns_sbh;
-	u64 sb2off = NILFS_SB2_OFFSET_BYTES(nilfs->ns_bdev->bd_inode->i_size);
+	u64 sb2off, devsize = nilfs->ns_bdev->bd_inode->i_size;
 	int valid[2], swp = 0;
 
+	if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
+		nilfs_msg(sb, KERN_ERR, "device size too small");
+		return -EINVAL;
+	}
+	sb2off = NILFS_SB2_OFFSET_BYTES(devsize);
+
 	sbp[0] = nilfs_read_super_block(sb, NILFS_SB_OFFSET_BYTES, blocksize,
 					&sbh[0]);
 	sbp[1] = nilfs_read_super_block(sb, sb2off, blocksize, &sbh[1]);
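All three nilfs2 hunks guard the same arithmetic: NILFS_SB2_OFFSET_BYTES() computes the last 4 KiB boundary below the device end, and for sizes under 4096 the subtraction wraps. A self-contained sketch of the wrap; the macro body is quoted from nilfs2's on-disk format header and should be treated as an assumption of this example:

#include <stdint.h>
#include <stdio.h>

/* As defined in nilfs2's on-disk format header (assumed here). */
#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12)

int main(void)
{
	uint64_t tiny = 512;	/* device smaller than one 4 KiB block */

	/* (512 >> 12) == 0, so 0 - 1 wraps to 0xffffffffffffffff and the
	 * "second superblock" lands at an absurd offset past the device. */
	printf("sb2 offset = %#llx\n",
	       (unsigned long long)NILFS_SB2_OFFSET_BYTES(tiny));
	return 0;
}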
@@ -780,9 +780,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
 			page = device_private_entry_to_page(swpent);
 	}
 	if (page) {
-		int mapcount = page_mapcount(page);
-
-		if (mapcount >= 2)
+		if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
 			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
 		else
 			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
@@ -183,7 +183,7 @@ static inline int squashfs_block_size(__le32 raw)
 #define SQUASHFS_ID_BLOCK_BYTES(A)	(SQUASHFS_ID_BLOCKS(A) *\
 					sizeof(u64))
 /* xattr id lookup table defines */
-#define SQUASHFS_XATTR_BYTES(A)		((A) * sizeof(struct squashfs_xattr_id))
+#define SQUASHFS_XATTR_BYTES(A)		(((u64) (A)) * sizeof(struct squashfs_xattr_id))
 
 #define SQUASHFS_XATTR_BLOCK(A)		(SQUASHFS_XATTR_BYTES(A) / \
 					SQUASHFS_METADATA_SIZE)
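The u64 cast matters because the id count arrives from an untrusted disk image: on 32-bit systems, where size_t is 32 bits wide, a crafted xattr_ids wraps the byte count that the later sanity checks compare against. A self-contained sketch of the wrap, taking the 16-byte entry size as an assumption:

#include <stdint.h>
#include <stdio.h>

#define XATTR_ENTRY_SIZE 16u	/* sizeof(struct squashfs_xattr_id), assumed */

int main(void)
{
	unsigned int ids = 0x10000000;	/* attacker-controlled count from disk */

	uint32_t narrow = ids * XATTR_ENTRY_SIZE;		/* wraps to 0 */
	uint64_t wide = (uint64_t)ids * XATTR_ENTRY_SIZE;	/* 4 GiB, as intended */

	printf("32-bit: %u bytes, 64-bit: %llu bytes\n",
	       narrow, (unsigned long long)wide);
	return 0;
}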
@@ -63,7 +63,7 @@ struct squashfs_sb_info {
 	long long	bytes_used;
 	unsigned int	inodes;
 	unsigned int	fragments;
-	int		xattr_ids;
+	unsigned int	xattr_ids;
 	unsigned int	ids;
 };
 #endif
@@ -10,12 +10,12 @@
 
 #ifdef CONFIG_SQUASHFS_XATTR
 extern __le64 *squashfs_read_xattr_id_table(struct super_block *, u64,
-		u64 *, int *);
+		u64 *, unsigned int *);
 extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
 		unsigned int *, unsigned long long *);
 #else
 static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
-		u64 start, u64 *xattr_table_start, int *xattr_ids)
+		u64 start, u64 *xattr_table_start, unsigned int *xattr_ids)
 {
 	struct squashfs_xattr_id_table *id_table;
 
@@ -56,7 +56,7 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
 * Read uncompressed xattr id lookup table indexes from disk into memory
 */
__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
-		u64 *xattr_table_start, int *xattr_ids)
+		u64 *xattr_table_start, unsigned int *xattr_ids)
{
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	unsigned int len, indexes;
@@ -16,6 +16,8 @@
 #include "xfs_inode.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
+#include "xfs_icache.h"
+#include "xfs_log.h"
 
 /*
  * Deferred Operations in XFS
@@ -178,6 +180,19 @@ static const struct xfs_defer_op_type *defer_op_types[] = {
 	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
 };
 
+static void
+xfs_defer_create_intent(
+	struct xfs_trans		*tp,
+	struct xfs_defer_pending	*dfp,
+	bool				sort)
+{
+	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
+
+	if (!dfp->dfp_intent)
+		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
+						     dfp->dfp_count, sort);
+}
+
 /*
  * For each pending item in the intake list, log its intent item and the
  * associated extents, then add the entire intake list to the end of
@@ -187,17 +202,11 @@ STATIC void
 xfs_defer_create_intents(
 	struct xfs_trans		*tp)
 {
-	struct list_head		*li;
 	struct xfs_defer_pending	*dfp;
-	const struct xfs_defer_op_type	*ops;
 
 	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
-		ops = defer_op_types[dfp->dfp_type];
-		dfp->dfp_intent = ops->create_intent(tp, dfp->dfp_count);
 		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
-		list_sort(tp->t_mountp, &dfp->dfp_work, ops->diff_items);
-		list_for_each(li, &dfp->dfp_work)
-			ops->log_item(tp, dfp->dfp_intent, li);
+		xfs_defer_create_intent(tp, dfp, true);
 	}
 }
 
@@ -353,6 +362,106 @@ xfs_defer_cancel_list(
 	}
 }
 
+/*
+ * Prevent a log intent item from pinning the tail of the log by logging a
+ * done item to release the intent item; and then log a new intent item.
+ * The caller should provide a fresh transaction and roll it after we're done.
+ */
+static int
+xfs_defer_relog(
+	struct xfs_trans		**tpp,
+	struct list_head		*dfops)
+{
+	struct xlog			*log = (*tpp)->t_mountp->m_log;
+	struct xfs_defer_pending	*dfp;
+	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;
+
+	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+	list_for_each_entry(dfp, dfops, dfp_list) {
+		/*
+		 * If the log intent item for this deferred op is not a part of
+		 * the current log checkpoint, relog the intent item to keep
+		 * the log tail moving forward.  We're ok with this being racy
+		 * because an incorrect decision means we'll be a little slower
+		 * at pushing the tail.
+		 */
+		if (dfp->dfp_intent == NULL ||
+		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
+			continue;
+
+		/*
+		 * Figure out where we need the tail to be in order to maintain
+		 * the minimum required free space in the log.  Only sample
+		 * the log threshold once per call.
+		 */
+		if (threshold_lsn == NULLCOMMITLSN) {
+			threshold_lsn = xlog_grant_push_threshold(log, 0);
+			if (threshold_lsn == NULLCOMMITLSN)
+				break;
+		}
+		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
+			continue;
+
+		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
+		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
+		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
+	}
+
+	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
+		return xfs_defer_trans_roll(tpp);
+	return 0;
+}
+
+/*
+ * Log an intent-done item for the first pending intent, and finish the work
+ * items.
+ */
+static int
+xfs_defer_finish_one(
+	struct xfs_trans		*tp,
+	struct xfs_defer_pending	*dfp)
+{
+	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
+	void				*state = NULL;
+	struct list_head		*li, *n;
+	int				error;
+
+	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
+
+	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+	list_for_each_safe(li, n, &dfp->dfp_work) {
+		list_del(li);
+		dfp->dfp_count--;
+		error = ops->finish_item(tp, li, dfp->dfp_done, &state);
+		if (error == -EAGAIN) {
+			/*
+			 * Caller wants a fresh transaction; put the work item
+			 * back on the list and log a new log intent item to
+			 * replace the old one.  See "Requesting a Fresh
+			 * Transaction while Finishing Deferred Work" above.
+			 */
+			list_add(li, &dfp->dfp_work);
+			dfp->dfp_count++;
+			dfp->dfp_done = NULL;
+			dfp->dfp_intent = NULL;
+			xfs_defer_create_intent(tp, dfp, false);
+		}
+
+		if (error)
+			goto out;
+	}
+
+	/* Done with the dfp, free it. */
+	list_del(&dfp->dfp_list);
+	kmem_free(dfp);
+out:
+	if (ops->finish_cleanup)
+		ops->finish_cleanup(tp, state, error);
+	return error;
+}
+
 /*
  * Finish all the pending work.  This involves logging intent items for
  * any work items that wandered in since the last transaction roll (if
@@ -366,11 +475,7 @@ xfs_defer_finish_noroll(
 	struct xfs_trans		**tp)
 {
 	struct xfs_defer_pending	*dfp;
-	struct list_head		*li;
-	struct list_head		*n;
-	void				*state;
 	int				error = 0;
-	const struct xfs_defer_op_type	*ops;
 	LIST_HEAD(dop_pending);
 
 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -379,87 +484,44 @@ xfs_defer_finish_noroll(
 
 	/* Until we run out of pending work to finish... */
 	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
-		/* log intents and pull in intake items */
-		xfs_defer_create_intents(*tp);
-		list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
-
 		/*
-		 * Roll the transaction.
+		 * Deferred items that are created in the process of finishing
+		 * other deferred work items should be queued at the head of
+		 * the pending list, which puts them ahead of the deferred work
+		 * that was created by the caller.  This keeps the number of
+		 * pending work items to a minimum, which decreases the amount
+		 * of time that any one intent item can stick around in memory,
+		 * pinning the log tail.
 		 */
+		xfs_defer_create_intents(*tp);
+		list_splice_init(&(*tp)->t_dfops, &dop_pending);
+
 		error = xfs_defer_trans_roll(tp);
 		if (error)
-			goto out;
+			goto out_shutdown;
 
-		/* Log an intent-done item for the first pending item. */
+		/* Possibly relog intent items to keep the log moving. */
+		error = xfs_defer_relog(tp, &dop_pending);
+		if (error)
+			goto out_shutdown;
+
 		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
 				       dfp_list);
-		ops = defer_op_types[dfp->dfp_type];
-		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
-		dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent,
-				dfp->dfp_count);
-
-		/* Finish the work items. */
-		state = NULL;
-		list_for_each_safe(li, n, &dfp->dfp_work) {
-			list_del(li);
-			dfp->dfp_count--;
-			error = ops->finish_item(*tp, li, dfp->dfp_done,
-					&state);
-			if (error == -EAGAIN) {
-				/*
-				 * Caller wants a fresh transaction;
-				 * put the work item back on the list
-				 * and jump out.
-				 */
-				list_add(li, &dfp->dfp_work);
-				dfp->dfp_count++;
-				break;
-			} else if (error) {
-				/*
-				 * Clean up after ourselves and jump out.
-				 * xfs_defer_cancel will take care of freeing
-				 * all these lists and stuff.
-				 */
-				if (ops->finish_cleanup)
-					ops->finish_cleanup(*tp, state, error);
-				goto out;
-			}
-		}
-		if (error == -EAGAIN) {
-			/*
-			 * Caller wants a fresh transaction, so log a
-			 * new log intent item to replace the old one
-			 * and roll the transaction.  See "Requesting
-			 * a Fresh Transaction while Finishing
-			 * Deferred Work" above.
-			 */
-			dfp->dfp_intent = ops->create_intent(*tp,
-					dfp->dfp_count);
-			dfp->dfp_done = NULL;
-			list_for_each(li, &dfp->dfp_work)
-				ops->log_item(*tp, dfp->dfp_intent, li);
-		} else {
-			/* Done with the dfp, free it. */
-			list_del(&dfp->dfp_list);
-			kmem_free(dfp);
-		}
-
-		if (ops->finish_cleanup)
-			ops->finish_cleanup(*tp, state, error);
-	}
-
-out:
-	if (error) {
-		xfs_defer_trans_abort(*tp, &dop_pending);
-		xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
-		trace_xfs_defer_finish_error(*tp, error);
-		xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
-		xfs_defer_cancel(*tp);
-		return error;
+		error = xfs_defer_finish_one(*tp, dfp);
+		if (error && error != -EAGAIN)
+			goto out_shutdown;
 	}
 
 	trace_xfs_defer_finish_done(*tp, _RET_IP_);
 	return 0;
+
+out_shutdown:
+	xfs_defer_trans_abort(*tp, &dop_pending);
+	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+	trace_xfs_defer_finish_error(*tp, error);
+	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
+	return error;
 }
 
 int
@@ -560,3 +622,137 @@ xfs_defer_move(
 
 	xfs_defer_reset(stp);
 }
+
+/*
+ * Prepare a chain of fresh deferred ops work items to be completed later.  Log
+ * recovery requires the ability to put off until later the actual finishing
+ * work so that it can process unfinished items recovered from the log in
+ * correct order.
+ *
+ * Create and log intent items for all the work that we're capturing so that we
+ * can be assured that the items will get replayed if the system goes down
+ * before log recovery gets a chance to finish the work it put off.  The entire
+ * deferred ops state is transferred to the capture structure and the
+ * transaction is then ready for the caller to commit it.  If there are no
+ * intent items to capture, this function returns NULL.
+ *
+ * If capture_ip is not NULL, the capture structure will obtain an extra
+ * reference to the inode.
+ */
+static struct xfs_defer_capture *
+xfs_defer_ops_capture(
+	struct xfs_trans		*tp,
+	struct xfs_inode		*capture_ip)
+{
+	struct xfs_defer_capture	*dfc;
+
+	if (list_empty(&tp->t_dfops))
+		return NULL;
+
+	/* Create an object to capture the defer ops. */
+	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
+	INIT_LIST_HEAD(&dfc->dfc_list);
+	INIT_LIST_HEAD(&dfc->dfc_dfops);
+
+	xfs_defer_create_intents(tp);
+
+	/* Move the dfops chain and transaction state to the capture struct. */
+	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
+	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
+	tp->t_flags &= ~XFS_TRANS_LOWMODE;
+
+	/* Capture the remaining block reservations along with the dfops. */
+	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
+	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;
+
+	/* Preserve the log reservation size. */
+	dfc->dfc_logres = tp->t_log_res;
+
+	/*
+	 * Grab an extra reference to this inode and attach it to the capture
+	 * structure.
+	 */
+	if (capture_ip) {
+		ihold(VFS_I(capture_ip));
+		dfc->dfc_capture_ip = capture_ip;
+	}
+
+	return dfc;
+}
+
+/* Release all resources that we used to capture deferred ops. */
+void
+xfs_defer_ops_release(
+	struct xfs_mount		*mp,
+	struct xfs_defer_capture	*dfc)
+{
+	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);
+	if (dfc->dfc_capture_ip)
+		xfs_irele(dfc->dfc_capture_ip);
+	kmem_free(dfc);
+}
+
+/*
+ * Capture any deferred ops and commit the transaction.  This is the last step
+ * needed to finish a log intent item that we recovered from the log.  If any
+ * of the deferred ops operate on an inode, the caller must pass in that inode
+ * so that the reference can be transferred to the capture structure.  The
+ * caller must hold ILOCK_EXCL on the inode, and must unlock it before calling
+ * xfs_defer_ops_continue.
+ */
+int
+xfs_defer_ops_capture_and_commit(
+	struct xfs_trans		*tp,
+	struct xfs_inode		*capture_ip,
+	struct list_head		*capture_list)
+{
+	struct xfs_mount		*mp = tp->t_mountp;
+	struct xfs_defer_capture	*dfc;
+	int				error;
+
+	ASSERT(!capture_ip || xfs_isilocked(capture_ip, XFS_ILOCK_EXCL));
+
+	/* If we don't capture anything, commit transaction and exit. */
+	dfc = xfs_defer_ops_capture(tp, capture_ip);
+	if (!dfc)
+		return xfs_trans_commit(tp);
+
+	/* Commit the transaction and add the capture structure to the list. */
+	error = xfs_trans_commit(tp);
+	if (error) {
+		xfs_defer_ops_release(mp, dfc);
+		return error;
+	}
+
+	list_add_tail(&dfc->dfc_list, capture_list);
+	return 0;
+}
+
+/*
+ * Attach a chain of captured deferred ops to a new transaction and free the
+ * capture structure.  If an inode was captured, it will be passed back to the
+ * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
+ * The caller now owns the inode reference.
+ */
+void
+xfs_defer_ops_continue(
+	struct xfs_defer_capture	*dfc,
+	struct xfs_trans		*tp,
+	struct xfs_inode		**captured_ipp)
+{
+	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));
+
+	/* Lock and join the captured inode to the new transaction. */
+	if (dfc->dfc_capture_ip) {
+		xfs_ilock(dfc->dfc_capture_ip, XFS_ILOCK_EXCL);
+		xfs_trans_ijoin(tp, dfc->dfc_capture_ip, 0);
+	}
+	*captured_ipp = dfc->dfc_capture_ip;
+
+	/* Move captured dfops chain and state to the transaction. */
+	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
+	tp->t_flags |= dfc->dfc_tpflags;
+
+	kmem_free(dfc);
+}
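Taken together, the xfs_defer changes split intent creation, relogging, and finishing into reusable pieces so that log recovery can capture unfinished deferred work and resume it later. A hedged sketch of the intended calling sequence, pieced together only from the functions added above; the recovery-side locals (capture_list, dfc, next, ip) are illustrative:

/* Illustrative flow only, not code from the patch series. */

/* Recovery side: finish an intent item's transaction, but park its
 * remaining deferred work on capture_list instead of running it now. */
error = xfs_defer_ops_capture_and_commit(tp, ip, &capture_list);

/* Later, with a fresh permanent transaction, resume each capture: */
list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
	list_del_init(&dfc->dfc_list);
	xfs_defer_ops_continue(dfc, tp, &ip);	/* re-attaches dfops + inode */
	/* ... committing tp then finishes the work via the dfops chain */
}

/* On failure paths, drop everything that was captured: */
xfs_defer_ops_release(mp, dfc);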