This is the 5.10.122 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmKouFYACgkQONu9yGCS
aT4ZOQ/+LjJruqYS4VVYb/MkIySB4MUdox8aXzu1zX8mlCc7h4DJnWaGjt6nrr62
ZaUTi3gTslajn2PCGzejDVppAdC7K/JRcvHUWWu8otHEZy1itauiwCEKWuUSxOl/
yYdN6AXwBLF1xUZWstDxJOAelAFsQs9IdtsBLc8eTq4VXjnAJYSLWbUjZYwbA+q6
5qAWbdNnnpKML69T8EXdts4rZdtinhVHxZGxu+V+SFJoyi1UxOHgCTwGsJB5Pa0P
EpJ69VCQQfpoju6dWtinFZh0EFW1ycCGZJT0jQ4MuvZO4mDKjaFM0kY70xsDLA6I
ZVSxAMTD80aoCljHY0aJZZGCcOO7o8C3k7uUgeYcW1YqRfG2xz3hNs8TtEVUl+q+
Pnxbn9rPW0gERVMs7jRvkGgXS7Xgs81rCD2NrHVJQz32qDYkTKOeBRo/veWtVPBP
eqt6v0314SiKZuMOwNg4NIPvGykJ+/HrER8fEBVzfHAM16JHkqPBBopG4KESPR2T
b2+xfGQRGu/ZJPcrU0M9efP034OmXEJ/wDY8ExRXULSFlIW3HaYK1sWhOUYoolwn
0Eew8Ej/wq9UzhuWs3QOvJK7XVQch9VLSZiZwbZBfRHTQ1pFGyKyDh4Ab/uWns61
AYyM++VCIOGv4UgHBH6dhT4ff4x33t2CC6+Yr5/yX5t9fu+V5J4=
=7sqT
-----END PGP SIGNATURE-----

Merge 5.10.122 into android12-5.10-lts

Changes in 5.10.122
	pcmcia: db1xxx_ss: restrict to MIPS_DB1XXX boards
	staging: greybus: codecs: fix type confusion of list iterator variable
	iio: adc: ad7124: Remove shift from scan_type
	lkdtm/bugs: Check for the NULL pointer after calling kmalloc
	tty: goldfish: Use tty_port_destroy() to destroy port
	tty: serial: owl: Fix missing clk_disable_unprepare() in owl_uart_probe
	tty: n_tty: Restore EOF push handling behavior
	tty: serial: fsl_lpuart: fix potential bug when using both of_alias_get_id and ida_simple_get
	usb: usbip: fix a refcount leak in stub_probe()
	usb: usbip: add missing device lock on tweak configuration cmd
	USB: storage: karma: fix rio_karma_init return
	usb: musb: Fix missing of_node_put() in omap2430_probe
	staging: fieldbus: Fix the error handling path in anybuss_host_common_probe()
	pwm: lp3943: Fix duty calculation in case period was clamped
	rpmsg: qcom_smd: Fix irq_of_parse_and_map() return value
	usb: dwc3: pci: Fix pm_runtime_get_sync() error checking
	misc: fastrpc: fix an incorrect NULL check on list iterator
	firmware: stratix10-svc: fix a missing check on list iterator
	usb: typec: mux: Check dev_set_name() return value
	iio: adc: stmpe-adc: Fix wait_for_completion_timeout return value check
	iio: proximity: vl53l0x: Fix return value check of wait_for_completion_timeout
	iio: adc: sc27xx: fix read big scale voltage not right
	iio: adc: sc27xx: Fine tune the scale calibration values
	rpmsg: qcom_smd: Fix returning 0 if irq_of_parse_and_map() fails
	phy: qcom-qmp: fix pipe-clock imbalance on power-on failure
	serial: sifive: Report actual baud base rather than fixed 115200
	coresight: cpu-debug: Replace mutex with mutex_trylock on panic notifier
	extcon: ptn5150: Add queue work sync before driver release
	soc: rockchip: Fix refcount leak in rockchip_grf_init
	clocksource/drivers/riscv: Events are stopped during CPU suspend
	rtc: mt6397: check return value after calling platform_get_resource()
	serial: meson: acquire port->lock in startup()
	serial: 8250_fintek: Check SER_RS485_RTS_* only with RS485
	serial: digicolor-usart: Don't allow CS5-6
	serial: rda-uart: Don't allow CS5-6
	serial: txx9: Don't allow CS5-6
	serial: sh-sci: Don't allow CS5-6
	serial: sifive: Sanitize CSIZE and c_iflag
	serial: st-asc: Sanitize CSIZE and correct PARENB for CS7
	serial: stm32-usart: Correct CSIZE, bits, and parity
	firmware: dmi-sysfs: Fix memory leak in dmi_sysfs_register_handle
	bus: ti-sysc: Fix warnings for unbind for serial
	driver: base: fix UAF when driver_attach failed
	driver core: fix deadlock in __device_attach
	watchdog: rti-wdt: Fix pm_runtime_get_sync() error checking
	watchdog: ts4800_wdt: Fix refcount leak in ts4800_wdt_probe
	ASoC: fsl_sai: Fix FSL_SAI_xDR/xFR definition
	clocksource/drivers/oxnas-rps: Fix irq_of_parse_and_map() return value
	s390/crypto: fix scatterwalk_unmap() callers in AES-GCM
	net: sched: fixed barrier to prevent skbuff sticking in qdisc backlog
	net: ethernet: mtk_eth_soc: out of bounds read in mtk_hwlro_get_fdir_entry()
	net: ethernet: ti: am65-cpsw-nuss: Fix some refcount leaks
	net: dsa: mv88e6xxx: Fix refcount leak in mv88e6xxx_mdios_register
	modpost: fix removing numeric suffixes
	jffs2: fix memory leak in jffs2_do_fill_super
	ubi: fastmap: Fix high cpu usage of ubi_bgt by making sure wl_pool not empty
	ubi: ubi_create_volume: Fix use-after-free when volume creation failed
	bpf: Fix probe read error in ___bpf_prog_run()
	riscv: read-only pages should not be writable
	net/smc: fixes for converting from "struct smc_cdc_tx_pend **" to "struct smc_wr_tx_pend_priv *"
	nfp: only report pause frame configuration for physical device
	sfc: fix considering that all channels have TX queues
	sfc: fix wrong tx channel offset with efx_separate_tx_channels
	net/mlx5: Don't use already freed action pointer
	net/mlx5: correct ECE offset in query qp output
	net/mlx5e: Update netdev features after changing XDP state
	net: sched: add barrier to fix packet stuck problem for lockless qdisc
	tcp: tcp_rtx_synack() can be called from process context
	gpio: pca953x: use the correct register address to do regcache sync
	afs: Fix infinite loop found by xfstest generic/676
	scsi: sd: Fix potential NULL pointer dereference
	tipc: check attribute length for bearer name
	driver core: Fix wait_for_device_probe() & deferred_probe_timeout interaction
	perf c2c: Fix sorting in percent_rmt_hitm_cmp()
	dmaengine: idxd: set DMA_INTERRUPT cap bit
	mips: cpc: Fix refcount leak in mips_cpc_default_phys_base
	bootconfig: Make the bootconfig.o as a normal object file
	tracing: Fix sleeping function called from invalid context on RT kernel
	tracing: Avoid adding tracer option before update_tracer_options
	iommu/arm-smmu: fix possible null-ptr-deref in arm_smmu_device_probe()
	iommu/arm-smmu-v3: check return value after calling platform_get_resource()
	f2fs: remove WARN_ON in f2fs_is_valid_blkaddr
	i2c: cadence: Increase timeout per message if necessary
	m68knommu: set ZERO_PAGE() to the allocated zeroed page
	m68knommu: fix undefined reference to `_init_sp'
	dmaengine: zynqmp_dma: In struct zynqmp_dma_chan fix desc_size data type
	NFSv4: Don't hold the layoutget locks across multiple RPC calls
	video: fbdev: hyperv_fb: Allow resolutions with size > 64 MB for Gen1
	video: fbdev: pxa3xx-gcu: release the resources correctly in pxa3xx_gcu_probe/remove()
	xprtrdma: treat all calls not a bcall when bc_serv is NULL
	netfilter: nat: really support inet nat without l3 address
	netfilter: nf_tables: delete flowtable hooks via transaction list
	powerpc/kasan: Force thread size increase with KASAN
	netfilter: nf_tables: always initialize flowtable hook list in transaction
	ata: pata_octeon_cf: Fix refcount leak in octeon_cf_probe
	netfilter: nf_tables: release new hooks on unsupported flowtable flags
	netfilter: nf_tables: memleak flow rule from commit path
	netfilter: nf_tables: bail out early if hardware offload is not supported
	xen: unexport __init-annotated xen_xlate_map_ballooned_pages()
	af_unix: Fix a data-race in unix_dgram_peer_wake_me().
	bpf, arm64: Clear prog->jited_len along prog->jited
	net: dsa: lantiq_gswip: Fix refcount leak in gswip_gphy_fw_list
	net/mlx4_en: Fix wrong return value on ioctl EEPROM query failure
	SUNRPC: Fix the calculation of xdr->end in xdr_get_next_encode_buffer()
	net: mdio: unexport __init-annotated mdio_bus_init()
	net: xfrm: unexport __init-annotated xfrm4_protocol_init()
	net: ipv6: unexport __init-annotated seg6_hmac_init()
	net/mlx5: Rearm the FW tracer after each tracer event
	net/mlx5: fs, fail conflicting actions
	ip_gre: test csum_start instead of transport header
	net: altera: Fix refcount leak in altera_tse_mdio_create
	drm: imx: fix compiler warning with gcc-12
	iio: dummy: iio_simple_dummy: check the return value of kstrdup()
	staging: rtl8712: fix a potential memory leak in r871xu_drv_init()
	iio: st_sensors: Add a local lock for protecting odr
	lkdtm/usercopy: Expand size of "out of frame" object
	tty: synclink_gt: Fix null-pointer-dereference in slgt_clean()
	tty: Fix a possible resource leak in icom_probe
	drivers: staging: rtl8192u: Fix deadlock in ieee80211_beacons_stop()
	drivers: staging: rtl8192e: Fix deadlock in rtllib_beacons_stop()
	USB: host: isp116x: check return value after calling platform_get_resource()
	drivers: tty: serial: Fix deadlock in sa1100_set_termios()
	drivers: usb: host: Fix deadlock in oxu_bus_suspend()
	USB: hcd-pci: Fully suspend across freeze/thaw cycle
	sysrq: do not omit current cpu when showing backtrace of all active CPUs
	usb: dwc2: gadget: don't reset gadget's driver->bus
	misc: rtsx: set NULL intfdata when probe fails
	extcon: Modify extcon device to be created after driver data is set
	clocksource/drivers/sp804: Avoid error on multiple instances
	staging: rtl8712: fix uninit-value in usb_read8() and friends
	staging: rtl8712: fix uninit-value in r871xu_drv_init()
	serial: msm_serial: disable interrupts in __msm_console_write()
	kernfs: Separate kernfs_pr_cont_buf and rename_lock.
	watchdog: wdat_wdt: Stop watchdog when rebooting the system
	md: protect md_unregister_thread from reentrancy
	scsi: myrb: Fix up null pointer access on myrb_cleanup()
	Revert "net: af_key: add check for pfkey_broadcast in function pfkey_process"
	ceph: allow ceph.dir.rctime xattr to be updatable
	drm/radeon: fix a possible null pointer dereference
	modpost: fix undefined behavior of is_arm_mapping_symbol()
	x86/cpu: Elide KCSAN for cpu_has() and friends
	jump_label,noinstr: Avoid instrumentation for JUMP_LABEL=n builds
	nbd: call genl_unregister_family() first in nbd_cleanup()
	nbd: fix race between nbd_alloc_config() and module removal
	nbd: fix io hung while disconnecting device
	s390/gmap: voluntarily schedule during key setting
	cifs: version operations for smb20 unneeded when legacy support disabled
	nodemask: Fix return values to be unsigned
	vringh: Fix loop descriptors check in the indirect cases
	scripts/gdb: change kernel config dumping method
	ALSA: hda/conexant - Fix loopback issue with CX20632
	ALSA: hda/realtek: Fix for quirk to enable speaker output on the Lenovo Yoga DuetITL 2021
	cifs: return errors during session setup during reconnects
	cifs: fix reconnect on smb3 mount types
	ata: libata-transport: fix {dma|pio|xfer}_mode sysfs files
	mmc: block: Fix CQE recovery reset success
	net: phy: dp83867: retrigger SGMII AN when link change
	nfc: st21nfca: fix incorrect validating logic in EVT_TRANSACTION
	nfc: st21nfca: fix memory leaks in EVT_TRANSACTION handling
	nfc: st21nfca: fix incorrect sizing calculations in EVT_TRANSACTION
	ixgbe: fix bcast packets Rx on VF after promisc removal
	ixgbe: fix unexpected VLAN Rx in promisc mode on VF
	Input: bcm5974 - set missing URB_NO_TRANSFER_DMA_MAP urb flag
	drm/bridge: analogix_dp: Support PSR-exit to disable transition
	drm/atomic: Force bridge self-refresh-exit on CRTC switch
	powerpc/32: Fix overread/overwrite of thread_struct via ptrace
	powerpc/mm: Switch obsolete dssall to .long
	interconnect: qcom: sc7180: Drop IP0 interconnects
	interconnect: Restore sync state by ignoring ipa-virt in provider count
	md/raid0: Ignore RAID0 layout if the second zone has only one device
	PCI: qcom: Fix pipe clock imbalance
	zonefs: fix handling of explicit_open option on mount
	dmaengine: idxd: add missing callback function to support DMA_INTERRUPT
	tcp: fix tcp_mtup_probe_success vs wrong snd_cwnd
	Linux 5.10.122

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8b96565fbcb635b2faaf2adcf287c963180c0b92
@@ -107,13 +107,14 @@ Description:
described in ATA8 7.16 and 7.17. Only valid if
the device is not a PM.

pio_mode: (RO) Transfer modes supported by the device when
in PIO mode. Mostly used by PATA device.
pio_mode: (RO) PIO transfer mode used by the device.
Mostly used by PATA devices.

xfer_mode: (RO) Current transfer mode
xfer_mode: (RO) Current transfer mode. Mostly used by
PATA devices.

dma_mode: (RO) Transfer modes supported by the device when
in DMA mode. Mostly used by PATA device.
dma_mode: (RO) DMA transfer mode used by the device.
Mostly used by PATA devices.

class: (RO) Device class. Can be "ata" for disk,
"atapi" for packet device, "pmp" for PM, or
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 121
SUBLEVEL = 122
EXTRAVERSION =
NAME = Dare mighty things
@@ -1112,6 +1112,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
bpf_jit_binary_free(header);
prog->bpf_func = NULL;
prog->jited = 0;
prog->jited_len = 0;
goto out_off;
}
bpf_jit_binary_lock_ro(header);
@@ -321,6 +321,7 @@ comment "Machine Options"

config UBOOT
bool "Support for U-Boot command line parameters"
depends on COLDFIRE
help
If you say Y here kernel will try to collect command
line parameters from the initial u-boot stack.
@@ -42,7 +42,8 @@ extern void paging_init(void);
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
#define ZERO_PAGE(vaddr) (virt_to_page(0))
extern void *empty_zero_page;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
* All 32bit addresses are effectively valid for vmalloc...
@@ -27,6 +27,7 @@ phys_addr_t __weak mips_cpc_default_phys_base(void)
cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
if (cpc_node) {
err = of_address_to_resource(cpc_node, 0, &res);
of_node_put(cpc_node);
if (!err)
return res.start;
}
@@ -786,7 +786,6 @@ config THREAD_SHIFT
range 13 15
default "15" if PPC_256K_PAGES
default "14" if PPC64
default "14" if KASAN
default "13"
help
Used to define the stack size. The default is almost always what you
@@ -212,6 +212,7 @@
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LSWI 0x7c0004aa
@@ -517,6 +518,7 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LDARX(t, a, b, eh))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LWARX(t, a, b, eh))
@@ -14,10 +14,16 @@

#ifdef __KERNEL__

#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
#ifdef CONFIG_KASAN
#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
#else
#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
#endif

#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
#define THREAD_SHIFT PAGE_SHIFT
#else
#define THREAD_SHIFT CONFIG_THREAD_SHIFT
#define THREAD_SHIFT MIN_THREAD_SHIFT
#endif

#define THREAD_SIZE (1 << THREAD_SHIFT)
@@ -82,7 +82,7 @@ void power4_idle(void)
return;

if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile("DSSALL ; sync" ::: "memory");
asm volatile(PPC_DSSALL " ; sync" ::: "memory");

power4_idle_nap();
@@ -129,7 +129,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */
@@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

@@ -292,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync

/* Stop DST streams */
DSSALL
PPC_DSSALL
sync

/* Get the current enable bit of the L3CR into r4 */
@@ -401,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
@@ -75,8 +75,13 @@ long arch_ptrace(struct task_struct *child, long request,

flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&tmp, &child->thread.TS_FPR(fpidx),
sizeof(long));
if (IS_ENABLED(CONFIG_PPC32)) {
// On 32-bit the index we are passed refers to 32-bit words
tmp = ((u32 *)child->thread.fp_state.fpr)[fpidx];
} else {
memcpy(&tmp, &child->thread.TS_FPR(fpidx),
sizeof(long));
}
else
tmp = child->thread.fp_state.fpscr;
}
@@ -108,8 +113,13 @@ long arch_ptrace(struct task_struct *child, long request,

flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data,
sizeof(long));
if (IS_ENABLED(CONFIG_PPC32)) {
// On 32-bit the index we are passed refers to 32-bit words
((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
} else {
memcpy(&child->thread.TS_FPR(fpidx), &data,
sizeof(long));
}
else
child->thread.fp_state.fpscr = data;
ret = 0;
@@ -478,4 +488,7 @@ void __init pt_regs_check(void)
* real registers.
*/
BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));

// ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
}
@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync
@@ -142,7 +142,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
@@ -79,7 +79,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
asm volatile (PPC_DSSALL);

if (new_on_cpu)
radix_kvm_prefetch_workaround(next);
@@ -48,7 +48,7 @@ flush_disable_75x:

/* Stop DST streams */
BEGIN_FTR_SECTION
DSSALL
PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

@@ -197,7 +197,7 @@ flush_disable_745x:
isync

/* Stop prefetch streams */
DSSALL
PPC_DSSALL
sync

/* Disable L2 prefetching */
@@ -65,7 +65,7 @@ static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)

if (md->attribute & EFI_MEMORY_RO) {
val = pte_val(pte) & ~_PAGE_WRITE;
val = pte_val(pte) | _PAGE_READ;
val |= _PAGE_READ;
pte = __pte(val);
}
if (md->attribute & EFI_MEMORY_XP) {
@@ -701,7 +701,7 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
unsigned int nbytes)
{
gw->walk_bytes_remain -= nbytes;
scatterwalk_unmap(&gw->walk);
scatterwalk_unmap(gw->walk_ptr);
scatterwalk_advance(&gw->walk, nbytes);
scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
gw->walk_ptr = NULL;
@@ -776,7 +776,7 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
goto out;
}

scatterwalk_unmap(&gw->walk);
scatterwalk_unmap(gw->walk_ptr);
gw->walk_ptr = NULL;

gw->ptr = gw->buf;
@@ -2596,6 +2596,18 @@ static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
return 0;
}

/*
* Give a chance to schedule after setting a key to 256 pages.
* We only hold the mm lock, which is a rwsem and the kvm srcu.
* Both can sleep.
*/
static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
cond_resched();
return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
unsigned long hmask, unsigned long next,
struct mm_walk *walk)
@@ -2618,12 +2630,14 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
end = start + HPAGE_SIZE - 1;
__storage_key_init_range(start, end);
set_bit(PG_arch_1, &page->flags);
cond_resched();
return 0;
}

static const struct mm_walk_ops enable_skey_walk_ops = {
.hugetlb_entry = __s390_enable_skey_hugetlb,
.pte_entry = __s390_enable_skey_pte,
.pmd_entry = __s390_enable_skey_pmd,
};

int s390_enable_skey(void)
@@ -49,7 +49,7 @@ extern const char * const x86_power_flags[32];
extern const char * const x86_bug_flags[NBUGINTS*32];

#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
arch_test_bit(bit, (unsigned long *)((c)->x86_capability))

/*
* There are 32 bits/features in each mask word. The high bits
@@ -196,7 +196,7 @@ static struct {
{ XFER_PIO_0, "XFER_PIO_0" },
{ XFER_PIO_SLOW, "XFER_PIO_SLOW" }
};
ata_bitfield_name_match(xfer,ata_xfer_names)
ata_bitfield_name_search(xfer, ata_xfer_names)

/*
* ATA Port attributes
@ -888,12 +888,14 @@ static int octeon_cf_probe(struct platform_device *pdev)
|
||||
int i;
|
||||
res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0);
|
||||
if (!res_dma) {
|
||||
put_device(&dma_dev->dev);
|
||||
of_node_put(dma_node);
|
||||
return -EINVAL;
|
||||
}
|
||||
cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start,
|
||||
resource_size(res_dma));
|
||||
if (!cf_port->dma_base) {
|
||||
put_device(&dma_dev->dev);
|
||||
of_node_put(dma_node);
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -903,6 +905,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
|
||||
irq = i;
|
||||
irq_handler = octeon_cf_interrupt;
|
||||
}
|
||||
put_device(&dma_dev->dev);
|
||||
}
|
||||
of_node_put(dma_node);
|
||||
}
|
||||
|
@ -621,7 +621,7 @@ int bus_add_driver(struct device_driver *drv)
|
||||
if (drv->bus->p->drivers_autoprobe) {
|
||||
error = driver_attach(drv);
|
||||
if (error)
|
||||
goto out_unregister;
|
||||
goto out_del_list;
|
||||
}
|
||||
module_add_driver(drv->owner, drv);
|
||||
|
||||
@ -648,6 +648,8 @@ int bus_add_driver(struct device_driver *drv)
|
||||
|
||||
return 0;
|
||||
|
||||
out_del_list:
|
||||
klist_del(&priv->knode_bus);
|
||||
out_unregister:
|
||||
kobject_put(&priv->kobj);
|
||||
/* drv->p is freed in driver_release() */
|
||||
|
@ -250,7 +250,6 @@ DEFINE_SHOW_ATTRIBUTE(deferred_devs);
|
||||
|
||||
int driver_deferred_probe_timeout;
|
||||
EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
|
||||
static DECLARE_WAIT_QUEUE_HEAD(probe_timeout_waitqueue);
|
||||
|
||||
static int __init deferred_probe_timeout_setup(char *str)
|
||||
{
|
||||
@ -302,7 +301,6 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
|
||||
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
|
||||
dev_info(p->device, "deferred probe pending\n");
|
||||
mutex_unlock(&deferred_probe_mutex);
|
||||
wake_up_all(&probe_timeout_waitqueue);
|
||||
}
|
||||
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);
|
||||
|
||||
@ -706,9 +704,6 @@ int driver_probe_done(void)
|
||||
*/
|
||||
void wait_for_device_probe(void)
|
||||
{
|
||||
/* wait for probe timeout */
|
||||
wait_event(probe_timeout_waitqueue, !driver_deferred_probe_timeout);
|
||||
|
||||
/* wait for the deferred probe workqueue to finish */
|
||||
flush_work(&deferred_probe_work);
|
||||
|
||||
@ -897,6 +892,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
|
||||
static int __device_attach(struct device *dev, bool allow_async)
|
||||
{
|
||||
int ret = 0;
|
||||
bool async = false;
|
||||
|
||||
device_lock(dev);
|
||||
if (dev->p->dead) {
|
||||
@ -935,7 +931,7 @@ static int __device_attach(struct device *dev, bool allow_async)
|
||||
*/
|
||||
dev_dbg(dev, "scheduling asynchronous probe\n");
|
||||
get_device(dev);
|
||||
async_schedule_dev(__device_attach_async_helper, dev);
|
||||
async = true;
|
||||
} else {
|
||||
pm_request_idle(dev);
|
||||
}
|
||||
@ -945,6 +941,8 @@ static int __device_attach(struct device *dev, bool allow_async)
|
||||
}
|
||||
out_unlock:
|
||||
device_unlock(dev);
|
||||
if (async)
|
||||
async_schedule_dev(__device_attach_async_helper, dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1359,7 +1359,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
|
||||
static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
|
||||
struct block_device *bdev)
|
||||
{
|
||||
sock_shutdown(nbd);
|
||||
nbd_clear_sock(nbd);
|
||||
__invalidate_device(bdev, true);
|
||||
nbd_bdev_reset(bdev);
|
||||
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
|
||||
@ -1472,15 +1472,20 @@ static struct nbd_config *nbd_alloc_config(void)
|
||||
{
|
||||
struct nbd_config *config;
|
||||
|
||||
if (!try_module_get(THIS_MODULE))
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
|
||||
if (!config)
|
||||
return NULL;
|
||||
if (!config) {
|
||||
module_put(THIS_MODULE);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
atomic_set(&config->recv_threads, 0);
|
||||
init_waitqueue_head(&config->recv_wq);
|
||||
init_waitqueue_head(&config->conn_wait);
|
||||
config->blksize = NBD_DEF_BLKSIZE;
|
||||
atomic_set(&config->live_connections, 0);
|
||||
try_module_get(THIS_MODULE);
|
||||
return config;
|
||||
}
|
||||
|
||||
@ -1507,12 +1512,13 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
|
||||
mutex_unlock(&nbd->config_lock);
|
||||
goto out;
|
||||
}
|
||||
config = nbd->config = nbd_alloc_config();
|
||||
if (!config) {
|
||||
ret = -ENOMEM;
|
||||
config = nbd_alloc_config();
|
||||
if (IS_ERR(config)) {
|
||||
ret = PTR_ERR(config);
|
||||
mutex_unlock(&nbd->config_lock);
|
||||
goto out;
|
||||
}
|
||||
nbd->config = config;
|
||||
refcount_set(&nbd->config_refs, 1);
|
||||
refcount_inc(&nbd->refs);
|
||||
mutex_unlock(&nbd->config_lock);
|
||||
@ -1934,13 +1940,14 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
|
||||
nbd_put(nbd);
|
||||
return -EINVAL;
|
||||
}
|
||||
config = nbd->config = nbd_alloc_config();
|
||||
if (!nbd->config) {
|
||||
config = nbd_alloc_config();
|
||||
if (IS_ERR(config)) {
|
||||
mutex_unlock(&nbd->config_lock);
|
||||
nbd_put(nbd);
|
||||
printk(KERN_ERR "nbd: couldn't allocate config\n");
|
||||
return -ENOMEM;
|
||||
return PTR_ERR(config);
|
||||
}
|
||||
nbd->config = config;
|
||||
refcount_set(&nbd->config_refs, 1);
|
||||
set_bit(NBD_RT_BOUND, &config->runtime_flags);
|
||||
|
||||
@ -2461,6 +2468,12 @@ static void __exit nbd_cleanup(void)
|
||||
struct nbd_device *nbd;
|
||||
LIST_HEAD(del_list);
|
||||
|
||||
/*
|
||||
* Unregister netlink interface prior to waiting
|
||||
* for the completion of netlink commands.
|
||||
*/
|
||||
genl_unregister_family(&nbd_genl_family);
|
||||
|
||||
nbd_dbg_close();
|
||||
|
||||
mutex_lock(&nbd_index_mutex);
|
||||
@ -2470,13 +2483,15 @@ static void __exit nbd_cleanup(void)
|
||||
while (!list_empty(&del_list)) {
|
||||
nbd = list_first_entry(&del_list, struct nbd_device, list);
|
||||
list_del_init(&nbd->list);
|
||||
if (refcount_read(&nbd->config_refs))
|
||||
printk(KERN_ERR "nbd: possibly leaking nbd_config (ref %d)\n",
|
||||
refcount_read(&nbd->config_refs));
|
||||
if (refcount_read(&nbd->refs) != 1)
|
||||
printk(KERN_ERR "nbd: possibly leaking a device\n");
|
||||
nbd_put(nbd);
|
||||
}
|
||||
|
||||
idr_destroy(&nbd_index_idr);
|
||||
genl_unregister_family(&nbd_genl_family);
|
||||
unregister_blkdev(NBD_MAJOR, "nbd");
|
||||
}
|
||||
|
||||
|
@ -3291,7 +3291,9 @@ static int sysc_remove(struct platform_device *pdev)
|
||||
struct sysc *ddata = platform_get_drvdata(pdev);
|
||||
int error;
|
||||
|
||||
cancel_delayed_work_sync(&ddata->idle_work);
|
||||
/* Device can still be enabled, see deferred idle quirk in probe */
|
||||
if (cancel_delayed_work_sync(&ddata->idle_work))
|
||||
ti_sysc_idle(&ddata->idle_work.work);
|
||||
|
||||
error = pm_runtime_get_sync(ddata->dev);
|
||||
if (error < 0) {
|
||||
|
@ -236,7 +236,7 @@ static int __init oxnas_rps_timer_init(struct device_node *np)
|
||||
}
|
||||
|
||||
rps->irq = irq_of_parse_and_map(np, 0);
|
||||
if (rps->irq < 0) {
|
||||
if (!rps->irq) {
|
||||
ret = -EINVAL;
|
||||
goto err_iomap;
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ static int riscv_clock_next_event(unsigned long delta,
|
||||
static unsigned int riscv_clock_event_irq;
|
||||
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
|
||||
.name = "riscv_timer_clockevent",
|
||||
.features = CLOCK_EVT_FEAT_ONESHOT,
|
||||
.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP,
|
||||
.rating = 100,
|
||||
.set_next_event = riscv_clock_next_event,
|
||||
};
|
||||
|
@ -274,6 +274,11 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
|
||||
struct clk *clk1, *clk2;
|
||||
const char *name = of_get_property(np, "compatible", NULL);
|
||||
|
||||
if (initialized) {
|
||||
pr_debug("%pOF: skipping further SP804 timer device\n", np);
|
||||
return 0;
|
||||
}
|
||||
|
||||
base = of_iomap(np, 0);
|
||||
if (!base)
|
||||
return -ENXIO;
|
||||
@ -285,11 +290,6 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
|
||||
writel(0, timer1_base + timer->ctrl);
|
||||
writel(0, timer2_base + timer->ctrl);
|
||||
|
||||
if (initialized || !of_device_is_available(np)) {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
clk1 = of_clk_get(np, 0);
|
||||
if (IS_ERR(clk1))
|
||||
clk1 = NULL;
|
||||
|
@ -82,6 +82,27 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
|
||||
hw->int_handle = wq->vec_ptr;
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
|
||||
{
|
||||
struct idxd_wq *wq = to_idxd_wq(c);
|
||||
u32 desc_flags;
|
||||
struct idxd_desc *desc;
|
||||
|
||||
if (wq->state != IDXD_WQ_ENABLED)
|
||||
return NULL;
|
||||
|
||||
op_flag_setup(flags, &desc_flags);
|
||||
desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
|
||||
if (IS_ERR(desc))
|
||||
return NULL;
|
||||
|
||||
idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
|
||||
0, 0, 0, desc->compl_dma, desc_flags);
|
||||
desc->txd.flags = flags;
|
||||
return &desc->txd;
|
||||
}
|
||||
|
||||
static struct dma_async_tx_descriptor *
|
||||
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
|
||||
dma_addr_t dma_src, size_t len, unsigned long flags)
|
||||
@ -188,10 +209,12 @@ int idxd_register_dma_device(struct idxd_device *idxd)
|
||||
INIT_LIST_HEAD(&dma->channels);
|
||||
dma->dev = dev;
|
||||
|
||||
dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
|
||||
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
|
||||
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
|
||||
dma->device_release = idxd_dma_release;
|
||||
|
||||
dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
|
||||
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
|
||||
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
|
||||
dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
|
||||
|
@ -232,7 +232,7 @@ struct zynqmp_dma_chan {
|
||||
bool is_dmacoherent;
|
||||
struct tasklet_struct tasklet;
|
||||
bool idle;
|
||||
u32 desc_size;
|
||||
size_t desc_size;
|
||||
bool err;
|
||||
u32 bus_width;
|
||||
u32 src_burst_len;
|
||||
@ -490,7 +490,8 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
|
||||
}
|
||||
|
||||
chan->desc_pool_v = dma_alloc_coherent(chan->dev,
|
||||
(2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
|
||||
(2 * ZYNQMP_DMA_DESC_SIZE(chan) *
|
||||
ZYNQMP_DMA_NUM_DESCS),
|
||||
&chan->desc_pool_p, GFP_KERNEL);
|
||||
if (!chan->desc_pool_v)
|
||||
return -ENOMEM;
|
||||
|
@ -194,6 +194,13 @@ static int ptn5150_init_dev_type(struct ptn5150_info *info)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ptn5150_work_sync_and_put(void *data)
|
||||
{
|
||||
struct ptn5150_info *info = data;
|
||||
|
||||
cancel_work_sync(&info->irq_work);
|
||||
}
|
||||
|
||||
static int ptn5150_i2c_probe(struct i2c_client *i2c)
|
||||
{
|
||||
struct device *dev = &i2c->dev;
|
||||
@ -284,6 +291,10 @@ static int ptn5150_i2c_probe(struct i2c_client *i2c)
|
||||
if (ret)
|
||||
return -EINVAL;
|
||||
|
||||
ret = devm_add_action_or_reset(dev, ptn5150_work_sync_and_put, info);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Update current extcon state if for example OTG connection was there
|
||||
* before the probe
|
||||
|
@ -1230,19 +1230,14 @@ int extcon_dev_register(struct extcon_dev *edev)
|
||||
edev->dev.type = &edev->extcon_dev_type;
|
||||
}
|
||||
|
||||
ret = device_register(&edev->dev);
|
||||
if (ret) {
|
||||
put_device(&edev->dev);
|
||||
goto err_dev;
|
||||
}
|
||||
|
||||
spin_lock_init(&edev->lock);
|
||||
edev->nh = devm_kcalloc(&edev->dev, edev->max_supported,
|
||||
sizeof(*edev->nh), GFP_KERNEL);
|
||||
if (!edev->nh) {
|
||||
ret = -ENOMEM;
|
||||
device_unregister(&edev->dev);
|
||||
goto err_dev;
|
||||
if (edev->max_supported) {
|
||||
edev->nh = kcalloc(edev->max_supported, sizeof(*edev->nh),
|
||||
GFP_KERNEL);
|
||||
if (!edev->nh) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_nh;
|
||||
}
|
||||
}
|
||||
|
||||
for (index = 0; index < edev->max_supported; index++)
|
||||
@ -1253,6 +1248,12 @@ int extcon_dev_register(struct extcon_dev *edev)
|
||||
dev_set_drvdata(&edev->dev, edev);
|
||||
edev->state = 0;
|
||||
|
||||
ret = device_register(&edev->dev);
|
||||
if (ret) {
|
||||
put_device(&edev->dev);
|
||||
goto err_dev;
|
||||
}
|
||||
|
||||
mutex_lock(&extcon_dev_list_lock);
|
||||
list_add(&edev->entry, &extcon_dev_list);
|
||||
mutex_unlock(&extcon_dev_list_lock);
|
||||
@ -1260,6 +1261,9 @@ int extcon_dev_register(struct extcon_dev *edev)
|
||||
return 0;
|
||||
|
||||
err_dev:
|
||||
if (edev->max_supported)
|
||||
kfree(edev->nh);
|
||||
err_alloc_nh:
|
||||
if (edev->max_supported)
|
||||
kfree(edev->extcon_dev_type.groups);
|
||||
err_alloc_groups:
|
||||
@ -1320,6 +1324,7 @@ void extcon_dev_unregister(struct extcon_dev *edev)
|
||||
if (edev->max_supported) {
|
||||
kfree(edev->extcon_dev_type.groups);
|
||||
kfree(edev->cables);
|
||||
kfree(edev->nh);
|
||||
}
|
||||
|
||||
put_device(&edev->dev);
|
||||
|
@ -603,7 +603,7 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
|
||||
"%d-%d", dh->type, entry->instance);
|
||||
|
||||
if (*ret) {
|
||||
kfree(entry);
|
||||
kobject_put(&entry->kobj);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -941,17 +941,17 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
|
||||
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
|
||||
{
|
||||
struct stratix10_svc_data_mem *pmem;
|
||||
size_t size = 0;
|
||||
|
||||
list_for_each_entry(pmem, &svc_data_mem, node)
|
||||
if (pmem->vaddr == kaddr) {
|
||||
size = pmem->size;
|
||||
break;
|
||||
gen_pool_free(chan->ctrl->genpool,
|
||||
(unsigned long)kaddr, pmem->size);
|
||||
pmem->vaddr = NULL;
|
||||
list_del(&pmem->node);
|
||||
return;
|
||||
}
|
||||
|
||||
gen_pool_free(chan->ctrl->genpool, (unsigned long)kaddr, size);
|
||||
pmem->vaddr = NULL;
|
||||
list_del(&pmem->node);
|
||||
list_del(&svc_data_mem);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(stratix10_svc_free_memory);
|
||||
|
||||
|
@ -1107,20 +1107,21 @@ static int pca953x_regcache_sync(struct device *dev)
|
||||
{
|
||||
struct pca953x_chip *chip = dev_get_drvdata(dev);
|
||||
int ret;
|
||||
u8 regaddr;
|
||||
|
||||
/*
|
||||
* The ordering between direction and output is important,
|
||||
* sync these registers first and only then sync the rest.
|
||||
*/
|
||||
ret = regcache_sync_region(chip->regmap, chip->regs->direction,
|
||||
chip->regs->direction + NBANK(chip));
|
||||
regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0);
|
||||
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regcache_sync_region(chip->regmap, chip->regs->output,
|
||||
chip->regs->output + NBANK(chip));
|
||||
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0);
|
||||
ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip));
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret);
|
||||
return ret;
|
||||
@ -1128,16 +1129,18 @@ static int pca953x_regcache_sync(struct device *dev)
|
||||
|
||||
#ifdef CONFIG_GPIO_PCA953X_IRQ
|
||||
if (chip->driver_data & PCA_PCAL) {
|
||||
ret = regcache_sync_region(chip->regmap, PCAL953X_IN_LATCH,
|
||||
PCAL953X_IN_LATCH + NBANK(chip));
|
||||
regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0);
|
||||
ret = regcache_sync_region(chip->regmap, regaddr,
|
||||
regaddr + NBANK(chip));
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to sync INT latch registers: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regcache_sync_region(chip->regmap, PCAL953X_INT_MASK,
|
||||
PCAL953X_INT_MASK + NBANK(chip));
|
||||
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0);
|
||||
ret = regcache_sync_region(chip->regmap, regaddr,
|
||||
regaddr + NBANK(chip));
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to sync INT mask registers: %d\n",
|
||||
ret);
|
||||
|
@ -1268,6 +1268,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_encoder *encoder = dp->encoder;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *conn_state;
|
||||
|
||||
connector = drm_atomic_get_old_connector_for_encoder(state, encoder);
|
||||
if (!connector)
|
||||
return NULL;
|
||||
|
||||
conn_state = drm_atomic_get_old_connector_state(state, connector);
|
||||
if (!conn_state)
|
||||
return NULL;
|
||||
|
||||
return conn_state->crtc;
|
||||
}
|
||||
|
||||
static
|
||||
struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
|
||||
struct drm_atomic_state *state)
|
||||
@ -1448,14 +1467,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
|
||||
{
|
||||
struct drm_atomic_state *old_state = old_bridge_state->base.state;
|
||||
struct analogix_dp_device *dp = bridge->driver_private;
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_crtc *old_crtc, *new_crtc;
|
||||
struct drm_crtc_state *old_crtc_state = NULL;
|
||||
struct drm_crtc_state *new_crtc_state = NULL;
|
||||
int ret;
|
||||
|
||||
crtc = analogix_dp_get_new_crtc(dp, old_state);
|
||||
if (!crtc)
|
||||
new_crtc = analogix_dp_get_new_crtc(dp, old_state);
|
||||
if (!new_crtc)
|
||||
goto out;
|
||||
|
||||
new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
|
||||
new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc);
|
||||
if (!new_crtc_state)
|
||||
goto out;
|
||||
|
||||
@ -1464,6 +1485,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
|
||||
return;
|
||||
|
||||
out:
|
||||
old_crtc = analogix_dp_get_old_crtc(dp, old_state);
|
||||
if (old_crtc) {
|
||||
old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
|
||||
old_crtc);
|
||||
|
||||
/* When moving from PSR to fully disabled, exit PSR first. */
|
||||
if (old_crtc_state && old_crtc_state->self_refresh_active) {
|
||||
ret = analogix_dp_disable_psr(dp);
|
||||
if (ret)
|
||||
DRM_ERROR("Failed to disable psr (%d)\n", ret);
|
||||
}
|
||||
}
|
||||
|
||||
analogix_dp_bridge_disable(bridge);
|
||||
}
|
||||
|
||||
|
@ -996,9 +996,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
|
||||
return drm_atomic_crtc_effectively_active(old_state);
|
||||
|
||||
/*
|
||||
* We need to run through the crtc_funcs->disable() function if the CRTC
|
||||
* is currently on, if it's transitioning to self refresh mode, or if
|
||||
* it's in self refresh mode and needs to be fully disabled.
|
||||
* We need to disable bridge(s) and CRTC if we're transitioning out of
|
||||
* self-refresh and changing CRTCs at the same time, because the
|
||||
* bridge tracks self-refresh status via CRTC state.
|
||||
*/
|
||||
if (old_state->self_refresh_active &&
|
||||
old_state->crtc != new_state->crtc)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* We also need to run through the crtc_funcs->disable() function if
|
||||
* the CRTC is currently on, if it's transitioning to self refresh
|
||||
* mode, or if it's in self refresh mode and needs to be fully
|
||||
* disabled.
|
||||
*/
|
||||
return old_state->active ||
|
||||
(old_state->self_refresh_active && !new_state->enable) ||
|
||||
|
@ -68,7 +68,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
|
||||
drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
|
||||
if (plane == &ipu_crtc->plane[0]->base)
|
||||
disable_full = true;
|
||||
if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
|
||||
if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
|
||||
disable_partial = true;
|
||||
}
|
||||
|
||||
|
@ -473,6 +473,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
|
||||
native_mode->vdisplay != 0 &&
|
||||
native_mode->clock != 0) {
|
||||
mode = drm_mode_duplicate(dev, native_mode);
|
||||
if (!mode)
|
||||
return NULL;
|
||||
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
|
||||
drm_mode_set_name(mode);
|
||||
|
||||
@ -487,6 +489,8 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
|
||||
* simpler.
|
||||
*/
|
||||
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
|
||||
if (!mode)
|
||||
return NULL;
|
||||
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
|
||||
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
|
||||
}
|
||||
|
@ -379,9 +379,10 @@ static int debug_notifier_call(struct notifier_block *self,
|
||||
int cpu;
|
||||
struct debug_drvdata *drvdata;
|
||||
|
||||
mutex_lock(&debug_lock);
|
||||
/* Bail out if we can't acquire the mutex or the functionality is off */
|
||||
if (!mutex_trylock(&debug_lock))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* Bail out if the functionality is disabled */
|
||||
if (!debug_enable)
|
||||
goto skip_dump;
|
||||
|
||||
@ -400,7 +401,7 @@ static int debug_notifier_call(struct notifier_block *self,
|
||||
|
||||
skip_dump:
|
||||
mutex_unlock(&debug_lock);
|
||||
return 0;
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block debug_notifier = {
|
||||
|
@ -724,7 +724,7 @@ static void cdns_i2c_master_reset(struct i2c_adapter *adap)
|
||||
static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
|
||||
struct i2c_adapter *adap)
|
||||
{
|
||||
unsigned long time_left;
|
||||
unsigned long time_left, msg_timeout;
|
||||
u32 reg;
|
||||
|
||||
id->p_msg = msg;
|
||||
@ -749,8 +749,16 @@ static int cdns_i2c_process_msg(struct cdns_i2c *id, struct i2c_msg *msg,
|
||||
else
|
||||
cdns_i2c_msend(id);
|
||||
|
||||
/* Minimal time to execute this message */
|
||||
msg_timeout = msecs_to_jiffies((1000 * msg->len * BITS_PER_BYTE) / id->i2c_clk);
|
||||
/* Plus some wiggle room */
|
||||
msg_timeout += msecs_to_jiffies(500);
|
||||
|
||||
if (msg_timeout < adap->timeout)
|
||||
msg_timeout = adap->timeout;
|
||||
|
||||
/* Wait for the signal of completion */
|
||||
time_left = wait_for_completion_timeout(&id->xfer_done, adap->timeout);
|
||||
time_left = wait_for_completion_timeout(&id->xfer_done, msg_timeout);
|
||||
if (time_left == 0) {
|
||||
cdns_i2c_master_reset(adap);
|
||||
dev_err(id->adap.dev.parent,
|
||||
|
@ -170,7 +170,6 @@ static const struct iio_chan_spec ad7124_channel_template = {
|
||||
.sign = 'u',
|
||||
.realbits = 24,
|
||||
.storagebits = 32,
|
||||
.shift = 8,
|
||||
.endianness = IIO_BE,
|
||||
},
|
||||
};
|
||||
|
@ -36,8 +36,8 @@
|
||||
|
||||
/* Bits and mask definition for SC27XX_ADC_CH_CFG register */
|
||||
#define SC27XX_ADC_CHN_ID_MASK GENMASK(4, 0)
|
||||
#define SC27XX_ADC_SCALE_MASK GENMASK(10, 8)
|
||||
#define SC27XX_ADC_SCALE_SHIFT 8
|
||||
#define SC27XX_ADC_SCALE_MASK GENMASK(10, 9)
|
||||
#define SC27XX_ADC_SCALE_SHIFT 9
|
||||
|
||||
/* Bits definitions for SC27XX_ADC_INT_EN registers */
|
||||
#define SC27XX_ADC_IRQ_EN BIT(0)
|
||||
@ -103,14 +103,14 @@ static struct sc27xx_adc_linear_graph small_scale_graph = {
|
||||
100, 341,
|
||||
};
|
||||
|
||||
static const struct sc27xx_adc_linear_graph big_scale_graph_calib = {
|
||||
4200, 856,
|
||||
3600, 733,
|
||||
static const struct sc27xx_adc_linear_graph sc2731_big_scale_graph_calib = {
|
||||
4200, 850,
|
||||
3600, 728,
|
||||
};
|
||||
|
||||
static const struct sc27xx_adc_linear_graph small_scale_graph_calib = {
|
||||
1000, 833,
|
||||
100, 80,
|
||||
static const struct sc27xx_adc_linear_graph sc2731_small_scale_graph_calib = {
|
||||
1000, 838,
|
||||
100, 84,
|
||||
};
|
||||
|
||||
static int sc27xx_adc_get_calib_data(u32 calib_data, int calib_adc)
|
||||
@ -130,11 +130,11 @@ static int sc27xx_adc_scale_calibration(struct sc27xx_adc_data *data,
|
||||
size_t len;
|
||||
|
||||
if (big_scale) {
|
||||
calib_graph = &big_scale_graph_calib;
|
||||
calib_graph = &sc2731_big_scale_graph_calib;
|
||||
graph = &big_scale_graph;
|
||||
cell_name = "big_scale_calib";
|
||||
} else {
|
||||
calib_graph = &small_scale_graph_calib;
|
||||
calib_graph = &sc2731_small_scale_graph_calib;
|
||||
graph = &small_scale_graph;
|
||||
cell_name = "small_scale_calib";
|
||||
}
|
||||
|
@ -61,7 +61,7 @@ struct stmpe_adc {
|
||||
static int stmpe_read_voltage(struct stmpe_adc *info,
|
||||
struct iio_chan_spec const *chan, int *val)
|
||||
{
|
||||
long ret;
|
||||
unsigned long ret;
|
||||
|
||||
mutex_lock(&info->lock);
|
||||
|
||||
@ -79,7 +79,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
|
||||
|
||||
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
|
||||
|
||||
if (ret <= 0) {
|
||||
if (ret == 0) {
|
||||
stmpe_reg_write(info->stmpe, STMPE_REG_ADC_INT_STA,
|
||||
STMPE_ADC_CH(info->channel));
|
||||
mutex_unlock(&info->lock);
|
||||
@ -96,7 +96,7 @@ static int stmpe_read_voltage(struct stmpe_adc *info,
|
||||
static int stmpe_read_temp(struct stmpe_adc *info,
|
||||
struct iio_chan_spec const *chan, int *val)
|
||||
{
|
||||
long ret;
|
||||
unsigned long ret;
|
||||
|
||||
mutex_lock(&info->lock);
|
||||
|
||||
@ -114,7 +114,7 @@ static int stmpe_read_temp(struct stmpe_adc *info,
|
||||
|
||||
ret = wait_for_completion_timeout(&info->completion, STMPE_ADC_TIMEOUT);
|
||||
|
||||
if (ret <= 0) {
|
||||
if (ret == 0) {
|
||||
mutex_unlock(&info->lock);
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
@ -70,16 +70,18 @@ static int st_sensors_match_odr(struct st_sensor_settings *sensor_settings,
|
||||
|
||||
int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
|
||||
{
|
||||
int err;
|
||||
int err = 0;
|
||||
struct st_sensor_odr_avl odr_out = {0, 0};
|
||||
struct st_sensor_data *sdata = iio_priv(indio_dev);
|
||||
|
||||
mutex_lock(&sdata->odr_lock);
|
||||
|
||||
if (!sdata->sensor_settings->odr.mask)
|
||||
return 0;
|
||||
goto unlock_mutex;
|
||||
|
||||
err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
|
||||
if (err < 0)
|
||||
goto st_sensors_match_odr_error;
|
||||
goto unlock_mutex;
|
||||
|
||||
if ((sdata->sensor_settings->odr.addr ==
|
||||
sdata->sensor_settings->pw.addr) &&
|
||||
@ -102,7 +104,9 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
|
||||
if (err >= 0)
|
||||
sdata->odr = odr_out.hz;
|
||||
|
||||
st_sensors_match_odr_error:
|
||||
unlock_mutex:
|
||||
mutex_unlock(&sdata->odr_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL(st_sensors_set_odr);
|
||||
@ -364,6 +368,8 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
|
||||
struct st_sensors_platform_data *of_pdata;
|
||||
int err = 0;
|
||||
|
||||
mutex_init(&sdata->odr_lock);
|
||||
|
||||
/* If OF/DT pdata exists, it will take precedence of anything else */
|
||||
of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata);
|
||||
if (IS_ERR(of_pdata))
|
||||
@ -557,18 +563,24 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
|
||||
err = -EBUSY;
|
||||
goto out;
|
||||
} else {
|
||||
mutex_lock(&sdata->odr_lock);
|
||||
err = st_sensors_set_enable(indio_dev, true);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
mutex_unlock(&sdata->odr_lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
msleep((sdata->sensor_settings->bootime * 1000) / sdata->odr);
|
||||
err = st_sensors_read_axis_data(indio_dev, ch, val);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
mutex_unlock(&sdata->odr_lock);
|
||||
goto out;
|
||||
}
|
||||
|
||||
*val = *val >> ch->scan_type.shift;
|
||||
|
||||
err = st_sensors_set_enable(indio_dev, false);
|
||||
mutex_unlock(&sdata->odr_lock);
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&indio_dev->mlock);
|
||||
|
@ -575,10 +575,9 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
|
||||
*/
|
||||
|
||||
swd = kzalloc(sizeof(*swd), GFP_KERNEL);
|
||||
if (!swd) {
|
||||
ret = -ENOMEM;
|
||||
goto error_kzalloc;
|
||||
}
|
||||
if (!swd)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/*
|
||||
* Allocate an IIO device.
|
||||
*
|
||||
@ -590,7 +589,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
|
||||
indio_dev = iio_device_alloc(parent, sizeof(*st));
|
||||
if (!indio_dev) {
|
||||
ret = -ENOMEM;
|
||||
goto error_ret;
|
||||
goto error_free_swd;
|
||||
}
|
||||
|
||||
st = iio_priv(indio_dev);
|
||||
@ -616,6 +615,10 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
|
||||
* indio_dev->name = spi_get_device_id(spi)->name;
|
||||
*/
|
||||
indio_dev->name = kstrdup(name, GFP_KERNEL);
|
||||
if (!indio_dev->name) {
|
||||
ret = -ENOMEM;
|
||||
goto error_free_device;
|
||||
}
|
||||
|
||||
/* Provide description of available channels */
|
||||
indio_dev->channels = iio_dummy_channels;
|
||||
@ -632,7 +635,7 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
|
||||
|
||||
ret = iio_simple_dummy_events_register(indio_dev);
|
||||
if (ret < 0)
|
||||
goto error_free_device;
|
||||
goto error_free_name;
|
||||
|
||||
ret = iio_simple_dummy_configure_buffer(indio_dev);
|
||||
if (ret < 0)
|
||||
@ -649,11 +652,12 @@ static struct iio_sw_device *iio_dummy_probe(const char *name)
|
||||
iio_simple_dummy_unconfigure_buffer(indio_dev);
|
||||
error_unregister_events:
|
||||
iio_simple_dummy_events_unregister(indio_dev);
|
||||
error_free_name:
|
||||
kfree(indio_dev->name);
|
||||
error_free_device:
|
||||
iio_device_free(indio_dev);
|
||||
error_ret:
|
||||
error_free_swd:
|
||||
kfree(swd);
|
||||
error_kzalloc:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
|
@ -104,6 +104,7 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
|
||||
u16 tries = 20;
|
||||
u8 buffer[12];
|
||||
int ret;
|
||||
unsigned long time_left;
|
||||
|
||||
ret = i2c_smbus_write_byte_data(client, VL_REG_SYSRANGE_START, 1);
|
||||
if (ret < 0)
|
||||
@ -112,10 +113,8 @@ static int vl53l0x_read_proximity(struct vl53l0x_data *data,
|
||||
if (data->client->irq) {
|
||||
reinit_completion(&data->completion);
|
||||
|
||||
ret = wait_for_completion_timeout(&data->completion, HZ/10);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
else if (ret == 0)
|
||||
time_left = wait_for_completion_timeout(&data->completion, HZ/10);
|
||||
if (time_left == 0)
|
||||
return -ETIMEDOUT;
|
||||
|
||||
vl53l0x_clear_irq(data);
|
||||
|
@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface,
|
||||
if (!dev->tp_data)
|
||||
goto err_free_bt_buffer;
|
||||
|
||||
if (dev->bt_urb)
|
||||
if (dev->bt_urb) {
|
||||
usb_fill_int_urb(dev->bt_urb, udev,
|
||||
usb_rcvintpipe(udev, cfg->bt_ep),
|
||||
dev->bt_data, dev->cfg.bt_datalen,
|
||||
bcm5974_irq_button, dev, 1);
|
||||
|
||||
dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
|
||||
}
|
||||
|
||||
usb_fill_int_urb(dev->tp_urb, udev,
|
||||
usb_rcvintpipe(udev, cfg->tp_ep),
|
||||
dev->tp_data, dev->cfg.tp_datalen,
|
||||
bcm5974_irq_trackpad, dev, 1);
|
||||
|
||||
dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
|
||||
|
||||
/* create bcm5974 device */
|
||||
usb_make_path(udev, dev->phys, sizeof(dev->phys));
|
||||
strlcat(dev->phys, "/input0", sizeof(dev->phys));
|
||||
|
@ -1084,9 +1084,14 @@ static int of_count_icc_providers(struct device_node *np)
|
||||
{
|
||||
struct device_node *child;
|
||||
int count = 0;
|
||||
const struct of_device_id __maybe_unused ignore_list[] = {
|
||||
{ .compatible = "qcom,sc7180-ipa-virt" },
|
||||
{}
|
||||
};
|
||||
|
||||
for_each_available_child_of_node(np, child) {
|
||||
if (of_property_read_bool(child, "#interconnect-cells"))
|
||||
if (of_property_read_bool(child, "#interconnect-cells") &&
|
||||
likely(!of_match_node(ignore_list, child)))
|
||||
count++;
|
||||
count += of_count_icc_providers(child);
|
||||
}
|
||||
|
@@ -47,7 +47,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM
DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
DEFINE_QNODE(ipa_core_master, SC7180_MASTER_IPA_CORE, 1, 8, SC7180_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);

@@ -129,7 +128,6 @@ DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
DEFINE_QNODE(ipa_core_slave, SC7180_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);

@@ -160,7 +158,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);

@@ -372,22 +369,6 @@ static struct qcom_icc_desc sc7180_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};

static struct qcom_icc_bcm *ipa_virt_bcms[] = {
&bcm_ip0,
};

static struct qcom_icc_node *ipa_virt_nodes[] = {
[MASTER_IPA_CORE] = &ipa_core_master,
[SLAVE_IPA_CORE] = &ipa_core_slave,
};

static struct qcom_icc_desc sc7180_ipa_virt = {
.nodes = ipa_virt_nodes,
.num_nodes = ARRAY_SIZE(ipa_virt_nodes),
.bcms = ipa_virt_bcms,
.num_bcms = ARRAY_SIZE(ipa_virt_bcms),
};

static struct qcom_icc_bcm *mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,

@@ -611,8 +592,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sc7180_dc_noc},
{ .compatible = "qcom,sc7180-gem-noc",
.data = &sc7180_gem_noc},
{ .compatible = "qcom,sc7180-ipa-virt",
.data = &sc7180_ipa_virt},
{ .compatible = "qcom,sc7180-mc-virt",
.data = &sc7180_mc_virt},
{ .compatible = "qcom,sc7180-mmss-noc",
@@ -3505,6 +3505,8 @@ static int arm_smmu_device_probe(struct platform_device *pdev)

/* Base address */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
if (resource_size(res) < arm_smmu_resource_size(smmu)) {
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;

@@ -2117,11 +2117,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (err)
return err;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
ioaddr = res->start;
/*
* The resource size should effectively match the value of SMMU_TOP;
* stash that temporarily until we know PAGESIZE to validate it with.
@@ -7970,17 +7970,22 @@ EXPORT_SYMBOL(md_register_thread);

void md_unregister_thread(struct md_thread **threadp)
{
struct md_thread *thread = *threadp;
if (!thread)
return;
pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
struct md_thread *thread;

/*
* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
thread = *threadp;
if (!thread) {
spin_unlock(&pers_lock);
return;
}
*threadp = NULL;
spin_unlock(&pers_lock);

pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
kthread_stop(thread->tsk);
kfree(thread);
}
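Editorial note: the md hunk above closes a race by claiming the thread pointer under a lock before tearing the thread down outside that lock. A minimal user-space sketch of the same claim-under-lock pattern, using pthreads; names and structure are illustrative only and are not part of the patch:

```c
#include <pthread.h>
#include <stdlib.h>

struct worker {
	pthread_t tid;
	/* ... per-worker state ... */
};

static pthread_mutex_t worker_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach the shared pointer while holding the lock, then join and free
 * outside it, so a concurrent unregister cannot see a half-freed worker. */
static void worker_unregister(struct worker **wp)
{
	struct worker *w;

	pthread_mutex_lock(&worker_lock);
	w = *wp;
	if (!w) {
		pthread_mutex_unlock(&worker_lock);
		return;
	}
	*wp = NULL;
	pthread_mutex_unlock(&worker_lock);

	pthread_join(w->tid, NULL);	/* stand-in for kthread_stop() */
	free(w);
}
```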
@@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);

if (conf->nr_strip_zones == 1) {
conf->layout = RAID0_ORIG_LAYOUT;
} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = mddev->layout;
} else if (default_layout == RAID0_ORIG_LAYOUT ||
default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = default_layout;
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -ENOTSUPP;
goto abort;
}
/*
* now since we have the hard sector sizes, we can make sure
* chunk size is a multiple of that sector size

@@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
(unsigned long long)smallest->sectors);
}

if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) {
conf->layout = RAID0_ORIG_LAYOUT;
} else if (mddev->layout == RAID0_ORIG_LAYOUT ||
mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = mddev->layout;
} else if (default_layout == RAID0_ORIG_LAYOUT ||
default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
conf->layout = default_layout;
} else {
pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
mdname(mddev));
pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
err = -EOPNOTSUPP;
goto abort;
}

pr_debug("md/raid0:%s: done.\n", mdname(mddev));
*private_conf = conf;
@@ -667,6 +667,7 @@ static int rtsx_usb_probe(struct usb_interface *intf,
return 0;

out_init_fail:
usb_set_intfdata(ucr->pusb_intf, NULL);
usb_free_coherent(ucr->pusb_dev, IOBUF_SIZE, ucr->iobuf,
ucr->iobuf_dma);
return ret;
@@ -1349,17 +1349,18 @@ static int fastrpc_req_munmap_impl(struct fastrpc_user *fl,
struct fastrpc_req_munmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_buf *buf, *b;
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;

spin_lock(&fl->lock);
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
if ((buf->raddr == req->vaddrout) && (buf->size == req->size))
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req->vaddrout) && (iter->size == req->size)) {
buf = iter;
break;
buf = NULL;
}
}
spin_unlock(&fl->lock);
@@ -231,6 +231,11 @@ void lkdtm_ARRAY_BOUNDS(void)

not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
if (!not_checked || !checked) {
kfree(not_checked);
kfree(checked);
return;
}

pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -30,12 +30,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + 0;
return stack + unconst;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[32];
unsigned char buf[128];
int i;

/* Exercise stack to avoid everything living in registers. */

@@ -43,7 +43,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}

return trick_compiler(buf);
/*
* Put the target buffer in the middle of stack allocation
* so that we don't step on future stack users regardless
* of stack growth direction.
*/
return trick_compiler(&buf[(128/2)-32]);
}

static noinline void do_usercopy_stack(bool to_user, bool bad_frame)

@@ -66,6 +71,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}

#ifdef ARCH_HAS_CURRENT_STACK_POINTER
pr_info("stack : %px\n", (void *)current_stack_pointer);
#endif
pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));

user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -1452,8 +1452,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq)
err = mmc_cqe_recovery(host);
if (err)
mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY);
else
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);
mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY);

pr_debug("%s: CQE recovery done\n", mmc_hostname(host));
}
@@ -97,6 +97,33 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
return e;
}

/*
* has_enough_free_count - whether ubi has enough free pebs to fill fm pools
* @ubi: UBI device description object
* @is_wl_pool: whether UBI is filling wear leveling pool
*
* This helper function checks whether there are enough free pebs (deducted
* by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
* there is at least one of free pebs is filled into fm_wl_pool.
* For wear leveling pool, UBI should also reserve free pebs for bad pebs
* handling, because there maybe no enough free pebs for user volumes after
* producing new bad pebs.
*/
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
int fm_used = 0; // fastmap non anchor pebs.
int beb_rsvd_pebs;

if (!ubi->free.rb_node)
return false;

beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
fm_used = ubi->fm_size / ubi->leb_size - 1;

return ubi->free_count - beb_rsvd_pebs > fm_used;
}

/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object

@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++;
}
if (ubi->fm_next_anchor) {
wl_tree_add(ubi->fm_next_anchor, &ubi->free);
ubi->free_count++;
}

/* All available PEBs are in ubi->free, now is the time to get
/*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs.
*/
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);

for (;;) {
enough = 0;
if (pool->size < pool->max_size) {
if (!ubi->free.rb_node)
if (!has_enough_free_count(ubi, false))
break;

e = wl_get_wle(ubi);

@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++;

if (wl_pool->size < wl_pool->max_size) {
if (!ubi->free.rb_node ||
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
if (!has_enough_free_count(ubi, true))
break;

e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -286,20 +308,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
struct ubi_wl_entry *anchor;

spin_lock(&ubi->wl_lock);

/* Do we have a next anchor? */
if (!ubi->fm_next_anchor) {
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
if (!ubi->fm_next_anchor)
/* Tell wear leveling to produce a new anchor PEB */
ubi->fm_do_produce_anchor = 1;
/* Do we already have an anchor? */
if (ubi->fm_anchor) {
spin_unlock(&ubi->wl_lock);
return 0;
}

/* Do wear leveling to get a new anchor PEB or check the
* existing next anchor candidate.
*/
/* See if we can find an anchor PEB on the list of free PEBs */
anchor = ubi_wl_get_fm_peb(ubi, 1);
if (anchor) {
ubi->fm_anchor = anchor;
spin_unlock(&ubi->wl_lock);
return 0;
}

ubi->fm_do_produce_anchor = 1;
/* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;

@@ -381,11 +409,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL;
}

if (ubi->fm_next_anchor) {
return_unused_peb(ubi, ubi->fm_next_anchor);
ubi->fm_next_anchor = NULL;
}

if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
if (ubi->fm_next_anchor) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);

free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);

ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
@@ -491,8 +491,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The new anchor PEB used during fastmap update
* @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
* @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks

@@ -603,7 +602,6 @@ struct ubi_device {
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor;

/* Wear-leveling sub-system's stuff */
@@ -309,7 +309,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
@@ -688,16 +688,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,

#ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used);
if (e1 && ubi->fm_next_anchor &&
(ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
if (e1 && ubi->fm_anchor &&
(ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1;
/* fm_next_anchor is no longer considered a good anchor
* candidate.
/*
* fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks
* of this PEB.
*/
wl_tree_add(ubi->fm_next_anchor, &ubi->free);
ubi->fm_next_anchor = NULL;
wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->fm_anchor = NULL;
ubi->free_count++;
}

@@ -1086,12 +1086,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) {
spin_lock(&ubi->wl_lock);

if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) {
/* Abort anchor production, if needed it will be
/*
* Abort anchor production, if needed it will be
* enabled again in the wear leveling started below.
*/
ubi->fm_next_anchor = e;
ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
@@ -1981,8 +1981,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) {
err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i],
gphy_fw_np, i);
if (err)
if (err) {
of_node_put(gphy_fw_np);
goto remove_gphy;
}
i++;
}

@@ -3148,6 +3148,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
*/
child = of_get_child_by_name(np, "mdio");
err = mv88e6xxx_mdio_register(chip, child, false);
of_node_put(child);
if (err)
return err;
@@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio = mdiobus_alloc();
if (mdio == NULL) {
netdev_err(dev, "Error allocating MDIO bus\n");
return -ENOMEM;
ret = -ENOMEM;
goto put_node;
}

mdio->name = ALTERA_TSE_RESOURCE_NAME;

@@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
mdio->id);
goto out_free_mdio;
}
of_node_put(mdio_node);

if (netif_msg_drv(priv))
netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

@@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
out_free_mdio:
mdiobus_free(mdio);
mdio = NULL;
put_node:
of_node_put(mdio_node);
return ret;
}
@@ -1157,9 +1157,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,

switch (xcast_mode) {
case IXGBEVF_XCAST_MODE_NONE:
disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
disable = IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
enable = 0;
enable = IXGBE_VMOLR_BAM;
break;
case IXGBEVF_XCAST_MODE_MULTI:
disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;

@@ -1181,9 +1181,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
return -EPERM;
}

disable = 0;
disable = IXGBE_VMOLR_VPE;
enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE;
break;
default:
return -EOPNOTSUPP;
@@ -1966,6 +1966,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;

if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
return -EINVAL;

/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
@@ -2099,7 +2099,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev,
en_err(priv,
"mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
i, offset, ee->len - i, ret);
return 0;
return ret;
}

i += ret;
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
if (!tracer->owner)
return;

if (unlikely(!tracer->str_db.loaded))
goto arm;

block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;

@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
&tmp_trace_block[TRACES_PER_BLOCK - 1]);
}

arm:
mlx5_fw_tracer_arm(dev);
}

@@ -1138,8 +1142,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
queue_work(tracer->work_queue, &tracer->ownership_change_work);
break;
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
if (likely(tracer->str_db.loaded))
queue_work(tracer->work_queue, &tracer->handle_traces_work);
queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
@@ -4576,6 +4576,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)

unlock:
mutex_unlock(&priv->state_lock);

/* Need to fix some features. */
if (!err)
netdev_update_features(netdev);

return err;
}
@@ -1520,9 +1520,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
const struct mlx5_fs_vlan *vlan1)
{
u32 xored_actions = action1 ^ action2;
return vlan0->ethtype != vlan1->ethtype ||
vlan0->vid != vlan1->vid ||
vlan0->prio != vlan1->prio;
}

static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
const struct mlx5_flow_act *act2)
{
u32 action1 = act1->action;
u32 action2 = act2->action;
u32 xored_actions;

xored_actions = action1 ^ action2;

/* if one rule only wants to count, it's ok */
if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||

@@ -1539,6 +1552,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
return true;

if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
act1->pkt_reformat != act2->pkt_reformat)
return true;

if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
act1->modify_hdr != act2->modify_hdr)
return true;

if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
return true;

if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
return true;

return false;
}

@@ -1546,7 +1575,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
const struct mlx5_flow_context *flow_context,
const struct mlx5_flow_act *flow_act)
{
if (check_conflicting_actions(flow_act->action, fte->action.action)) {
if (check_conflicting_actions(flow_act, &fte->action)) {
mlx5_core_warn(get_dev(&fte->node),
"Found two FTEs with conflicting actions\n");
return -EEXIST;
@@ -43,11 +43,10 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
if (err && action) {
err = mlx5dr_action_destroy(action);
if (err) {
action = NULL;
mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
err);
}
if (err)
mlx5_core_err(ns->dev,
"Failed to destroy action (%d)\n", err);
action = NULL;
}
ft->fs_dr_table.miss_action = action;
if (old_miss_action) {
@@ -286,8 +286,6 @@ nfp_net_get_link_ksettings(struct net_device *netdev,

/* Init to unknowns */
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.port = PORT_OTHER;
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;

@@ -295,6 +293,8 @@ nfp_net_get_link_ksettings(struct net_device *netdev,
port = nfp_port_from_netdev(netdev);
eth_port = nfp_port_get_eth_port(port);
if (eth_port) {
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
AUTONEG_ENABLE : AUTONEG_DISABLE;
nfp_net_set_fec_link_mode(eth_port, cmd);
@@ -287,6 +287,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1;
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->tx_channel_offset = 0;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
rc = pci_enable_msi(efx->pci_dev);

@@ -307,6 +308,7 @@ int efx_probe_interrupts(struct efx_nic *efx)
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1;
efx->n_tx_channels = 1;
efx->tx_channel_offset = 1;
efx->n_xdp_channels = 0;
efx->xdp_channel_offset = efx->n_channels;
efx->legacy_irq = efx->pci_dev->irq;

@@ -858,10 +860,6 @@ int efx_set_channels(struct efx_nic *efx)
int xdp_queue_number;
int rc;

efx->tx_channel_offset =
efx_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0;

if (efx->xdp_tx_queue_count) {
EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

@@ -1522,7 +1522,7 @@ static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
return true;
return channel && channel->channel >= channel->efx->tx_channel_offset;
}

static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
@@ -1716,6 +1716,7 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
if (IS_ERR(cpts)) {
int ret = PTR_ERR(cpts);

of_node_put(node);
if (ret == -EOPNOTSUPP) {
dev_info(dev, "cpts disabled\n");
return 0;

@@ -2064,9 +2065,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!node)
return -ENOENT;
common->port_num = of_get_child_count(node);
of_node_put(node);
if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
return -ENOENT;
of_node_put(node);

if (common->port_num != 1)
return -EOPNOTSUPP;
@@ -137,6 +137,7 @@
#define DP83867_DOWNSHIFT_2_COUNT 2
#define DP83867_DOWNSHIFT_4_COUNT 4
#define DP83867_DOWNSHIFT_8_COUNT 8
#define DP83867_SGMII_AUTONEG_EN BIT(7)

/* CFG3 bits */
#define DP83867_CFG3_INT_OE BIT(7)

@@ -802,6 +803,32 @@ static int dp83867_phy_reset(struct phy_device *phydev)
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}

static void dp83867_link_change_notify(struct phy_device *phydev)
{
/* There is a limitation in DP83867 PHY device where SGMII AN is
* only triggered once after the device is booted up. Even after the
* PHY TPI is down and up again, SGMII AN is not triggered and
* hence no new in-band message from PHY to MAC side SGMII.
* This could cause an issue during power up, when PHY is up prior
* to MAC. At this condition, once MAC side SGMII is up, MAC side
* SGMII wouldn`t receive new in-band message from TI PHY with
* correct link status, speed and duplex info.
* Thus, implemented a SW solution here to retrigger SGMII Auto-Neg
* whenever there is a link change.
*/
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
int val = 0;

val = phy_clear_bits(phydev, DP83867_CFG2,
DP83867_SGMII_AUTONEG_EN);
if (val < 0)
return;

phy_set_bits(phydev, DP83867_CFG2,
DP83867_SGMII_AUTONEG_EN);
}
}

static struct phy_driver dp83867_driver[] = {
{
.phy_id = DP83867_PHY_ID,

@@ -826,6 +853,8 @@ static struct phy_driver dp83867_driver[] = {

.suspend = genphy_suspend,
.resume = genphy_resume,

.link_change_notify = dp83867_link_change_notify,
},
};
module_phy_driver(dp83867_driver);
@@ -1008,7 +1008,6 @@ int __init mdio_bus_init(void)

return ret;
}
EXPORT_SYMBOL_GPL(mdio_bus_init);

#if IS_ENABLED(CONFIG_PHYLIB)
void mdio_bus_exit(void)
@@ -304,6 +304,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
int r = 0;
struct device *dev = &hdev->ndev->dev;
struct nfc_evt_transaction *transaction;
u32 aid_len;
u8 params_len;

pr_debug("connectivity gate event: %x\n", event);

@@ -312,43 +314,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
r = nfc_se_connectivity(hdev->ndev, host);
break;
case ST21NFCA_EVT_TRANSACTION:
/*
* According to specification etsi 102 622
/* According to specification etsi 102 622
* 11.2.2.4 EVT_TRANSACTION Table 52
* Description Tag Length
* AID 81 5 to 16
* PARAMETERS 82 0 to 255
*
* The key differences are aid storage length is variably sized
* in the packet, but fixed in nfc_evt_transaction, and that the aid_len
* is u8 in the packet, but u32 in the structure, and the tags in
* the packet are not included in nfc_evt_transaction.
*
* size in bytes: 1 1 5-16 1 1 0-255
* offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4
* member name: aid_tag(M) aid_len aid params_tag(M) params_len params
* example: 0x81 5-16 X 0x82 0-255 X
*/
if (skb->len < NFC_MIN_AID_LENGTH + 2 &&
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;

transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
aid_len = skb->data[1];

if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid))
return -EPROTO;

params_len = skb->data[aid_len + 3];

/* Verify PARAMETERS tag is (82), and final check that there is enough
* space in the packet to read everything.
*/
if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) ||
(skb->len < aid_len + 4 + params_len))
return -EPROTO;

transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL);
if (!transaction)
return -ENOMEM;

transaction->aid_len = skb->data[1];
transaction->aid_len = aid_len;
transaction->params_len = params_len;

/* Checking if the length of the AID is valid */
if (transaction->aid_len > sizeof(transaction->aid))
return -EINVAL;

memcpy(transaction->aid, &skb->data[2],
transaction->aid_len);

/* Check next byte is PARAMETERS tag (82) */
if (skb->data[transaction->aid_len + 2] !=
NFC_EVT_TRANSACTION_PARAMS_TAG)
return -EPROTO;

transaction->params_len = skb->data[transaction->aid_len + 3];

/* Total size is allocated (skb->len - 2) minus fixed array members */
if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
return -EINVAL;

memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
memcpy(transaction->aid, &skb->data[2], aid_len);
memcpy(transaction->params, &skb->data[aid_len + 4], params_len);

r = nfc_se_transaction(hdev->ndev, host, transaction);
break;
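Editorial note: the EVT_TRANSACTION hunk above reorders the TLV checks so every offset is validated before it is read and before the allocation is sized. A minimal sketch of the same bounds discipline over a plain byte buffer; the names and the helper below are illustrative only, not the driver's API:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TAG_AID    0x81	/* ETSI TS 102 622, EVT_TRANSACTION */
#define TAG_PARAMS 0x82
#define AID_MAX    16

/* Validate "81 <aid_len> <aid...> 82 <params_len> <params...>" without
 * ever indexing past len; returns false for any truncated or malformed TLV. */
static bool evt_transaction_valid(const uint8_t *buf, size_t len)
{
	size_t aid_len, params_len;

	if (len < 2 || buf[0] != TAG_AID)
		return false;
	aid_len = buf[1];
	if (aid_len > AID_MAX || len < aid_len + 4)
		return false;
	if (buf[aid_len + 2] != TAG_PARAMS)
		return false;
	params_len = buf[aid_len + 3];
	return len >= aid_len + 4 + params_len;
}
```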
@@ -1192,12 +1192,6 @@ static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
goto err_disable_clocks;
}

ret = clk_prepare_enable(res->pipe_clk);
if (ret) {
dev_err(dev, "cannot prepare/enable pipe clock\n");
goto err_disable_clocks;
}

/* configure PCIe to RC mode */
writel(DEVICE_TYPE_RC, pcie->parf + PCIE20_PARF_DEVICE_TYPE);
@@ -151,7 +151,7 @@ config TCIC

config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
depends on MIPS_ALCHEMY && PCMCIA
depends on MIPS_DB1XXX && PCMCIA
help
Enable this driver of you want PCMCIA support on your Alchemy
Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
@@ -3141,7 +3141,7 @@ static int qcom_qmp_phy_power_on(struct phy *phy)

ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
goto err_lane_rst;
goto err_pcs_ready;

qcom_qmp_phy_configure(pcs_misc, cfg->regs, cfg->pcs_misc_tbl,
cfg->pcs_misc_tbl_num);
@@ -125,6 +125,7 @@ static int lp3943_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (err)
return err;

duty_ns = min(duty_ns, period_ns);
val = (u8)(duty_ns * LP3943_MAX_DUTY / period_ns);

return lp3943_write_byte(lp3943, reg_duty, val);
@@ -1388,9 +1388,9 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->name = node->name;

irq = irq_of_parse_and_map(node, 0);
if (irq < 0) {
if (!irq) {
dev_err(dev, "required smd interrupt missing\n");
ret = irq;
ret = -EINVAL;
goto put_node;
}
@@ -269,6 +269,8 @@ static int mtk_rtc_probe(struct platform_device *pdev)
return -ENOMEM;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
rtc->addr_base = res->start;

rtc->data = of_device_get_match_data(&pdev->dev);
@@ -1241,7 +1241,8 @@ static void myrb_cleanup(struct myrb_hba *cb)
myrb_unmap(cb);

if (cb->mmio_base) {
cb->disable_intr(cb->io_base);
if (cb->disable_intr)
cb->disable_intr(cb->io_base);
iounmap(cb->mmio_base);
}
if (cb->irq)

@@ -3515,9 +3516,13 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
mutex_init(&cb->dcmd_mutex);
mutex_init(&cb->dma_mutex);
cb->pdev = pdev;
cb->host = shost;

if (pci_enable_device(pdev))
goto failure;
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "Failed to enable PCI device\n");
scsi_host_put(shost);
return NULL;
}

if (privdata->hw_init == DAC960_PD_hw_init ||
privdata->hw_init == DAC960_P_hw_init) {
@@ -3511,7 +3511,6 @@ static int sd_probe(struct device *dev)
out_put:
put_disk(gd);
out_free:
sd_zbc_release_disk(sdkp);
kfree(sdkp);
out:
scsi_autopm_put_device(sdp);
@@ -148,12 +148,14 @@ static int __init rockchip_grf_init(void)
return -ENODEV;
if (!match || !match->data) {
pr_err("%s: missing grf data\n", __func__);
of_node_put(np);
return -EINVAL;
}

grf_info = match->data;

grf = syscon_node_to_regmap(np);
of_node_put(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
return PTR_ERR(grf);
@@ -1384,7 +1384,7 @@ anybuss_host_common_probe(struct device *dev,
goto err_device;
return cd;
err_device:
device_unregister(&cd->client->dev);
put_device(&cd->client->dev);
err_kthread:
kthread_stop(cd->qthread);
err_reset:
@@ -621,8 +621,8 @@ static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
break;
}
if (!data) {
dev_err(dai->dev, "%s:%s DATA connection missing\n",
dai->name, module->name);
dev_err(dai->dev, "%s DATA connection missing\n",
dai->name);
mutex_unlock(&codec->lock);
return -ENODEV;
}
@@ -651,9 +651,9 @@ static void rtllib_beacons_stop(struct rtllib_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);

ieee->beacon_txing = 0;
del_timer_sync(&ieee->beacon_timer);

spin_unlock_irqrestore(&ieee->beacon_lock, flags);
del_timer_sync(&ieee->beacon_timer);

}

@@ -528,9 +528,9 @@ static void ieee80211_beacons_stop(struct ieee80211_device *ieee)
spin_lock_irqsave(&ieee->beacon_lock, flags);

ieee->beacon_txing = 0;
del_timer_sync(&ieee->beacon_timer);

spin_unlock_irqrestore(&ieee->beacon_lock, flags);
del_timer_sync(&ieee->beacon_timer);
}

void ieee80211_stop_send_beacons(struct ieee80211_device *ieee)
@@ -332,7 +332,6 @@ void r8712_free_drv_sw(struct _adapter *padapter)
r8712_free_evt_priv(&padapter->evtpriv);
r8712_DeInitSwLeds(padapter);
r8712_free_mlme_priv(&padapter->mlmepriv);
r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
_r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);

@@ -266,6 +266,7 @@ static uint r8712_usb_dvobj_init(struct _adapter *padapter)

static void r8712_usb_dvobj_deinit(struct _adapter *padapter)
{
r8712_free_io_queue(padapter);
}

void rtl871x_intf_stop(struct _adapter *padapter)

@@ -303,9 +304,6 @@ void r871x_dev_unload(struct _adapter *padapter)
rtl8712_hal_deinit(padapter);
}

/*s6.*/
if (padapter->dvobj_deinit)
padapter->dvobj_deinit(padapter);
padapter->bup = false;
}
}

@@ -541,13 +539,13 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
} else {
AutoloadFail = false;
}
if (((mac[0] == 0xff) && (mac[1] == 0xff) &&
if ((!AutoloadFail) ||
((mac[0] == 0xff) && (mac[1] == 0xff) &&
(mac[2] == 0xff) && (mac[3] == 0xff) &&
(mac[4] == 0xff) && (mac[5] == 0xff)) ||
((mac[0] == 0x00) && (mac[1] == 0x00) &&
(mac[2] == 0x00) && (mac[3] == 0x00) &&
(mac[4] == 0x00) && (mac[5] == 0x00)) ||
(!AutoloadFail)) {
(mac[4] == 0x00) && (mac[5] == 0x00))) {
mac[0] = 0x00;
mac[1] = 0xe0;
mac[2] = 0x4c;

@@ -610,6 +608,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* Stop driver mlme relation timer */
r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
if (padapter->dvobj_deinit)
padapter->dvobj_deinit(padapter);
r8712_free_drv_sw(padapter);
free_netdev(pnetdev);
@@ -29,7 +29,8 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
__le32 data;
int status;
__le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;

request = 0x05;

@@ -37,8 +38,10 @@ static u8 usb_read8(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 1;
r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
requesttype);
status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
&data, len, requesttype);
if (status < 0)
return 0;
return (u8)(le32_to_cpu(data) & 0x0ff);
}

@@ -49,7 +52,8 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
__le32 data;
int status;
__le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;

request = 0x05;

@@ -57,8 +61,10 @@ static u16 usb_read16(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 2;
r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
requesttype);
status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
&data, len, requesttype);
if (status < 0)
return 0;
return (u16)(le32_to_cpu(data) & 0xffff);
}

@@ -69,7 +75,8 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
u16 wvalue;
u16 index;
u16 len;
__le32 data;
int status;
__le32 data = 0;
struct intf_priv *intfpriv = intfhdl->pintfpriv;

request = 0x05;

@@ -77,8 +84,10 @@ static u32 usb_read32(struct intf_hdl *intfhdl, u32 addr)
index = 0;
wvalue = (u16)(addr & 0x0000ffff);
len = 4;
r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index, &data, len,
requesttype);
status = r8712_usbctrl_vendorreq(intfpriv, request, wvalue, index,
&data, len, requesttype);
if (status < 0)
return 0;
return le32_to_cpu(data);
}
@@ -407,6 +407,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
err_tty_register_device_failed:
free_irq(irq, qtty);
err_dec_line_count:
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();

@@ -428,6 +429,7 @@ static int goldfish_tty_remove(struct platform_device *pdev)
iounmap(qtty->base);
qtty->base = NULL;
free_irq(qtty->irq, pdev);
tty_port_destroy(&qtty->port);
goldfish_tty_current_line_count--;
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();