This is the 5.4.36 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl6pkDUACgkQONu9yGCS
aT7cRxAAgnedn6pSj8x/LcbtqeQv52CDVXF0j1xOeK+o8hbIbvkjqAB1ZpPwAXaK
PPiI34lzLBRo9i5nw/rOL7TR7q+uqLE/bT4Z8rrlbeq85SmP8PI2HwpPnRc3Iwhi
RReIq00q5gBqF6AL7+Of3dEytrpOtyzf3Ff/3vadJ2WZEcblFoemGDjMbubaoI9E
e2uE6WSe4tYk/pbLu5HduMQ46YGsWvTJAnN0RIefX4WsGmK0sCJRmJ78qIabWTct
rUxoqhNHshPam7Qm6xVXe1pHa3U7zMNNtG52aJwoDzZ32rOTpBJly0F5FYYYW01Z
zZbY/8eeGn4OIwGr+wvw/XmB0uYlBw35HH8f5OYpvSnfgjmT7wa8QmRJAS6um7dD
elNqO1QuLa8lA/Tm5O9lzNIc3Vko322XQmGlsIU2xVBX0EdTig4Io+xuJkMMkU7q
JJF4Ic4xOYa330TZBIKEoXgf4hGhNgKKRML00yhDNWROWXdB9W9tLbFELDiiiF+K
ooeTB4aCsS2PheS/kZFL2U1RKlnMzBhYeZzPAg4ulfaVMHo5Zp8mBv4L17j9yU0+
MtKtS9tSV0SiDe2SpDCRKSMx+m5jpmgXxuX4HlkbSJ4d/5oAwNKQOTQj9xt3UmbL
JUghr8OOyk6V2wwgW1tFkTcFnzqCqzmvSeJf6AvBSr7ZHnqH130=
=7Fsb
-----END PGP SIGNATURE-----

Merge 5.4.36 into android-5.4-stable

Changes in 5.4.36
    ext4: fix extent_status fragmentation for plain files
    f2fs: fix to avoid memory leakage in f2fs_listxattr
    net, ip_tunnel: fix interface lookup with no key
    arm64: errata: Hide CTR_EL0.DIC on systems affected by Neoverse-N1 #1542419
    arm64: Fake the IminLine size on systems affected by Neoverse-N1 #1542419
    arm64: compat: Workaround Neoverse-N1 #1542419 for compat user-space
    arm64: Silence clang warning on mismatched value/register sizes
    tools/testing/nvdimm: Fix compilation failure without CONFIG_DEV_DAX_PMEM_COMPAT
    watchdog: reset last_hw_keepalive time at start
    scsi: lpfc: Fix kasan slab-out-of-bounds error in lpfc_unreg_login
    scsi: lpfc: Fix crash after handling a pci error
    scsi: lpfc: Fix crash in target side cable pulls hitting WAIT_FOR_UNREG
    scsi: libfc: If PRLI rejected, move rport to PLOGI state
    ceph: return ceph_mdsc_do_request() errors from __get_parent()
    ceph: don't skip updating wanted caps when cap is stale
    pwm: rcar: Fix late Runtime PM enablement
    nvme-tcp: fix possible crash in write_zeroes processing
    scsi: iscsi: Report unbind session event when the target has been removed
    tools/test/nvdimm: Fix out of tree build
    ASoC: Intel: atom: Take the drv->lock mutex before calling sst_send_slot_map()
    nvme: fix deadlock caused by ANA update wrong locking
    drm/amd/display: Update stream adjust in dc_stream_adjust_vmin_vmax
    dma-direct: fix data truncation in dma_direct_get_required_mask()
    kernel/gcov/fs.c: gcov_seq_next() should increase position index
    selftests: kmod: fix handling test numbers above 9
    ipc/util.c: sysvipc_find_ipc() should increase position index
    kconfig: qconf: Fix a few alignment issues
    lib/raid6/test: fix build on distros whose /bin/sh is not bash
    s390/cio: generate delayed uevent for vfio-ccw subchannels
    s390/cio: avoid duplicated 'ADD' uevents
    loop: Better discard support for block devices
    Revert "powerpc/64: irq_work avoid interrupt when called with hardware irqs enabled"
    powerpc/pseries: Fix MCE handling on pseries
    nvme: fix compat address handling in several ioctls
    pwm: renesas-tpu: Fix late Runtime PM enablement
    pwm: bcm2835: Dynamically allocate base
    perf/core: Disable page faults when getting phys address
    drm/amd/display: Calculate scaling ratios on every medium/full update
    ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN MPWIN895CL tablet
    ALSA: usb-audio: Add Pioneer DJ DJM-250MK2 quirk
    xhci: Ensure link state is U3 after setting USB_SS_PORT_LS_U3
    xhci: Wait until link state trainsits to U0 after setting USB_SS_PORT_LS_U0
    xhci: Finetune host initiated USB3 rootport link suspend and resume
    drm/amd/display: Not doing optimize bandwidth if flip pending.
    PCI/PM: Add pcie_wait_for_link_delay()
    libbpf: Fix readelf output parsing on powerpc with recent binutils
    PCI: pciehp: Prevent deadlock on disconnect
    ASoC: SOF: trace: fix unconditional free in trace release
    tracing/selftests: Turn off timeout setting
    virtio-blk: improve virtqueue error to BLK_STS
    scsi: smartpqi: fix controller lockup observed during force reboot
    scsi: smartpqi: fix call trace in device discovery
    scsi: smartpqi: fix problem with unique ID for physical device
    PCI/ASPM: Allow re-enabling Clock PM
    PCI/PM: Add missing link delays required by the PCIe spec
    cxgb4: fix adapter crash due to wrong MC size
    cxgb4: fix large delays in PTP synchronization
    ipv4: Update fib_select_default to handle nexthop objects
    ipv6: fix restrict IPV6_ADDRFORM operation
    macsec: avoid to set wrong mtu
    macvlan: fix null dereference in macvlan_device_event()
    mlxsw: Fix some IS_ERR() vs NULL bugs
    net: bcmgenet: correct per TX/RX ring statistics
    net/mlx4_en: avoid indirect call in TX completion
    net: netrom: Fix potential nr_neigh refcnt leak in nr_add_node
    net: openvswitch: ovs_ct_exit to be done under ovs_lock
    net: stmmac: dwmac-meson8b: Add missing boundary to RGMII TX clock array
    net/x25: Fix x25_neigh refcnt leak when receiving frame
    sched: etf: do not assume all sockets are full blown
    selftests: Fix suppress test in fib_tests.sh
    tcp: cache line align MAX_TCP_HEADER
    team: fix hang in team_mode_get()
    vrf: Fix IPv6 with qdisc and xfrm
    net: dsa: b53: Lookup VID in ARL searches when VLAN is enabled
    net: dsa: b53: Fix valid setting for MDB entries
    net: dsa: b53: Fix ARL register definitions
    net: dsa: b53: Rework ARL bin logic
    net: dsa: b53: b53_arl_rw_op() needs to select IVL or SVL
    vxlan: use the correct nlattr array in NL_SET_ERR_MSG_ATTR
    geneve: use the correct nlattr array in NL_SET_ERR_MSG_ATTR
    xfrm: Always set XFRM_TRANSFORMED in xfrm{4,6}_output_finish
    vrf: Check skb for XFRM_TRANSFORMED flag
    KEYS: Avoid false positive ENOMEM error on key read
    ALSA: hda: Remove ASUS ROG Zenith from the blacklist
    ALSA: usb-audio: Add static mapping table for ALC1220-VB-based mobos
    ALSA: usb-audio: Add connector notifier delegation
    iio: core: remove extra semi-colon from devm_iio_device_register() macro
    iio: st_sensors: rely on odr mask to know if odr can be set
    iio: adc: stm32-adc: fix sleep in atomic context
    iio: adc: ti-ads8344: properly byte swap value
    iio: xilinx-xadc: Fix ADC-B powerdown
    iio: xilinx-xadc: Fix clearing interrupt when enabling trigger
    iio: xilinx-xadc: Fix sequencer configuration for aux channels in simultaneous mode
    iio: xilinx-xadc: Make sure not exceed maximum samplerate
    USB: sisusbvga: Change port variable from signed to unsigned
    USB: Add USB_QUIRK_DELAY_CTRL_MSG and USB_QUIRK_DELAY_INIT for Corsair K70 RGB RAPIDFIRE
    USB: early: Handle AMD's spec-compliant identifiers, too
    USB: core: Fix free-while-in-use bug in the USB S-Glibrary
    USB: hub: Fix handling of connect changes during sleep
    USB: hub: Revert commit bd0e6c9614b9 ("usb: hub: try old enumeration scheme first for high speed devices")
    tty: serial: owl: add "much needed" clk_prepare_enable()
    vmalloc: fix remap_vmalloc_range() bounds checks
    staging: gasket: Fix incongruency in handling of sysfs entries creation
    coredump: fix null pointer dereference on coredump
    mm/hugetlb: fix a addressing exception caused by huge_pte_offset
    mm/ksm: fix NULL pointer dereference when KSM zero page is enabled
    tools/vm: fix cross-compile build
    ALSA: usx2y: Fix potential NULL dereference
    ALSA: hda/realtek - Fix unexpected init_amp override
    ALSA: hda/realtek - Add new codec supported for ALC245
    ALSA: hda/hdmi: Add module option to disable audio component binding
    ALSA: usb-audio: Fix usb audio refcnt leak when getting spdif
    ALSA: usb-audio: Filter out unsupported sample rates on Focusrite devices
    tpm/tpm_tis: Free IRQ if probing fails
    tpm: fix wrong return value in tpm_pcr_extend
    tpm: ibmvtpm: retry on H_CLOSED in tpm_ibmvtpm_send()
    KVM: s390: Return last valid slot if approx index is out-of-bounds
    KVM: Check validity of resolved slot when searching memslots
    KVM: VMX: Enable machine check support for 32bit targets
    tty: hvc: fix buffer overflow during hvc_alloc().
    tty: rocket, avoid OOB access
    usb-storage: Add unusual_devs entry for JMicron JMS566
    signal: Avoid corrupting si_pid and si_uid in do_notify_parent
    audit: check the length of userspace generated audit records
    ASoC: dapm: fixup dapm kcontrol widget
    mac80211: populate debugfs only after cfg80211 init
    SUNRPC: Fix backchannel RPC soft lockups
    iwlwifi: pcie: actually release queue memory in TVQM
    iwlwifi: mvm: beacon statistics shouldn't go backwards
    iwlwifi: mvm: limit maximum queue appropriately
    iwlwifi: mvm: Do not declare support for ACK Enabled Aggregation
    iwlwifi: mvm: fix inactive TID removal return value usage
    cifs: fix uninitialised lease_key in open_shroot()
    ARM: imx: provide v7_cpu_resume() only on ARM_CPU_SUSPEND=y
    powerpc/setup_64: Set cache-line-size based on cache-block-size
    staging: comedi: dt2815: fix writing hi byte of analog output
    staging: comedi: Fix comedi_device refcnt leak in comedi_open
    vt: don't hardcode the mem allocation upper bound
    vt: don't use kmalloc() for the unicode screen buffer
    staging: vt6656: Don't set RCR_MULTICAST or RCR_BROADCAST by default.
    staging: vt6656: Fix calling conditions of vnt_set_bss_mode
    staging: vt6656: Fix drivers TBTT timing counter.
    staging: vt6656: Fix pairwise key entry save.
    staging: vt6656: Power save stop wake_up_count wrap around.
    cdc-acm: close race betrween suspend() and acm_softint
    cdc-acm: introduce a cool down
    UAS: no use logging any details in case of ENODEV
    UAS: fix deadlock in error handling and PM flushing work
    fpga: dfl: pci: fix return value of cci_pci_sriov_configure
    usb: dwc3: gadget: Fix request completion check
    usb: f_fs: Clear OS Extended descriptor counts to zero in ffs_data_reset()
    usb: typec: tcpm: Ignore CC and vbus changes in PORT_RESET change
    usb: typec: altmode: Fix typec_altmode_get_partner sometimes returning an invalid pointer
    xhci: Fix handling halted endpoint even if endpoint ring appears empty
    xhci: prevent bus suspend if a roothub port detected a over-current condition
    xhci: Don't clear hub TT buffer on ep0 protocol stall
    serial: sh-sci: Make sure status register SCxSR is read in correct sequence
    Revert "serial: uartps: Fix uartps_major handling"
    Revert "serial: uartps: Use the same dynamic major number for all ports"
    Revert "serial: uartps: Fix error path when alloc failed"
    Revert "serial: uartps: Do not allow use aliases >= MAX_UART_INSTANCES"
    Revert "serial: uartps: Change uart ID port allocation"
    Revert "serial: uartps: Move Port ID to device data structure"
    Revert "serial: uartps: Register own uart console and driver structures"
    powerpc/kuap: PPC_KUAP_DEBUG should depend on PPC_KUAP
    powerpc/mm: Fix CONFIG_PPC_KUAP_DEBUG on PPC32
    compat: ARM64: always include asm-generic/compat.h
    s390/mm: fix page table upgrade vs 2ndary address mode accesses
    Linux 5.4.36

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Idd8d97e7f00eb7389e184fc4186c4c0dd14f1704
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
@@ -5011,8 +5011,7 @@
 	usbcore.old_scheme_first=
 			[USB] Start with the old device initialization
-			scheme, applies only to low and full-speed devices
-			(default 0 = off).
+			scheme (default 0 = off).
 
 	usbcore.usbfs_memory_mb=
 			[USB] Memory limit (in MB) for buffers allocated by
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
@@ -88,6 +88,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 35
+SUBLEVEL = 36
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
@@ -91,8 +91,10 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
 obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
 endif
+ifeq ($(CONFIG_ARM_CPU_SUSPEND),y)
 AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
 obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
+endif
 obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
 
 obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
@@ -562,6 +562,22 @@ config ARM64_ERRATUM_1463225
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_1542419
+	bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
+	default y
+	help
+	  This option adds a workaround for ARM Neoverse-N1 erratum
+	  1542419.
+
+	  Affected Neoverse-N1 cores could execute a stale instruction when
+	  modified by another CPU. The workaround depends on a firmware
+	  counterpart.
+
+	  Workaround the issue by hiding the DIC feature from EL0. This
+	  forces user-space to perform cache maintenance.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
@@ -11,6 +11,7 @@
 #define CTR_L1IP_MASK		3
 #define CTR_DMINLINE_SHIFT	16
 #define CTR_IMINLINE_SHIFT	0
+#define CTR_IMINLINE_MASK	0xf
 #define CTR_ERG_SHIFT		20
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
@@ -18,7 +19,7 @@
 #define CTR_DIC_SHIFT		29
 
 #define CTR_CACHE_MINLINE_MASK	\
-	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+	(0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
 
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
@@ -4,6 +4,9 @@
  */
 #ifndef __ASM_COMPAT_H
 #define __ASM_COMPAT_H
+
+#include <asm-generic/compat.h>
+
 #ifdef CONFIG_COMPAT
 
 /*
@@ -13,8 +16,6 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 
-#include <asm-generic/compat.h>
-
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
 #define COMPAT_UTS_MACHINE	"armv8b\0\0"
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
@@ -54,6 +54,7 @@
 #define ARM64_WORKAROUND_1463225		44
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM	45
 #define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM	46
+#define ARM64_WORKAROUND_1542419		47
 
 /* kabi: reserve 48 - 62 for future cpu capabilities */
 #define ARM64_NCAPS				62
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
@@ -88,13 +88,21 @@ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
 }
 
 static void
-cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 {
+	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+	bool enable_uct_trap = false;
+
 	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
-	if ((read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
-	    (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask))
+	if ((read_cpuid_cachetype() & mask) !=
+	    (arm64_ftr_reg_ctrel0.sys_val & mask))
+		enable_uct_trap = true;
+
+	/* ... or if the system is affected by an erratum */
+	if (cap->capability == ARM64_WORKAROUND_1542419)
+		enable_uct_trap = true;
+
+	if (enable_uct_trap)
 		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 }
 
@@ -651,6 +659,18 @@ needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
 	return false;
 }
 
+static bool __maybe_unused
+has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
+				int scope)
+{
+	u32 midr = read_cpuid_id();
+	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
+	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
+
+	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+	return is_midr_in_range(midr, &range) && has_dic;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -927,6 +947,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
 		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1542419
+	{
+		/* we depend on the firmware portion for correctness */
+		.desc = "ARM erratum 1542419 (kernel portion)",
+		.capability = ARM64_WORKAROUND_1542419,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = has_neoverse_n1_erratum_1542419,
+		.cpu_enable = cpu_enable_trap_ctr_access,
+	},
+#endif
 	{
 	}
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/compat.h>
+#include <linux/cpufeature.h>
 #include <linux/personality.h>
 #include <linux/sched.h>
 #include <linux/sched/signal.h>
@@ -17,6 +18,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/system_misc.h>
+#include <asm/tlbflush.h>
 #include <asm/unistd.h>
 
 static long
@@ -30,6 +32,15 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
 		if (fatal_signal_pending(current))
 			return 0;
 
+		if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+			/*
+			 * The workaround requires an inner-shareable tlbi.
+			 * We pick the reserved-ASID to minimise the impact.
+			 */
+			__tlbi(aside1is, __TLBI_VADDR(0, 0));
+			dsb(ish);
+		}
+
 		ret = __flush_cache_user_range(start, start + chunk);
 		if (ret)
 			return ret;
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
@@ -470,6 +470,15 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
 	int rt = ESR_ELx_SYS64_ISS_RT(esr);
 	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
 
+	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
+		/* Hide DIC so that we can trap the unnecessary maintenance...*/
+		val &= ~BIT(CTR_DIC_SHIFT);
+
+		/* ... and fake IminLine to reduce the number of traps. */
+		val &= ~CTR_IMINLINE_MASK;
+		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
+	}
+
 	pt_regs_write_reg(regs, rt, val);
 
 	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
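For context on what this emulation changes: EL0 code conventionally derives its I-cache maintenance stride from CTR_EL0.IminLine. A minimal user-space sketch of that pattern (a hypothetical illustration, not part of this patch; assumes an arm64 Linux target) shows what the faked IminLine affects — with PAGE_SHIFT = 12 the emulated value makes the stride read as one page:

    #include <stdio.h>

    int main(void)
    {
        unsigned long ctr;

        /* EL0 read of CTR_EL0; with SCTLR_EL1.UCT cleared this traps
         * to the kernel and is emulated by ctr_read_handler() above. */
        __asm__ volatile("mrs %0, ctr_el0" : "=r" (ctr));

        /* IminLine = log2(line size in 4-byte words), so bytes = 4 << n */
        unsigned long iminline = ctr & 0xf;
        printf("I-cache maintenance stride: %lu bytes\n", 4UL << iminline);
        return 0;
    }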
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
@@ -705,7 +705,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
 	stw	r10,_CCR(r1)
 	stw	r1,KSP(r3)	/* Set old stack pointer */
 
-	kuap_check r2, r4
+	kuap_check r2, r0
 #ifdef CONFIG_SMP
 	/* We need a sync somewhere here to make sure that if the
 	 * previous task gets rescheduled on another CPU, it sees all
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
@@ -541,6 +541,8 @@ static bool __init parse_cache_info(struct device_node *np,
 	lsizep = of_get_property(np, propnames[3], NULL);
 	if (bsizep == NULL)
 		bsizep = lsizep;
+	if (lsizep == NULL)
+		lsizep = bsizep;
 	if (lsizep != NULL)
 		lsize = be32_to_cpu(*lsizep);
 	if (bsizep != NULL)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
@@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void)
 		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-void arch_irq_work_raise(void)
-{
-	preempt_disable();
-	set_irq_work_pending_flag();
-	/*
-	 * Non-nmi code running with interrupts disabled will replay
-	 * irq_happened before it re-enables interrupts, so set the
-	 * decrementer there instead of causing a hardware exception
-	 * which would immediately hit the masked interrupt handler
-	 * and have the net effect of setting the decrementer in
-	 * irq_happened.
-	 *
-	 * NMI interrupts can not check this when they return, so the
-	 * decrementer hardware exception is raised, which will fire
-	 * when interrupts are next enabled.
-	 *
-	 * BookE does not support this yet, it must audit all NMI
-	 * interrupt handlers to ensure they call nmi_enter() so this
-	 * check would be correct.
-	 */
-	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
-		set_dec(1);
-	} else {
-		hard_irq_disable();
-		local_paca->irq_happened |= PACA_IRQ_DEC;
-	}
-	preempt_enable();
-}
-
 #else /* 32-bit */
 
 DEFINE_PER_CPU(u8, irq_work_pending);
@@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
-void arch_irq_work_raise(void)
-{
-	preempt_disable();
-	set_irq_work_pending_flag();
-	set_dec(1);
-	preempt_enable();
-}
-
 #endif /* 32 vs 64 bit */
 
+void arch_irq_work_raise(void)
+{
+	/*
+	 * 64-bit code that uses irq soft-mask can just cause an immediate
+	 * interrupt here that gets soft masked, if this is called under
+	 * local_irq_disable(). It might be possible to prevent that happening
+	 * by noticing interrupts are disabled and setting decrementer pending
+	 * to be replayed when irqs are enabled. The problem there is that
+	 * tracing can call irq_work_raise, including in code that does low
+	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
+	 * which could get tangled up if we're messing with the same state
+	 * here.
+	 */
+	preempt_disable();
+	set_irq_work_pending_flag();
+	set_dec(1);
+	preempt_enable();
+}
+
 #else  /* CONFIG_IRQ_WORK */
 
 #define test_irq_work_pending()	0
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
@@ -389,7 +389,7 @@ config PPC_KUAP
 
 config PPC_KUAP_DEBUG
 	bool "Extra debugging for Kernel Userspace Access Protection"
-	depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+	depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
 	help
 	  Add extra debugging for Kernel Userspace Access Protection (KUAP)
 	  If you're unsure, say N.
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
@@ -683,6 +683,17 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
 #endif
 
 out:
+	/*
+	 * Enable translation as we will be accessing per-cpu variables
+	 * in save_mce_event() which may fall outside RMO region, also
+	 * leave it enabled because subsequently we will be queuing work
+	 * to workqueues where again per-cpu variables accessed, besides
+	 * fwnmi_release_errinfo() crashes when called in realmode on
+	 * pseries.
+	 * Note: All the realmode handling like flushing SLB entries for
+	 * SLB multihit is done by now.
+	 */
+	mtmsr(mfmsr() | MSR_IR | MSR_DR);
 	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
 			&mce_err, regs->nip, eaddr, paddr);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
@@ -1932,6 +1932,9 @@ static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
 			start = slot + 1;
 	}
 
+	if (start >= slots->used_slots)
+		return slots->used_slots - 1;
+
 	if (gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
 		atomic_set(&slots->lru_slot, start);
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
@@ -64,10 +64,13 @@ mm_segment_t enable_sacf_uaccess(void)
 {
 	mm_segment_t old_fs;
 	unsigned long asce, cr;
+	unsigned long flags;
 
 	old_fs = current->thread.mm_segment;
 	if (old_fs & 1)
 		return old_fs;
+	/* protect against a concurrent page table upgrade */
+	local_irq_save(flags);
 	current->thread.mm_segment |= 1;
 	asce = S390_lowcore.kernel_asce;
 	if (likely(old_fs == USER_DS)) {
@@ -83,6 +86,7 @@ mm_segment_t enable_sacf_uaccess(void)
 		__ctl_load(asce, 7, 7);
 		set_cpu_flag(CIF_ASCE_SECONDARY);
 	}
+	local_irq_restore(flags);
 	return old_fs;
 }
 EXPORT_SYMBOL(enable_sacf_uaccess);
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
@@ -70,8 +70,20 @@ static void __crst_table_upgrade(void *arg)
 {
 	struct mm_struct *mm = arg;
 
-	if (current->active_mm == mm)
-		set_user_asce(mm);
+	/* we must change all active ASCEs to avoid the creation of new TLBs */
+	if (current->active_mm == mm) {
+		S390_lowcore.user_asce = mm->context.asce;
+		if (current->thread.mm_segment == USER_DS) {
+			__ctl_load(S390_lowcore.user_asce, 1, 1);
+			/* Mark user-ASCE present in CR1 */
+			clear_cpu_flag(CIF_ASCE_PRIMARY);
+		}
+		if (current->thread.mm_segment == USER_DS_SACF) {
+			__ctl_load(S390_lowcore.user_asce, 7, 7);
+			/* enable_sacf_uaccess does all or nothing */
+			WARN_ON(!test_cpu_flag(CIF_ASCE_SECONDARY));
+		}
+	}
 	__tlb_flush_local();
 }
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
@@ -4566,7 +4566,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
  */
 static void kvm_machine_check(void)
 {
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_MCE)
 	struct pt_regs regs = {
 		.cs = 3, /* Fake ring 3 no matter what the guest ran on */
 		.flags = X86_EFLAGS_IF,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -429,11 +429,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 	 * information.
 	 */
 	struct file *file = lo->lo_backing_file;
+	struct request_queue *q = lo->lo_queue;
 	int ret;
 
 	mode |= FALLOC_FL_KEEP_SIZE;
 
-	if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+	if (!blk_queue_discard(q)) {
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
@@ -865,28 +866,47 @@ static void loop_config_discard(struct loop_device *lo)
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
 
+	/*
+	 * If the backing device is a block device, mirror its zeroing
+	 * capability. Set the discard sectors to the block device's zeroing
+	 * capabilities because loop discards result in blkdev_issue_zeroout(),
+	 * not blkdev_issue_discard(). This maintains consistent behavior with
+	 * file-backed loop devices: discarded regions read back as zero.
+	 */
+	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+		struct request_queue *backingq;
+
+		backingq = bdev_get_queue(inode->i_bdev);
+		blk_queue_max_discard_sectors(q,
+			backingq->limits.max_write_zeroes_sectors);
+
+		blk_queue_max_write_zeroes_sectors(q,
+			backingq->limits.max_write_zeroes_sectors);
+
 	/*
 	 * We use punch hole to reclaim the free space used by the
 	 * image a.k.a. discard. However we do not support discard if
 	 * encryption is enabled, because it may give an attacker
 	 * useful information.
 	 */
-	if ((!file->f_op->fallocate) ||
-	    lo->lo_encrypt_key_size) {
+	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
 		q->limits.discard_granularity = 0;
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
-		return;
-	}
 
-	q->limits.discard_granularity = inode->i_sb->s_blocksize;
-	q->limits.discard_alignment = 0;
+	} else {
+		q->limits.discard_granularity = inode->i_sb->s_blocksize;
+		q->limits.discard_alignment = 0;
 
-	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+		blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+	}
+
+	if (q->limits.max_write_zeroes_sectors)
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
@@ -347,9 +347,14 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 		if (err == -ENOSPC)
 			blk_mq_stop_hw_queue(hctx);
 		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
-		if (err == -ENOMEM || err == -ENOSPC)
+		switch (err) {
+		case -ENOSPC:
 			return BLK_STS_DEV_RESOURCE;
-		return BLK_STS_IOERR;
+		case -ENOMEM:
+			return BLK_STS_RESOURCE;
+		default:
+			return BLK_STS_IOERR;
+		}
 	}
 
 	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
@@ -322,7 +322,7 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
 
 	for (i = 0; i < chip->nr_allocated_banks; i++) {
 		if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
-			rc = EINVAL;
+			rc = -EINVAL;
 			goto out;
 		}
 	}
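The one-character fix above matters because kernel callers test for negative errno values; a toy user-space sketch (illustrative only, reusing the same convention) shows how a bare positive EINVAL slips past the error check:

    #include <errno.h>
    #include <stdio.h>

    /* Kernel-style helper: errors are returned as negative errno values. */
    static int check_bank(int matches)
    {
        if (!matches)
            return -EINVAL;	/* the fix: negative, not bare EINVAL */
        return 0;
    }

    int main(void)
    {
        int rc = check_bank(0);

        if (rc < 0)		/* a positive EINVAL would skip this branch */
            printf("rejected: rc=%d\n", rc);
        return 0;
    }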
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2012 IBM Corporation
+ * Copyright (C) 2012-2020 IBM Corporation
  *
  * Author: Ashley Lai <ashleydlai@gmail.com>
 *
@@ -133,6 +133,64 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	return len;
 }
 
+/**
+ * ibmvtpm_crq_send_init - Send a CRQ initialize message
+ * @ibmvtpm:	vtpm device struct
+ *
+ * Return:
+ *	0 on success.
+ *	Non-zero on failure.
+ */
+static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
+{
+	int rc;
+
+	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
+	if (rc != H_SUCCESS)
+		dev_err(ibmvtpm->dev,
+			"%s failed rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+/**
+ * tpm_ibmvtpm_resume - Resume from suspend
+ *
+ * @dev:	device struct
+ *
+ * Return: Always 0.
+ */
+static int tpm_ibmvtpm_resume(struct device *dev)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	int rc = 0;
+
+	do {
+		if (rc)
+			msleep(100);
+		rc = plpar_hcall_norets(H_ENABLE_CRQ,
+					ibmvtpm->vdev->unit_address);
+	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+	if (rc) {
+		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = vio_enable_interrupts(ibmvtpm->vdev);
+	if (rc) {
+		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = ibmvtpm_crq_send_init(ibmvtpm);
+	if (rc)
+		dev_err(dev, "Error send_init rc=%d\n", rc);
+
+	return rc;
+}
+
 /**
  * tpm_ibmvtpm_send() - Send a TPM command
  * @chip:	tpm chip struct
@@ -146,6 +204,7 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
+	bool retry = true;
 	int rc, sig;
 
 	if (!ibmvtpm->rtce_buf) {
@@ -179,18 +238,27 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	 */
 	ibmvtpm->tpm_processing_cmd = true;
 
+again:
 	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
 			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
 			count, ibmvtpm->rtce_dma_handle);
 	if (rc != H_SUCCESS) {
+		/*
+		 * H_CLOSED can be returned after LPM resume. Call
+		 * tpm_ibmvtpm_resume() to re-enable the CRQ then retry
+		 * ibmvtpm_send_crq() once before failing.
+		 */
+		if (rc == H_CLOSED && retry) {
+			tpm_ibmvtpm_resume(ibmvtpm->dev);
+			retry = false;
+			goto again;
+		}
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
-		rc = 0;
 		ibmvtpm->tpm_processing_cmd = false;
-	} else
-		rc = 0;
+	}
 
 	spin_unlock(&ibmvtpm->rtce_lock);
-	return rc;
+	return 0;
 }
 
 static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
@@ -268,26 +336,6 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
 	return rc;
 }
 
-/**
- * ibmvtpm_crq_send_init - Send a CRQ initialize message
- * @ibmvtpm:	vtpm device struct
- *
- * Return:
- *	0 on success.
- *	Non-zero on failure.
- */
-static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
-{
-	int rc;
-
-	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
-	if (rc != H_SUCCESS)
-		dev_err(ibmvtpm->dev,
-			"ibmvtpm_crq_send_init failed rc=%d\n", rc);
-
-	return rc;
-}
-
 /**
  * tpm_ibmvtpm_remove - ibm vtpm remove entry point
  * @vdev:	vio device struct
@@ -400,44 +448,6 @@ static int ibmvtpm_reset_crq(struct ibmvtpm_dev *ibmvtpm)
 			ibmvtpm->crq_dma_handle, CRQ_RES_BUF_SIZE);
 }
 
-/**
- * tpm_ibmvtpm_resume - Resume from suspend
- *
- * @dev:	device struct
- *
- * Return: Always 0.
- */
-static int tpm_ibmvtpm_resume(struct device *dev)
-{
-	struct tpm_chip *chip = dev_get_drvdata(dev);
-	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
-	int rc = 0;
-
-	do {
-		if (rc)
-			msleep(100);
-		rc = plpar_hcall_norets(H_ENABLE_CRQ,
-					ibmvtpm->vdev->unit_address);
-	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
-
-	if (rc) {
-		dev_err(dev, "Error enabling ibmvtpm rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = vio_enable_interrupts(ibmvtpm->vdev);
-	if (rc) {
-		dev_err(dev, "Error vio_enable_interrupts rc=%d\n", rc);
-		return rc;
-	}
-
-	rc = ibmvtpm_crq_send_init(ibmvtpm);
-	if (rc)
-		dev_err(dev, "Error send_init rc=%d\n", rc);
-
-	return rc;
-}
-
 static bool tpm_ibmvtpm_req_canceled(struct tpm_chip *chip, u8 status)
 {
 	return (status == 0);
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
@@ -433,6 +433,9 @@ static void disable_interrupts(struct tpm_chip *chip)
 	u32 intmask;
 	int rc;
 
+	if (priv->irq == 0)
+		return;
+
 	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
 	if (rc < 0)
 		intmask = 0;
@@ -983,9 +986,12 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 		if (irq) {
 			tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
 						 irq);
-			if (!(chip->flags & TPM_CHIP_FLAG_IRQ))
+			if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
 				dev_err(&chip->dev, FW_BUG
 					"TPM interrupt not working, polling instead\n");
+
+				disable_interrupts(chip);
+			}
 		} else {
 			tpm_tis_probe_irq(chip, intmask);
 		}
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
@@ -248,11 +248,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
 			return ret;
 
 		ret = pci_enable_sriov(pcidev, num_vfs);
-		if (ret)
+		if (ret) {
 			dfl_fpga_cdev_config_ports_pf(cdev);
+			return ret;
+		}
 	}
 
-	return ret;
+	return num_vfs;
 }
 
 static void cci_pci_remove(struct pci_dev *pcidev)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
 	int i = 0;
 	bool ret = false;
 
+	stream->adjust = *adjust;
+
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 
@@ -1180,6 +1182,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 	return (result == DC_OK);
 }
 
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+	int i;
+	struct pipe_ctx *pipe;
+
+	for (i = 0; i < MAX_PIPES; i++) {
+		pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (!pipe->plane_state)
+			continue;
+
+		/* Must set to false to start with, due to OR in update function */
+		pipe->plane_state->status.is_flip_pending = false;
+		dc->hwss.update_pending_status(pipe);
+		if (pipe->plane_state->status.is_flip_pending)
+			return true;
+	}
+	return false;
+}
+
 bool dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
@@ -1190,6 +1212,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
 
 	post_surface_trace(dc);
 
+	if (is_flip_pending_in_pipes(dc, context))
+		return true;
+
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
 		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
@@ -2152,7 +2177,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
 	enum surface_update_type update_type;
 	struct dc_state *context;
 	struct dc_context *dc_ctx = dc->ctx;
-	int i;
+	int i, j;
 
 	stream_status = dc_stream_get_status(stream);
 	context = dc->current_state;
@@ -2190,6 +2215,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
 		copy_surface_update_to_plane(surface, &srf_updates[i]);
 
+		if (update_type >= UPDATE_TYPE_MED) {
+			for (j = 0; j < dc->res_pool->pipe_count; j++) {
+				struct pipe_ctx *pipe_ctx =
+					&context->res_ctx.pipe_ctx[j];
+
+				if (pipe_ctx->plane_state != surface)
+					continue;
+
+				resource_build_scaling_params(pipe_ctx);
+			}
+		}
 	}
 
 	copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
@@ -1367,8 +1367,30 @@ static unsigned int stm32_adc_dma_residue(struct stm32_adc *adc)
 static void stm32_adc_dma_buffer_done(void *data)
 {
 	struct iio_dev *indio_dev = data;
+	struct stm32_adc *adc = iio_priv(indio_dev);
+	int residue = stm32_adc_dma_residue(adc);
 
-	iio_trigger_poll_chained(indio_dev->trig);
+	/*
+	 * In DMA mode the trigger services of IIO are not used
+	 * (e.g. no call to iio_trigger_poll).
+	 * Calling irq handler associated to the hardware trigger is not
+	 * relevant as the conversions have already been done. Data
+	 * transfers are performed directly in DMA callback instead.
+	 * This implementation avoids to call trigger irq handler that
+	 * may sleep, in an atomic context (DMA irq handler context).
+	 */
+	dev_dbg(&indio_dev->dev, "%s bufi=%d\n", __func__, adc->bufi);
+
+	while (residue >= indio_dev->scan_bytes) {
+		u16 *buffer = (u16 *)&adc->rx_buf[adc->bufi];
+
+		iio_push_to_buffers(indio_dev, buffer);
+
+		residue -= indio_dev->scan_bytes;
+		adc->bufi += indio_dev->scan_bytes;
+		if (adc->bufi >= adc->rx_buf_sz)
+			adc->bufi = 0;
+	}
 }
 
 static int stm32_adc_dma_start(struct iio_dev *indio_dev)
@@ -1778,6 +1800,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
 {
 	struct iio_dev *indio_dev;
 	struct device *dev = &pdev->dev;
+	irqreturn_t (*handler)(int irq, void *p) = NULL;
 	struct stm32_adc *adc;
 	int ret;
 
@@ -1843,9 +1866,11 @@ static int stm32_adc_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
+	if (!adc->dma_chan)
+		handler = &stm32_adc_trigger_handler;
+
 	ret = iio_triggered_buffer_setup(indio_dev,
-					 &iio_pollfunc_store_time,
-					 &stm32_adc_trigger_handler,
+					 &iio_pollfunc_store_time, handler,
 					 &stm32_adc_buffer_setup_ops);
 	if (ret) {
 		dev_err(&pdev->dev, "buffer setup failed\n");
diff --git a/drivers/iio/adc/ti-ads8344.c b/drivers/iio/adc/ti-ads8344.c
@@ -29,7 +29,7 @@ struct ads8344 {
 	struct mutex lock;
 
 	u8 tx_buf ____cacheline_aligned;
-	u16 rx_buf;
+	u8 rx_buf[3];
 };
 
 #define ADS8344_VOLTAGE_CHANNEL(chan, si)				\
@@ -89,11 +89,11 @@ static int ads8344_adc_conversion(struct ads8344 *adc, int channel,
 
 	udelay(9);
 
-	ret = spi_read(spi, &adc->rx_buf, 2);
+	ret = spi_read(spi, adc->rx_buf, sizeof(adc->rx_buf));
 	if (ret)
 		return ret;
 
-	return adc->rx_buf;
+	return adc->rx_buf[0] << 9 | adc->rx_buf[1] << 1 | adc->rx_buf[2] >> 7;
 }
 
 static int ads8344_read_raw(struct iio_dev *iio,
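A worked example of the reassembly introduced above: the ADS8344 clocks the 16-bit result out across three bytes, offset by one bit, so the sample is rebuilt from pieces of all three (byte values below are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical SPI reply bytes from the converter */
        unsigned char rx[3] = { 0x5a, 0x3c, 0x80 };

        /* top 7 bits, middle 8 bits, and the final bit of the sample */
        unsigned int value = rx[0] << 9 | rx[1] << 1 | rx[2] >> 7;

        printf("sample = 0x%04x\n", value);	/* prints 0xb479 here */
        return 0;
    }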
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
@@ -102,6 +102,16 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
 
 #define XADC_FLAGS_BUFFERED BIT(0)
 
+/*
+ * The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does
+ * not have a hardware FIFO. Which means an interrupt is generated for each
+ * conversion sequence. At 1MSPS sample rate the CPU in ZYNQ7000 is completely
+ * overloaded by the interrupts that it soft-lockups. For this reason the driver
+ * limits the maximum samplerate 150kSPS. At this rate the CPU is fairly busy,
+ * but still responsive.
+ */
+#define XADC_MAX_SAMPLERATE 150000
+
 static void xadc_write_reg(struct xadc *xadc, unsigned int reg,
 	uint32_t val)
 {
@@ -674,7 +684,7 @@ static int xadc_trigger_set_state(struct iio_trigger *trigger, bool state)
 
 	spin_lock_irqsave(&xadc->lock, flags);
 	xadc_read_reg(xadc, XADC_AXI_REG_IPIER, &val);
-	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, val & XADC_AXI_INT_EOS);
+	xadc_write_reg(xadc, XADC_AXI_REG_IPISR, XADC_AXI_INT_EOS);
 	if (state)
 		val |= XADC_AXI_INT_EOS;
 	else
@@ -722,13 +732,14 @@ static int xadc_power_adc_b(struct xadc *xadc, unsigned int seq_mode)
 {
 	uint16_t val;
 
+	/* Powerdown the ADC-B when it is not needed. */
 	switch (seq_mode) {
 	case XADC_CONF1_SEQ_SIMULTANEOUS:
 	case XADC_CONF1_SEQ_INDEPENDENT:
-		val = XADC_CONF2_PD_ADC_B;
+		val = 0;
 		break;
 	default:
-		val = 0;
+		val = XADC_CONF2_PD_ADC_B;
 		break;
 	}
 
@@ -797,6 +808,16 @@ static int xadc_preenable(struct iio_dev *indio_dev)
 	if (ret)
 		goto err;
 
+	/*
+	 * In simultaneous mode the upper and lower aux channels are samples at
+	 * the same time. In this mode the upper 8 bits in the sequencer
+	 * register are don't care and the lower 8 bits control two channels
+	 * each. As such we must set the bit if either the channel in the lower
+	 * group or the upper group is enabled.
+	 */
+	if (seq_mode == XADC_CONF1_SEQ_SIMULTANEOUS)
+		scan_mask = ((scan_mask >> 8) | scan_mask) & 0xff0000;
+
 	ret = xadc_write_adc_reg(xadc, XADC_REG_SEQ(1), scan_mask >> 16);
 	if (ret)
 		goto err;
@@ -823,11 +844,27 @@ static const struct iio_buffer_setup_ops xadc_buffer_ops = {
 	.postdisable = &xadc_postdisable,
 };
 
+static int xadc_read_samplerate(struct xadc *xadc)
+{
+	unsigned int div;
+	uint16_t val16;
+	int ret;
+
+	ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
+	if (ret)
+		return ret;
+
+	div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
+	if (div < 2)
+		div = 2;
+
+	return xadc_get_dclk_rate(xadc) / div / 26;
+}
+
 static int xadc_read_raw(struct iio_dev *indio_dev,
 	struct iio_chan_spec const *chan, int *val, int *val2, long info)
 {
 	struct xadc *xadc = iio_priv(indio_dev);
-	unsigned int div;
 	uint16_t val16;
 	int ret;
 
@@ -880,41 +917,31 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
 		*val = -((273150 << 12) / 503975);
 		return IIO_VAL_INT;
 	case IIO_CHAN_INFO_SAMP_FREQ:
-		ret = xadc_read_adc_reg(xadc, XADC_REG_CONF2, &val16);
-		if (ret)
+		ret = xadc_read_samplerate(xadc);
+		if (ret < 0)
 			return ret;
 
-		div = (val16 & XADC_CONF2_DIV_MASK) >> XADC_CONF2_DIV_OFFSET;
-		if (div < 2)
-			div = 2;
-
-		*val = xadc_get_dclk_rate(xadc) / div / 26;
-
+		*val = ret;
 		return IIO_VAL_INT;
 	default:
 		return -EINVAL;
 	}
 }
 
-static int xadc_write_raw(struct iio_dev *indio_dev,
-	struct iio_chan_spec const *chan, int val, int val2, long info)
+static int xadc_write_samplerate(struct xadc *xadc, int val)
 {
-	struct xadc *xadc = iio_priv(indio_dev);
 	unsigned long clk_rate = xadc_get_dclk_rate(xadc);
 	unsigned int div;
 
 	if (!clk_rate)
 		return -EINVAL;
 
-	if (info != IIO_CHAN_INFO_SAMP_FREQ)
-		return -EINVAL;
-
 	if (val <= 0)
 		return -EINVAL;
 
 	/* Max. 150 kSPS */
-	if (val > 150000)
-		val = 150000;
+	if (val > XADC_MAX_SAMPLERATE)
+		val = XADC_MAX_SAMPLERATE;
 
 	val *= 26;
 
@@ -927,7 +954,7 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
 	 * limit.
 	 */
 	div = clk_rate / val;
-	if (clk_rate / div / 26 > 150000)
+	if (clk_rate / div / 26 > XADC_MAX_SAMPLERATE)
 		div++;
 	if (div < 2)
 		div = 2;
@@ -938,6 +965,17 @@ static int xadc_write_raw(struct iio_dev *indio_dev,
 		div << XADC_CONF2_DIV_OFFSET);
 }
 
+static int xadc_write_raw(struct iio_dev *indio_dev,
+	struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+	struct xadc *xadc = iio_priv(indio_dev);
+
+	if (info != IIO_CHAN_INFO_SAMP_FREQ)
+		return -EINVAL;
+
+	return xadc_write_samplerate(xadc, val);
+}
+
 static const struct iio_event_spec xadc_temp_events[] = {
 	{
 		.type = IIO_EV_TYPE_THRESH,
@@ -1225,6 +1263,21 @@ static int xadc_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_free_samplerate_trigger;
 
+	/*
+	 * Make sure not to exceed the maximum samplerate since otherwise the
+	 * resulting interrupt storm will soft-lock the system.
+	 */
+	if (xadc->ops->flags & XADC_FLAGS_BUFFERED) {
+		ret = xadc_read_samplerate(xadc);
+		if (ret < 0)
+			goto err_free_samplerate_trigger;
+		if (ret > XADC_MAX_SAMPLERATE) {
+			ret = xadc_write_samplerate(xadc, XADC_MAX_SAMPLERATE);
+			if (ret < 0)
+				goto err_free_samplerate_trigger;
+		}
+	}
+
 	ret = request_irq(xadc->irq, xadc->ops->interrupt_handler, 0,
 			  dev_name(&pdev->dev), indio_dev);
 	if (ret)
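The divider arithmetic used by xadc_write_samplerate() above, as a standalone worked example (the 104 MHz DCLK figure is an assumed value, not from the patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long clk_rate = 104000000;	/* hypothetical DCLK in Hz */
        int val = 150000;			/* requested rate, already clamped */
        unsigned int div;

        /* samplerate = dclk / div / 26; round the divider up so the
         * effective rate stays at or below the 150 kSPS limit */
        div = clk_rate / (val * 26);
        if (clk_rate / div / 26 > 150000)
            div++;
        if (div < 2)
            div = 2;

        printf("div=%u -> %lu SPS\n", div, clk_rate / div / 26);
        return 0;
    }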
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -80,7 +80,7 @@ int st_sensors_set_odr(struct iio_dev *indio_dev, unsigned int odr)
 	struct st_sensor_odr_avl odr_out = {0, 0};
 	struct st_sensor_data *sdata = iio_priv(indio_dev);
 
-	if (!sdata->sensor_settings->odr.addr)
+	if (!sdata->sensor_settings->odr.mask)
 		return 0;
 
 	err = st_sensors_match_odr(sdata->sensor_settings, odr, &odr_out);
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
@@ -1441,6 +1441,10 @@ static int b53_arl_rw_op(struct b53_device *dev, unsigned int op)
 		reg |= ARLTBL_RW;
 	else
 		reg &= ~ARLTBL_RW;
+	if (dev->vlan_enabled)
+		reg &= ~ARLTBL_IVL_SVL_SELECT;
+	else
+		reg |= ARLTBL_IVL_SVL_SELECT;
 	b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg);
 
 	return b53_arl_op_wait(dev);
@@ -1450,6 +1454,7 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 			u16 vid, struct b53_arl_entry *ent, u8 *idx,
 			bool is_valid)
 {
+	DECLARE_BITMAP(free_bins, B53_ARLTBL_MAX_BIN_ENTRIES);
 	unsigned int i;
 	int ret;
 
@@ -1457,6 +1462,8 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 	if (ret)
 		return ret;
 
+	bitmap_zero(free_bins, dev->num_arl_entries);
+
 	/* Read the bins */
 	for (i = 0; i < dev->num_arl_entries; i++) {
 		u64 mac_vid;
@@ -1468,13 +1475,24 @@ static int b53_arl_read(struct b53_device *dev, u64 mac,
 			   B53_ARLTBL_DATA_ENTRY(i), &fwd_entry);
 		b53_arl_to_entry(ent, mac_vid, fwd_entry);
 
-		if (!(fwd_entry & ARLTBL_VALID))
+		if (!(fwd_entry & ARLTBL_VALID)) {
+			set_bit(i, free_bins);
 			continue;
+		}
 		if ((mac_vid & ARLTBL_MAC_MASK) != mac)
 			continue;
+		if (dev->vlan_enabled &&
+		    ((mac_vid >> ARLTBL_VID_S) & ARLTBL_VID_MASK) != vid)
+			continue;
 		*idx = i;
 		return 0;
 	}
 
+	if (bitmap_weight(free_bins, dev->num_arl_entries) == 0)
+		return -ENOSPC;
+
+	*idx = find_first_bit(free_bins, dev->num_arl_entries);
+
 	return -ENOENT;
 }
 
@@ -1504,15 +1522,25 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
 	if (op)
 		return ret;
 
-	/* We could not find a matching MAC, so reset to a new entry */
-	if (ret) {
+	switch (ret) {
+	case -ENOSPC:
+		dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
+			addr, vid);
+		return is_valid ? ret : 0;
+	case -ENOENT:
+		/* We could not find a matching MAC, so reset to a new entry */
 		dev_dbg(dev->dev, "{%pM,%.4d} not found, using idx: %d\n",
 			addr, vid, idx);
-
 		fwd_entry = 0;
-		idx = 1;
+		break;
+	default:
+		dev_dbg(dev->dev, "{%pM,%.4d} found, using idx: %d\n",
+			addr, vid, idx);
+		break;
 	}
 
 	memset(&ent, 0, sizeof(ent));
 	ent.port = port;
 	ent.is_valid = is_valid;
 	ent.vid = vid;
 	ent.is_static = true;
 	memcpy(ent.mac, addr, ETH_ALEN);
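The reworked bin logic above distinguishes "found" from "not found but a bin is free" from "no space left". A toy model of that bookkeeping (a simplified fixed-size illustration, not the driver's DECLARE_BITMAP helpers):

    #include <stdio.h>

    int main(void)
    {
        int valid[4] = { 1, 0, 1, 1 };	/* hypothetical bin occupancy */
        unsigned int free_bins = 0;
        int i;

        /* record every invalid (free) bin while scanning for a match */
        for (i = 0; i < 4; i++)
            if (!valid[i])
                free_bins |= 1U << i;

        if (!free_bins) {
            printf("all bins valid, no match -> -ENOSPC\n");
        } else {
            for (i = 0; !(free_bins & (1U << i)); i++)
                ;
            printf("no match, first free bin %d -> -ENOENT\n", i);
        }
        return 0;
    }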
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
@@ -292,6 +292,7 @@
 /* ARL Table Read/Write Register (8 bit) */
 #define B53_ARLTBL_RW_CTRL		0x00
 #define    ARLTBL_RW			BIT(0)
+#define    ARLTBL_IVL_SVL_SELECT	BIT(6)
 #define    ARLTBL_START_DONE		BIT(7)
 
 /* MAC Address Index Register (48 bit) */
@@ -304,7 +305,7 @@
  *
  * BCM5325 and BCM5365 share most definitions below
  */
-#define B53_ARLTBL_MAC_VID_ENTRY(n)	(0x10 * (n))
+#define B53_ARLTBL_MAC_VID_ENTRY(n)	((0x10 * (n)) + 0x10)
 #define   ARLTBL_MAC_MASK		0xffffffffffffULL
 #define   ARLTBL_VID_S			48
 #define   ARLTBL_VID_MASK_25		0xff
@@ -316,13 +317,16 @@
 #define   ARLTBL_VALID_25		BIT(63)
 
 /* ARL Table Data Entry N Registers (32 bit) */
-#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x08)
+#define B53_ARLTBL_DATA_ENTRY(n)	((0x10 * (n)) + 0x18)
 #define   ARLTBL_DATA_PORT_ID_MASK	0x1ff
 #define   ARLTBL_TC(tc)			((3 & tc) << 11)
 #define   ARLTBL_AGE			BIT(14)
 #define   ARLTBL_STATIC			BIT(15)
 #define   ARLTBL_VALID			BIT(16)
 
+/* Maximum number of bin entries in the ARL for all switches */
+#define B53_ARLTBL_MAX_BIN_ENTRIES	4
+
 /* ARL Search Control Register (8 bit) */
 #define B53_ARL_SRCH_CTL		0x50
 #define B53_ARL_SRCH_CTL_25		0x20
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -995,6 +995,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
 	if (netif_running(dev))
 		bcmgenet_update_mib_counters(priv);
 
+	dev->netdev_ops->ndo_get_stats(dev);
+
 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
 		const struct bcmgenet_stats *s;
 		char *p;
@@ -3204,6 +3206,7 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
 	dev->stats.rx_packets = rx_packets;
 	dev->stats.rx_errors = rx_errors;
 	dev->stats.rx_missed_errors = rx_errors;
+	dev->stats.rx_dropped = rx_dropped;
 	return &dev->stats;
 }
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1054,9 +1054,9 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
 	}
 }
 
-static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
-					   struct cudbg_error *cudbg_err,
-					   u8 mem_type)
+static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+				 struct cudbg_error *cudbg_err,
+				 u8 mem_type, unsigned long *region_size)
 {
 	struct adapter *padap = pdbg_init->adap;
 	struct cudbg_meminfo mem_info;
@@ -1065,15 +1065,23 @@ static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
 
 	memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
 	rc = cudbg_fill_meminfo(padap, &mem_info);
-	if (rc)
+	if (rc) {
+		cudbg_err->sys_err = rc;
 		return rc;
+	}
 
 	cudbg_t4_fwcache(pdbg_init, cudbg_err);
 	rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
-	if (rc)
+	if (rc) {
+		cudbg_err->sys_err = rc;
 		return rc;
+	}
 
-	return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+	if (region_size)
+		*region_size = mem_info.avail[mc_idx].limit -
+			       mem_info.avail[mc_idx].base;
+
+	return 0;
 }
 
 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
@@ -1081,7 +1089,12 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
 				    struct cudbg_error *cudbg_err,
 				    u8 mem_type)
 {
-	unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+	unsigned long size = 0;
+	int rc;
+
+	rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
+	if (rc)
+		return rc;
 
 	return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
 				 cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
@@ -311,32 +311,17 @@ static int cxgb4_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
  */
 static int cxgb4_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
-	struct adapter *adapter = (struct adapter *)container_of(ptp,
-				   struct adapter, ptp_clock_info);
-	struct fw_ptp_cmd c;
+	struct adapter *adapter = container_of(ptp, struct adapter,
+					       ptp_clock_info);
 	u64 ns;
-	int err;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PTP_CMD) |
-				     FW_CMD_REQUEST_F |
-				     FW_CMD_READ_F |
-				     FW_PTP_CMD_PORTID_V(0));
-	c.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(sizeof(c) / 16));
-	c.u.ts.sc = FW_PTP_SC_GET_TIME;
 
-	err = t4_wr_mbox(adapter, adapter->mbox, &c, sizeof(c), &c);
-	if (err < 0) {
-		dev_err(adapter->pdev_dev,
-			"PTP: %s error %d\n", __func__, -err);
-		return err;
-	}
+	ns = t4_read_reg(adapter, T5_PORT_REG(0, MAC_PORT_PTP_SUM_LO_A));
+	ns |= (u64)t4_read_reg(adapter,
+			       T5_PORT_REG(0, MAC_PORT_PTP_SUM_HI_A)) << 32;
 
 	/* convert to timespec*/
-	ns = be64_to_cpu(c.u.ts.tm);
 	*ts = ns_to_timespec64(ns);
-
-	return err;
+	return 0;
 }
 
 /**
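The new gettime path reads the nanosecond counter as two 32-bit halves; widening before the shift matters, as this small standalone example shows (register values are made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int lo = 0x89abcdef, hi = 0x01234567;	/* hypothetical */

        /* cast before shifting: a plain 32-bit hi << 32 would be lost */
        unsigned long long ns = (unsigned long long)hi << 32 | lo;

        printf("ns = 0x%016llx\n", ns);
        return 0;
    }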
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1900,6 +1900,9 @@
 
 #define MAC_PORT_CFG2_A 0x818
 
+#define MAC_PORT_PTP_SUM_LO_A 0x990
+#define MAC_PORT_PTP_SUM_HI_A 0x994
+
 #define MPS_CMN_CTL_A	0x9000
 
 #define COUNTPAUSEMCRX_S    5
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -43,6 +43,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/moduleparam.h>
+#include <linux/indirect_call_wrapper.h>
 
 #include "mlx4_en.h"
 
@@ -261,6 +262,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
 	}
 }
 
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+						   struct mlx4_en_tx_ring *ring,
+						   int index, u64 timestamp,
+						   int napi_mode));
+
 u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 			 struct mlx4_en_tx_ring *ring,
@@ -329,6 +334,11 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
 	return tx_info->nr_txbb;
 }
 
+INDIRECT_CALLABLE_DECLARE(u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+						      struct mlx4_en_tx_ring *ring,
+						      int index, u64 timestamp,
+						      int napi_mode));
+
 u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
 			    struct mlx4_en_tx_ring *ring,
 			    int index, u64 timestamp,
@@ -449,7 +459,9 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
 			timestamp = mlx4_en_get_cqe_ts(cqe);
 
 		/* free next descriptor */
-		last_nr_txbb = ring->free_tx_desc(
+		last_nr_txbb = INDIRECT_CALL_2(ring->free_tx_desc,
+					       mlx4_en_free_tx_desc,
+					       mlx4_en_recycle_tx_desc,
 					       priv, ring, ring_index,
 					       timestamp, napi_budget);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -316,7 +316,7 @@ struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
 
 	block = kzalloc(sizeof(*block), GFP_KERNEL);
 	if (!block)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	INIT_LIST_HEAD(&block->resource_list);
 	block->afa = mlxsw_afa;
 
@@ -344,7 +344,7 @@ err_second_set_create:
 	mlxsw_afa_set_destroy(block->first_set);
 err_first_set_create:
 	kfree(block);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(mlxsw_afa_block_create);
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -88,8 +88,8 @@ static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
 	 * to be written using PEFA register to all indexes for all regions.
 	 */
 	afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
-	if (!afa_block) {
-		err = -ENOMEM;
+	if (IS_ERR(afa_block)) {
+		err = PTR_ERR(afa_block);
 		goto err_afa_block;
 	}
 	err = mlxsw_afa_block_continue(afa_block);
@ -444,7 +444,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
|
||||
|
||||
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
|
||||
if (!rulei)
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (afa_block) {
|
||||
rulei->act_block = afa_block;
|
||||
|
@ -199,8 +199,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
|
||||
int err;
|
||||
|
||||
afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
|
||||
if (!afa_block)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
if (IS_ERR(afa_block))
|
||||
return afa_block;
|
||||
|
||||
err = mlxsw_afa_block_append_allocated_counter(afa_block,
|
||||
counter_index);
|
||||
|
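The mlxsw hunks above convert NULL-on-failure allocators to the kernel's ERR_PTR() convention so callers can propagate the real errno instead of guessing -ENOMEM. A minimal standalone model of that convention (illustrative only, not part of this diff; block_create() is a hypothetical name):

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_ERRNO	4095
	#define ENOMEM		12

	/* userspace model of ERR_PTR()/IS_ERR()/PTR_ERR() */
	static void *ERR_PTR(long error)	{ return (void *)error; }
	static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
	static int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *block_create(int fail)
	{
		if (fail)
			return ERR_PTR(-ENOMEM);  /* errno travels in the pointer */
		return malloc(16);
	}

	int main(void)
	{
		void *b = block_create(1);

		if (IS_ERR(b)) {
			printf("create failed: %ld\n", PTR_ERR(b));  /* -12 */
			return 1;
		}
		free(b);
		return 0;
	}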
@@ -119,6 +119,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 		{ .div = 5, .val = 5, },
 		{ .div = 6, .val = 6, },
 		{ .div = 7, .val = 7, },
+		{ /* end of array */ }
 	};
 
 	clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
@@ -1207,7 +1207,7 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
 		enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
 
 		if (df < 0 || df > GENEVE_DF_MAX) {
-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_DF],
 					    "Invalid DF attribute");
 			return -EINVAL;
 		}
@@ -3226,11 +3226,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 			  struct netlink_ext_ack *extack)
 {
 	struct macsec_dev *macsec = macsec_priv(dev);
-	struct net_device *real_dev;
-	int err;
-	sci_t sci;
-	u8 icv_len = DEFAULT_ICV_LEN;
 	rx_handler_func_t *rx_handler;
+	u8 icv_len = DEFAULT_ICV_LEN;
+	struct net_device *real_dev;
+	int err, mtu;
+	sci_t sci;
 
 	if (!tb[IFLA_LINK])
 		return -EINVAL;
@@ -3246,7 +3246,11 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
 
 	if (data && data[IFLA_MACSEC_ICV_LEN])
 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
-	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
+	if (mtu < 0)
+		dev->mtu = 0;
+	else
+		dev->mtu = mtu;
 
 	rx_handler = rtnl_dereference(real_dev->rx_handler);
 	if (rx_handler && rx_handler != macsec_handle_frame)
@@ -1704,7 +1704,7 @@ static int macvlan_device_event(struct notifier_block *unused,
 					     struct macvlan_dev,
 					     list);
 
-		if (macvlan_sync_address(vlan->dev, dev->dev_addr))
+		if (vlan && macvlan_sync_address(vlan->dev, dev->dev_addr))
 			return NOTIFY_BAD;
 
 		break;
@@ -468,6 +468,9 @@ static const struct team_mode *team_mode_get(const char *kind)
 	struct team_mode_item *mitem;
 	const struct team_mode *mode = NULL;
 
+	if (!try_module_get(THIS_MODULE))
+		return NULL;
+
 	spin_lock(&mode_list_lock);
 	mitem = __find_mode(kind);
 	if (!mitem) {
@@ -483,6 +486,7 @@ static const struct team_mode *team_mode_get(const char *kind)
 	}
 
 	spin_unlock(&mode_list_lock);
+	module_put(THIS_MODULE);
 	return mode;
 }
 
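The team fix pins its own module for the duration of the mode lookup so the lookup cannot race with module teardown. A reduced standalone model of the pattern (names mirror the kernel API but the bodies here are simulation only):

	#include <stdio.h>

	static int module_refs;		/* model of the module refcount */
	static int module_unloading;	/* set once unload has started */

	static int try_module_get(void)
	{
		if (module_unloading)
			return 0;	/* refuse new pins, as the kernel does */
		module_refs++;
		return 1;
	}

	static void module_put(void)
	{
		module_refs--;
	}

	static const char *mode_get(const char *kind)
	{
		const char *mode;

		if (!try_module_get())	/* bail out instead of racing the unload */
			return NULL;
		/* ... lookup that may sleep (request_module() in the kernel) ... */
		mode = kind;
		module_put();		/* drop the temporary pin */
		return mode;
	}

	int main(void)
	{
		printf("%s\n", mode_get("loadbalance") ? "got mode" : "no mode");
		return 0;
	}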
@@ -188,8 +188,8 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 	fl6.flowi6_proto = iph->nexthdr;
 	fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
 
-	dst = ip6_route_output(net, NULL, &fl6);
-	if (dst == dst_null)
+	dst = ip6_dst_lookup_flow(net, NULL, &fl6, NULL);
+	if (IS_ERR(dst) || dst == dst_null)
 		goto err;
 
 	skb_dst_drop(skb);
@@ -474,7 +474,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
 	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
 		return skb;
 
-	if (qdisc_tx_is_default(vrf_dev))
+	if (qdisc_tx_is_default(vrf_dev) ||
+	    IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
 		return vrf_ip6_out_direct(vrf_dev, sk, skb);
 
 	return vrf_ip6_out_redirect(vrf_dev, skb);
@@ -686,7 +687,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
 	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
 		return skb;
 
-	if (qdisc_tx_is_default(vrf_dev))
+	if (qdisc_tx_is_default(vrf_dev) ||
+	    IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
 		return vrf_ip_out_direct(vrf_dev, sk, skb);
 
 	return vrf_ip_out_redirect(vrf_dev, skb);
 
@@ -3144,7 +3144,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
 		u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
 
 		if (id >= VXLAN_N_VID) {
-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID],
+			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_ID],
 					    "VXLAN ID must be lower than 16777216");
 			return -ERANGE;
 		}
@@ -3155,7 +3155,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
 			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
 
 		if (ntohs(p->high) < ntohs(p->low)) {
-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE],
+			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_PORT_RANGE],
 					    "Invalid source port range");
 			return -EINVAL;
 		}
@@ -3165,7 +3165,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
 		enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
 
 		if (df < 0 || df > VXLAN_DF_MAX) {
-			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
+			NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VXLAN_DF],
 					    "Invalid DF attribute");
 			return -EINVAL;
 		}
 
@@ -374,7 +374,7 @@ out:
 }
 
 static void *
-il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+il3945_rs_alloc(struct ieee80211_hw *hw)
 {
 	return hw->priv;
 }
 
@@ -2474,7 +2474,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
 }
 
 static void *
-il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+il4965_rs_alloc(struct ieee80211_hw *hw)
 {
 	return hw->priv;
 }
 
@@ -3019,7 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
 		cpu_to_le16(priv->lib->bt_params->agg_time_limit);
 }
 
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rs_alloc(struct ieee80211_hw *hw)
 {
 	return hw->priv;
 }
 
@@ -8,7 +8,7 @@
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2019 Intel Corporation
+* Copyright(c) 2019 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2019 Intel Corporation
+* Copyright(c) 2019 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -99,7 +99,7 @@ enum iwl_mvm_dqa_txq {
 	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
 	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
 	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
-	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+	IWL_MVM_DQA_MAX_DATA_QUEUE = 30,
 };
 
 enum iwl_mvm_tx_fifo {
 
@@ -525,8 +525,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
 				IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
 				IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
 			.mac_cap_info[2] =
-				IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
-				IEEE80211_HE_MAC_CAP2_ACK_EN,
+				IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP,
 			.mac_cap_info[3] =
 				IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
 				IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
@@ -610,8 +609,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
 				IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
 				IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
 			.mac_cap_info[2] =
-				IEEE80211_HE_MAC_CAP2_BSR |
-				IEEE80211_HE_MAC_CAP2_ACK_EN,
+				IEEE80211_HE_MAC_CAP2_BSR,
 			.mac_cap_info[3] =
 				IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
 				IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
 
@@ -3663,7 +3663,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
 			cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
 }
 
-static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rs_alloc(struct ieee80211_hw *hw)
 {
 	return hw->priv;
 }
 
@@ -8,7 +8,7 @@
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2018 - 2019 Intel Corporation
+* Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2018 - 2019 Intel Corporation
+* Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -566,6 +566,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
 
 struct iwl_mvm_stat_data {
 	struct iwl_mvm *mvm;
+	__le32 flags;
 	__le32 mac_id;
 	u8 beacon_filter_average_energy;
 	void *general;
@@ -606,6 +607,13 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
 			-general->beacon_average_energy[vif_id];
 	}
 
+	/* make sure that beacon statistics don't go backwards with TCM
+	 * request to clear statistics
+	 */
+	if (le32_to_cpu(data->flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+		mvmvif->beacon_stats.accu_num_beacons +=
+			mvmvif->beacon_stats.num_beacons;
+
 	if (mvmvif->id != id)
 		return;
 
@@ -763,6 +771,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
 
 		flags = stats->flag;
 	}
+	data.flags = flags;
 
 	iwl_mvm_rx_stats_check_trigger(mvm, pkt);
 
@@ -722,6 +722,11 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
+		 "max queue %d >= num_of_queues (%d)", maxq,
+		 mvm->trans->trans_cfg->base_params->num_of_queues))
+		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;
+
 	/* This should not be hit with new TX path */
 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
 		return -ENOSPC;
@@ -1164,9 +1169,9 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
 						   inactive_tid_bitmap,
 						   &unshare_queues,
 						   &changetid_queues);
-		if (ret >= 0 && free_queue < 0) {
+		if (ret && free_queue < 0) {
 			queue_owner = sta;
-			free_queue = ret;
+			free_queue = i;
 		}
 		/* only unlock sta lock - we still need the queue info lock */
 		spin_unlock_bh(&mvmsta->lock);
 
@@ -1283,6 +1283,9 @@ void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
 
 	iwl_pcie_gen2_txq_unmap(trans, queue);
 
+	iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
+	trans_pcie->txq[queue] = NULL;
+
 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
 }
 
@@ -261,7 +261,7 @@ static void rtl_rate_update(void *ppriv,
 {
 }
 
-static void *rtl_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+static void *rtl_rate_alloc(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	return rtlpriv;
@@ -6,6 +6,7 @@
 
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/hdreg.h>
@@ -1244,6 +1245,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 	queue_work(nvme_wq, &ctrl->async_event_work);
 }
 
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+	if (in_compat_syscall())
+		ptrval = (compat_uptr_t)ptrval;
+	return (void __user *)ptrval;
+}
+
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
 	struct nvme_user_io io;
@@ -1267,7 +1280,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 
 	length = (io.nblocks + 1) << ns->lba_shift;
 	meta_len = (io.nblocks + 1) * ns->ms;
-	metadata = (void __user *)(uintptr_t)io.metadata;
+	metadata = nvme_to_user_ptr(io.metadata);
 
 	if (ns->ext) {
 		length += meta_len;
@@ -1290,7 +1303,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 	c.rw.appmask = cpu_to_le16(io.appmask);
 
 	return nvme_submit_user_cmd(ns->queue, &c,
-			(void __user *)(uintptr_t)io.addr, length,
+			nvme_to_user_ptr(io.addr), length,
 			metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
 }
 
@@ -1410,9 +1423,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata,
-			cmd.metadata_len, 0, &result, timeout);
+			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+			0, &result, timeout);
 	nvme_passthru_end(ctrl, effects);
 
 	if (status >= 0) {
@@ -1457,8 +1470,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+			nvme_to_user_ptr(cmd.addr), cmd.data_len,
+			nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
 			0, &cmd.result, timeout);
 	nvme_passthru_end(ctrl, effects);
 
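The comment introducing nvme_to_user_ptr() above describes the key behaviour: for a 32-bit (compat) caller, only the low 32 bits of the 64-bit ioctl field are meaningful, so the value is narrowed through compat_uptr_t before becoming a pointer. A hedged standalone illustration of that truncation (userspace C, not kernel code; the test value is made up):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t compat_uptr_t;

	/* model of the conversion: in a compat syscall only the low
	 * 32 bits of the ioctl-supplied integer are meaningful */
	static void *to_user_ptr(uint64_t ptrval, int in_compat_syscall)
	{
		if (in_compat_syscall)
			ptrval = (compat_uptr_t)ptrval;	/* drop upper 32 bits */
		return (void *)(uintptr_t)ptrval;
	}

	int main(void)
	{
		uint64_t v = 0xffffffff00c0ffeeULL;	/* garbage in high bits */

		printf("native: %p\n", to_user_ptr(v, 0));
		printf("compat: %p\n", to_user_ptr(v, 1));	/* 0xc0ffee only */
		return 0;
	}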
@@ -510,7 +510,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 	if (!nr_nsids)
 		return 0;
 
-	down_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		unsigned nsid = le32_to_cpu(desc->nsids[n]);
 
@@ -521,7 +521,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 		if (++n == nr_nsids)
 			break;
 	}
-	up_write(&ctrl->namespaces_rwsem);
+	up_read(&ctrl->namespaces_rwsem);
 	return 0;
 }
 
@@ -164,16 +164,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 {
 	struct request *rq;
-	unsigned int bytes;
 
 	if (unlikely(nvme_tcp_async_req(req)))
 		return false; /* async events don't have a request */
 
 	rq = blk_mq_rq_from_pdu(req);
-	bytes = blk_rq_payload_bytes(rq);
-
-	return rq_data_dir(rq) == WRITE && bytes &&
-		bytes <= nvme_tcp_inline_data_size(req->queue);
+	return rq_data_dir(rq) == WRITE && req->data_len &&
+		req->data_len <= nvme_tcp_inline_data_size(req->queue);
 }
 
 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -2090,7 +2088,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (rq_data_dir(rq) == WRITE && req->data_len &&
+	if (!blk_rq_nr_phys_segments(rq))
+		nvme_tcp_set_sg_null(c);
+	else if (rq_data_dir(rq) == WRITE &&
 	    req->data_len <= nvme_tcp_inline_data_size(queue))
 		nvme_tcp_set_sg_inline(queue, c, req->data_len);
 	else
@@ -2117,7 +2117,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 	req->data_sent = 0;
 	req->pdu_len = 0;
 	req->pdu_sent = 0;
-	req->data_len = blk_rq_payload_bytes(rq);
+	req->data_len = blk_rq_nr_phys_segments(rq) ?
+				blk_rq_payload_bytes(rq) : 0;
 	req->curr_bio = rq->bio;
 
 	if (rq_data_dir(rq) == WRITE &&
 
@@ -174,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
 
 void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
 int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
 int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
 void pciehp_release_ctrl(struct controller *ctrl);
 
 int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
 
@@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
 	struct controller *ctrl = to_ctrl(hotplug_slot);
 	struct pci_dev *pdev = ctrl->pcie->port;
+	int ret;
 
 	pci_config_pm_runtime_get(pdev);
-	*value = pciehp_card_present_or_link_active(ctrl);
+	ret = pciehp_card_present_or_link_active(ctrl);
 	pci_config_pm_runtime_put(pdev);
+	if (ret < 0)
+		return ret;
+
+	*value = ret;
 	return 0;
 }
 
@@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
  */
 static void pciehp_check_presence(struct controller *ctrl)
 {
-	bool occupied;
+	int occupied;
 
 	down_read(&ctrl->reset_lock);
 	mutex_lock(&ctrl->state_lock);
 
 	occupied = pciehp_card_present_or_link_active(ctrl);
-	if ((occupied && (ctrl->state == OFF_STATE ||
+	if ((occupied > 0 && (ctrl->state == OFF_STATE ||
 			  ctrl->state == BLINKINGON_STATE)) ||
 	    (!occupied && (ctrl->state == ON_STATE ||
 			   ctrl->state == BLINKINGOFF_STATE)))
 
@@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl)
 
 void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
 {
-	bool present, link_active;
+	int present, link_active;
 
 	/*
 	 * If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
 	mutex_lock(&ctrl->state_lock);
 	present = pciehp_card_present(ctrl);
 	link_active = pciehp_check_link_active(ctrl);
-	if (!present && !link_active) {
+	if (present <= 0 && link_active <= 0) {
 		mutex_unlock(&ctrl->state_lock);
 		return;
 	}
 
@@ -201,17 +201,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
 	pcie_do_write_cmd(ctrl, cmd, mask, false);
 }
 
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this so the
+ * caller may need to take it into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 lnk_status;
-	bool ret;
+	int ret;
+
+	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+	if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+		return -ENODEV;
 
-	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 	ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
-
-	if (ret)
-		ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+	ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 
 	return ret;
 }
@@ -373,13 +385,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
 	*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 }
 
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Function checks whether the card is currently present in the slot and
+ * in that case returns true. Note it is possible that the card is
+ * removed immediately after the check so the caller may need to take
+ * this into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
 {
 	struct pci_dev *pdev = ctrl_dev(ctrl);
 	u16 slot_status;
+	int ret;
 
-	pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-	return slot_status & PCI_EXP_SLTSTA_PDS;
+	ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+	if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+		return -ENODEV;
+
+	return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 }
 
 /**
@@ -390,10 +418,19 @@ bool pciehp_card_present(struct controller *ctrl)
  * Presence Detect State bit, this helper also returns true if the Link Active
 * bit is set. This is a concession to broken hotplug ports which hardwire
 * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ * port is not present anymore returns %-ENODEV.
 */
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
 {
-	return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+	int ret;
+
+	ret = pciehp_card_present(ctrl);
+	if (ret)
+		return ret;
+
+	return pciehp_check_link_active(ctrl);
 }
 
 int pciehp_query_power_fault(struct controller *ctrl)
 
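After the bool-to-int conversion above, the presence and link helpers report three states: a negative errno when the hotplug controller itself has vanished, 0 for empty/inactive, and 1 for occupied/active, so callers must test the sign explicitly. A standalone model of the contract (illustrative only; card_present() here is a simulation, not the driver function):

	#include <errno.h>
	#include <stdio.h>

	/* tri-state contract: <0 errno, 0 empty, 1 occupied */
	static int card_present(int hw_ok, int pds_bit)
	{
		if (!hw_ok)
			return -ENODEV;	/* controller itself is gone */
		return !!pds_bit;
	}

	int main(void)
	{
		int ret = card_present(1, 4);	/* hypothetical PDS bit set */

		if (ret < 0)
			printf("controller gone (%d)\n", ret);
		else
			printf(ret > 0 ? "occupied\n" : "empty\n");
		return 0;
	}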
@@ -919,6 +919,8 @@ static int pci_pm_resume_noirq(struct device *dev)
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	struct device_driver *drv = dev->driver;
 	int error = 0;
+	pci_power_t prev_state = pci_dev->current_state;
+	bool skip_bus_pm = pci_dev->skip_bus_pm;
 
 	if (dev_pm_may_skip_resume(dev))
 		return 0;
@@ -937,12 +939,15 @@ static int pci_pm_resume_noirq(struct device *dev)
 	 * configuration here and attempting to put them into D0 again is
 	 * pointless, so avoid doing that.
 	 */
-	if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+	if (!(skip_bus_pm && pm_suspend_no_platform()))
 		pci_pm_default_resume_early(pci_dev);
 
 	pci_fixup_device(pci_fixup_resume_early, pci_dev);
 	pcie_pme_root_status_cleanup(pci_dev);
 
+	if (!skip_bus_pm && prev_state == PCI_D3cold)
+		pci_bridge_wait_for_secondary_bus(pci_dev);
+
 	if (pci_has_legacy_pm_support(pci_dev))
 		return pci_legacy_resume_early(dev);
 
@@ -1333,6 +1338,7 @@ static int pci_pm_runtime_resume(struct device *dev)
 	int rc = 0;
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+	pci_power_t prev_state = pci_dev->current_state;
 
 	/*
 	 * Restoring config space is necessary even if the device is not bound
@@ -1348,6 +1354,9 @@ static int pci_pm_runtime_resume(struct device *dev)
 	pci_enable_wake(pci_dev, PCI_D0, false);
 	pci_fixup_device(pci_fixup_resume, pci_dev);
 
+	if (prev_state == PCI_D3cold)
+		pci_bridge_wait_for_secondary_bus(pci_dev);
+
 	if (pm && pm->runtime_resume)
 		rc = pm->runtime_resume(dev);
 
@@ -1020,8 +1020,6 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 		 * because have already delayed for the bridge.
 		 */
 		if (dev->runtime_d3cold) {
-			if (dev->d3cold_delay && !dev->imm_ready)
-				msleep(dev->d3cold_delay);
 			/*
 			 * When powering on a bridge from D3cold, the
 			 * whole hierarchy may be powered on into
@@ -4606,14 +4604,17 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
 
 	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
 }
 
 /**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
 * @pdev: Bridge device
 * @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
 *
 * Use this to wait till link becomes active or inactive.
 */
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+				     int delay)
 {
 	int timeout = 1000;
 	bool ret;
@@ -4650,13 +4651,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
 		timeout -= 10;
 	}
 	if (active && ret)
-		msleep(100);
+		msleep(delay);
 	else if (ret != active)
 		pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
 			active ? "set" : "cleared");
 	return ret == active;
 }
 
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+	return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus. The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+	const struct pci_dev *pdev;
+	int min_delay = 100;
+	int max_delay = 0;
+
+	list_for_each_entry(pdev, &bus->devices, bus_list) {
+		if (pdev->d3cold_delay < min_delay)
+			min_delay = pdev->d3cold_delay;
+		if (pdev->d3cold_delay > max_delay)
+			max_delay = pdev->d3cold_delay;
+	}
+
+	return max(min_delay, max_delay);
+}
+
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle necessary delays before access to the devices on the secondary
+ * side of the bridge are permitted after D3cold to D0 transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+	struct pci_dev *child;
+	int delay;
+
+	if (pci_dev_is_disconnected(dev))
+		return;
+
+	if (!pci_is_bridge(dev) || !dev->bridge_d3)
+		return;
+
+	down_read(&pci_bus_sem);
+
+	/*
+	 * We only deal with devices that are present currently on the bus.
+	 * For any hot-added devices the access delay is handled in pciehp
+	 * board_added(). In case of ACPI hotplug the firmware is expected
+	 * to configure the devices before OS is notified.
+	 */
+	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+		up_read(&pci_bus_sem);
+		return;
+	}
+
+	/* Take d3cold_delay requirements into account */
+	delay = pci_bus_max_d3cold_delay(dev->subordinate);
+	if (!delay) {
+		up_read(&pci_bus_sem);
+		return;
+	}
+
+	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+				 bus_list);
+	up_read(&pci_bus_sem);
+
+	/*
+	 * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+	 * accessing the device after reset (that is 1000 ms + 100 ms). In
+	 * practice this should not be needed because we don't do power
+	 * management for them (see pci_bridge_d3_possible()).
+	 */
+	if (!pci_is_pcie(dev)) {
+		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+		msleep(1000 + delay);
+		return;
+	}
+
+	/*
+	 * For PCIe downstream and root ports that do not support speeds
+	 * greater than 5 GT/s need to wait minimum 100 ms. For higher
+	 * speeds (gen3) we need to wait first for the data link layer to
+	 * become active.
+	 *
+	 * However, 100 ms is the minimum and the PCIe spec says the
+	 * software must allow at least 1s before it can determine that the
+	 * device that did not respond is a broken device. There is
+	 * evidence that 100 ms is not always enough, for example certain
+	 * Titan Ridge xHCI controller does not always respond to
+	 * configuration requests if we only wait for 100 ms (see
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+	 *
+	 * Therefore we wait for 100 ms and check for the device presence.
+	 * If it is still not present give it an additional 100 ms.
+	 */
+	if (!pcie_downstream_port(dev))
+		return;
+
+	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+		msleep(delay);
+	} else {
+		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+			delay);
+		if (!pcie_wait_for_link_delay(dev, true, delay)) {
+			/* Did not train, no need to wait any further */
+			return;
+		}
+	}
+
+	if (!pci_device_is_present(child)) {
+		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+		msleep(delay);
+	}
+}
+
 void pci_reset_secondary_bus(struct pci_dev *dev)
 {
 	u16 ctrl;
 
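The comment above pci_bus_max_d3cold_delay() packs a subtle rule into one line of arithmetic: the result only drops below the spec's 100 ms when every device on the bus lowered its delay, while any single device can raise it. A hedged standalone model of that max(min_delay, max_delay) behaviour (the sample delay values are made up):

	#include <stdio.h>

	/* model of pci_bus_max_d3cold_delay(): spec default is 100 ms,
	 * firmware may lower it per device, drivers may raise it */
	static int bus_d3cold_delay(const int *dev_delay, int n)
	{
		int min_delay = 100, max_delay = 0, i;

		for (i = 0; i < n; i++) {
			if (dev_delay[i] < min_delay)
				min_delay = dev_delay[i];
			if (dev_delay[i] > max_delay)
				max_delay = dev_delay[i];
		}
		return max_delay > min_delay ? max_delay : min_delay;
	}

	int main(void)
	{
		int all_fast[] = { 10, 20 };	/* firmware lowered both */
		int mixed[] = { 10, 250 };	/* one driver raised its delay */

		printf("%d\n", bus_d3cold_delay(all_fast, 2));	/* 20 */
		printf("%d\n", bus_d3cold_delay(mixed, 2));	/* 250 */
		return 0;
	}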
@@ -107,6 +107,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 void pci_free_cap_save_buffers(struct pci_dev *dev);
 bool pci_bridge_d3_possible(struct pci_dev *dev);
 void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
 
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
 
@@ -64,6 +64,7 @@ struct pcie_link_state {
 	u32 clkpm_capable:1;		/* Clock PM capable? */
 	u32 clkpm_enabled:1;		/* Current Clock PM state */
 	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
+	u32 clkpm_disable:1;		/* Clock PM disabled */
 
 	/* Exit latencies */
 	struct aspm_latency latency_up;	/* Upstream direction exit latency */
@@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
 
 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
 {
-	/* Don't enable Clock PM if the link is not Clock PM capable */
-	if (!link->clkpm_capable)
+	/*
+	 * Don't enable Clock PM if the link is not Clock PM capable
+	 * or Clock PM is disabled
+	 */
+	if (!link->clkpm_capable || link->clkpm_disable)
 		enable = 0;
 	/* Need nothing if the specified equals to current state */
 	if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
 	}
 	link->clkpm_enabled = enabled;
 	link->clkpm_default = enabled;
-	link->clkpm_capable = (blacklist) ? 0 : capable;
+	link->clkpm_capable = capable;
+	link->clkpm_disable = blacklist ? 1 : 0;
 }
 
 static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -1097,10 +1102,9 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
 		link->aspm_disable |= ASPM_STATE_L1;
 	pcie_config_aspm_link(link, policy_to_aspm_state(link));
 
-	if (state & PCIE_LINK_STATE_CLKPM) {
-		link->clkpm_capable = 0;
-		pcie_set_clkpm(link, 0);
-	}
+	if (state & PCIE_LINK_STATE_CLKPM)
+		link->clkpm_disable = 1;
+	pcie_set_clkpm(link, policy_to_clkpm_state(link));
 	mutex_unlock(&aspm_lock);
 	if (sem)
 		up_read(&pci_bus_sem);
 
@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
 
 	pc->chip.dev = &pdev->dev;
 	pc->chip.ops = &bcm2835_pwm_ops;
+	pc->chip.base = -1;
 	pc->chip.npwm = 2;
 	pc->chip.of_xlate = of_pwm_xlate_with_flags;
 	pc->chip.of_pwm_n_cells = 3;
 
@@ -228,24 +228,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
 	rcar_pwm->chip.base = -1;
 	rcar_pwm->chip.npwm = 1;
 
+	pm_runtime_enable(&pdev->dev);
+
 	ret = pwmchip_add(&rcar_pwm->chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
+		pm_runtime_disable(&pdev->dev);
 		return ret;
 	}
 
-	pm_runtime_enable(&pdev->dev);
-
 	return 0;
 }
 
 static int rcar_pwm_remove(struct platform_device *pdev)
 {
 	struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = pwmchip_remove(&rcar_pwm->chip);
 
 	pm_runtime_disable(&pdev->dev);
 
-	return pwmchip_remove(&rcar_pwm->chip);
+	return ret;
 }
 
 static const struct of_device_id rcar_pwm_of_table[] = {
 
@@ -415,16 +415,17 @@ static int tpu_probe(struct platform_device *pdev)
 	tpu->chip.base = -1;
 	tpu->chip.npwm = TPU_CHANNEL_MAX;
 
+	pm_runtime_enable(&pdev->dev);
+
 	ret = pwmchip_add(&tpu->chip);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to register PWM chip\n");
+		pm_runtime_disable(&pdev->dev);
 		return ret;
 	}
 
 	dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
 
-	pm_runtime_enable(&pdev->dev);
-
 	return 0;
 }
 
@@ -434,12 +435,10 @@ static int tpu_remove(struct platform_device *pdev)
 	int ret;
 
 	ret = pwmchip_remove(&tpu->chip);
-	if (ret)
-		return ret;
 
 	pm_runtime_disable(&pdev->dev);
 
-	return 0;
+	return ret;
 }
 
 #ifdef CONFIG_OF
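Both PWM fixes above apply the same ordering rule: runtime PM must already be enabled when pwmchip_add() runs, because a consumer can start using the chip the moment it is registered, and the enable must be rolled back on registration failure. A toy standalone model of why the order matters (simulation only, not driver code):

	#include <stdio.h>

	static int powered;

	static void consumer_callback(void)	/* may run inside register_chip() */
	{
		printf(powered ? "ok: power is on\n" : "BUG: chip used unpowered\n");
	}

	static void register_chip(void)
	{
		consumer_callback();	/* consumers can attach immediately */
	}

	int main(void)
	{
		powered = 1;		/* pm_runtime_enable() analogue: first */
		register_chip();	/* pwmchip_add() analogue: second */
		return 0;
	}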
@@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
 	 * Now we know this subchannel will stay, we can throw
 	 * our delayed uevent.
 	 */
-	dev_set_uevent_suppress(&sch->dev, 0);
-	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	if (dev_get_uevent_suppress(&sch->dev)) {
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	}
 	/* make it known to the system */
 	ret = ccw_device_add(cdev);
 	if (ret) {
@@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch)
 		 * Throw the delayed uevent for the subchannel, register
 		 * the ccw_device and exit.
 		 */
-		dev_set_uevent_suppress(&sch->dev, 0);
-		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		if (dev_get_uevent_suppress(&sch->dev)) {
+			/* should always be the case for the console */
+			dev_set_uevent_suppress(&sch->dev, 0);
+			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+		}
 		cdev = sch_get_cdev(sch);
 		rc = ccw_device_add(cdev);
 		if (rc) {
 
@@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
 	if (ret)
 		goto out_disable;
 
+	if (dev_get_uevent_suppress(&sch->dev)) {
+		dev_set_uevent_suppress(&sch->dev, 0);
+		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+	}
+
 	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
 			   sch->schid.cssid, sch->schid.ssid,
 			   sch->schid.sch_no);
 
@@ -1208,9 +1208,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
 		if (!rjt)
 			FC_RPORT_DBG(rdata, "PRLI bad response\n");
-		else
+		else {
 			FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
 				     rjt->er_reason, rjt->er_explan);
+			if (rjt->er_reason == ELS_RJT_UNAB &&
+			    rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+				fc_rport_enter_plogi(rdata);
+				goto out;
+			}
+		}
 		fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
 	}
 
@@ -342,13 +342,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
 		if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
 			ndlp->nrport = NULL;
 			ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
-		}
-		spin_unlock_irq(&vport->phba->hbalock);
+			spin_unlock_irq(&vport->phba->hbalock);
 
-		/* Remove original register reference. The host transport
-		 * won't reference this rport/remoteport any further.
-		 */
-		lpfc_nlp_put(ndlp);
+			/* Remove original register reference. The host transport
+			 * won't reference this rport/remoteport any further.
+			 */
+			lpfc_nlp_put(ndlp);
+		} else {
+			spin_unlock_irq(&vport->phba->hbalock);
+		}
 
 rport_err:
 	return;
 
@@ -2481,6 +2481,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 	    !pmb->u.mb.mbxStatus) {
 		rpi = pmb->u.mb.un.varWords[0];
 		vpi = pmb->u.mb.un.varRegLogin.vpi;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
 		lpfc_unreg_login(phba, vpi, rpi, pmb);
 		pmb->vport = vport;
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4011,6 +4013,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
 	struct lpfc_iocbq *piocb, *next_iocb;
 
 	spin_lock_irq(&phba->hbalock);
+	if (phba->hba_flag & HBA_IOQ_FLUSH ||
+	    !phba->sli4_hba.hdwq) {
+		spin_unlock_irq(&phba->hbalock);
+		return;
+	}
 	/* Indicate the I/O queues are flushed */
 	phba->hba_flag |= HBA_IOQ_FLUSH;
 	spin_unlock_irq(&phba->hbalock);
 
@@ -2012,7 +2012,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
 	if (session->target_id == ISCSI_MAX_TARGET) {
 		spin_unlock_irqrestore(&session->lock, flags);
 		mutex_unlock(&ihost->mutex);
-		return;
+		goto unbind_session_exit;
 	}
 
 	target_id = session->target_id;
@@ -2024,6 +2024,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
 	ida_simple_remove(&iscsi_sess_ida, target_id);
 
 	scsi_remove_target(&session->dev);
+
+unbind_session_exit:
 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
 	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
 }
 
@@ -907,7 +907,6 @@ struct pqi_scsi_dev {
 	u8	scsi3addr[8];
 	__be64	wwid;
 	u8	volume_id[16];
-	u8	unique_id[16];
 	u8	is_physical_device : 1;
 	u8	is_external_raid_device : 1;
 	u8	is_expander_smp_device : 1;
@@ -1130,8 +1129,9 @@ struct pqi_ctrl_info {
 	struct mutex	ofa_mutex; /* serialize ofa */
 	bool		controller_online;
 	bool		block_requests;
-	bool		in_shutdown;
+	bool		block_device_reset;
 	bool		in_ofa;
+	bool		in_shutdown;
 	u8		inbound_spanning_supported : 1;
 	u8		outbound_spanning_supported : 1;
 	u8		pqi_mode_enabled : 1;
@@ -1173,6 +1173,7 @@ struct pqi_ctrl_info {
 	struct pqi_ofa_memory *pqi_ofa_mem_virt_addr;
 	dma_addr_t	pqi_ofa_mem_dma_handle;
 	void		**pqi_ofa_chunk_virt_addr;
+	atomic_t	sync_cmds_outstanding;
 };
 
 enum pqi_ctrl_mode {
@@ -1423,6 +1424,11 @@ static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
 	return ctrl_info->block_requests;
 }
 
+static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->block_device_reset;
+}
+
 void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
 	struct sas_rphy *rphy);
 
@@ -249,6 +249,11 @@ static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
 	scsi_unblock_requests(ctrl_info->scsi_host);
 }
 
+static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->block_device_reset = true;
+}
+
 static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
 	unsigned long timeout_msecs)
 {
@@ -331,6 +336,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info,
 	return device->in_remove && !ctrl_info->in_shutdown;
 }
 
+static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
+{
+	ctrl_info->in_shutdown = true;
+}
+
+static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
+{
+	return ctrl_info->in_shutdown;
+}
+
 static inline void pqi_schedule_rescan_worker_with_delay(
 	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
 {
@@ -360,6 +375,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
 	cancel_delayed_work_sync(&ctrl_info->rescan_work);
 }
 
+static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
+{
+	cancel_work_sync(&ctrl_info->event_work);
+}
+
 static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
 {
 	if (!ctrl_info->heartbeat_counter)
@@ -628,79 +648,6 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
 		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
 }
 
-static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info,
-	u8 *scsi3addr, u16 vpd_page)
-{
-	int rc;
-	int i;
-	int pages;
-	unsigned char *buf, bufsize;
-
-	buf = kzalloc(256, GFP_KERNEL);
-	if (!buf)
-		return false;
-
-	/* Get the size of the page list first */
-	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
-		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
-		buf, SCSI_VPD_HEADER_SZ);
-	if (rc != 0)
-		goto exit_unsupported;
-
-	pages = buf[3];
-	if ((pages + SCSI_VPD_HEADER_SZ) <= 255)
-		bufsize = pages + SCSI_VPD_HEADER_SZ;
-	else
-		bufsize = 255;
-
-	/* Get the whole VPD page list */
-	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
-		VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES,
-		buf, bufsize);
-	if (rc != 0)
-		goto exit_unsupported;
-
-	pages = buf[3];
-	for (i = 1; i <= pages; i++)
-		if (buf[3 + i] == vpd_page)
-			goto exit_supported;
-
-exit_unsupported:
-	kfree(buf);
-	return false;
-
-exit_supported:
-	kfree(buf);
-	return true;
-}
-
-static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info,
-	u8 *scsi3addr, u8 *device_id, int buflen)
-{
-	int rc;
-	unsigned char *buf;
-
-	if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID))
-		return 1; /* function not supported */
-
-	buf = kzalloc(64, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	rc = pqi_scsi_inquiry(ctrl_info, scsi3addr,
-		VPD_PAGE | SCSI_VPD_DEVICE_ID,
-		buf, 64);
-	if (rc == 0) {
-		if (buflen > 16)
-			buflen = 16;
-		memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen);
-	}
-
-	kfree(buf);
-
-	return rc;
-}
-
 static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
 	struct pqi_scsi_dev *device,
 	struct bmic_identify_physical_device *buffer,
@@ -1385,14 +1332,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
 		}
 	}
 
-	if (pqi_get_device_id(ctrl_info, device->scsi3addr,
-		device->unique_id, sizeof(device->unique_id)) < 0)
-		dev_warn(&ctrl_info->pci_dev->dev,
-			"Can't get device id for scsi %d:%d:%d:%d\n",
-			ctrl_info->scsi_host->host_no,
-			device->bus, device->target,
-			device->lun);
-
 out:
 	kfree(buffer);
 
@@ -4122,6 +4061,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 		goto out;
 	}
 
+	atomic_inc(&ctrl_info->sync_cmds_outstanding);
+
 	io_request = pqi_alloc_io_request(ctrl_info);
 
 	put_unaligned_le16(io_request->index,
@@ -4168,6 +4109,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
 
 	pqi_free_io_request(io_request);
 
+	atomic_dec(&ctrl_info->sync_cmds_outstanding);
 out:
 	up(&ctrl_info->sync_request_sem);
 
@@ -5402,7 +5344,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
 
 	pqi_ctrl_busy(ctrl_info);
 	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
-	    pqi_ctrl_in_ofa(ctrl_info)) {
+	    pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
 		rc = SCSI_MLQUEUE_HOST_BUSY;
 		goto out;
 	}
@@ -5650,6 +5592,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
 	return 0;
 }
 
+static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
+{
+	while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
+		pqi_check_ctrl_health(ctrl_info);
+		if (pqi_ctrl_offline(ctrl_info))
+			return -ENXIO;
+		usleep_range(1000, 2000);
+	}
+
+	return 0;
+}
+
 static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
 	void *context)
 {
@@ -5787,17 +5741,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
 		shost->host_no, device->bus, device->target, device->lun);
 
 	pqi_check_ctrl_health(ctrl_info);
-	if (pqi_ctrl_offline(ctrl_info)) {
-		dev_err(&ctrl_info->pci_dev->dev,
-			"controller %u offlined - cannot send device reset\n",
-			ctrl_info->ctrl_id);
+	if (pqi_ctrl_offline(ctrl_info) ||
+	    pqi_device_reset_blocked(ctrl_info)) {
 		rc = FAILED;
 		goto out;
 	}
 
 	pqi_wait_until_ofa_finished(ctrl_info);
 
+	atomic_inc(&ctrl_info->sync_cmds_outstanding);
 	rc = pqi_device_reset(ctrl_info, device);
+	atomic_dec(&ctrl_info->sync_cmds_outstanding);
 
 out:
 	dev_err(&ctrl_info->pci_dev->dev,
@@ -6119,7 +6073,8 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
 
 	ctrl_info = shost_to_hba(sdev->host);
 
-	if (pqi_ctrl_in_ofa(ctrl_info))
+	if (pqi_ctrl_in_ofa(ctrl_info) ||
+	    pqi_ctrl_in_shutdown(ctrl_info))
 		return -EBUSY;
 
 	switch (cmd) {
@@ -6283,7 +6238,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
 	struct scsi_device *sdev;
 	struct pqi_scsi_dev *device;
 	unsigned long flags;
-	unsigned char uid[16];
+	u8 unique_id[16];
 
 	sdev = to_scsi_device(dev);
 	ctrl_info = shost_to_hba(sdev->host);
@@ -6296,16 +6251,22 @@ static ssize_t pqi_unique_id_show(struct device *dev,
 			flags);
 		return -ENODEV;
 	}
-	memcpy(uid, device->unique_id, sizeof(uid));
+
+	if (device->is_physical_device) {
+		memset(unique_id, 0, 8);
+		memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
+	} else {
+		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
+	}
 
 	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
 
 	return snprintf(buffer, PAGE_SIZE,
 		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
-		uid[0], uid[1], uid[2], uid[3],
-		uid[4], uid[5], uid[6], uid[7],
-		uid[8], uid[9], uid[10], uid[11],
-		uid[12], uid[13], uid[14], uid[15]);
+		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
+		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
+		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
+		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
 }
 
 static ssize_t pqi_lunid_show(struct device *dev,
@@ -7074,13 +7035,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
 	return pqi_revert_to_sis_mode(ctrl_info);
 }
 
+#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000
+
 static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 {
 	int rc;
 
-	rc = pqi_force_sis_mode(ctrl_info);
-	if (rc)
-		return rc;
+	if (reset_devices) {
+		sis_soft_reset(ctrl_info);
+		msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
+	} else {
+		rc = pqi_force_sis_mode(ctrl_info);
+		if (rc)
+			return rc;
+	}
 
 	/*
 	 * Wait until the controller is ready to start accepting SIS
@@ -7514,6 +7482,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
 
 	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
 	atomic_set(&ctrl_info->num_interrupts, 0);
+	atomic_set(&ctrl_info->sync_cmds_outstanding, 0);
 
 	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
 	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
@@ -7787,8 +7756,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
 		0, NULL, NO_TIMEOUT);
 }
 
-#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000
-
 static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
 {
 	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
@@ -7956,28 +7923,74 @@ static void pqi_pci_remove(struct pci_dev *pci_dev)
 	pqi_remove_ctrl(ctrl_info);
 }
 
+static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
+{
+	unsigned int i;
+	struct pqi_io_request *io_request;
+	struct scsi_cmnd *scmd;
+
+	for (i = 0; i < ctrl_info->max_io_slots; i++) {
+		io_request = &ctrl_info->io_request_pool[i];
+		if (atomic_read(&io_request->refcount) == 0)
+			continue;
+		scmd = io_request->scmd;
+		WARN_ON(scmd != NULL); /* IO command from SML */
+		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/
+	}
+}
+
 static void pqi_shutdown(struct pci_dev *pci_dev)
 {
 	int rc;
 	struct pqi_ctrl_info *ctrl_info;
 
 	ctrl_info = pci_get_drvdata(pci_dev);
-	if (!ctrl_info)
-		goto error;
+	if (!ctrl_info) {
+		dev_err(&pci_dev->dev,
+			"cache could not be flushed\n");
+		return;
+	}
+
+	pqi_disable_events(ctrl_info);
+	pqi_wait_until_ofa_finished(ctrl_info);
+	pqi_cancel_update_time_worker(ctrl_info);
+	pqi_cancel_rescan_worker(ctrl_info);
+	pqi_cancel_event_worker(ctrl_info);
+
+	pqi_ctrl_shutdown_start(ctrl_info);
+	pqi_ctrl_wait_until_quiesced(ctrl_info);
+
+	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
+	if (rc) {
+		dev_err(&pci_dev->dev,
+			"wait for pending I/O failed\n");
+		return;
+	}
+
+	pqi_ctrl_block_device_reset(ctrl_info);
+	pqi_wait_until_lun_reset_finished(ctrl_info);
 
 	/*
 	 * Write all data in the controller's battery-backed cache to
 	 * storage.
 	 */
 	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
-	pqi_free_interrupts(ctrl_info);
-	pqi_reset(ctrl_info);
-	if (rc == 0)
-		return;
+	if (rc)
+		dev_err(&pci_dev->dev,
+			"unable to flush controller cache\n");
+
+	pqi_ctrl_block_requests(ctrl_info);
+
+	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
+	if (rc) {
+		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
+		return;
+	}
+
+	pqi_crash_if_pending_command(ctrl_info);
+	pqi_reset(ctrl_info);
-
-error:
-	dev_warn(&pci_dev->dev,
-		"unable to flush controller cache\n");
 }
 
 static void pqi_process_lockup_action_param(void)
@@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
 	struct sas_phy *phy = pqi_sas_phy->phy;
 
 	sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
-	sas_phy_free(phy);
 	if (pqi_sas_phy->added_to_port)
 		list_del(&pqi_sas_phy->phy_list_entry);
+	sas_phy_delete(phy);
 	kfree(pqi_sas_phy);
 }
 
@@ -2725,8 +2725,10 @@ static int comedi_open(struct inode *inode, struct file *file)
 	}
 
 	cfp = kzalloc(sizeof(*cfp), GFP_KERNEL);
-	if (!cfp)
+	if (!cfp) {
+		comedi_dev_put(dev);
 		return -ENOMEM;
+	}
 
 	cfp->dev = dev;
 
@@ -92,6 +92,7 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
 	int ret;
 
 	for (i = 0; i < insn->n; i++) {
+		/* FIXME: lo bit 0 chooses voltage output or current output */
 		lo = ((data[i] & 0x0f) << 4) | (chan << 1) | 0x01;
 		hi = (data[i] & 0xff0) >> 4;
 
@@ -105,6 +106,8 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
 		if (ret)
 			return ret;
 
+		outb(hi, dev->iobase + DT2815_DATA);
+
 		devpriv->ao_readback[chan] = data[i];
 	}
 	return i;
@@ -228,8 +228,7 @@ int gasket_sysfs_create_entries(struct device *device,
 	}
 
 	mutex_lock(&mapping->mutex);
-	for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER);
-	     i++) {
+	for (i = 0; attrs[i].attr.attr.name != NULL; i++) {
 		if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
 			dev_err(device,
 				"Maximum number of sysfs nodes reached for device\n");
 
@@ -30,10 +30,6 @@
 */
 #define GASKET_SYSFS_MAX_NODES 196
 
-/* End markers for sysfs struct arrays. */
-#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END
-#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN)
-
 /*
 * Terminator struct for a gasket_sysfs_attr array. Must be at the end of
 * all gasket_sysfs_attribute arrays.
 
@@ -145,7 +145,8 @@ void vnt_int_process_data(struct vnt_private *priv)
 				priv->wake_up_count =
 					priv->hw->conf.listen_interval;
 
-			--priv->wake_up_count;
+			if (priv->wake_up_count)
+				--priv->wake_up_count;
 
 			/* Turn on wake up to listen next beacon */
 			if (priv->wake_up_count == 1)
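
The guard matters because wake_up_count is unsigned: decrementing it past zero wraps around to a huge value, and the "== 1" wake-up test below would then not fire again for roughly four billion beacons. A tiny standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int wake_up_count = 0;

	/* Unsigned 0 - 1 wraps to UINT_MAX; guard the decrement. */
	if (wake_up_count)
		--wake_up_count;

	if (wake_up_count == 1)
		puts("listen for next beacon");
	printf("count=%u\n", wake_up_count);	/* still 0, no wrap */
	return 0;
}
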
@@ -83,9 +83,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
 	case VNT_KEY_PAIRWISE:
 		key_mode |= mode;
 		key_inx = 4;
-		/* Don't save entry for pairwise key for station mode */
-		if (priv->op_mode == NL80211_IFTYPE_STATION)
-			clear_bit(entry, &priv->key_entry_inuse);
 		break;
 	default:
 		return -EINVAL;
 
@@ -109,7 +106,6 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
 int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 		 struct ieee80211_vif *vif, struct ieee80211_key_conf *key)
 {
-	struct ieee80211_bss_conf *conf = &vif->bss_conf;
 	struct vnt_private *priv = hw->priv;
 	u8 *mac_addr = NULL;
 	u8 key_dec_mode = 0;
 
@@ -151,16 +147,12 @@ int vnt_set_keys(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
 	}
 
-	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
 		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_PAIRWISE,
 				key_dec_mode, true);
-	} else {
-		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_DEFAULTKEY,
+	else
+		vnt_set_keymode(hw, mac_addr, key, VNT_KEY_GROUP_ADDRESS,
 				key_dec_mode, true);
-
-		vnt_set_keymode(hw, (u8 *)conf->bssid, key,
-				VNT_KEY_GROUP_ADDRESS, key_dec_mode, true);
-	}
 
 	return 0;
 }
 
@@ -633,8 +633,6 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 	priv->op_mode = vif->type;
 
-	vnt_set_bss_mode(priv);
-
 	/* LED blink on TX */
 	vnt_mac_set_led(priv, LEDSTS_STS, LEDSTS_INTER);
 
@@ -721,7 +719,6 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 	priv->basic_rates = conf->basic_rates;
 
 	vnt_update_top_rates(priv);
-	vnt_set_bss_mode(priv);
 
 	dev_dbg(&priv->usb->dev, "basic rates %x\n", conf->basic_rates);
 }
 
@@ -750,11 +747,14 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 			priv->short_slot_time = false;
 
 		vnt_set_short_slot_time(priv);
 		vnt_update_ifs(priv);
 		vnt_set_vga_gain_offset(priv, priv->bb_vga[0]);
 		vnt_update_pre_ed_threshold(priv, false);
 	}
 
+	if (changed & (BSS_CHANGED_BASIC_RATES | BSS_CHANGED_ERP_PREAMBLE |
+		       BSS_CHANGED_ERP_SLOT))
+		vnt_set_bss_mode(priv);
+
 	if (changed & BSS_CHANGED_TXPOWER)
 		vnt_rf_setpower(priv, priv->current_rate,
 				conf->chandef.chan->hw_value);
 
@@ -778,12 +778,15 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 			vnt_mac_reg_bits_on(priv, MAC_REG_TFTCTL,
 					    TFTCTL_TSFCNTREN);
 
-			vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
-				       conf->sync_tsf, priv->current_tsf);
-
 			vnt_mac_set_beacon_interval(priv, conf->beacon_int);
 
 			vnt_reset_next_tbtt(priv, conf->beacon_int);
+
+			vnt_adjust_tsf(priv, conf->beacon_rate->hw_value,
+				       conf->sync_tsf, priv->current_tsf);
+
+			vnt_update_next_tbtt(priv,
+					     conf->sync_tsf, conf->beacon_int);
 		} else {
 			vnt_clear_current_tsf(priv);
 
@@ -818,15 +821,11 @@ static void vnt_configure(struct ieee80211_hw *hw,
 {
 	struct vnt_private *priv = hw->priv;
 	u8 rx_mode = 0;
-	int rc;
 
 	*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
 
-	rc = vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
-			    MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
-
-	if (!rc)
-		rx_mode = RCR_MULTICAST | RCR_BROADCAST;
+	vnt_control_in(priv, MESSAGE_TYPE_READ, MAC_REG_RCR,
+		       MESSAGE_REQUEST_MACREG, sizeof(u8), &rx_mode);
 
 	dev_dbg(&priv->usb->dev, "rx mode in = %x\n", rx_mode);
 
@@ -867,8 +866,12 @@ static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 			return -EOPNOTSUPP;
 		break;
 	case DISABLE_KEY:
-		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
+		if (test_bit(key->hw_key_idx, &priv->key_entry_inuse)) {
 			clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
+
+			vnt_mac_disable_keyentry(priv, key->hw_key_idx);
+		}
+
 	default:
 		break;
 	}
 
@@ -302,10 +302,6 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
 	vtermnos[index] = vtermno;
 	cons_ops[index] = ops;
 
-	/* reserve all indices up to and including this index */
-	if (last_hvc < index)
-		last_hvc = index;
-
 	/* check if we need to re-register the kernel console */
 	hvc_check_console(index);
 
@@ -960,13 +956,22 @@ struct hvc_struct *hvc_alloc(uint32_t vtermno, int data,
 		    cons_ops[i] == hp->ops)
 			break;
 
-	/* no matching slot, just use a counter */
-	if (i >= MAX_NR_HVC_CONSOLES)
-		i = ++last_hvc;
+	if (i >= MAX_NR_HVC_CONSOLES) {
+
+		/* find 'empty' slot for console */
+		for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
+		}
+
+		/* no matching slot, just use a counter */
+		if (i == MAX_NR_HVC_CONSOLES)
+			i = ++last_hvc + MAX_NR_HVC_CONSOLES;
+	}
 
 	hp->index = i;
-	cons_ops[i] = ops;
-	vtermnos[i] = vtermno;
+	if (i < MAX_NR_HVC_CONSOLES) {
+		cons_ops[i] = ops;
+		vtermnos[i] = vtermno;
+	}
 
 	list_add_tail(&(hp->next), &hvc_structs);
 	mutex_unlock(&hvc_structs_mutex);
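
The hvc_alloc() change bounds every array write: an index is first reused from a free vtermnos[] slot, anything beyond MAX_NR_HVC_CONSOLES comes from a counter offset past the array, and cons_ops[]/vtermnos[] are only touched when the index is in range. A standalone sketch of that allocation strategy (hypothetical names and sizes):

#include <stdio.h>

#define MAX_SLOTS 4

static int slots[MAX_SLOTS] = { 7, -1, -1, -1 };	/* -1 == free */
static int last_idx;

/*
 * Reuse a free slot when one exists; otherwise hand out indices
 * offset past the array and never write to it.
 */
static int alloc_index(int vterm)
{
	int i;

	for (i = 0; i < MAX_SLOTS && slots[i] != -1; i++)
		;
	if (i == MAX_SLOTS)
		i = ++last_idx + MAX_SLOTS;	/* out-of-array index */

	if (i < MAX_SLOTS)
		slots[i] = vterm;		/* only write in bounds */
	return i;
}

int main(void)
{
	int a = alloc_index(10);
	int b = alloc_index(11);

	printf("%d %d\n", a, b);	/* 1 2: both found free slots */
	return 0;
}
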
@@ -632,18 +632,21 @@ init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
 	tty_port_init(&info->port);
 	info->port.ops = &rocket_port_ops;
 	info->flags &= ~ROCKET_MODE_MASK;
-	switch (pc104[board][line]) {
-	case 422:
-		info->flags |= ROCKET_MODE_RS422;
-		break;
-	case 485:
-		info->flags |= ROCKET_MODE_RS485;
-		break;
-	case 232:
-	default:
+	if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
+		switch (pc104[board][line]) {
+		case 422:
+			info->flags |= ROCKET_MODE_RS422;
+			break;
+		case 485:
+			info->flags |= ROCKET_MODE_RS485;
+			break;
+		case 232:
+		default:
+			info->flags |= ROCKET_MODE_RS232;
+			break;
+		}
+	else
 		info->flags |= ROCKET_MODE_RS232;
-		break;
-	}
 
 	info->intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR;
 	if (sInitChan(ctlp, &info->channel, aiop, chan) == 0) {
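
The rocket fix is a plain out-of-bounds read: pc104[board][line] was indexed before anything verified that board and line fit the table. The shape of the check, as a standalone sketch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int pc104_1[] = { 232, 422, 485, 232 };
static const int *pc104[] = { pc104_1 };

/*
 * Index the fixed table only after proving both indices are in
 * range; otherwise fall back to a safe default (RS232 here).
 */
static int lookup_mode(unsigned int board, unsigned int line)
{
	if (board < ARRAY_SIZE(pc104) && line < ARRAY_SIZE(pc104_1))
		return pc104[board][line];
	return 232;
}

int main(void)
{
	printf("%d %d\n", lookup_mode(0, 1), lookup_mode(5, 9));
	return 0;
}
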
@@ -680,6 +680,12 @@ static int owl_uart_probe(struct platform_device *pdev)
 		return PTR_ERR(owl_port->clk);
 	}
 
+	ret = clk_prepare_enable(owl_port->clk);
+	if (ret) {
+		dev_err(&pdev->dev, "could not enable clk\n");
+		return ret;
+	}
+
 	owl_port->port.dev = &pdev->dev;
 	owl_port->port.line = pdev->id;
 	owl_port->port.type = PORT_OWL;
 
@@ -712,6 +718,7 @@ static int owl_uart_remove(struct platform_device *pdev)
 
 	uart_remove_one_port(&owl_uart_driver, &owl_port->port);
 	owl_uart_ports[pdev->id] = NULL;
+	clk_disable_unprepare(owl_port->clk);
 
 	return 0;
 }
 
@@ -873,9 +873,16 @@ static void sci_receive_chars(struct uart_port *port)
 			tty_insert_flip_char(tport, c, TTY_NORMAL);
 		} else {
 			for (i = 0; i < count; i++) {
-				char c = serial_port_in(port, SCxRDR);
+				char c;
 
-				status = serial_port_in(port, SCxSR);
+				if (port->type == PORT_SCIF ||
+				    port->type == PORT_HSCIF) {
+					status = serial_port_in(port, SCxSR);
+					c = serial_port_in(port, SCxRDR);
+				} else {
+					c = serial_port_in(port, SCxRDR);
+					status = serial_port_in(port, SCxSR);
+				}
 				if (uart_handle_sysrq_char(port, c)) {
 					count--; i--;
 					continue;
@@ -30,13 +30,15 @@
 
 #define CDNS_UART_TTY_NAME	"ttyPS"
 #define CDNS_UART_NAME		"xuartps"
+#define CDNS_UART_MAJOR		0	/* use dynamic node allocation */
+#define CDNS_UART_MINOR		0	/* works best with devtmpfs */
+#define CDNS_UART_NR_PORTS	16
 #define CDNS_UART_FIFO_SIZE	64	/* FIFO size */
 #define CDNS_UART_REGISTER_SPACE	0x1000
 #define TX_TIMEOUT		500000
 
 /* Rx Trigger level */
 static int rx_trigger_level = 56;
-static int uartps_major;
 module_param(rx_trigger_level, uint, 0444);
 MODULE_PARM_DESC(rx_trigger_level, "Rx trigger level, 1-63 bytes");
 
@@ -182,7 +184,6 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
  * @pclk:		APB clock
  * @cdns_uart_driver:	Pointer to UART driver
  * @baud:		Current baud rate
- * @id:			Port ID
 * @clk_rate_change_nb:	Notifier block for clock changes
 * @quirks:		Flags for RXBS support.
 */
@@ -192,7 +193,6 @@ struct cdns_uart {
 	struct clk		*pclk;
 	struct uart_driver	*cdns_uart_driver;
 	unsigned int		baud;
-	int			id;
 	struct notifier_block	clk_rate_change_nb;
 	u32			quirks;
 	bool cts_override;
 
@@ -1119,6 +1119,8 @@ static const struct uart_ops cdns_uart_ops = {
 #endif
 };
 
+static struct uart_driver cdns_uart_uart_driver;
+
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
 /**
  * cdns_uart_console_putchar - write the character to the FIFO buffer
 
@@ -1258,6 +1260,16 @@ static int cdns_uart_console_setup(struct console *co, char *options)
 
 	return uart_set_options(port, co, baud, parity, bits, flow);
 }
+
+static struct console cdns_uart_console = {
+	.name	= CDNS_UART_TTY_NAME,
+	.write	= cdns_uart_console_write,
+	.device	= uart_console_device,
+	.setup	= cdns_uart_console_setup,
+	.flags	= CON_PRINTBUFFER,
+	.index	= -1, /* Specified on the cmdline (e.g. console=ttyPS ) */
+	.data	= &cdns_uart_uart_driver,
+};
 #endif /* CONFIG_SERIAL_XILINX_PS_UART_CONSOLE */
 
 #ifdef CONFIG_PM_SLEEP
 
@@ -1389,89 +1401,8 @@ static const struct of_device_id cdns_uart_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
 
-/*
- * Maximum number of instances without alias IDs but if there is alias
- * which target "< MAX_UART_INSTANCES" range this ID can't be used.
- */
-#define MAX_UART_INSTANCES	32
-
-/* Stores static aliases list */
-static DECLARE_BITMAP(alias_bitmap, MAX_UART_INSTANCES);
-static int alias_bitmap_initialized;
-
-/* Stores actual bitmap of allocated IDs with alias IDs together */
-static DECLARE_BITMAP(bitmap, MAX_UART_INSTANCES);
-/* Protect bitmap operations to have unique IDs */
-static DEFINE_MUTEX(bitmap_lock);
-
-static int cdns_get_id(struct platform_device *pdev)
-{
-	int id, ret;
-
-	mutex_lock(&bitmap_lock);
-
-	/* Alias list is stable that's why get alias bitmap only once */
-	if (!alias_bitmap_initialized) {
-		ret = of_alias_get_alias_list(cdns_uart_of_match, "serial",
-					      alias_bitmap, MAX_UART_INSTANCES);
-		if (ret && ret != -EOVERFLOW) {
-			mutex_unlock(&bitmap_lock);
-			return ret;
-		}
-
-		alias_bitmap_initialized++;
-	}
-
-	/* Make sure that alias ID is not taken by instance without alias */
-	bitmap_or(bitmap, bitmap, alias_bitmap, MAX_UART_INSTANCES);
-
-	dev_dbg(&pdev->dev, "Alias bitmap: %*pb\n",
-		MAX_UART_INSTANCES, bitmap);
-
-	/* Look for a serialN alias */
-	id = of_alias_get_id(pdev->dev.of_node, "serial");
-	if (id < 0) {
-		dev_warn(&pdev->dev,
-			 "No serial alias passed. Using the first free id\n");
-
-		/*
-		 * Start with id 0 and check if there is no serial0 alias
-		 * which points to device which is compatible with this driver.
-		 * If alias exists then try next free position.
-		 */
-		id = 0;
-
-		for (;;) {
-			dev_info(&pdev->dev, "Checking id %d\n", id);
-			id = find_next_zero_bit(bitmap, MAX_UART_INSTANCES, id);
-
-			/* No free empty instance */
-			if (id == MAX_UART_INSTANCES) {
-				dev_err(&pdev->dev, "No free ID\n");
-				mutex_unlock(&bitmap_lock);
-				return -EINVAL;
-			}
-
-			dev_dbg(&pdev->dev, "The empty id is %d\n", id);
-			/* Check if ID is empty */
-			if (!test_and_set_bit(id, bitmap)) {
-				/* Break the loop if bit is taken */
-				dev_dbg(&pdev->dev,
-					"Selected ID %d allocation passed\n",
-					id);
-				break;
-			}
-			dev_dbg(&pdev->dev,
-				"Selected ID %d allocation failed\n", id);
-			/* if taking bit fails then try next one */
-			id++;
-		}
-	}
-
-	mutex_unlock(&bitmap_lock);
-
-	return id;
-}
+/* Temporary variable for storing number of instances */
+static int instances;
 
 /**
  * cdns_uart_probe - Platform driver probe
 
@@ -1481,16 +1412,11 @@ static int cdns_get_id(struct platform_device *pdev)
  */
 static int cdns_uart_probe(struct platform_device *pdev)
 {
-	int rc, irq;
+	int rc, id, irq;
 	struct uart_port *port;
 	struct resource *res;
 	struct cdns_uart *cdns_uart_data;
 	const struct of_device_id *match;
-	struct uart_driver *cdns_uart_uart_driver;
-	char *driver_name;
-#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
-	struct console *cdns_uart_console;
-#endif
 
 	cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
 				      GFP_KERNEL);
 
@@ -1500,64 +1426,35 @@ static int cdns_uart_probe(struct platform_device *pdev)
 	if (!port)
 		return -ENOMEM;
 
-	cdns_uart_uart_driver = devm_kzalloc(&pdev->dev,
-					     sizeof(*cdns_uart_uart_driver),
-					     GFP_KERNEL);
-	if (!cdns_uart_uart_driver)
-		return -ENOMEM;
-
-	cdns_uart_data->id = cdns_get_id(pdev);
-	if (cdns_uart_data->id < 0)
-		return cdns_uart_data->id;
+	/* Look for a serialN alias */
+	id = of_alias_get_id(pdev->dev.of_node, "serial");
+	if (id < 0)
+		id = 0;
 
-	/* There is a need to use unique driver name */
-	driver_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s%d",
-				     CDNS_UART_NAME, cdns_uart_data->id);
-	if (!driver_name) {
-		rc = -ENOMEM;
-		goto err_out_id;
+	if (id >= CDNS_UART_NR_PORTS) {
+		dev_err(&pdev->dev, "Cannot get uart_port structure\n");
+		return -ENODEV;
 	}
 
-	cdns_uart_uart_driver->owner = THIS_MODULE;
-	cdns_uart_uart_driver->driver_name = driver_name;
-	cdns_uart_uart_driver->dev_name = CDNS_UART_TTY_NAME;
-	cdns_uart_uart_driver->major = uartps_major;
-	cdns_uart_uart_driver->minor = cdns_uart_data->id;
-	cdns_uart_uart_driver->nr = 1;
-
+	if (!cdns_uart_uart_driver.state) {
+		cdns_uart_uart_driver.owner = THIS_MODULE;
+		cdns_uart_uart_driver.driver_name = CDNS_UART_NAME;
+		cdns_uart_uart_driver.dev_name = CDNS_UART_TTY_NAME;
+		cdns_uart_uart_driver.major = CDNS_UART_MAJOR;
+		cdns_uart_uart_driver.minor = CDNS_UART_MINOR;
+		cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
-	cdns_uart_console = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_console),
-					 GFP_KERNEL);
-	if (!cdns_uart_console) {
-		rc = -ENOMEM;
-		goto err_out_id;
-	}
-
-	strncpy(cdns_uart_console->name, CDNS_UART_TTY_NAME,
-		sizeof(cdns_uart_console->name));
-	cdns_uart_console->index = cdns_uart_data->id;
-	cdns_uart_console->write = cdns_uart_console_write;
-	cdns_uart_console->device = uart_console_device;
-	cdns_uart_console->setup = cdns_uart_console_setup;
-	cdns_uart_console->flags = CON_PRINTBUFFER;
-	cdns_uart_console->data = cdns_uart_uart_driver;
-	cdns_uart_uart_driver->cons = cdns_uart_console;
+		cdns_uart_uart_driver.cons = &cdns_uart_console;
 #endif
 
-	rc = uart_register_driver(cdns_uart_uart_driver);
-	if (rc < 0) {
-		dev_err(&pdev->dev, "Failed to register driver\n");
-		goto err_out_id;
+		rc = uart_register_driver(&cdns_uart_uart_driver);
+		if (rc < 0) {
+			dev_err(&pdev->dev, "Failed to register driver\n");
+			return rc;
+		}
 	}
 
-	cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
-
-	/*
-	 * Setting up proper name_base needs to be done after uart
-	 * registration because tty_driver structure is not filled.
-	 * name_base is 0 by default.
-	 */
-	cdns_uart_uart_driver->tty_driver->name_base = cdns_uart_data->id;
+	cdns_uart_data->cdns_uart_driver = &cdns_uart_uart_driver;
 
 	match = of_match_node(cdns_uart_of_match, pdev->dev.of_node);
 	if (match && match->data) {
 
@@ -1634,6 +1531,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
 	port->flags = UPF_BOOT_AUTOCONF;
 	port->ops = &cdns_uart_ops;
 	port->fifosize = CDNS_UART_FIFO_SIZE;
+	port->line = id;
 
 	/*
 	 * Register the port.
 
@@ -1665,7 +1563,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
 	console_port = port;
 #endif
 
-	rc = uart_add_one_port(cdns_uart_uart_driver, port);
+	rc = uart_add_one_port(&cdns_uart_uart_driver, port);
 	if (rc) {
 		dev_err(&pdev->dev,
 			"uart_add_one_port() failed; err=%i\n", rc);
 
@@ -1675,13 +1573,15 @@ static int cdns_uart_probe(struct platform_device *pdev)
 #ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
 	/* This is not port which is used for console that's why clean it up */
 	if (console_port == port &&
-	    !(cdns_uart_uart_driver->cons->flags & CON_ENABLED))
+	    !(cdns_uart_uart_driver.cons->flags & CON_ENABLED))
 		console_port = NULL;
 #endif
 
-	uartps_major = cdns_uart_uart_driver->tty_driver->major;
 	cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
 							     "cts-override");
+
+	instances++;
+
 	return 0;
 
 err_out_pm_disable:
 
@@ -1697,12 +1597,8 @@ err_out_clk_disable:
 err_out_clk_dis_pclk:
 	clk_disable_unprepare(cdns_uart_data->pclk);
 err_out_unregister_driver:
-	uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
-err_out_id:
-	mutex_lock(&bitmap_lock);
-	if (cdns_uart_data->id < MAX_UART_INSTANCES)
-		clear_bit(cdns_uart_data->id, bitmap);
-	mutex_unlock(&bitmap_lock);
+	if (!instances)
+		uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
 	return rc;
 }
 
@@ -1725,10 +1621,6 @@ static int cdns_uart_remove(struct platform_device *pdev)
 #endif
 	rc = uart_remove_one_port(cdns_uart_data->cdns_uart_driver, port);
 	port->mapbase = 0;
-	mutex_lock(&bitmap_lock);
-	if (cdns_uart_data->id < MAX_UART_INSTANCES)
-		clear_bit(cdns_uart_data->id, bitmap);
-	mutex_unlock(&bitmap_lock);
 	clk_disable_unprepare(cdns_uart_data->uartclk);
 	clk_disable_unprepare(cdns_uart_data->pclk);
 	pm_runtime_disable(&pdev->dev);
 
@@ -1741,13 +1633,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
 		console_port = NULL;
 #endif
 
-	/* If this is last instance major number should be initialized */
-	mutex_lock(&bitmap_lock);
-	if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
-		uartps_major = 0;
-	mutex_unlock(&bitmap_lock);
-
-	uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
+	if (!--instances)
+		uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
 	return rc;
 }
 
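
Taken together, the uartps hunks back out the per-port dynamic uart_driver/console allocation in favour of one static driver shared by every port: the first probe registers it, later probes only bump an instance count, and the last remove unregisters it. A standalone sketch of that register-once pattern (hypothetical names):

#include <stdio.h>

struct drv { int registered; };

static struct drv shared_drv;	/* one static driver for all ports */
static int instances;

static int probe_one(void)
{
	/* First probe registers the shared driver; later probes
	 * only bump the instance count. */
	if (!shared_drv.registered)
		shared_drv.registered = 1;
	instances++;
	return 0;
}

static void remove_one(void)
{
	/* Last remove unregisters; earlier removes only decrement. */
	if (!--instances)
		shared_drv.registered = 0;
}

int main(void)
{
	probe_one();
	probe_one();
	remove_one();
	remove_one();
	printf("registered=%d instances=%d\n",
	       shared_drv.registered, instances);
	return 0;
}
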
@@ -81,6 +81,7 @@
 #include <linux/errno.h>
 #include <linux/kd.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/major.h>
 #include <linux/mm.h>
 #include <linux/console.h>
 
@@ -350,7 +351,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
 	/* allocate everything in one go */
 	memsize = cols * rows * sizeof(char32_t);
 	memsize += rows * sizeof(char32_t *);
-	p = kmalloc(memsize, GFP_KERNEL);
+	p = vmalloc(memsize);
 	if (!p)
 		return NULL;
 
@@ -366,7 +367,7 @@ static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
 
 static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
 {
-	kfree(vc->vc_uni_screen);
+	vfree(vc->vc_uni_screen);
 	vc->vc_uni_screen = new_uniscr;
 }
 
@@ -1206,7 +1207,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
 		return 0;
 
-	if (new_screen_size > (4 << 20))
+	if (new_screen_size > KMALLOC_MAX_SIZE)
 		return -EINVAL;
 	newscreen = kzalloc(new_screen_size, GFP_USER);
 	if (!newscreen)
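
The vt hunks move the unicode screen buffer from kmalloc() to vmalloc(), since cols * rows * sizeof(char32_t) plus the row-pointer table can exceed what kmalloc() will serve, and make vc_do_resize() check against KMALLOC_MAX_SIZE instead of a hardcoded 4 MiB. The size computation, with the overflow checks spelled out, as a standalone sketch (uint32_t standing in for char32_t):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Layout used by the unicode screen buffer: one pointer per row,
 * followed by the cells themselves, allocated in one go.
 */
static void *screen_alloc(size_t cols, size_t rows)
{
	size_t memsize;

	/* reject sizes whose intermediate products would overflow */
	if (rows > SIZE_MAX / sizeof(uint32_t *))
		return NULL;
	if (cols && rows > SIZE_MAX / cols / sizeof(uint32_t))
		return NULL;

	memsize = cols * rows * sizeof(uint32_t);
	if (memsize > SIZE_MAX - rows * sizeof(uint32_t *))
		return NULL;
	memsize += rows * sizeof(uint32_t *);

	return malloc(memsize);	/* the kernel code uses vmalloc() */
}

int main(void)
{
	void *p = screen_alloc(80, 25);

	printf("%s\n", p ? "ok" : "too big");
	free(p);
	return 0;
}
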
@@ -412,9 +412,12 @@ static void acm_ctrl_irq(struct urb *urb)
 
 exit:
 	retval = usb_submit_urb(urb, GFP_ATOMIC);
-	if (retval && retval != -EPERM)
+	if (retval && retval != -EPERM && retval != -ENODEV)
 		dev_err(&acm->control->dev,
 			"%s - usb_submit_urb failed: %d\n", __func__, retval);
+	else
+		dev_vdbg(&acm->control->dev,
+			 "control resubmission terminated %d\n", retval);
 }
 
 static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
 
@@ -430,6 +433,8 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
 			dev_err(&acm->data->dev,
 				"urb %d failed submission with %d\n",
 				index, res);
+		} else {
+			dev_vdbg(&acm->data->dev, "intended failure %d\n", res);
 		}
 		set_bit(index, &acm->read_urbs_free);
 		return res;
 
@@ -471,6 +476,7 @@ static void acm_read_bulk_callback(struct urb *urb)
 	int status = urb->status;
 	bool stopped = false;
 	bool stalled = false;
+	bool cooldown = false;
 
 	dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
 		 rb->index, urb->actual_length, status);
 
@@ -497,6 +503,14 @@ static void acm_read_bulk_callback(struct urb *urb)
 			__func__, status);
 		stopped = true;
 		break;
+	case -EOVERFLOW:
+	case -EPROTO:
+		dev_dbg(&acm->data->dev,
+			"%s - cooling babbling device\n", __func__);
+		usb_mark_last_busy(acm->dev);
+		set_bit(rb->index, &acm->urbs_in_error_delay);
+		cooldown = true;
+		break;
 	default:
 		dev_dbg(&acm->data->dev,
 			"%s - nonzero urb status received: %d\n",
 
@@ -518,9 +532,11 @@ static void acm_read_bulk_callback(struct urb *urb)
 	 */
 	smp_mb__after_atomic();
 
-	if (stopped || stalled) {
+	if (stopped || stalled || cooldown) {
 		if (stalled)
 			schedule_work(&acm->work);
+		else if (cooldown)
+			schedule_delayed_work(&acm->dwork, HZ / 2);
 		return;
 	}
 
@@ -557,14 +573,20 @@ static void acm_softint(struct work_struct *work)
 	struct acm *acm = container_of(work, struct acm, work);
 
 	if (test_bit(EVENT_RX_STALL, &acm->flags)) {
-		if (!(usb_autopm_get_interface(acm->data))) {
+		smp_mb(); /* against acm_suspend() */
+		if (!acm->susp_count) {
 			for (i = 0; i < acm->rx_buflimit; i++)
 				usb_kill_urb(acm->read_urbs[i]);
 			usb_clear_halt(acm->dev, acm->in);
 			acm_submit_read_urbs(acm, GFP_KERNEL);
-			usb_autopm_put_interface(acm->data);
+			clear_bit(EVENT_RX_STALL, &acm->flags);
 		}
-		clear_bit(EVENT_RX_STALL, &acm->flags);
 	}
 
+	if (test_and_clear_bit(ACM_ERROR_DELAY, &acm->flags)) {
+		for (i = 0; i < ACM_NR; i++)
+			if (test_and_clear_bit(i, &acm->urbs_in_error_delay))
+				acm_submit_read_urb(acm, i, GFP_NOIO);
+	}
+
 	if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
 
@@ -1333,6 +1355,7 @@ made_compressed_probe:
 	acm->readsize = readsize;
 	acm->rx_buflimit = num_rx_buf;
 	INIT_WORK(&acm->work, acm_softint);
+	INIT_DELAYED_WORK(&acm->dwork, acm_softint);
 	init_waitqueue_head(&acm->wioctl);
 	spin_lock_init(&acm->write_lock);
 	spin_lock_init(&acm->read_lock);
 
@@ -1542,6 +1565,7 @@ static void acm_disconnect(struct usb_interface *intf)
 
 	acm_kill_urbs(acm);
 	cancel_work_sync(&acm->work);
+	cancel_delayed_work_sync(&acm->dwork);
 
 	tty_unregister_device(acm_tty_driver, acm->minor);
 
@@ -1584,6 +1608,8 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 
 	acm_kill_urbs(acm);
 	cancel_work_sync(&acm->work);
+	cancel_delayed_work_sync(&acm->dwork);
+	acm->urbs_in_error_delay = 0;
 
 	return 0;
 }
 
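
These cdc-acm hunks add a third disposition for a failed read URB: besides "stopped" (device gone) and "stalled" (halt cleared from the workqueue), a babbling device returning -EPROTO or -EOVERFLOW is now "cooled down" and its URBs are resubmitted from delayed work half a second later. Roughly that triage, as a standalone sketch (the mapping below is illustrative, not copied verbatim from the driver):

#include <errno.h>
#include <stdio.h>

enum action { RESUBMIT, STOP, CLEAR_STALL, COOL_DOWN };

/*
 * A babbling device floods the bus with protocol errors; instead of
 * resubmitting immediately (and burning CPU in a tight loop), the
 * URB is parked and retried after a delay.
 */
static enum action classify(int status)
{
	switch (status) {
	case 0:
		return RESUBMIT;
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		return STOP;		/* URB killed or device gone */
	case -EPIPE:
		return CLEAR_STALL;	/* endpoint stalled */
	case -EOVERFLOW:
	case -EPROTO:
		return COOL_DOWN;	/* babble: delay the retry */
	default:
		return RESUBMIT;
	}
}

int main(void)
{
	printf("%d %d\n", classify(-EPROTO), classify(0));
	return 0;
}
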
@@ -109,8 +109,11 @@ struct acm {
 #		define EVENT_TTY_WAKEUP	0
 #		define EVENT_RX_STALL	1
 #		define ACM_THROTTLED	2
+#		define ACM_ERROR_DELAY	3
+	unsigned long urbs_in_error_delay;		/* these need to be restarted after a delay */
 	struct usb_cdc_line_coding line;		/* bits, stop, parity */
-	struct work_struct work;			/* work queue entry for line discipline waking up */
+	struct work_struct work;			/* work queue entry for various purposes*/
+	struct delayed_work dwork;			/* for cool downs needed in error recovery */
 	unsigned int ctrlin;				/* input control lines (DCD, DSR, RI, break, overruns) */
 	unsigned int ctrlout;				/* output control lines (DTR, RTS) */
 	struct async_icount iocount;			/* counters for control line changes */
 
@@ -1223,6 +1223,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 #ifdef CONFIG_PM
 			udev->reset_resume = 1;
 #endif
+			/* Don't set the change_bits when the device
+			 * was powered off.
+			 */
+			if (test_bit(port1, hub->power_bits))
+				set_bit(port1, hub->change_bits);
 
 		} else {
 			/* The power session is gone; tell hub_wq */
 
@@ -2723,13 +2728,11 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
 {
 	int old_scheme_first_port =
 		port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
-	int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
 
 	if (udev->speed >= USB_SPEED_SUPER)
 		return false;
 
-	return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
-			      || quick_enumeration);
+	return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
 }
 
 /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
 
@@ -3088,6 +3091,15 @@ static int check_port_resume_type(struct usb_device *udev,
 		if (portchange & USB_PORT_STAT_C_ENABLE)
 			usb_clear_port_feature(hub->hdev, port1,
 					USB_PORT_FEAT_C_ENABLE);
+
+		/*
+		 * Whatever made this reset-resume necessary may have
+		 * turned on the port1 bit in hub->change_bits. But after
+		 * a successful reset-resume we want the bit to be clear;
+		 * if it was on it would indicate that something happened
+		 * following the reset-resume.
+		 */
+		clear_bit(port1, hub->change_bits);
 	}
 
 	return status;
 
@@ -588,12 +588,13 @@ void usb_sg_cancel(struct usb_sg_request *io)
 	int i, retval;
 
 	spin_lock_irqsave(&io->lock, flags);
-	if (io->status) {
+	if (io->status || io->count == 0) {
 		spin_unlock_irqrestore(&io->lock, flags);
 		return;
 	}
 	/* shut everything down */
 	io->status = -ECONNRESET;
+	io->count++;		/* Keep the request alive until we're done */
 	spin_unlock_irqrestore(&io->lock, flags);
 
 	for (i = io->entries - 1; i >= 0; --i) {
 
@@ -607,6 +608,12 @@ void usb_sg_cancel(struct usb_sg_request *io)
 			dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
 				 __func__, retval);
 	}
+
+	spin_lock_irqsave(&io->lock, flags);
+	io->count--;
+	if (!io->count)
+		complete(&io->complete);
+	spin_unlock_irqrestore(&io->lock, flags);
 }
 EXPORT_SYMBOL_GPL(usb_sg_cancel);
 
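
The usb_sg_cancel() fix pins the request across the unlocked unlink phase: the canceller takes its own reference (io->count++) under the lock, so the request cannot be completed and freed out from under it while individual URBs are unlinked, and the final decrement signals completion. The same locking pattern as a standalone pthread sketch (hypothetical names):

#include <pthread.h>
#include <stdio.h>

struct req {
	pthread_mutex_t lock;
	int count;	/* outstanding users of this request */
	int status;
};

static void cancel_req(struct req *io)
{
	pthread_mutex_lock(&io->lock);
	if (io->status || io->count == 0) {	/* already done/cancelled */
		pthread_mutex_unlock(&io->lock);
		return;
	}
	io->status = -1;
	io->count++;	/* keep the request alive while unlocked */
	pthread_mutex_unlock(&io->lock);

	/* ... unlink in-flight work without holding the lock ... */

	pthread_mutex_lock(&io->lock);
	if (!--io->count)
		puts("last user: complete and free");
	pthread_mutex_unlock(&io->lock);
}

int main(void)
{
	struct req io = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };

	cancel_req(&io);
	/* the normal completion path drops the remaining reference */
	printf("count=%d status=%d\n", io.count, io.status);
	return 0;
}
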
@@ -430,6 +430,10 @@ static const struct usb_device_id usb_quirk_list[] = {
 	/* Corsair K70 LUX */
 	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
 
+	/* Corsair K70 RGB RAPDIFIRE */
+	{ USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT |
+	  USB_QUIRK_DELAY_CTRL_MSG },
+
 	/* MIDI keyboard WORLDE MINI */
 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
 			USB_QUIRK_CONFIG_INTF_STRINGS },