This is the 5.10.85 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmG4cugACgkQONu9yGCS
aT4+uxAAiLJvOIA6ElsmMq2c3pNu9UDPh58j1FlmhggAxr7baIvR1UuEZURTSLW9
pnu+r9bHkhJGBOpANfPJAQZqv+JtCi3crMhw0mwHJ0Mls3TNSmclzB1+jGM4w93E
cT+5hoDeQqwZwpKYvWI7u9UGEE0BXluVTRCvmuncaJ8wGbxnDgV0AEXe32XDlFxB
kSLAXO1FFn8Z1yMg9BMVURU9IAszdwCIhqbcNWnPunOPowdHDWBJdF7eSZUqmJfq
TMyFgm6c6ENbBrwYt+9qf+1oS/D9r6TEjwaFoRJ4ApWSAD4iKV7U6dA0rxm9mrMl
nSAxNXuSXXYszoYjrxPPRGhCY/URahs1Vmju1WK//4vz3brxk5N88T+2nN7PMYKn
bIHGTl9SadlrHvi/OBqOvbsNMHX+ln/V/y7ct0fsxXeNBHYXdXCtimRDvtsj9kmp
HO9OLXVsF6DUM22ODW28Vxt7HGN3XQzs0y2jwfzdMV81p3oEqP9wWQITwG2LVVAE
WdkvQ/3ugdkR9F15Vp0cjkbJQLN4UbYUJW5K7SZTW013TgsYyIHQrP5qj9xvfwNt
KdILXVMH2JqPXAkRqmFqIeNIUX2oevWwpgV/SwqIj3T5ytmXPluZsaMlGg5xZ401
Xuhp0LPU6rl+wcCt1bInZvn8nrTVDADpao7xTbSyoel5TAunVrk=
=u1Am
-----END PGP SIGNATURE-----

Merge 5.10.85 into android12-5.10-lts

Changes in 5.10.85
    usb: gadget: uvc: fix multiple opens
    gcc-plugins: simplify GCC plugin-dev capability test
    gcc-plugins: fix gcc 11 indigestion with plugins...
    HID: quirks: Add quirk for the Microsoft Surface 3 type-cover
    HID: google: add eel USB id
    HID: add hid_is_usb() function to make it simpler for USB detection
    HID: add USB_HID dependancy to hid-prodikeys
    HID: add USB_HID dependancy to hid-chicony
    HID: add USB_HID dependancy on some USB HID drivers
    HID: bigbenff: prevent null pointer dereference
    HID: wacom: fix problems when device is not a valid USB device
    HID: check for valid USB device for many HID drivers
    nft_set_pipapo: Fix bucket load in AVX2 lookup routine for six 8-bit groups
    IB/hfi1: Insure use of smp_processor_id() is preempt disabled
    IB/hfi1: Fix early init panic
    IB/hfi1: Fix leak of rcvhdrtail_dummy_kvaddr
    can: kvaser_usb: get CAN clock frequency from device
    can: kvaser_pciefd: kvaser_pciefd_rx_error_frame(): increase correct stats->{rx,tx}_errors counter
    can: sja1000: fix use after free in ems_pcmcia_add_card()
    x86/sme: Explicitly map new EFI memmap table as encrypted
    drm/amd/amdkfd: adjust dummy functions' placement
    drm/amdkfd: separate kfd_iommu_resume from kfd_resume
    drm/amdgpu: add amdgpu_amdkfd_resume_iommu
    drm/amdgpu: move iommu_resume before ip init/resume
    drm/amdgpu: init iommu after amdkfd device init
    drm/amdkfd: fix boot failure when iommu is disabled in Picasso.
    nfc: fix potential NULL pointer deref in nfc_genl_dump_ses_done
    selftests: netfilter: add a vrf+conntrack testcase
    vrf: don't run conntrack on vrf with !dflt qdisc
    bpf, x86: Fix "no previous prototype" warning
    bpf: Fix the off-by-two error in range markings
    ice: ignore dropped packets during init
    bonding: make tx_rebalance_counter an atomic
    nfp: Fix memory leak in nfp_cpp_area_cache_add()
    seg6: fix the iif in the IPv6 socket control block
    udp: using datalen to cap max gso segments
    netfilter: conntrack: annotate data-races around ct->timeout
    iavf: restore MSI state on reset
    iavf: Fix reporting when setting descriptor count
    IB/hfi1: Correct guard on eager buffer deallocation
    devlink: fix netns refcount leak in devlink_nl_cmd_reload()
    net/sched: fq_pie: prevent dismantle issue
    KVM: x86: Wait for IPIs to be delivered when handling Hyper-V TLB flush hypercall
    mm: bdi: initialize bdi_min_ratio when bdi is unregistered
    ALSA: ctl: Fix copy of updated id with element read/write
    ALSA: hda/realtek - Add headset Mic support for Lenovo ALC897 platform
    ALSA: hda/realtek: Fix quirk for TongFang PHxTxX1
    ALSA: pcm: oss: Fix negative period/buffer sizes
    ALSA: pcm: oss: Limit the period size to 16MB
    ALSA: pcm: oss: Handle missing errors in snd_pcm_oss_change_params*()
    scsi: qla2xxx: Format log strings only if needed
    btrfs: clear extent buffer uptodate when we fail to write it
    btrfs: replace the BUG_ON in btrfs_del_root_ref with proper error handling
    md: fix update super 1.0 on rdev size change
    nfsd: fix use-after-free due to delegation race
    nfsd: Fix nsfd startup race (again)
    tracefs: Have new files inherit the ownership of their parent
    mmc: renesas_sdhi: initialize variable properly when tuning
    clk: qcom: regmap-mux: fix parent clock lookup
    drm/syncobj: Deal with signalled fences in drm_syncobj_find_fence.
    can: pch_can: pch_can_rx_normal: fix use after free
    can: m_can: Disable and ignore ELO interrupt
    libata: add horkage for ASMedia 1092
    wait: add wake_up_pollfree()
    binder: use wake_up_pollfree()
    signalfd: use wake_up_pollfree()
    aio: keep poll requests on waitqueue until completed
    aio: fix use-after-free due to missing POLLFREE handling
    net: mvpp2: fix XDP rx queues registering
    tracefs: Set all files to the same group ownership as the mount option
    block: fix ioprio_get(IOPRIO_WHO_PGRP) vs setuid(2)
    scsi: pm80xx: Do not call scsi_remove_host() in pm8001_alloc()
    scsi: scsi_debug: Fix buffer size of REPORT ZONES command
    qede: validate non LSO skb length
    PM: runtime: Fix pm_runtime_active() kerneldoc comment
    ASoC: rt5682: Fix crash due to out of scope stack vars
    ASoC: qdsp6: q6routing: Fix return value from msm_routing_put_audio_mixer
    ASoC: codecs: wsa881x: fix return values from kcontrol put
    ASoC: codecs: wcd934x: handle channel mappping list correctly
    ASoC: codecs: wcd934x: return correct value from mixer put
    RDMA/hns: Do not halt commands during reset until later
    RDMA/hns: Do not destroy QP resources in the hw resetting phase
    clk: imx: use module_platform_driver
    i40e: Fix failed opcode appearing if handling messages from VF
    i40e: Fix pre-set max number of queues for VF
    mtd: rawnand: fsmc: Take instruction delay into account
    mtd: rawnand: fsmc: Fix timing computation
    i40e: Fix NULL pointer dereference in i40e_dbg_dump_desc
    Revert "PCI: aardvark: Fix support for PCI_ROM_ADDRESS1 on emulated bridge"
    perf tools: Fix SMT detection fast read path
    Documentation/locking/locktypes: Update migrate_disable() bits.
    dt-bindings: net: Reintroduce PHY no lane swap binding
    tools build: Remove needless libpython-version feature check that breaks test-all fast path
    net: cdc_ncm: Allow for dwNtbOutMaxSize to be unset or zero
    net: altera: set a couple error code in probe()
    net: fec: only clear interrupt of handling queue in fec_enet_rx_queue()
    net, neigh: clear whole pneigh_entry at alloc time
    net/qla3xxx: fix an error code in ql_adapter_up()
    selftests/fib_tests: Rework fib_rp_filter_test()
    USB: gadget: detect too-big endpoint 0 requests
    USB: gadget: zero allocate endpoint 0 buffers
    usb: core: config: fix validation of wMaxPacketValue entries
    xhci: Remove CONFIG_USB_DEFAULT_PERSIST to prevent xHCI from runtime suspending
    usb: core: config: using bit mask instead of individual bits
    xhci: avoid race between disable slot command and host runtime suspend
    iio: gyro: adxrs290: fix data signedness
    iio: trigger: Fix reference counting
    iio: trigger: stm32-timer: fix MODULE_ALIAS
    iio: stk3310: Don't return error code in interrupt handler
    iio: mma8452: Fix trigger reference couting
    iio: ltr501: Don't return error code in trigger handler
    iio: kxsd9: Don't return error code in trigger handler
    iio: itg3200: Call iio_trigger_notify_done() on error
    iio: dln2-adc: Fix lockdep complaint
    iio: dln2: Check return value of devm_iio_trigger_register()
    iio: at91-sama5d2: Fix incorrect sign extension
    iio: adc: stm32: fix a current leak by resetting pcsel before disabling vdda
    iio: adc: axp20x_adc: fix charging current reporting on AXP22x
    iio: ad7768-1: Call iio_trigger_notify_done() on error
    iio: accel: kxcjk-1013: Fix possible memory leak in probe and remove
    csky: fix typo of fpu config macro
    irqchip/aspeed-scu: Replace update_bits with write_bits.
    irqchip/armada-370-xp: Fix return value of armada_370_xp_msi_alloc()
    irqchip/armada-370-xp: Fix support for Multi-MSI interrupts
    irqchip/irq-gic-v3-its.c: Force synchronisation when issuing INVALL
    irqchip: nvic: Fix offset for Interrupt Priority Offsets
    misc: fastrpc: fix improper packet size calculation
    bpf: Add selftests to cover packet access corner cases
    kbuild: simplify GCC_PLUGINS enablement in dummy-tools/gcc
    doc: gcc-plugins: update gcc-plugins.rst
    MAINTAINERS: adjust GCC PLUGINS after gcc-plugin.sh removal
    Documentation/Kbuild: Remove references to gcc-plugin.sh
    Linux 5.10.85

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I351da1b00f30a370b021125180a48b1c1ecb97ce
commit afc997898e
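
Several of the HID entries in this release converge on one guard, visible verbatim in the driver hunks further below: probe() must confirm the device really sits on the USB transport before dereferencing to_usb_interface(). A minimal sketch of that pattern follows; the driver name is a placeholder and the sketch makes no claim to match any one driver's code::

  /* Illustrative only: "example_probe" is a made-up driver hook; the
   * hid_is_usb() check and the to_usb_interface() call mirror the
   * hunks shown later in this diff.
   */
  #include <linux/hid.h>
  #include <linux/usb.h>

  static int example_probe(struct hid_device *hdev,
                           const struct hid_device_id *id)
  {
          struct usb_interface *intf;
          int ret;

          if (!hid_is_usb(hdev))
                  return -EINVAL;  /* e.g. a uhid-backed virtual device */

          intf = to_usb_interface(hdev->dev.parent);
          /* interface-number checks would go here, then the usual setup */

          ret = hid_parse(hdev);
          if (ret)
                  return ret;
          return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
  }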
@@ -91,6 +91,14 @@ properties:
      compensate for the board being designed with the lanes
      swapped.

  enet-phy-lane-no-swap:
    $ref: /schemas/types.yaml#/definitions/flag
    description:
      If set, indicates that PHY will disable swap of the
      TX/RX lanes. This property allows the PHY to work correcly after
      e.g. wrong bootstrap configuration caused by issues in PCB
      layout design.

  eee-broken-100tx:
    $ref: /schemas/types.yaml#definitions/flag
    description:
@@ -11,16 +11,13 @@ compiler [1]_. They are useful for runtime instrumentation and static analysis.
We can analyse, change and add further code during compilation via
callbacks [2]_, GIMPLE [3]_, IPA [4]_ and RTL passes [5]_.

The GCC plugin infrastructure of the kernel supports all gcc versions from
4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
separate directory.
Plugin source files have to be compilable by both a C and a C++ compiler as well
because gcc versions 4.5 and 4.6 are compiled by a C compiler,
gcc-4.7 can be compiled by a C or a C++ compiler,
and versions 4.8+ can only be compiled by a C++ compiler.
The GCC plugin infrastructure of the kernel supports building out-of-tree
modules, cross-compilation and building in a separate directory.
Plugin source files have to be compilable by a C++ compiler.

Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
powerpc architectures.
Currently the GCC plugin infrastructure supports only some architectures.
Grep "select HAVE_GCC_PLUGINS" to find out which architectures support
GCC plugins.

This infrastructure was ported from grsecurity [6]_ and PaX [7]_.

@@ -47,20 +44,13 @@ Files
This is a compatibility header for GCC plugins.
It should be always included instead of individual gcc headers.

**$(src)/scripts/gcc-plugin.sh**

This script checks the availability of the included headers in
gcc-common.h and chooses the proper host compiler to build the plugins
(gcc-4.7 can be built by either gcc or g++).

**$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h,
$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h**

These headers automatically generate the registration structures for
GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
from 4.5 to 6.0.
GIMPLE, SIMPLE_IPA, IPA and RTL passes.
They should be preferred to creating the structures by hand.

@@ -68,21 +58,25 @@ Usage
=====

You must install the gcc plugin headers for your gcc version,
e.g., on Ubuntu for gcc-4.9::
e.g., on Ubuntu for gcc-10::

  apt-get install gcc-4.9-plugin-dev
  apt-get install gcc-10-plugin-dev

Or on Fedora::

  dnf install gcc-plugin-devel

Enable a GCC plugin based feature in the kernel config::
Enable the GCC plugin infrastructure and some plugin(s) you want to use
in the kernel config::

  CONFIG_GCC_PLUGIN_CYC_COMPLEXITY = y
  CONFIG_GCC_PLUGINS=y
  CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
  CONFIG_GCC_PLUGIN_LATENT_ENTROPY=y
  ...

To compile only the plugin(s)::
To compile the minimum tool set including the plugin(s)::

  make gcc-plugins
  make scripts

or just run the kernel make and compile the whole kernel with
the cyclomatic complexity GCC plugin.
@@ -91,7 +85,8 @@ the cyclomatic complexity GCC plugin.
4. How to add a new GCC plugin
==============================

The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
The GCC plugins are in scripts/gcc-plugins/. You need to put plugin source files
right under scripts/gcc-plugins/. Creating subdirectories is not supported.
It must be added to scripts/gcc-plugins/Makefile, scripts/Makefile.gcc-plugins
and a relevant Kconfig file.
See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.

@@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
    spin_lock(&p->lock);
    p->count += this_cpu_read(var2);

On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
which makes the above code fully equivalent. On a PREEMPT_RT kernel
migrate_disable() ensures that the task is pinned on the current CPU which
in turn guarantees that the per-CPU access to var1 and var2 are staying on
the same CPU.
the same CPU while the task remains preemptible.

The migrate_disable() substitution is not valid for the following
scenario::
@@ -456,9 +454,8 @@ scenario::
    p = this_cpu_ptr(&var1);
    p->val = func2();

While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
here migrate_disable() does not protect against reentrancy from a
preempting task. A correct substitution for this case is::
This breaks because migrate_disable() does not protect against reentrancy from
a preempting task. A correct substitution for this case is::

  func()
  {
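The hunk's context window cuts the replacement example off after the opening brace. What locktypes.rst goes on to show is simply the per-CPU access kept under the spinlock that already serializes the data; a sketch of that shape, with the names taken from the surrounding example rather than quoted from the file::

  func()
  {
    struct foo *p;

    /* the lock, not migrate_disable(), excludes the preempting task */
    spin_lock(&lock);
    p = this_cpu_ptr(&var1);
    p->val = func2();
    spin_unlock(&lock);
  }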
@@ -7310,7 +7310,6 @@ L: linux-hardening@vger.kernel.org
S: Maintained
F: Documentation/kbuild/gcc-plugins.rst
F: scripts/Makefile.gcc-plugins
F: scripts/gcc-plugin.sh
F: scripts/gcc-plugins/

GCOV BASED KERNEL PROFILING
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 84
SUBLEVEL = 85
EXTRAVERSION =
NAME = Dare mighty things

@ -211,7 +211,7 @@ asmlinkage void do_trap_illinsn(struct pt_regs *regs)
|
||||
|
||||
asmlinkage void do_trap_fpe(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_FP
|
||||
#ifdef CONFIG_CPU_HAS_FPU
|
||||
return fpu_fpe(regs);
|
||||
#else
|
||||
do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->pc,
|
||||
@ -221,7 +221,7 @@ asmlinkage void do_trap_fpe(struct pt_regs *regs)
|
||||
|
||||
asmlinkage void do_trap_priv(struct pt_regs *regs)
|
||||
{
|
||||
#ifdef CONFIG_CPU_HAS_FP
|
||||
#ifdef CONFIG_CPU_HAS_FPU
|
||||
if (user_mode(regs) && fpu_libc_helper(regs))
|
||||
return;
|
||||
#endif
|
||||
|
@ -1946,6 +1946,7 @@ config EFI
|
||||
depends on ACPI
|
||||
select UCS2_STRING
|
||||
select EFI_RUNTIME_WRAPPERS
|
||||
select ARCH_USE_MEMREMAP_PROT
|
||||
help
|
||||
This enables the kernel to use EFI runtime services that are
|
||||
available (such as the EFI variable services).
|
||||
|
@ -85,7 +85,7 @@
|
||||
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
|
||||
#define KVM_REQ_TLB_FLUSH_GUEST \
|
||||
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
|
||||
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
|
||||
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
|
||||
|
||||
|
@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
|
||||
return;
|
||||
}
|
||||
|
||||
new = early_memremap(data.phys_map, data.size);
|
||||
new = early_memremap_prot(data.phys_map, data.size,
|
||||
pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
|
||||
if (!new) {
|
||||
pr_err("Failed to map new boot services memmap\n");
|
||||
return;
|
||||
|
@ -214,6 +214,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
|
||||
pgrp = task_pgrp(current);
|
||||
else
|
||||
pgrp = find_vpid(who);
|
||||
read_lock(&tasklist_lock);
|
||||
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
|
||||
tmpio = get_task_ioprio(p);
|
||||
if (tmpio < 0)
|
||||
@ -223,6 +224,8 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
|
||||
else
|
||||
ret = ioprio_best(ret, tmpio);
|
||||
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
|
||||
read_unlock(&tasklist_lock);
|
||||
|
||||
break;
|
||||
case IOPRIO_WHO_USER:
|
||||
uid = make_kuid(current_user_ns(), who);
|
||||
|
@ -4584,23 +4584,20 @@ static int binder_thread_release(struct binder_proc *proc,
|
||||
__release(&t->lock);
|
||||
|
||||
/*
|
||||
* If this thread used poll, make sure we remove the waitqueue
|
||||
* from any epoll data structures holding it with POLLFREE.
|
||||
* waitqueue_active() is safe to use here because we're holding
|
||||
* the inner lock.
|
||||
* If this thread used poll, make sure we remove the waitqueue from any
|
||||
* poll data structures holding it.
|
||||
*/
|
||||
if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
|
||||
waitqueue_active(&thread->wait)) {
|
||||
wake_up_poll(&thread->wait, EPOLLHUP | POLLFREE);
|
||||
}
|
||||
if (thread->looper & BINDER_LOOPER_STATE_POLL)
|
||||
wake_up_pollfree(&thread->wait);
|
||||
|
||||
binder_inner_proc_unlock(thread->proc);
|
||||
|
||||
/*
|
||||
* This is needed to avoid races between wake_up_poll() above and
|
||||
* and ep_remove_waitqueue() called for other reasons (eg the epoll file
|
||||
* descriptor being closed); ep_remove_waitqueue() holds an RCU read
|
||||
* lock, so we can be sure it's done after calling synchronize_rcu().
|
||||
* This is needed to avoid races between wake_up_pollfree() above and
|
||||
* someone else removing the last entry from the queue for other reasons
|
||||
* (e.g. ep_remove_wait_queue() being called due to an epoll file
|
||||
* descriptor being closed). Such other users hold an RCU read lock, so
|
||||
* we can be sure they're done after we call synchronize_rcu().
|
||||
*/
|
||||
if (thread->looper & BINDER_LOOPER_STATE_POLL)
|
||||
synchronize_rcu();
|
||||
|
@ -3831,6 +3831,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
|
||||
/* Odd clown on sil3726/4726 PMPs */
|
||||
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
|
||||
/* Similar story with ASMedia 1092 */
|
||||
{ "ASMT109x- Config", NULL, ATA_HORKAGE_DISABLE },
|
||||
|
||||
/* Weird ATAPI devices */
|
||||
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
|
||||
|
@ -231,7 +231,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
|
||||
.probe = imx8qxp_lpcg_clk_probe,
|
||||
};
|
||||
|
||||
builtin_platform_driver(imx8qxp_lpcg_clk_driver);
|
||||
module_platform_driver(imx8qxp_lpcg_clk_driver);
|
||||
|
||||
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
|
||||
MODULE_DESCRIPTION("NXP i.MX8QXP LPCG clock driver");
|
||||
|
@ -151,7 +151,7 @@ static struct platform_driver imx8qxp_clk_driver = {
|
||||
},
|
||||
.probe = imx8qxp_clk_probe,
|
||||
};
|
||||
builtin_platform_driver(imx8qxp_clk_driver);
|
||||
module_platform_driver(imx8qxp_clk_driver);
|
||||
|
||||
MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>");
|
||||
MODULE_DESCRIPTION("NXP i.MX8QXP clock driver");
|
||||
|
@ -28,7 +28,7 @@ static u8 mux_get_parent(struct clk_hw *hw)
|
||||
val &= mask;
|
||||
|
||||
if (mux->parent_map)
|
||||
return qcom_find_src_index(hw, mux->parent_map, val);
|
||||
return qcom_find_cfg_index(hw, mux->parent_map, val);
|
||||
|
||||
return val;
|
||||
}
|
||||
|
@ -69,6 +69,18 @@ int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_find_src_index);
|
||||
|
||||
int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map, u8 cfg)
|
||||
{
|
||||
int i, num_parents = clk_hw_get_num_parents(hw);
|
||||
|
||||
for (i = 0; i < num_parents; i++)
|
||||
if (cfg == map[i].cfg)
|
||||
return i;
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_find_cfg_index);
|
||||
|
||||
struct regmap *
|
||||
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
|
||||
{
|
||||
|
@ -49,6 +49,8 @@ extern void
|
||||
qcom_pll_set_fsm_mode(struct regmap *m, u32 reg, u8 bias_count, u8 lock_count);
|
||||
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
|
||||
u8 src);
|
||||
extern int qcom_find_cfg_index(struct clk_hw *hw, const struct parent_map *map,
|
||||
u8 cfg);
|
||||
|
||||
extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
|
||||
const char *name, unsigned long rate);
|
||||
|
@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
|
||||
amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
|
||||
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
|
||||
|
||||
#ifdef CONFIG_HSA_AMD
|
||||
ret = kgd2kfd_init();
|
||||
amdgpu_amdkfd_gpuvm_init_mem_limits();
|
||||
#else
|
||||
ret = -ENOENT;
|
||||
#endif
|
||||
kfd_initialized = !ret;
|
||||
|
||||
return ret;
|
||||
@ -194,6 +190,16 @@ void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
|
||||
kgd2kfd_suspend(adev->kfd.dev, run_pm);
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (adev->kfd.dev)
|
||||
r = kgd2kfd_resume_iommu(adev->kfd.dev);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
|
||||
{
|
||||
int r = 0;
|
||||
@ -695,86 +701,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
|
||||
|
||||
return adev->have_atomics_support;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_HSA_AMD
|
||||
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
|
||||
{
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm)
|
||||
{
|
||||
}
|
||||
|
||||
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
|
||||
unsigned int asic_type, bool vf)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bool kgd2kfd_device_init(struct kfd_dev *kfd,
|
||||
struct drm_device *ddev,
|
||||
const struct kgd2kfd_shared_resources *gpu_resources)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void kgd2kfd_device_exit(struct kfd_dev *kfd)
|
||||
{
|
||||
}
|
||||
|
||||
void kgd2kfd_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
|
||||
{
|
||||
}
|
||||
|
||||
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kgd2kfd_post_reset(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
|
||||
{
|
||||
}
|
||||
|
||||
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
|
||||
{
|
||||
}
|
||||
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
@ -94,11 +94,6 @@ enum kgd_engine_type {
|
||||
KGD_ENGINE_MAX
|
||||
};
|
||||
|
||||
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
|
||||
struct mm_struct *mm);
|
||||
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
|
||||
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
|
||||
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
|
||||
|
||||
struct amdkfd_process_info {
|
||||
/* List head of all VMs that belong to a KFD process */
|
||||
@ -126,14 +121,13 @@ int amdgpu_amdkfd_init(void);
|
||||
void amdgpu_amdkfd_fini(void);
|
||||
|
||||
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
|
||||
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
|
||||
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
|
||||
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
|
||||
const void *ih_ring_entry);
|
||||
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
|
||||
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
|
||||
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
|
||||
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
|
||||
uint32_t vmid, uint64_t gpu_addr,
|
||||
uint32_t *ib_cmd, uint32_t ib_len);
|
||||
@ -153,6 +147,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
|
||||
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
|
||||
int queue_bit);
|
||||
|
||||
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
|
||||
struct mm_struct *mm);
|
||||
#if IS_ENABLED(CONFIG_HSA_AMD)
|
||||
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
|
||||
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
|
||||
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
|
||||
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
|
||||
#else
|
||||
static inline
|
||||
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline
|
||||
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
/* Shared API */
|
||||
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
|
||||
void **mem_obj, uint64_t *gpu_addr,
|
||||
@ -215,8 +241,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
|
||||
struct file *filp, u32 pasid,
|
||||
void **vm, void **process_info,
|
||||
struct dma_fence **ef);
|
||||
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
|
||||
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
|
||||
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
|
||||
@ -236,23 +260,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
|
||||
struct kgd_mem *mem, void **kptr, uint64_t *size);
|
||||
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
|
||||
struct dma_fence **ef);
|
||||
|
||||
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
|
||||
struct kfd_vm_fault_info *info);
|
||||
|
||||
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
|
||||
struct dma_buf *dmabuf,
|
||||
uint64_t va, void *vm,
|
||||
struct kgd_mem **mem, uint64_t *size,
|
||||
uint64_t *mmap_offset);
|
||||
|
||||
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
|
||||
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
|
||||
|
||||
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
|
||||
struct tile_config *config);
|
||||
#if IS_ENABLED(CONFIG_HSA_AMD)
|
||||
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
|
||||
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm);
|
||||
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
|
||||
#else
|
||||
static inline
|
||||
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
/* KGD2KFD callbacks */
|
||||
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
|
||||
int kgd2kfd_resume_mm(struct mm_struct *mm);
|
||||
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
|
||||
struct dma_fence *fence);
|
||||
#if IS_ENABLED(CONFIG_HSA_AMD)
|
||||
int kgd2kfd_init(void);
|
||||
void kgd2kfd_exit(void);
|
||||
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
|
||||
@ -262,15 +306,78 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
|
||||
const struct kgd2kfd_shared_resources *gpu_resources);
|
||||
void kgd2kfd_device_exit(struct kfd_dev *kfd);
|
||||
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
|
||||
int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
|
||||
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
|
||||
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
|
||||
int kgd2kfd_post_reset(struct kfd_dev *kfd);
|
||||
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
|
||||
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
|
||||
int kgd2kfd_resume_mm(struct mm_struct *mm);
|
||||
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
|
||||
struct dma_fence *fence);
|
||||
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
|
||||
#else
|
||||
static inline int kgd2kfd_init(void)
|
||||
{
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static inline void kgd2kfd_exit(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
|
||||
unsigned int asic_type, bool vf)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline
|
||||
bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
|
||||
const struct kgd2kfd_shared_resources *gpu_resources)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
|
||||
{
|
||||
}
|
||||
|
||||
static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline
|
||||
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
|
||||
{
|
||||
}
|
||||
|
||||
static inline
|
||||
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
|
||||
|
@ -2913,6 +2913,10 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
|
||||
r = amdgpu_amdkfd_resume_iommu(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
r = amdgpu_device_ip_resume_phase1(adev);
|
||||
if (r)
|
||||
return r;
|
||||
@ -4296,6 +4300,10 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
|
||||
|
||||
if (!r) {
|
||||
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
|
||||
r = amdgpu_amdkfd_resume_iommu(tmp_adev);
|
||||
if (r)
|
||||
goto out;
|
||||
|
||||
r = amdgpu_device_ip_resume_phase1(tmp_adev);
|
||||
if (r)
|
||||
goto out;
|
||||
|
@ -751,6 +751,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
|
||||
|
||||
kfd_cwsr_init(kfd);
|
||||
|
||||
if(kgd2kfd_resume_iommu(kfd))
|
||||
goto device_iommu_error;
|
||||
|
||||
if (kfd_resume(kfd))
|
||||
goto kfd_resume_error;
|
||||
|
||||
@ -896,17 +899,21 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int kfd_resume(struct kfd_dev *kfd)
|
||||
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
err = kfd_iommu_resume(kfd);
|
||||
if (err) {
|
||||
if (err)
|
||||
dev_err(kfd_device,
|
||||
"Failed to resume IOMMU for device %x:%x\n",
|
||||
kfd->pdev->vendor, kfd->pdev->device);
|
||||
return err;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
static int kfd_resume(struct kfd_dev *kfd)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
err = kfd->dqm->ops.start(kfd->dqm);
|
||||
if (err) {
|
||||
|
@ -391,8 +391,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
|
||||
|
||||
if (*fence) {
|
||||
ret = dma_fence_chain_find_seqno(fence, point);
|
||||
if (!ret)
|
||||
if (!ret) {
|
||||
/* If the requested seqno is already signaled
|
||||
* drm_syncobj_find_fence may return a NULL
|
||||
* fence. To make sure the recipient gets
|
||||
* signalled, use a new fence instead.
|
||||
*/
|
||||
if (!*fence)
|
||||
*fence = dma_fence_get_stub();
|
||||
|
||||
goto out;
|
||||
}
|
||||
dma_fence_put(*fence);
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
|
@ -207,14 +207,14 @@ config HID_CHERRY
|
||||
|
||||
config HID_CHICONY
|
||||
tristate "Chicony devices"
|
||||
depends on HID
|
||||
depends on USB_HID
|
||||
default !EXPERT
|
||||
help
|
||||
Support for Chicony Tactical pad and special keys on Chicony keyboards.
|
||||
|
||||
config HID_CORSAIR
|
||||
tristate "Corsair devices"
|
||||
depends on HID && USB && LEDS_CLASS
|
||||
depends on USB_HID && LEDS_CLASS
|
||||
help
|
||||
Support for Corsair devices that are not fully compliant with the
|
||||
HID standard.
|
||||
@ -245,7 +245,7 @@ config HID_MACALLY
|
||||
|
||||
config HID_PRODIKEYS
|
||||
tristate "Prodikeys PC-MIDI Keyboard support"
|
||||
depends on HID && SND
|
||||
depends on USB_HID && SND
|
||||
select SND_RAWMIDI
|
||||
help
|
||||
Support for Prodikeys PC-MIDI Keyboard device support.
|
||||
@ -541,7 +541,7 @@ config HID_LENOVO
|
||||
|
||||
config HID_LOGITECH
|
||||
tristate "Logitech devices"
|
||||
depends on HID
|
||||
depends on USB_HID
|
||||
depends on LEDS_CLASS
|
||||
default !EXPERT
|
||||
help
|
||||
@ -918,7 +918,7 @@ config HID_SAITEK
|
||||
|
||||
config HID_SAMSUNG
|
||||
tristate "Samsung InfraRed remote control or keyboards"
|
||||
depends on HID
|
||||
depends on USB_HID
|
||||
help
|
||||
Support for Samsung InfraRed remote control or keyboards.
|
||||
|
||||
|
@ -918,8 +918,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
|
||||
drvdata->tp = &asus_i2c_tp;
|
||||
|
||||
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
|
||||
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
|
||||
@ -947,8 +946,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
drvdata->tp = &asus_t100chi_tp;
|
||||
}
|
||||
|
||||
if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
|
||||
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
|
||||
struct usb_host_interface *alt =
|
||||
to_usb_interface(hdev->dev.parent)->altsetting;
|
||||
|
||||
|
@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
|
||||
struct bigben_device, worker);
|
||||
struct hid_field *report_field = bigben->report->field[0];
|
||||
|
||||
if (bigben->removed)
|
||||
if (bigben->removed || !report_field)
|
||||
return;
|
||||
|
||||
if (bigben->work_led) {
|
||||
|
@ -58,8 +58,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
|
||||
static __u8 *ch_switch12_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
||||
unsigned int *rsize)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
struct usb_interface *intf;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return rdesc;
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||
/* Change usage maximum and logical maximum from 0x7fff to
|
||||
* 0x2fff, so they don't exceed HID_MAX_USAGES */
|
||||
|
@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
|
||||
int ret;
|
||||
unsigned long quirks = id->driver_data;
|
||||
struct corsair_drvdata *drvdata;
|
||||
struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
|
||||
struct usb_interface *usbif;
|
||||
|
||||
if (!hid_is_usb(dev))
|
||||
return -EINVAL;
|
||||
|
||||
usbif = to_usb_interface(dev->dev.parent);
|
||||
|
||||
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
|
||||
GFP_KERNEL);
|
||||
|
@ -50,7 +50,7 @@ struct elan_drvdata {
|
||||
|
||||
static int is_not_elan_touchpad(struct hid_device *hdev)
|
||||
{
|
||||
if (hdev->bus == BUS_USB) {
|
||||
if (hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
|
||||
return (intf->altsetting->desc.bInterfaceNumber !=
|
||||
|
@ -229,6 +229,9 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
struct elo_priv *priv;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
@ -528,6 +528,8 @@ static void hammer_remove(struct hid_device *hdev)
|
||||
static const struct hid_device_id hammer_devices[] = {
|
||||
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
|
||||
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_DON) },
|
||||
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
|
||||
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_EEL) },
|
||||
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
|
||||
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
|
||||
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
|
||||
|
@ -140,12 +140,17 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
|
||||
static int holtek_kbd_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
int ret = hid_parse(hdev);
|
||||
struct usb_interface *intf;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
ret = hid_parse(hdev);
|
||||
if (!ret)
|
||||
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
|
||||
struct hid_input *hidinput;
|
||||
list_for_each_entry(hidinput, &hdev->inputs, list) {
|
||||
|
@ -62,6 +62,14 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
|
||||
return rdesc;
|
||||
}
|
||||
|
||||
static int holtek_mouse_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct hid_device_id holtek_mouse_devices[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
|
||||
USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
|
||||
@ -83,6 +91,7 @@ static struct hid_driver holtek_mouse_driver = {
|
||||
.name = "holtek_mouse",
|
||||
.id_table = holtek_mouse_devices,
|
||||
.report_fixup = holtek_mouse_report_fixup,
|
||||
.probe = holtek_mouse_probe,
|
||||
};
|
||||
|
||||
module_hid_driver(holtek_mouse_driver);
|
||||
|
@ -488,6 +488,7 @@
|
||||
#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
|
||||
#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
|
||||
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
|
||||
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
|
||||
|
||||
#define USB_VENDOR_ID_GOTOP 0x08f2
|
||||
#define USB_DEVICE_ID_SUPER_Q2 0x007f
|
||||
@ -865,6 +866,7 @@
|
||||
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
|
||||
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
|
||||
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
|
||||
#define USB_DEVICE_ID_MS_SURFACE3_COVER 0x07de
|
||||
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
|
||||
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
|
||||
#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
|
||||
|
@ -769,12 +769,18 @@ static int lg_raw_event(struct hid_device *hdev, struct hid_report *report,
|
||||
|
||||
static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *iface = to_usb_interface(hdev->dev.parent);
|
||||
__u8 iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
||||
struct usb_interface *iface;
|
||||
__u8 iface_num;
|
||||
unsigned int connect_mask = HID_CONNECT_DEFAULT;
|
||||
struct lg_drv_data *drv_data;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
iface = to_usb_interface(hdev->dev.parent);
|
||||
iface_num = iface->cur_altsetting->desc.bInterfaceNumber;
|
||||
|
||||
/* G29 only work with the 1st interface */
|
||||
if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) &&
|
||||
(iface_num != 0)) {
|
||||
|
@ -1693,7 +1693,7 @@ static int logi_dj_probe(struct hid_device *hdev,
|
||||
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
||||
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
||||
}
|
||||
if (hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if (hid_is_usb(hdev)) {
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
if (intf && intf->altsetting->desc.bInterfaceNumber >=
|
||||
no_dj_interfaces) {
|
||||
|
@ -798,12 +798,18 @@ static int pk_raw_event(struct hid_device *hdev, struct hid_report *report,
|
||||
static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int ret;
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
struct usb_interface *intf;
|
||||
unsigned short ifnum;
|
||||
unsigned long quirks = id->driver_data;
|
||||
struct pk_device *pk;
|
||||
struct pcmidi_snd *pm = NULL;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
intf = to_usb_interface(hdev->dev.parent);
|
||||
ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
|
||||
pk = kzalloc(sizeof(*pk), GFP_KERNEL);
|
||||
if (pk == NULL) {
|
||||
hid_err(hdev, "can't alloc descriptor\n");
|
||||
|
@ -124,6 +124,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PIXART_MOUSE), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE3_COVER), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
|
||||
|
@ -344,6 +344,9 @@ static int arvo_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -324,6 +324,9 @@ static int isku_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -749,6 +749,9 @@ static int kone_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -431,6 +431,9 @@ static int koneplus_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -133,6 +133,9 @@ static int konepure_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -501,6 +501,9 @@ static int kovaplus_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -160,6 +160,9 @@ static int lua_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -449,6 +449,9 @@ static int pyra_probe(struct hid_device *hdev, const struct hid_device_id *id)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -141,6 +141,9 @@ static int ryos_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -113,6 +113,9 @@ static int savu_probe(struct hid_device *hdev,
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
retval = hid_parse(hdev);
|
||||
if (retval) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -152,6 +152,9 @@ static int samsung_probe(struct hid_device *hdev,
|
||||
int ret;
|
||||
unsigned int cmask = HID_CONNECT_DEFAULT;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
ret = hid_parse(hdev);
|
||||
if (ret) {
|
||||
hid_err(hdev, "parse failed\n");
|
||||
|
@ -290,7 +290,7 @@ static int u2fzero_probe(struct hid_device *hdev,
|
||||
unsigned int minor;
|
||||
int ret;
|
||||
|
||||
if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
|
||||
|
@ -164,6 +164,9 @@ static int uclogic_probe(struct hid_device *hdev,
|
||||
struct uclogic_drvdata *drvdata = NULL;
|
||||
bool params_initialized = false;
|
||||
|
||||
if (!hid_is_usb(hdev))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* libinput requires the pad interface to be on a different node
|
||||
* than the pen, so use QUIRK_MULTI_INPUT for all tablets.
|
||||
|
@ -841,8 +841,7 @@ int uclogic_params_init(struct uclogic_params *params,
|
||||
struct uclogic_params p = {0, };
|
||||
|
||||
/* Check arguments */
|
||||
if (params == NULL || hdev == NULL ||
|
||||
!hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
|
||||
if (params == NULL || hdev == NULL || !hid_is_usb(hdev)) {
|
||||
rc = -EINVAL;
|
||||
goto cleanup;
|
||||
}
|
||||
|
@ -726,7 +726,7 @@ static void wacom_retrieve_hid_descriptor(struct hid_device *hdev,
|
||||
* Skip the query for this type and modify defaults based on
|
||||
* interface number.
|
||||
*/
|
||||
if (features->type == WIRELESS) {
|
||||
if (features->type == WIRELESS && intf) {
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber == 0)
|
||||
features->device_type = WACOM_DEVICETYPE_WL_MONITOR;
|
||||
else
|
||||
@ -2217,7 +2217,7 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
|
||||
if ((features->type == HID_GENERIC) && !strcmp("Wacom HID", features->name)) {
|
||||
char *product_name = wacom->hdev->name;
|
||||
|
||||
if (hid_is_using_ll_driver(wacom->hdev, &usb_hid_driver)) {
|
||||
if (hid_is_usb(wacom->hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
product_name = dev->product;
|
||||
@ -2448,6 +2448,9 @@ static void wacom_wireless_work(struct work_struct *work)
|
||||
|
||||
wacom_destroy_battery(wacom);
|
||||
|
||||
if (!usbdev)
|
||||
return;
|
||||
|
||||
/* Stylus interface */
|
||||
hdev1 = usb_get_intfdata(usbdev->config->interface[1]);
|
||||
wacom1 = hid_get_drvdata(hdev1);
|
||||
@ -2727,8 +2730,6 @@ static void wacom_mode_change_work(struct work_struct *work)
|
||||
static int wacom_probe(struct hid_device *hdev,
|
||||
const struct hid_device_id *id)
|
||||
{
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
struct wacom *wacom;
|
||||
struct wacom_wac *wacom_wac;
|
||||
struct wacom_features *features;
|
||||
@ -2763,8 +2764,14 @@ static int wacom_probe(struct hid_device *hdev,
|
||||
wacom_wac->hid_data.inputmode = -1;
|
||||
wacom_wac->mode_report = -1;
|
||||
|
||||
wacom->usbdev = dev;
|
||||
wacom->intf = intf;
|
||||
if (hid_is_usb(hdev)) {
|
||||
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
|
||||
struct usb_device *dev = interface_to_usbdev(intf);
|
||||
|
||||
wacom->usbdev = dev;
|
||||
wacom->intf = intf;
|
||||
}
|
||||
|
||||
mutex_init(&wacom->lock);
|
||||
INIT_DELAYED_WORK(&wacom->init_work, wacom_init_work);
|
||||
INIT_WORK(&wacom->wireless_work, wacom_wireless_work);
|
||||
|
@ -1435,8 +1435,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
|
||||
return 0;
|
||||
|
||||
err_buffer_cleanup:
|
||||
if (data->dready_trig)
|
||||
iio_triggered_buffer_cleanup(indio_dev);
|
||||
iio_triggered_buffer_cleanup(indio_dev);
|
||||
err_trigger_unregister:
|
||||
if (data->dready_trig)
|
||||
iio_trigger_unregister(data->dready_trig);
|
||||
@ -1459,8 +1458,8 @@ static int kxcjk1013_remove(struct i2c_client *client)
|
||||
pm_runtime_set_suspended(&client->dev);
|
||||
pm_runtime_put_noidle(&client->dev);
|
||||
|
||||
iio_triggered_buffer_cleanup(indio_dev);
|
||||
if (data->dready_trig) {
|
||||
iio_triggered_buffer_cleanup(indio_dev);
|
||||
iio_trigger_unregister(data->dready_trig);
|
||||
iio_trigger_unregister(data->motion_trig);
|
||||
}
|
||||
|
@ -224,14 +224,14 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
|
||||
hw_values.chan,
|
||||
sizeof(hw_values.chan));
|
||||
if (ret) {
|
||||
dev_err(st->dev,
|
||||
"error reading data\n");
|
||||
return ret;
|
||||
dev_err(st->dev, "error reading data: %d\n", ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
iio_push_to_buffers_with_timestamp(indio_dev,
|
||||
&hw_values,
|
||||
iio_get_time_ns(indio_dev));
|
||||
out:
|
||||
iio_trigger_notify_done(indio_dev->trig);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
|
@ -1473,7 +1473,7 @@ static int mma8452_trigger_setup(struct iio_dev *indio_dev)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
indio_dev->trig = trig;
|
||||
indio_dev->trig = iio_trigger_get(trig);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -470,8 +470,8 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
|
||||
iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
|
||||
iio_get_time_ns(indio_dev));
|
||||
|
||||
iio_trigger_notify_done(indio_dev->trig);
|
||||
err_unlock:
|
||||
iio_trigger_notify_done(indio_dev->trig);
|
||||
mutex_unlock(&st->lock);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
|
@ -1401,7 +1401,8 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
|
||||
*val = st->conversion_value;
|
||||
ret = at91_adc_adjust_val_osr(st, val);
|
||||
if (chan->scan_type.sign == 's')
|
||||
*val = sign_extend32(*val, 11);
|
||||
*val = sign_extend32(*val,
|
||||
chan->scan_type.realbits - 1);
|
||||
st->conversion_done = false;
|
||||
}
|
||||
|
||||
|
@ -251,19 +251,8 @@ static int axp22x_adc_raw(struct iio_dev *indio_dev,
|
||||
struct iio_chan_spec const *chan, int *val)
|
||||
{
|
||||
struct axp20x_adc_iio *info = iio_priv(indio_dev);
|
||||
int size;
|
||||
|
||||
/*
|
||||
* N.B.: Unlike the Chinese datasheets tell, the charging current is
|
||||
* stored on 12 bits, not 13 bits. Only discharging current is on 13
|
||||
* bits.
|
||||
*/
|
||||
if (chan->type == IIO_CURRENT && chan->channel == AXP22X_BATT_DISCHRG_I)
|
||||
size = 13;
|
||||
else
|
||||
size = 12;
|
||||
|
||||
*val = axp20x_read_variable_width(info->regmap, chan->address, size);
|
||||
*val = axp20x_read_variable_width(info->regmap, chan->address, 12);
|
||||
if (*val < 0)
|
||||
return *val;
|
||||
|
||||
@ -386,9 +375,8 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
|
||||
return IIO_VAL_INT_PLUS_MICRO;
|
||||
|
||||
case IIO_CURRENT:
|
||||
*val = 0;
|
||||
*val2 = 500000;
|
||||
return IIO_VAL_INT_PLUS_MICRO;
|
||||
*val = 1;
|
||||
return IIO_VAL_INT;
|
||||
|
||||
case IIO_TEMP:
|
||||
*val = 100;
|
||||
|
@ -248,7 +248,6 @@ static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
|
||||
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
|
||||
{
|
||||
int ret, i;
|
||||
struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);
|
||||
u16 conflict;
|
||||
__le16 value;
|
||||
int olen = sizeof(value);
|
||||
@ -257,13 +256,9 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
|
||||
.chan = channel,
|
||||
};
|
||||
|
||||
ret = iio_device_claim_direct_mode(indio_dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = dln2_adc_set_chan_enabled(dln2, channel, true);
|
||||
if (ret < 0)
|
||||
goto release_direct;
|
||||
return ret;
|
||||
|
||||
ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
|
||||
if (ret < 0) {
|
||||
@ -300,8 +295,6 @@ static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
|
||||
dln2_adc_set_port_enabled(dln2, false, NULL);
|
||||
disable_chan:
|
||||
dln2_adc_set_chan_enabled(dln2, channel, false);
|
||||
release_direct:
|
||||
iio_device_release_direct_mode(indio_dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -337,10 +330,16 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
|
||||
|
||||
switch (mask) {
|
||||
case IIO_CHAN_INFO_RAW:
|
||||
ret = iio_device_claim_direct_mode(indio_dev);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&dln2->mutex);
|
||||
ret = dln2_adc_read(dln2, chan->channel);
|
||||
mutex_unlock(&dln2->mutex);
|
||||
|
||||
iio_device_release_direct_mode(indio_dev);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
@ -655,7 +654,11 @@ static int dln2_adc_probe(struct platform_device *pdev)
|
||||
return -ENOMEM;
|
||||
}
|
||||
iio_trigger_set_drvdata(dln2->trig, dln2);
|
||||
devm_iio_trigger_register(dev, dln2->trig);
|
||||
ret = devm_iio_trigger_register(dev, dln2->trig);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to register trigger: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
iio_trigger_set_immutable(indio_dev, dln2->trig);
|
||||
|
||||
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
|
||||
|
@ -979,6 +979,7 @@ static void stm32h7_adc_unprepare(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct stm32_adc *adc = iio_priv(indio_dev);
|
||||
|
||||
stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
|
||||
stm32h7_adc_disable(indio_dev);
|
||||
stm32h7_adc_enter_pwr_down(adc);
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@
*/

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -124,7 +125,7 @@ static int adxrs290_get_rate_data(struct iio_dev *indio_dev, const u8 cmd, int *
goto err_unlock;
}

*val = temp;
*val = sign_extend32(temp, 15);

err_unlock:
mutex_unlock(&st->lock);
@@ -146,7 +147,7 @@ static int adxrs290_get_temp_data(struct iio_dev *indio_dev, int *val)
}

/* extract lower 12 bits temperature reading */
*val = temp & 0x0FFF;
*val = sign_extend32(temp, 11);

err_unlock:
mutex_unlock(&st->lock);
@@ -61,9 +61,9 @@ static irqreturn_t itg3200_trigger_handler(int irq, void *p)

iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);

error_ret:
iio_trigger_notify_done(indio_dev->trig);

error_ret:
return IRQ_HANDLED;
}
@@ -550,7 +550,6 @@ struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
irq_modify_status(trig->subirq_base + i,
IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE);
}
get_device(&trig->dev);

return trig;
@@ -1273,7 +1273,7 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
ret = regmap_bulk_read(data->regmap, LTR501_ALS_DATA1,
als_buf, sizeof(als_buf));
if (ret < 0)
return ret;
goto done;
if (test_bit(0, indio_dev->active_scan_mask))
scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
@@ -546,9 +546,8 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
mutex_lock(&data->lock);
ret = regmap_field_read(data->reg_flag_nf, &dir);
if (ret < 0) {
dev_err(&data->client->dev, "register read failed\n");
mutex_unlock(&data->lock);
return ret;
dev_err(&data->client->dev, "register read failed: %d\n", ret);
goto out;
}
event = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1,
IIO_EV_TYPE_THRESH,
@@ -560,6 +559,7 @@ static irqreturn_t stk3310_irq_event_handler(int irq, void *private)
ret = regmap_field_write(data->reg_flag_psint, 0);
if (ret < 0)
dev_err(&data->client->dev, "failed to reset interrupts\n");
out:
mutex_unlock(&data->lock);

return IRQ_HANDLED;
@@ -912,6 +912,6 @@ static struct platform_driver stm32_timer_trigger_driver = {
};
module_platform_driver(stm32_timer_trigger_driver);

MODULE_ALIAS("platform: stm32-timer-trigger");
MODULE_ALIAS("platform:stm32-timer-trigger");
MODULE_DESCRIPTION("STMicroelectronics STM32 Timer Trigger driver");
MODULE_LICENSE("GPL v2");
@@ -8456,6 +8456,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd)
*/
static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd)
{
if (!rcd->rcvhdrq)
return;
clear_recv_intr(rcd);
if (check_packet_present(rcd))
force_recv_intr(rcd);
@@ -1053,6 +1053,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread)
struct hfi1_packet packet;
int skip_pkt = 0;

if (!rcd->rcvhdrq)
return RCV_PKT_OK;
/* Control context will always use the slow path interrupt handler */
needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 0 : 1;
@@ -154,7 +154,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd,
rcd->fast_handler = get_dma_rtail_setting(rcd) ?
handle_receive_interrupt_dma_rtail :
handle_receive_interrupt_nodma_rtail;
rcd->slow_handler = handle_receive_interrupt;

hfi1_set_seq_cnt(rcd, 1);

@@ -375,6 +374,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa,
rcd->numa_id = numa;
rcd->rcv_array_groups = dd->rcv_entries.ngroups;
rcd->rhf_rcv_function_map = normal_rhf_rcv_functions;
rcd->slow_handler = handle_receive_interrupt;
rcd->do_interrupt = rcd->slow_handler;
rcd->msix_intr = CCE_NUM_MSIX_VECTORS;

mutex_init(&rcd->exp_mutex);
@@ -915,18 +916,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (ret)
goto done;

/* allocate dummy tail memory for all receive contexts */
dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev,
sizeof(u64),
&dd->rcvhdrtail_dummy_dma,
GFP_KERNEL);

if (!dd->rcvhdrtail_dummy_kvaddr) {
dd_dev_err(dd, "cannot allocate dummy tail memory\n");
ret = -ENOMEM;
goto done;
}

/* dd->rcd can be NULL if early initialization failed */
for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) {
/*
@@ -939,8 +928,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
if (!rcd)
continue;

rcd->do_interrupt = &handle_receive_interrupt;

lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
@@ -1161,7 +1148,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
rcd->egrbufs.rcvtids = NULL;

for (e = 0; e < rcd->egrbufs.alloced; e++) {
if (rcd->egrbufs.buffers[e].dma)
if (rcd->egrbufs.buffers[e].addr)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,
@@ -1242,6 +1229,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
dd->tx_opstats = NULL;
kfree(dd->comp_vect);
dd->comp_vect = NULL;
if (dd->rcvhdrtail_dummy_kvaddr)
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_dma);
dd->rcvhdrtail_dummy_kvaddr = NULL;
sdma_clean(dd, dd->num_sdma);
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
@@ -1339,6 +1331,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
goto bail;
}

/* allocate dummy tail memory for all receive contexts */
dd->rcvhdrtail_dummy_kvaddr =
dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64),
&dd->rcvhdrtail_dummy_dma, GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
ret = -ENOMEM;
goto bail;
}

atomic_set(&dd->ipoib_rsm_usr_num, 0);
return dd;

@@ -1546,13 +1547,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd)

free_credit_return(dd);

if (dd->rcvhdrtail_dummy_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
dd->rcvhdrtail_dummy_dma);
dd->rcvhdrtail_dummy_kvaddr = NULL;
}

/*
* Free any resources still in use (usually just kernel contexts)
* at unload; we do for ctxtcnt, because that's what we allocate.
@@ -880,8 +880,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
if (current->nr_cpus_allowed != 1)
goto out;

cpu_id = smp_processor_id();
rcu_read_lock();
cpu_id = smp_processor_id();
rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
sdma_rht_params);
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
@@ -964,9 +965,14 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
unsigned long instance_stage,
unsigned long reset_stage)
{
#define HW_RESET_TIMEOUT_US 1000000
#define HW_RESET_SLEEP_US 1000

struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hnae3_handle *handle = priv->handle;
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
unsigned long val;
int ret;

/* When hardware reset is detected, we should stop sending mailbox&cmq&
* doorbell to hardware. If now in .init_instance() function, we should
@@ -978,7 +984,11 @@ static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
* again.
*/
hr_dev->dis_db = true;
if (!ops->get_hw_reset_stat(handle))

ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
HW_RESET_TIMEOUT_US, false, handle);
if (!ret)
hr_dev->is_reset = true;

if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -6342,10 +6352,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
if (!hr_dev)
return 0;

hr_dev->is_reset = true;
hr_dev->active = false;
hr_dev->dis_db = true;

hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

return 0;
@@ -232,17 +232,13 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
int hwirq, i;

mutex_lock(&msi_used_lock);

hwirq = bitmap_find_next_zero_area(msi_used, PCI_MSI_DOORBELL_NR,
0, nr_irqs, 0);
if (hwirq >= PCI_MSI_DOORBELL_NR) {
mutex_unlock(&msi_used_lock);
return -ENOSPC;
}

bitmap_set(msi_used, hwirq, nr_irqs);
hwirq = bitmap_find_free_region(msi_used, PCI_MSI_DOORBELL_NR,
order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);

if (hwirq < 0)
return -ENOSPC;

for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i,
&armada_370_xp_msi_bottom_irq_chip,
@@ -250,7 +246,7 @@ static int armada_370_xp_msi_alloc(struct irq_domain *domain, unsigned int virq,
NULL, NULL);
}

return hwirq;
return 0;
}

static void armada_370_xp_msi_free(struct irq_domain *domain,
@@ -259,7 +255,7 @@ static void armada_370_xp_msi_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);

mutex_lock(&msi_used_lock);
bitmap_clear(msi_used, d->hwirq, nr_irqs);
bitmap_release_region(msi_used, d->hwirq, order_base_2(nr_irqs));
mutex_unlock(&msi_used_lock);
}
@@ -78,8 +78,8 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
bit - scu_ic->irq_shift);
generic_handle_irq(irq);

regmap_update_bits(scu_ic->scu, scu_ic->reg, mask,
BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
}

chained_irq_exit(chip, desc);
@@ -742,7 +742,7 @@ static struct its_collection *its_build_invall_cmd(struct its_node *its,

its_fixup_cmd(cmd);

return NULL;
return desc->its_invall_cmd.col;
}

static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
@@ -26,7 +26,7 @@

#define NVIC_ISER 0x000
#define NVIC_ICER 0x080
#define NVIC_IPR 0x300
#define NVIC_IPR 0x400

#define NVIC_MAX_BANKS 16
/*
@@ -2252,6 +2252,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)

if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
rdev->sb_start = sb_start;
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
@@ -717,16 +717,18 @@ static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
u64 size = 0;
int i;
int oix;

size = ALIGN(metalen, FASTRPC_ALIGN);
for (i = 0; i < ctx->nscalars; i++) {
for (oix = 0; oix < ctx->nbufs; oix++) {
int i = ctx->olaps[oix].raix;

if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {

if (ctx->olaps[i].offset == 0)
if (ctx->olaps[oix].offset == 0)
size = ALIGN(size, FASTRPC_ALIGN);

size += (ctx->olaps[i].mend - ctx->olaps[i].mstart);
size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
}
}
@@ -660,7 +660,7 @@ static int renesas_sdhi_execute_tuning(struct mmc_host *mmc, u32 opcode)

/* Issue CMD19 twice for each tap */
for (i = 0; i < 2 * priv->tap_num; i++) {
int cmd_error;
int cmd_error = 0;

/* Set sampling clock position */
sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, i % priv->tap_num);
@@ -15,6 +15,7 @@

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -93,6 +94,14 @@

#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)

/*
* According to SPEAr300 Reference Manual (RM0082)
* TOUDEL = 7ns (Output delay from the flip-flops to the board)
* TINDEL = 5ns (Input delay from the board to the flipflop)
*/
#define TOUTDEL 7000
#define TINDEL 5000

struct fsmc_nand_timings {
u8 tclr;
u8 tar;
@@ -277,7 +286,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
u32 thiz, thold, twait, tset;
u32 thiz, thold, twait, tset, twait_min;

if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
@@ -309,13 +318,6 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->thold > FSMC_THOLD_MASK)
tims->thold = FSMC_THOLD_MASK;

twait = max(sdrt->tRP_min, sdrt->tWP_min);
tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
if (tims->twait == 0)
tims->twait = 1;
else if (tims->twait > FSMC_TWAIT_MASK)
tims->twait = FSMC_TWAIT_MASK;

tset = max(sdrt->tCS_min - sdrt->tWP_min,
sdrt->tCEA_max - sdrt->tREA_max);
tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
@@ -324,6 +326,21 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
else if (tims->tset > FSMC_TSET_MASK)
tims->tset = FSMC_TSET_MASK;

/*
* According to SPEAr300 Reference Manual (RM0082) which gives more
* information related to FSMSC timings than the SPEAr600 one (RM0305),
* twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
*/
twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
+ TOUTDEL + TINDEL;
twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);

tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
if (tims->twait == 0)
tims->twait = 1;
else if (tims->twait > FSMC_TWAIT_MASK)
tims->twait = FSMC_TWAIT_MASK;

return 0;
}

@@ -653,6 +670,9 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
instr->ctx.waitrdy.timeout_ms);
break;
}

if (instr->delay_ns)
ndelay(instr->delay_ns);
}

return ret;
@@ -1531,14 +1531,14 @@ void bond_alb_monitor(struct work_struct *work)
struct slave *slave;

if (!bond_has_slaves(bond)) {
bond_info->tx_rebalance_counter = 0;
atomic_set(&bond_info->tx_rebalance_counter, 0);
bond_info->lp_counter = 0;
goto re_arm;
}

rcu_read_lock();

bond_info->tx_rebalance_counter++;
atomic_inc(&bond_info->tx_rebalance_counter);
bond_info->lp_counter++;

/* send learning packets */
@@ -1560,7 +1560,7 @@ void bond_alb_monitor(struct work_struct *work)
}

/* rebalance tx traffic */
if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
bond_for_each_slave_rcu(bond, slave, iter) {
tlb_clear_slave(bond, slave, 1);
if (slave == rcu_access_pointer(bond->curr_active_slave)) {
@@ -1570,7 +1570,7 @@ void bond_alb_monitor(struct work_struct *work)
bond_info->unbalanced_load = 0;
}
}
bond_info->tx_rebalance_counter = 0;
atomic_set(&bond_info->tx_rebalance_counter, 0);
}

if (bond_info->rlb_enabled) {
@@ -1640,7 +1640,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
tlb_init_slave(slave);

/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
atomic_set(&bond->alb_info.tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);

if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
@@ -1677,7 +1678,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
rlb_clear_slave(bond, slave);
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
atomic_set(&bond_info->tx_rebalance_counter,
BOND_TLB_REBALANCE_TICKS);
if (bond->alb_info.rlb_enabled) {
bond->alb_info.rlb_rebalance = 1;
/* If the updelay module parameter is smaller than the
@@ -248,6 +248,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)

/* Kvaser KCAN_EPACK second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)

struct kvaser_pciefd;

struct kvaser_pciefd_can {
@@ -1285,7 +1288,10 @@ static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,

can->err_rep_cnt++;
can->can.can_stats.bus_error++;
stats->rx_errors++;
if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
stats->tx_errors++;
else
stats->rx_errors++;

can->bec.txerr = bec.txerr;
can->bec.rxerr = bec.rxerr;
@@ -207,15 +207,15 @@ enum m_can_reg {

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_BUS_30X (IR_ERR_LEC_30X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_BEU | IR_BEC | \
IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | IR_RF1L | \
IR_RF0L)
#define IR_ERR_ALL_31X (IR_ERR_STATE | IR_ERR_BUS_31X)

/* Interrupt Line Select (ILS) */
@@ -752,8 +752,6 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
if (irqstatus & IR_WDI)
netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
if (irqstatus & IR_ELO)
netdev_err(dev, "Error Logging Overflow\n");
if (irqstatus & IR_BEU)
netdev_err(dev, "Bit Error Uncorrected\n");
if (irqstatus & IR_BEC)
@@ -692,11 +692,11 @@ static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
cf->data[i + 1] = data_reg >> 8;
}

netif_receive_skb(skb);
rcv_pkts++;
stats->rx_packets++;
quota--;
stats->rx_bytes += cf->can_dlc;
netif_receive_skb(skb);

pch_fifo_thresh(priv, obj_num);
obj_num++;
@@ -235,7 +235,12 @@ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base)
free_sja1000dev(dev);
}

err = request_irq(dev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
if (!card->channels) {
err = -ENODEV;
goto failure_cleanup;
}

err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
DRV_NAME, card);
if (!err)
return 0;
@@ -28,10 +28,6 @@

#include "kvaser_usb.h"

/* Forward declaration */
static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;

#define CAN_USB_CLOCK 8000000
#define MAX_USBCAN_NET_DEVICES 2

/* Command header size */
@@ -80,6 +76,12 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg;

#define CMD_LEAF_LOG_MESSAGE 106

/* Leaf frequency options */
#define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60
#define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0
#define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5)
#define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6)

/* error factors */
#define M16C_EF_ACKE BIT(0)
#define M16C_EF_CRCE BIT(1)
@@ -340,6 +342,50 @@ struct kvaser_usb_err_summary {
};
};

static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
.name = "kvaser_usb",
.tseg1_min = KVASER_USB_TSEG1_MIN,
.tseg1_max = KVASER_USB_TSEG1_MAX,
.tseg2_min = KVASER_USB_TSEG2_MIN,
.tseg2_max = KVASER_USB_TSEG2_MAX,
.sjw_max = KVASER_USB_SJW_MAX,
.brp_min = KVASER_USB_BRP_MIN,
.brp_max = KVASER_USB_BRP_MAX,
.brp_inc = KVASER_USB_BRP_INC,
};

static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_8mhz = {
.clock = {
.freq = 8000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};

static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_16mhz = {
.clock = {
.freq = 16000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};

static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_24mhz = {
.clock = {
.freq = 24000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};

static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg_32mhz = {
.clock = {
.freq = 32000000,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};

static void *
kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv,
const struct sk_buff *skb, int *frame_len,
@@ -471,6 +517,27 @@ static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev,
return rc;
}

static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev,
const struct leaf_cmd_softinfo *softinfo)
{
u32 sw_options = le32_to_cpu(softinfo->sw_options);

dev->fw_version = le32_to_cpu(softinfo->fw_version);
dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx);

switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) {
case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_16mhz;
break;
case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_24mhz;
break;
case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK:
dev->cfg = &kvaser_usb_leaf_dev_cfg_32mhz;
break;
}
}

static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)
{
struct kvaser_cmd cmd;
@@ -486,14 +553,13 @@ static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev)

switch (dev->card_data.leaf.family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(cmd.u.leaf.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.leaf.softinfo.max_outstanding_tx);
kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx);
dev->cfg = &kvaser_usb_leaf_dev_cfg_8mhz;
break;
}

@@ -1225,24 +1291,11 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev)
{
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;

dev->cfg = &kvaser_usb_leaf_dev_cfg;
card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;

return 0;
}

static const struct can_bittiming_const kvaser_usb_leaf_bittiming_const = {
.name = "kvaser_usb",
.tseg1_min = KVASER_USB_TSEG1_MIN,
.tseg1_max = KVASER_USB_TSEG1_MAX,
.tseg2_min = KVASER_USB_TSEG2_MIN,
.tseg2_max = KVASER_USB_TSEG2_MAX,
.sjw_max = KVASER_USB_SJW_MAX,
.brp_min = KVASER_USB_BRP_MIN,
.brp_max = KVASER_USB_BRP_MAX,
.brp_inc = KVASER_USB_BRP_INC,
};

static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev)
{
struct kvaser_usb_net_priv *priv = netdev_priv(netdev);
@@ -1348,11 +1401,3 @@ const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = {
.dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback,
.dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd,
};

static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_dev_cfg = {
.clock = {
.freq = CAN_USB_CLOCK,
},
.timestamp_freq = 1,
.bittiming_const = &kvaser_usb_leaf_bittiming_const,
};
@@ -1431,16 +1431,19 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rxdescmem_busaddr = dma_res->start;

} else {
ret = -ENODEV;
goto err_free_netdev;
}

if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) {
dma_set_coherent_mask(priv->device,
DMA_BIT_MASK(priv->dmaops->dmamask));
else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
} else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) {
dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
else
} else {
ret = -EIO;
goto err_free_netdev;
}

/* MAC address space */
ret = request_and_map(pdev, "control_port", &control_port,
@@ -373,6 +373,9 @@ struct bufdesc_ex {

#define FEC_ENET_WAKEUP ((uint)0x00020000) /* Wakeup request */
#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
#define FEC_ENET_RXF_GET(X) (((X) == 0) ? FEC_ENET_RXF_0 : \
(((X) == 1) ? FEC_ENET_RXF_1 : \
FEC_ENET_RXF_2))
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)

@@ -1439,7 +1439,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
break;
pkt_received++;

writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);

/* Check for errors. */
status ^= BD_ENET_RX_LAST;
@@ -553,6 +553,14 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
return;
}
if (vsi->type != I40E_VSI_MAIN &&
vsi->type != I40E_VSI_FDIR &&
vsi->type != I40E_VSI_VMDQ2) {
dev_info(&pf->pdev->dev,
"vsi %d type %d descriptor rings not available\n",
vsi_seid, vsi->type);
return;
}
if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
return;
@@ -1895,6 +1895,32 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}

/**
* i40e_sync_vf_state
* @vf: pointer to the VF info
* @state: VF state
*
* Called from a VF message to synchronize the service with a potential
* VF reset state
**/
static bool i40e_sync_vf_state(struct i40e_vf *vf, enum i40e_vf_states state)
{
int i;

/* When handling some messages, it needs VF state to be set.
* It is possible that this flag is cleared during VF reset,
* so there is a need to wait until the end of the reset to
* handle the request message correctly.
*/
for (i = 0; i < I40E_VF_STATE_WAIT_COUNT; i++) {
if (test_bit(state, &vf->vf_states))
return true;
usleep_range(10000, 20000);
}

return test_bit(state, &vf->vf_states);
}

/**
* i40e_vc_get_version_msg
* @vf: pointer to the VF info
@@ -1955,7 +1981,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
size_t len = 0;
int ret;

if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_INIT)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -2077,7 +2103,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -2165,7 +2191,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi;
u16 num_qps_all = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2314,7 +2340,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2486,7 +2512,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2536,7 +2562,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
u8 cur_pairs = vf->num_queue_pairs;
struct i40e_pf *pf = vf->pf;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE))
return -EINVAL;

if (req_pairs > I40E_MAX_VF_QUEUES) {
@@ -2581,7 +2607,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)

memset(&stats, 0, sizeof(struct i40e_eth_stats));

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2698,7 +2724,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2770,7 +2796,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
i40e_status ret = 0;
int i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, al->vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -2914,7 +2940,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -3034,9 +3060,9 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
struct i40e_vsi *vsi = NULL;
i40e_status aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
(vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3065,9 +3091,9 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u16 i;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrl->vsi_id) ||
(vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3100,7 +3126,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int len = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3136,7 +3162,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
struct i40e_hw *hw = &pf->hw;
i40e_status aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3161,7 +3187,7 @@ static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3187,7 +3213,7 @@ static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3414,7 +3440,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3545,7 +3571,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
int i, ret;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err_out;
}
@@ -3654,7 +3680,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
i40e_status aq_ret = 0;
u64 speed = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -3761,11 +3787,6 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)

/* set this flag only after making sure all inputs are sane */
vf->adq_enabled = true;
/* num_req_queues is set when user changes number of queues via ethtool
* and this causes issue for default VSI(which depends on this variable)
* when ADq is enabled, hence reset it.
*/
vf->num_req_queues = 0;

/* reset the VF in order to allocate resources */
i40e_vc_reset_vf(vf, true);
@@ -3788,7 +3809,7 @@ static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;

if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
goto err;
}
@@ -18,6 +18,8 @@

#define I40E_MAX_VF_PROMISC_FLAGS 3

#define I40E_VF_STATE_WAIT_COUNT 20

/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -612,23 +612,44 @@ static int iavf_set_ringparam(struct net_device *netdev,
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;

new_tx_count = clamp_t(u32, ring->tx_pending,
IAVF_MIN_TXD,
IAVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
if (ring->tx_pending > IAVF_MAX_TXD ||
ring->tx_pending < IAVF_MIN_TXD ||
ring->rx_pending > IAVF_MAX_RXD ||
ring->rx_pending < IAVF_MIN_RXD) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending, IAVF_MIN_TXD,
IAVF_MAX_RXD, IAVF_REQ_DESCRIPTOR_MULTIPLE);
return -EINVAL;
}

new_rx_count = clamp_t(u32, ring->rx_pending,
IAVF_MIN_RXD,
IAVF_MAX_RXD);
new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);
new_tx_count = ALIGN(ring->tx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
if (new_tx_count != ring->tx_pending)
netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
new_tx_count);

new_rx_count = ALIGN(ring->rx_pending, IAVF_REQ_DESCRIPTOR_MULTIPLE);
if (new_rx_count != ring->rx_pending)
netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_count);

/* if nothing to do return success */
if ((new_tx_count == adapter->tx_desc_count) &&
(new_rx_count == adapter->rx_desc_count))
(new_rx_count == adapter->rx_desc_count)) {
netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
}

adapter->tx_desc_count = new_tx_count;
adapter->rx_desc_count = new_rx_count;
if (new_tx_count != adapter->tx_desc_count) {
netdev_dbg(netdev, "Changing Tx descriptor count from %d to %d\n",
adapter->tx_desc_count, new_tx_count);
adapter->tx_desc_count = new_tx_count;
}

if (new_rx_count != adapter->rx_desc_count) {
netdev_dbg(netdev, "Changing Rx descriptor count from %d to %d\n",
adapter->rx_desc_count, new_rx_count);
adapter->rx_desc_count = new_rx_count;
}

if (netif_running(netdev)) {
adapter->flags |= IAVF_FLAG_RESET_NEEDED;
@@ -2139,6 +2139,7 @@ static void iavf_reset_task(struct work_struct *work)
}

pci_set_master(adapter->pdev);
pci_restore_msi_state(adapter->pdev);

if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
@@ -5267,6 +5267,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
netif_carrier_on(vsi->netdev);
}

/* clear this now, and the first stats read will be used as baseline */
vsi->stat_offsets_loaded = false;

ice_service_task_schedule(pf);

return 0;
@@ -2607,11 +2607,11 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

if (priv->percpu_pools) {
err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->logic_rxq);
if (err < 0)
goto err_free_dma;

err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->logic_rxq);
if (err < 0)
goto err_unregister_rxq_short;
@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
return -ENOMEM;

cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (!cache)
if (!cache) {
nfp_cpp_area_free(area);
return -ENOMEM;
}

cache->id = 0;
cache->addr = 0;
@@ -1659,6 +1659,13 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data_split = true;
}
} else {
if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
qede_free_failed_tx_pkt(txq, first_bd, 0, false);
qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}

val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
}
@@ -3494,20 +3494,19 @@ static int ql_adapter_up(struct ql3_adapter *qdev)

spin_lock_irqsave(&qdev->hw_lock, hw_flags);

err = ql_wait_for_drvr_lock(qdev);
if (err) {
err = ql_adapter_initialize(qdev);
if (err) {
netdev_err(ndev, "Unable to initialize adapter\n");
goto err_init;
}
netdev_err(ndev, "Releasing driver lock\n");
ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
} else {
if (!ql_wait_for_drvr_lock(qdev)) {
netdev_err(ndev, "Could not acquire driver lock\n");
err = -ENODEV;
goto err_lock;
}

err = ql_adapter_initialize(qdev);
if (err) {
netdev_err(ndev, "Unable to initialize adapter\n");
goto err_init;
}
ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);

spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -181,6 +181,8 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);

max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
if (max == 0)
max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */

/* some devices set dwNtbOutMaxSize too low for the above default */
min = min(min, max);
@@ -771,8 +771,6 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,

skb->dev = vrf_dev;

vrf_nf_set_untracked(skb);

err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);

@@ -793,6 +791,8 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
return skb;

vrf_nf_set_untracked(skb);

if (qdisc_tx_is_default(vrf_dev) ||
IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
return vrf_ip6_out_direct(vrf_dev, sk, skb);
@@ -1008,8 +1008,6 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,

skb->dev = vrf_dev;

vrf_nf_set_untracked(skb);

err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip_out_direct_finish);

@@ -1031,6 +1029,8 @@ static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
ipv4_is_lbcast(ip_hdr(skb)->daddr))
return skb;

vrf_nf_set_untracked(skb);

if (qdisc_tx_is_default(vrf_dev) ||
IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
return vrf_ip_out_direct(vrf_dev, sk, skb);