This is the 5.10.202 stable release
-----BEGIN PGP SIGNATURE----- iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmVmG20ACgkQONu9yGCS aT6dzg/7BnCP2SpVmgEaD7FdPvGO/A6O5VrC9zu3sQE6g2gAwirZhdgE8NRn+ggm WSQ1kIA+HEcY23FKpq46pBED4P1irudiW7DkLw8nyOGp+XLb4wGkF5lBBP5z+B2P ga2RgwqKvYWeDaUW4n1Uy7m2Cz+wqCg/EvnITo40glSWPh20gM532/CSnA5akoje 9mjZYZ0rKHKTZGu65aNScNR7XnXHIivJU6C1jF6L9N1+Xn679nUHKQP4KM/RcjpX g1WQMWFC3mGIn5IX28W1wvKS320D5HLmTLnLqJvFpJN9+13DUnUoXcX469zvQoxJ GL3S94goWN/0BPOgr5KcKvTj00b4O+EWhQuQt+x8NLdydzRQuyFu2UpLNhIKKSou sT+BcxzeuqJhEh1tZItcZkZBptpLEkb0ezT11u5McnU5FjPzzzP8CtEetKKmEaBU AUoEP/lQQlVyk1I6xAeuzu53smncNQt6CqnXJxYXOBGgJ2txAM5kroMKXPin5C8k BCpUIqghhKmBd1hwuKyaOBKF99eLKKZsuvXppoPD0Yz7/Nq5TgdBw0qbNt2iLr05 XSM7WIIeCBROaV+ZiVxgtcXDR51FpMr7CLTbkBQ6IgLwircHeHSK7rQn7kFO3fCg OezhWAuh72qDZ2PCJ84fj21IhZ49a5oCLbUdBew+KzZervVpSo0= =eW67 -----END PGP SIGNATURE----- Merge 5.10.202 into android12-5.10-lts Changes in 5.10.202 locking/ww_mutex/test: Fix potential workqueue corruption perf/core: Bail out early if the request AUX area is out of bound clocksource/drivers/timer-imx-gpt: Fix potential memory leak clocksource/drivers/timer-atmel-tcb: Fix initialization on SAM9 hardware x86/mm: Drop the 4 MB restriction on minimal NUMA node memory size wifi: mac80211_hwsim: fix clang-specific fortify warning wifi: mac80211: don't return unset power in ieee80211_get_tx_power() bpf: Detect IP == ksym.end as part of BPF program wifi: ath9k: fix clang-specific fortify warnings wifi: ath10k: fix clang-specific fortify warning net: annotate data-races around sk->sk_tx_queue_mapping net: annotate data-races around sk->sk_dst_pending_confirm wifi: ath10k: Don't touch the CE interrupt registers after power up Bluetooth: btusb: Add date->evt_skb is NULL check Bluetooth: Fix double free in hci_conn_cleanup platform/x86: thinkpad_acpi: Add battery quirk for Thinkpad X120e drm/komeda: drop all currently held locks if deadlock happens drm/msm/dp: skip validity check for DP CTS EDID checksum drm/amd: Fix UBSAN array-index-out-of-bounds for SMU7 drm/amd: Fix UBSAN array-index-out-of-bounds for Polaris and Tonga drm/amdgpu: Fix potential null pointer derefernce drm/panel: fix a possible null pointer dereference drm/panel/panel-tpo-tpg110: fix a possible null pointer dereference drm/panel: st7703: Pick different reset sequence drm/amdgpu: Fix a null pointer access when the smc_rreg pointer is NULL selftests/efivarfs: create-read: fix a resource leak ASoC: soc-card: Add storage for PCI SSID crypto: pcrypt - Fix hungtask for PADATA_RESET RDMA/hfi1: Use FIELD_GET() to extract Link Width fs/jfs: Add check for negative db_l2nbperpage fs/jfs: Add validity check for db_maxag and db_agpref jfs: fix array-index-out-of-bounds in dbFindLeaf jfs: fix array-index-out-of-bounds in diAlloc HID: lenovo: Detect quirk-free fw on cptkbd and stop applying workaround ARM: 9320/1: fix stack depot IRQ stack filter ALSA: hda: Fix possible null-ptr-deref when assigning a stream PCI: tegra194: Use FIELD_GET()/FIELD_PREP() with Link Width fields atm: iphase: Do PCI error checks on own line scsi: libfc: Fix potential NULL pointer dereference in fc_lport_ptp_setup() misc: pci_endpoint_test: Add Device ID for R-Car S4-8 PCIe controller HID: Add quirk for Dell Pro Wireless Keyboard and Mouse KM5221W exfat: support handle zero-size directory tty: vcc: Add check for kstrdup() in vcc_probe() usb: gadget: f_ncm: Always set current gadget in ncm_bind() 9p/trans_fd: Annotate data-racy writes to file::f_flags i2c: sun6i-p2wi: Prevent potential division by zero media: gspca: cpia1: shift-out-of-bounds 
in set_flicker media: vivid: avoid integer overflow gfs2: ignore negated quota changes gfs2: fix an oops in gfs2_permission media: cobalt: Use FIELD_GET() to extract Link Width media: imon: fix access to invalid resource for the second interface drm/amd/display: Avoid NULL dereference of timing generator kgdb: Flush console before entering kgdb on panic ASoC: ti: omap-mcbsp: Fix runtime PM underflow warnings drm/amdgpu: fix software pci_unplug on some chips pwm: Fix double shift bug wifi: iwlwifi: Use FW rate for non-data frames xhci: turn cancelled td cleanup to its own function SUNRPC: ECONNRESET might require a rebind SUNRPC: Add an IS_ERR() check back to where it was NFSv4.1: fix SP4_MACH_CRED protection for pnfs IO SUNRPC: Fix RPC client cleaned up the freed pipefs dentries gfs2: Silence "suspicious RCU usage in gfs2_permission" warning ipvlan: add ipvlan_route_v6_outbound() helper tty: Fix uninit-value access in ppp_sync_receive() net: hns3: fix variable may not initialized problem in hns3_init_mac_addr() net: hns3: fix VF reset fail issue tipc: Fix kernel-infoleak due to uninitialized TLV value ppp: limit MRU to 64K xen/events: fix delayed eoi list handling ptp: annotate data-race around q->head and q->tail bonding: stop the device in bond_setup_by_slave() net: ethernet: cortina: Fix max RX frame define net: ethernet: cortina: Handle large frames net: ethernet: cortina: Fix MTU max setting netfilter: nf_conntrack_bridge: initialize err to 0 net: stmmac: fix rx budget limit check net/mlx5e: fix double free of encap_header net/mlx5_core: Clean driver version and name net/mlx5e: Check return value of snprintf writing to fw_version buffer for representors macvlan: Don't propagate promisc change to lower dev in passthru tools/power/turbostat: Fix a knl bug cifs: spnego: add ';' in HOST_KEY_LEN cifs: fix check of rc in function generate_smb3signingkey media: venus: hfi: add checks to perform sanity on queue pointers powerpc/perf: Fix disabling BHRB and instruction sampling randstruct: Fix gcc-plugin performance mode to stay in group bpf: Fix check_stack_write_fixed_off() to correctly spill imm bpf: Fix precision tracking for BPF_ALU | BPF_TO_BE | BPF_END scsi: mpt3sas: Fix loop logic scsi: megaraid_sas: Increase register read retry rount from 3 to 30 for selected registers x86/cpu/hygon: Fix the CPU topology evaluation for real KVM: x86: hyper-v: Don't auto-enable stimer on write from user-space KVM: x86: Ignore MSR_AMD64_TW_CFG access audit: don't take task_lock() in audit_exe_compare() code path audit: don't WARN_ON_ONCE(!current->mm) in audit_exe_compare() tty/sysrq: replace smp_processor_id() with get_cpu() hvc/xen: fix console unplug hvc/xen: fix error path in xen_hvc_init() to always register frontend driver PCI/sysfs: Protect driver's D3cold preference from user space watchdog: move softlockup_panic back to early_param ACPI: resource: Do IRQ override on TongFang GMxXGxx arm64: Restrict CPU_BIG_ENDIAN to GNU as or LLVM IAS 15.x or newer parisc/pdc: Add width field to struct pdc_model clk: qcom: ipq8074: drop the CLK_SET_RATE_PARENT flag from PLL clocks clk: qcom: ipq6018: drop the CLK_SET_RATE_PARENT flag from PLL clocks mmc: vub300: fix an error code mmc: sdhci_am654: fix start loop index for TAP value parsing PCI/ASPM: Fix L1 substate handling in aspm_attr_store_common() arm64: dts: qcom: ipq6018: Fix hwlock index for SMEM PM: hibernate: Use __get_safe_page() rather than touching the list PM: hibernate: Clean up sync_read handling in snapshot_write_next() rcu: kmemleak: Ignore 
kmemleak false positives when RCU-freeing objects btrfs: don't arbitrarily slow down delalloc if we're committing firmware: qcom_scm: use 64-bit calling convention only when client is 64-bit ima: detect changes to the backing overlay file wifi: ath11k: fix temperature event locking wifi: ath11k: fix dfs radar event locking wifi: ath11k: fix htt pktlog locking mmc: meson-gx: Remove setting of CMD_CFG_ERROR genirq/generic_chip: Make irq_remove_generic_chip() irqdomain aware PCI: keystone: Don't discard .remove() callback PCI: keystone: Don't discard .probe() callback jbd2: fix potential data lost in recovering journal raced with synchronizing fs bdev quota: explicitly forbid quota files from being encrypted kernel/reboot: emergency_restart: Set correct system_state i2c: core: Run atomic i2c xfer when !preemptible mcb: fix error handling for different scenarios when parsing dmaengine: stm32-mdma: correct desc prep when channel running mm/cma: use nth_page() in place of direct struct page manipulation mm/memory_hotplug: use pfn math in place of direct struct page manipulation mtd: cfi_cmdset_0001: Byte swap OTP info i3c: master: cdns: Fix reading status register parisc: Prevent booting 64-bit kernels on PA1.x machines parisc/pgtable: Do not drop upper 5 address bits of physical address xhci: Enable RPM on controllers that support low-power states ALSA: info: Fix potential deadlock at disconnection ALSA: hda/realtek - Add Dell ALC295 to pin fall back table ALSA: hda/realtek - Enable internal speaker of ASUS K6500ZC serial: meson: remove redundant initialization of variable id tty: serial: meson: retrieve port FIFO size from DT serial: meson: Use platform_get_irq() to get the interrupt tty: serial: meson: fix hard LOCKUP on crtscts mode cpufreq: stats: Fix buffer overflow detection in trans_stats() Bluetooth: btusb: Add Realtek RTL8852BE support ID 0x0cb8:0xc559 bluetooth: Add device 0bda:887b to device tables bluetooth: Add device 13d3:3571 to device tables Bluetooth: btusb: Add RTW8852BE device 13d3:3570 to device tables Bluetooth: btusb: Add 0bda:b85b for Fn-Link RTL8852BE PCI: exynos: Don't discard .remove() callback arm64: dts: qcom: ipq6018: switch TCSR mutex to MMIO arm64: dts: qcom: ipq6018: Fix tcsr_mutex register size Revert ncsi: Propagate carrier gain/loss events to the NCSI controller lsm: fix default return value for vm_enough_memory lsm: fix default return value for inode_getsecctx i2c: designware: Disable TX_EMPTY irq while waiting for block length byte net: dsa: lan9303: consequently nested-lock physical MDIO net: phylink: initialize carrier state at creation i2c: i801: fix potential race in i801_block_transaction_byte_by_byte f2fs: avoid format-overflow warning media: lirc: drop trailing space from scancode transmit media: sharp: fix sharp encoding media: venus: hfi_parser: Add check to keep the number of codecs within range media: venus: hfi: fix the check to handle session buffer requirement media: venus: hfi: add checks to handle capabilities from firmware nfsd: fix file memleak on client_opens_release mm: kmem: drop __GFP_NOFAIL when allocating objcg vectors media: qcom: camss: Fix vfe_get() error jump Revert "net: r8169: Disable multicast filter for RTL8168H and RTL8107E" ext4: apply umask if ACL support is disabled ext4: correct offset of gdb backup in non meta_bg group to update_backups ext4: correct return value of ext4_convert_meta_bg ext4: correct the start block of counting reserved clusters ext4: remove gdb backup copy for meta bg in setup_new_flex_group_blocks 
drm/amd/pm: Handle non-terminated overdrive commands. drm/amdgpu: fix error handling in amdgpu_bo_list_get() drm/amd/display: Change the DMCUB mailbox memory location from FB to inbox io_uring/fdinfo: lock SQ thread while retrieving thread cpu/pid tracing: Have trace_event_file have ref counters netfilter: nftables: update table flags from the commit phase netfilter: nf_tables: fix table flag updates netfilter: nf_tables: disable toggling dormant table state more than once interconnect: qcom: Add support for mask-based BCMs Linux 5.10.202 Change-Id: I762bcd4848d9b87cbb4efe4104fe1685999dc0f7 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit
7999a9a70d
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 5
|
||||
PATCHLEVEL = 10
|
||||
SUBLEVEL = 201
|
||||
SUBLEVEL = 202
|
||||
EXTRAVERSION =
|
||||
NAME = Dare mighty things
|
||||
|
||||
|
@ -10,10 +10,6 @@
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
#define __exception_irq_entry __irq_entry
|
||||
#else
|
||||
#define __exception_irq_entry
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_ARM_EXCEPTION_H */
|
||||
|
@ -1065,6 +1065,8 @@ choice
|
||||
config CPU_BIG_ENDIAN
|
||||
bool "Build big-endian kernel"
|
||||
depends on !LD_IS_LLD || LLD_VERSION >= 130000
|
||||
# https://github.com/llvm/llvm-project/commit/1379b150991f70a5782e9a143c2ba5308da1161c
|
||||
depends on AS_IS_GNU || AS_VERSION >= 150000
|
||||
help
|
||||
Say Y if you plan on running a kernel with a big-endian userspace.
|
||||
|
||||
|
@ -129,12 +129,6 @@ scm {
|
||||
};
|
||||
};
|
||||
|
||||
tcsr_mutex: hwlock {
|
||||
compatible = "qcom,tcsr-mutex";
|
||||
syscon = <&tcsr_mutex_regs 0 0x80>;
|
||||
#hwlock-cells = <1>;
|
||||
};
|
||||
|
||||
pmuv8: pmu {
|
||||
compatible = "arm,cortex-a53-pmu";
|
||||
interrupts = <GIC_PPI 7 (GIC_CPU_MASK_SIMPLE(4) |
|
||||
@ -175,7 +169,7 @@ q6_region: memory@4ab00000 {
|
||||
smem {
|
||||
compatible = "qcom,smem";
|
||||
memory-region = <&smem_region>;
|
||||
hwlocks = <&tcsr_mutex 0>;
|
||||
hwlocks = <&tcsr_mutex 3>;
|
||||
};
|
||||
|
||||
soc: soc {
|
||||
@ -242,9 +236,10 @@ gcc: gcc@1800000 {
|
||||
#reset-cells = <1>;
|
||||
};
|
||||
|
||||
tcsr_mutex_regs: syscon@1905000 {
|
||||
compatible = "syscon";
|
||||
reg = <0x0 0x01905000 0x0 0x8000>;
|
||||
tcsr_mutex: hwlock@1905000 {
|
||||
compatible = "qcom,ipq6018-tcsr-mutex", "qcom,tcsr-mutex";
|
||||
reg = <0x0 0x01905000 0x0 0x20000>;
|
||||
#hwlock-cells = <1>;
|
||||
};
|
||||
|
||||
tcsr_q6: syscon@1945000 {
|
||||
|
@ -465,6 +465,7 @@ struct pdc_model { /* for PDC_MODEL */
|
||||
unsigned long arch_rev;
|
||||
unsigned long pot_key;
|
||||
unsigned long curr_key;
|
||||
unsigned long width; /* default of PSW_W bit (1=enabled) */
|
||||
};
|
||||
|
||||
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
|
||||
|
@ -497,13 +497,13 @@
|
||||
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
|
||||
#define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
|
||||
#define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
|
||||
#define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
|
||||
|
||||
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
|
||||
.macro convert_for_tlb_insert20 pte,tmp
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
copy \pte,\tmp
|
||||
extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
|
||||
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
@ -511,8 +511,7 @@
|
||||
depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
|
||||
#else /* Huge pages disabled */
|
||||
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
|
||||
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
|
||||
extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
|
||||
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
|
||||
(63-58)+PAGE_ADD_SHIFT,\pte
|
||||
#endif
|
||||
|
@ -69,9 +69,8 @@ $bss_loop:
|
||||
stw,ma %arg2,4(%r1)
|
||||
stw,ma %arg3,4(%r1)
|
||||
|
||||
#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
|
||||
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
|
||||
* and halt kernel if we detect a PA1.x CPU. */
|
||||
#if defined(CONFIG_PA20)
|
||||
/* check for 64-bit capable CPU as required by current kernel */
|
||||
ldi 32,%r10
|
||||
mtctl %r10,%cr11
|
||||
.level 2.0
|
||||
|
@ -1289,8 +1289,7 @@ static void power_pmu_disable(struct pmu *pmu)
|
||||
/*
|
||||
* Disable instruction sampling if it was enabled
|
||||
*/
|
||||
if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
|
||||
val &= ~MMCRA_SAMPLE_ENABLE;
|
||||
val &= ~MMCRA_SAMPLE_ENABLE;
|
||||
|
||||
/* Disable BHRB via mmcra (BHRBRD) for p10 */
|
||||
if (ppmu->flags & PPMU_ARCH_31)
|
||||
@ -1301,7 +1300,7 @@ static void power_pmu_disable(struct pmu *pmu)
|
||||
* instruction sampling or BHRB.
|
||||
*/
|
||||
if (val != mmcra) {
|
||||
mtspr(SPRN_MMCRA, mmcra);
|
||||
mtspr(SPRN_MMCRA, val);
|
||||
mb();
|
||||
isync();
|
||||
}
|
||||
|
@ -505,6 +505,7 @@
|
||||
#define MSR_AMD64_CPUID_FN_1 0xc0011004
|
||||
#define MSR_AMD64_LS_CFG 0xc0011020
|
||||
#define MSR_AMD64_DC_CFG 0xc0011022
|
||||
#define MSR_AMD64_TW_CFG 0xc0011023
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xc0011029
|
||||
#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
|
||||
|
@ -12,13 +12,6 @@
|
||||
|
||||
#define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
|
||||
|
||||
/*
|
||||
* Too small node sizes may confuse the VM badly. Usually they
|
||||
* result from BIOS bugs. So dont recognize nodes as standalone
|
||||
* NUMA entities that have less than this amount of RAM listed:
|
||||
*/
|
||||
#define NODE_MIN_SIZE (4*1024*1024)
|
||||
|
||||
extern int numa_off;
|
||||
|
||||
/*
|
||||
|
@ -89,8 +89,12 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
|
||||
if (!err)
|
||||
c->x86_coreid_bits = get_count_order(c->x86_max_cores);
|
||||
|
||||
/* Socket ID is ApicId[6] for these processors. */
|
||||
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
|
||||
/*
|
||||
* Socket ID is ApicId[6] for the processors with model <= 0x3
|
||||
* when running on host.
|
||||
*/
|
||||
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
|
||||
c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
|
||||
|
||||
cacheinfo_hygon_init_llc_id(c, cpu);
|
||||
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
|
||||
|
@ -674,10 +674,12 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
|
||||
|
||||
stimer_cleanup(stimer);
|
||||
stimer->count = count;
|
||||
if (stimer->count == 0)
|
||||
stimer->config.enable = 0;
|
||||
else if (stimer->config.auto_enable)
|
||||
stimer->config.enable = 1;
|
||||
if (!host) {
|
||||
if (stimer->count == 0)
|
||||
stimer->config.enable = 0;
|
||||
else if (stimer->config.auto_enable)
|
||||
stimer->config.enable = 1;
|
||||
}
|
||||
|
||||
if (stimer->config.enable)
|
||||
stimer_mark_pending(stimer, false);
|
||||
|
@ -3132,6 +3132,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
case MSR_AMD64_PATCH_LOADER:
|
||||
case MSR_AMD64_BU_CFG2:
|
||||
case MSR_AMD64_DC_CFG:
|
||||
case MSR_AMD64_TW_CFG:
|
||||
case MSR_F15H_EX_CFG:
|
||||
break;
|
||||
|
||||
@ -3485,6 +3486,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
case MSR_AMD64_BU_CFG2:
|
||||
case MSR_IA32_PERF_CTL:
|
||||
case MSR_AMD64_DC_CFG:
|
||||
case MSR_AMD64_TW_CFG:
|
||||
case MSR_F15H_EX_CFG:
|
||||
/*
|
||||
* Intel Sandy Bridge CPUs must support the RAPL (running average power
|
||||
|
@ -602,13 +602,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
|
||||
if (start >= end)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Don't confuse VM with a node that doesn't have the
|
||||
* minimum amount of memory:
|
||||
*/
|
||||
if (end && (end - start) < NODE_MIN_SIZE)
|
||||
continue;
|
||||
|
||||
alloc_node_data(nid);
|
||||
}
|
||||
|
||||
|
@ -117,6 +117,8 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
|
||||
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
|
||||
if (!err)
|
||||
return -EINPROGRESS;
|
||||
if (err == -EBUSY)
|
||||
return -EAGAIN;
|
||||
|
||||
return err;
|
||||
}
|
||||
@ -164,6 +166,8 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
|
||||
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
|
||||
if (!err)
|
||||
return -EINPROGRESS;
|
||||
if (err == -EBUSY)
|
||||
return -EAGAIN;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -442,6 +442,18 @@ static const struct dmi_system_id asus_laptop[] = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* TongFang GMxXGxx/TUXEDO Polaris 15 Gen5 AMD */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
|
||||
},
|
||||
},
|
||||
{
|
||||
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.ident = "Asus ExpertBook B2502",
|
||||
.matches = {
|
||||
|
@ -2290,19 +2290,21 @@ static int get_esi(struct atm_dev *dev)
|
||||
static int reset_sar(struct atm_dev *dev)
|
||||
{
|
||||
IADEV *iadev;
|
||||
int i, error = 1;
|
||||
int i, error;
|
||||
unsigned int pci[64];
|
||||
|
||||
iadev = INPH_IA_DEV(dev);
|
||||
for(i=0; i<64; i++)
|
||||
if ((error = pci_read_config_dword(iadev->pci,
|
||||
i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
|
||||
return error;
|
||||
for (i = 0; i < 64; i++) {
|
||||
error = pci_read_config_dword(iadev->pci, i * 4, &pci[i]);
|
||||
if (error != PCIBIOS_SUCCESSFUL)
|
||||
return error;
|
||||
}
|
||||
writel(0, iadev->reg+IPHASE5575_EXT_RESET);
|
||||
for(i=0; i<64; i++)
|
||||
if ((error = pci_write_config_dword(iadev->pci,
|
||||
i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
|
||||
return error;
|
||||
for (i = 0; i < 64; i++) {
|
||||
error = pci_write_config_dword(iadev->pci, i * 4, pci[i]);
|
||||
if (error != PCIBIOS_SUCCESSFUL)
|
||||
return error;
|
||||
}
|
||||
udelay(5);
|
||||
return 0;
|
||||
}
|
||||
|
@ -415,6 +415,18 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
|
||||
/* Realtek 8852BE Bluetooth devices */
|
||||
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
{ USB_DEVICE(0x13d3, 0x3570), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
{ USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK |
|
||||
BTUSB_WIDEBAND_SPEECH },
|
||||
|
||||
/* Realtek Bluetooth devices */
|
||||
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
|
||||
.driver_info = BTUSB_REALTEK },
|
||||
@ -3095,6 +3107,9 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
|
||||
goto err_free_wc;
|
||||
}
|
||||
|
||||
if (data->evt_skb == NULL)
|
||||
goto err_free_wc;
|
||||
|
||||
/* Parse and handle the return WMT event */
|
||||
wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data;
|
||||
if (wmt_evt->whdr.op != hdr->op) {
|
||||
|
@ -75,7 +75,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
|
||||
&gpll0_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_fixed_factor_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -89,7 +88,6 @@ static struct clk_alpha_pll_postdiv gpll0 = {
|
||||
&gpll0_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -164,7 +162,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
|
||||
&gpll6_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -195,7 +192,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
|
||||
&gpll4_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -246,7 +242,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
|
||||
&gpll2_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -277,7 +272,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
|
||||
&nss_crypto_pll_main.clkr.hw },
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -418,7 +418,6 @@ static struct clk_fixed_factor gpll0_out_main_div2 = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_fixed_factor_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -465,7 +464,6 @@ static struct clk_alpha_pll_postdiv gpll2 = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -498,7 +496,6 @@ static struct clk_alpha_pll_postdiv gpll4 = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -532,7 +529,6 @@ static struct clk_alpha_pll_postdiv gpll6 = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -546,7 +542,6 @@ static struct clk_fixed_factor gpll6_out_main_div2 = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_fixed_factor_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
@ -611,7 +606,6 @@ static struct clk_alpha_pll_postdiv nss_crypto_pll = {
|
||||
},
|
||||
.num_parents = 1,
|
||||
.ops = &clk_alpha_pll_postdiv_ro_ops,
|
||||
.flags = CLK_SET_RATE_PARENT,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -315,6 +315,7 @@ static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
|
||||
writel(mck_divisor_idx /* likely divide-by-8 */
|
||||
| ATMEL_TC_WAVE
|
||||
| ATMEL_TC_WAVESEL_UP /* free-run */
|
||||
| ATMEL_TC_ASWTRG_SET /* TIOA0 rises at software trigger */
|
||||
| ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
|
||||
| ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
|
||||
tcaddr + ATMEL_TC_REG(0, CMR));
|
||||
|
@ -454,12 +454,16 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
|
||||
return -ENOMEM;
|
||||
|
||||
imxtm->base = of_iomap(np, 0);
|
||||
if (!imxtm->base)
|
||||
return -ENXIO;
|
||||
if (!imxtm->base) {
|
||||
ret = -ENXIO;
|
||||
goto err_kfree;
|
||||
}
|
||||
|
||||
imxtm->irq = irq_of_parse_and_map(np, 0);
|
||||
if (imxtm->irq <= 0)
|
||||
return -EINVAL;
|
||||
if (imxtm->irq <= 0) {
|
||||
ret = -EINVAL;
|
||||
goto err_kfree;
|
||||
}
|
||||
|
||||
imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
|
||||
|
||||
@ -472,11 +476,15 @@ static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type t
|
||||
|
||||
ret = _mxc_timer_init(imxtm);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_kfree;
|
||||
|
||||
initialized = 1;
|
||||
|
||||
return 0;
|
||||
|
||||
err_kfree:
|
||||
kfree(imxtm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __init imx1_timer_init_dt(struct device_node *np)
|
||||
|
@ -131,25 +131,25 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, " From : To\n");
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, " : ");
|
||||
for (i = 0; i < stats->state_num; i++) {
|
||||
if (len >= PAGE_SIZE)
|
||||
if (len >= PAGE_SIZE - 1)
|
||||
break;
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
|
||||
stats->freq_table[i]);
|
||||
}
|
||||
if (len >= PAGE_SIZE)
|
||||
return PAGE_SIZE;
|
||||
if (len >= PAGE_SIZE - 1)
|
||||
return PAGE_SIZE - 1;
|
||||
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
|
||||
|
||||
for (i = 0; i < stats->state_num; i++) {
|
||||
if (len >= PAGE_SIZE)
|
||||
if (len >= PAGE_SIZE - 1)
|
||||
break;
|
||||
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
|
||||
stats->freq_table[i]);
|
||||
|
||||
for (j = 0; j < stats->state_num; j++) {
|
||||
if (len >= PAGE_SIZE)
|
||||
if (len >= PAGE_SIZE - 1)
|
||||
break;
|
||||
|
||||
if (pending)
|
||||
@ -159,12 +159,12 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
|
||||
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
|
||||
}
|
||||
if (len >= PAGE_SIZE)
|
||||
if (len >= PAGE_SIZE - 1)
|
||||
break;
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
|
||||
}
|
||||
|
||||
if (len >= PAGE_SIZE) {
|
||||
if (len >= PAGE_SIZE - 1) {
|
||||
pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
|
||||
return -EFBIG;
|
||||
}
|
||||
|
@ -509,7 +509,7 @@ static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
|
||||
src_maxburst = chan->dma_config.src_maxburst;
|
||||
dst_maxburst = chan->dma_config.dst_maxburst;
|
||||
|
||||
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
|
||||
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
|
||||
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
|
||||
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
|
||||
|
||||
@ -937,7 +937,7 @@ stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
|
||||
if (!desc)
|
||||
return NULL;
|
||||
|
||||
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
|
||||
ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & ~STM32_MDMA_CCR_EN;
|
||||
ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
|
||||
ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
|
||||
cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
|
||||
|
@ -136,6 +136,12 @@ static enum qcom_scm_convention __get_convention(void)
|
||||
if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
|
||||
return qcom_scm_convention;
|
||||
|
||||
/*
|
||||
* Per the "SMC calling convention specification", the 64-bit calling
|
||||
* convention can only be used when the client is 64-bit, otherwise
|
||||
* system will encounter the undefined behaviour.
|
||||
*/
|
||||
#if IS_ENABLED(CONFIG_ARM64)
|
||||
/*
|
||||
* Device isn't required as there is only one argument - no device
|
||||
* needed to dma_map_single to secure world
|
||||
@ -156,6 +162,7 @@ static enum qcom_scm_convention __get_convention(void)
|
||||
forced = true;
|
||||
goto found;
|
||||
}
|
||||
#endif
|
||||
|
||||
probed_convention = SMC_CONVENTION_ARM_32;
|
||||
ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
|
||||
|
@ -178,6 +178,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
*result = NULL;
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
|
@ -575,6 +575,9 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
|
||||
ssize_t result = 0;
|
||||
int r;
|
||||
|
||||
if (!adev->smc_rreg)
|
||||
return -EPERM;
|
||||
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
@ -634,6 +637,9 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
|
||||
ssize_t result = 0;
|
||||
int r;
|
||||
|
||||
if (!adev->smc_wreg)
|
||||
return -EPERM;
|
||||
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
|
@ -4530,7 +4530,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
|
||||
* Flush RAM to disk so that after reboot
|
||||
* the user can read log and see why the system rebooted.
|
||||
*/
|
||||
if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
|
||||
if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
|
||||
amdgpu_ras_get_context(adev)->reboot) {
|
||||
DRM_WARN("Emergency reboot.");
|
||||
|
||||
ksys_sync_helper();
|
||||
|
@ -1031,7 +1031,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
|
||||
sysfs_remove_file_from_group(&adev->dev->kobj,
|
||||
if (adev->dev->kobj.sd)
|
||||
sysfs_remove_file_from_group(&adev->dev->kobj,
|
||||
&con->badpages_attr.attr,
|
||||
RAS_FS_NAME);
|
||||
}
|
||||
@ -1048,7 +1049,8 @@ static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
|
||||
.attrs = attrs,
|
||||
};
|
||||
|
||||
sysfs_remove_group(&adev->dev->kobj, &group);
|
||||
if (adev->dev->kobj.sd)
|
||||
sysfs_remove_group(&adev->dev->kobj, &group);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1096,7 +1098,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
|
||||
if (!obj || !obj->attr_inuse)
|
||||
return -EINVAL;
|
||||
|
||||
sysfs_remove_file_from_group(&adev->dev->kobj,
|
||||
if (adev->dev->kobj.sd)
|
||||
sysfs_remove_file_from_group(&adev->dev->kobj,
|
||||
&obj->sysfs_attr.attr,
|
||||
RAS_FS_NAME);
|
||||
obj->attr_inuse = 0;
|
||||
|
@ -1293,7 +1293,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
||||
struct dmub_srv_create_params create_params;
|
||||
struct dmub_srv_region_params region_params;
|
||||
struct dmub_srv_region_info region_info;
|
||||
struct dmub_srv_fb_params fb_params;
|
||||
struct dmub_srv_memory_params memory_params;
|
||||
struct dmub_srv_fb_info *fb_info;
|
||||
struct dmub_srv *dmub_srv;
|
||||
const struct dmcub_firmware_header_v1_0 *hdr;
|
||||
@ -1389,6 +1389,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
||||
adev->dm.dmub_fw->data +
|
||||
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
|
||||
PSP_HEADER_BYTES;
|
||||
region_params.is_mailbox_in_inbox = false;
|
||||
|
||||
status = dmub_srv_calc_region_info(dmub_srv, ®ion_params,
|
||||
®ion_info);
|
||||
@ -1410,10 +1411,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
||||
return r;
|
||||
|
||||
/* Rebase the regions on the framebuffer address. */
|
||||
memset(&fb_params, 0, sizeof(fb_params));
|
||||
fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
|
||||
fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
|
||||
fb_params.region_info = ®ion_info;
|
||||
memset(&memory_params, 0, sizeof(memory_params));
|
||||
memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
|
||||
memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
|
||||
memory_params.region_info = ®ion_info;
|
||||
|
||||
adev->dm.dmub_fb_info =
|
||||
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
|
||||
@ -1425,7 +1426,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
|
||||
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
|
||||
if (status != DMUB_STATUS_OK) {
|
||||
DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
|
||||
return -EINVAL;
|
||||
|
@ -534,7 +534,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
|
||||
|
||||
if (res_ctx->pipe_ctx[i].stream != stream)
|
||||
if (res_ctx->pipe_ctx[i].stream != stream || !tg)
|
||||
continue;
|
||||
|
||||
return tg->funcs->get_frame_count(tg);
|
||||
@ -593,7 +593,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
|
||||
for (i = 0; i < MAX_PIPES; i++) {
|
||||
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
|
||||
|
||||
if (res_ctx->pipe_ctx[i].stream != stream)
|
||||
if (res_ctx->pipe_ctx[i].stream != stream || !tg)
|
||||
continue;
|
||||
|
||||
tg->funcs->get_scanoutpos(tg,
|
||||
|
@ -152,6 +152,7 @@ struct dmub_srv_region_params {
|
||||
uint32_t vbios_size;
|
||||
const uint8_t *fw_inst_const;
|
||||
const uint8_t *fw_bss_data;
|
||||
bool is_mailbox_in_inbox;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -171,20 +172,25 @@ struct dmub_srv_region_params {
|
||||
*/
|
||||
struct dmub_srv_region_info {
|
||||
uint32_t fb_size;
|
||||
uint32_t inbox_size;
|
||||
uint8_t num_regions;
|
||||
struct dmub_region regions[DMUB_WINDOW_TOTAL];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dmub_srv_fb_params - parameters used for driver fb setup
|
||||
* struct dmub_srv_memory_params - parameters used for driver fb setup
|
||||
* @region_info: region info calculated by dmub service
|
||||
* @cpu_addr: base cpu address for the framebuffer
|
||||
* @gpu_addr: base gpu virtual address for the framebuffer
|
||||
* @cpu_fb_addr: base cpu address for the framebuffer
|
||||
* @cpu_inbox_addr: base cpu address for the gart
|
||||
* @gpu_fb_addr: base gpu virtual address for the framebuffer
|
||||
* @gpu_inbox_addr: base gpu virtual address for the gart
|
||||
*/
|
||||
struct dmub_srv_fb_params {
|
||||
struct dmub_srv_memory_params {
|
||||
const struct dmub_srv_region_info *region_info;
|
||||
void *cpu_addr;
|
||||
uint64_t gpu_addr;
|
||||
void *cpu_fb_addr;
|
||||
void *cpu_inbox_addr;
|
||||
uint64_t gpu_fb_addr;
|
||||
uint64_t gpu_inbox_addr;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -398,8 +404,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
|
||||
* DMUB_STATUS_OK - success
|
||||
* DMUB_STATUS_INVALID - unspecified error
|
||||
*/
|
||||
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
|
||||
const struct dmub_srv_fb_params *params,
|
||||
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
|
||||
const struct dmub_srv_memory_params *params,
|
||||
struct dmub_srv_fb_info *out);
|
||||
|
||||
/**
|
||||
|
@ -250,7 +250,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
|
||||
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
|
||||
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
|
||||
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
|
||||
|
||||
uint32_t previous_top = 0;
|
||||
if (!dmub->sw_init)
|
||||
return DMUB_STATUS_INVALID;
|
||||
|
||||
@ -275,8 +275,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
|
||||
bios->base = dmub_align(stack->top, 256);
|
||||
bios->top = bios->base + params->vbios_size;
|
||||
|
||||
mail->base = dmub_align(bios->top, 256);
|
||||
mail->top = mail->base + DMUB_MAILBOX_SIZE;
|
||||
if (params->is_mailbox_in_inbox) {
|
||||
mail->base = 0;
|
||||
mail->top = mail->base + DMUB_MAILBOX_SIZE;
|
||||
previous_top = bios->top;
|
||||
} else {
|
||||
mail->base = dmub_align(bios->top, 256);
|
||||
mail->top = mail->base + DMUB_MAILBOX_SIZE;
|
||||
previous_top = mail->top;
|
||||
}
|
||||
|
||||
fw_info = dmub_get_fw_meta_info(params);
|
||||
|
||||
@ -295,7 +302,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
|
||||
dmub->fw_version = fw_info->fw_version;
|
||||
}
|
||||
|
||||
trace_buff->base = dmub_align(mail->top, 256);
|
||||
trace_buff->base = dmub_align(previous_top, 256);
|
||||
trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
|
||||
|
||||
fw_state->base = dmub_align(trace_buff->top, 256);
|
||||
@ -306,11 +313,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
|
||||
|
||||
out->fb_size = dmub_align(scratch_mem->top, 4096);
|
||||
|
||||
if (params->is_mailbox_in_inbox)
|
||||
out->inbox_size = dmub_align(mail->top, 4096);
|
||||
|
||||
return DMUB_STATUS_OK;
|
||||
}
|
||||
|
||||
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
|
||||
const struct dmub_srv_fb_params *params,
|
||||
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
|
||||
const struct dmub_srv_memory_params *params,
|
||||
struct dmub_srv_fb_info *out)
|
||||
{
|
||||
uint8_t *cpu_base;
|
||||
@ -325,8 +335,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
|
||||
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
|
||||
return DMUB_STATUS_INVALID;
|
||||
|
||||
cpu_base = (uint8_t *)params->cpu_addr;
|
||||
gpu_base = params->gpu_addr;
|
||||
cpu_base = (uint8_t *)params->cpu_fb_addr;
|
||||
gpu_base = params->gpu_fb_addr;
|
||||
|
||||
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
|
||||
const struct dmub_region *reg =
|
||||
@ -334,6 +344,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
|
||||
|
||||
out->fb[i].cpu_addr = cpu_base + reg->base;
|
||||
out->fb[i].gpu_addr = gpu_base + reg->base;
|
||||
|
||||
if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
|
||||
out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
|
||||
out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
|
||||
}
|
||||
|
||||
out->fb[i].size = reg->top - reg->base;
|
||||
}
|
||||
|
||||
|
@ -78,7 +78,7 @@ typedef struct _ATOM_PPLIB_THERMALCONTROLLER
|
||||
typedef struct _ATOM_PPLIB_STATE
|
||||
{
|
||||
UCHAR ucNonClockStateIndex;
|
||||
UCHAR ucClockStateIndices[1]; // variable-sized
|
||||
UCHAR ucClockStateIndices[]; // variable-sized
|
||||
} ATOM_PPLIB_STATE;
|
||||
|
||||
|
||||
@ -473,7 +473,7 @@ typedef struct _ATOM_PPLIB_STATE_V2
|
||||
/**
|
||||
* Driver will read the first ucNumDPMLevels in this array
|
||||
*/
|
||||
UCHAR clockInfoIndex[1];
|
||||
UCHAR clockInfoIndex[];
|
||||
} ATOM_PPLIB_STATE_V2;
|
||||
|
||||
typedef struct _StateArray{
|
||||
|
@ -776,7 +776,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
||||
if (amdgpu_in_reset(adev))
|
||||
return -EPERM;
|
||||
|
||||
if (count > 127)
|
||||
if (count > 127 || count == 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (*buf == 's')
|
||||
@ -792,7 +792,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(buf_cpy, buf, count+1);
|
||||
memcpy(buf_cpy, buf, count);
|
||||
buf_cpy[count] = 0;
|
||||
|
||||
tmp_str = buf_cpy;
|
||||
|
||||
@ -807,6 +808,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
|
||||
return -EINVAL;
|
||||
parameter_size++;
|
||||
|
||||
if (!tmp_str)
|
||||
break;
|
||||
|
||||
while (isspace(*tmp_str))
|
||||
tmp_str++;
|
||||
}
|
||||
|
@ -164,7 +164,7 @@ typedef struct _ATOM_Tonga_State {
|
||||
typedef struct _ATOM_Tonga_State_Array {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_State entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_State entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_State_Array;
|
||||
|
||||
typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
|
||||
@ -179,7 +179,7 @@ typedef struct _ATOM_Tonga_MCLK_Dependency_Record {
|
||||
typedef struct _ATOM_Tonga_MCLK_Dependency_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_MCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_MCLK_Dependency_Table;
|
||||
|
||||
typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
|
||||
@ -194,7 +194,7 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Record {
|
||||
typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_SCLK_Dependency_Table;
|
||||
|
||||
typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
|
||||
@ -210,7 +210,7 @@ typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
|
||||
typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Polaris_SCLK_Dependency_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Polaris_SCLK_Dependency_Table;
|
||||
|
||||
typedef struct _ATOM_Tonga_PCIE_Record {
|
||||
@ -222,7 +222,7 @@ typedef struct _ATOM_Tonga_PCIE_Record {
|
||||
typedef struct _ATOM_Tonga_PCIE_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_PCIE_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_PCIE_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_PCIE_Table;
|
||||
|
||||
typedef struct _ATOM_Polaris10_PCIE_Record {
|
||||
@ -235,7 +235,7 @@ typedef struct _ATOM_Polaris10_PCIE_Record {
|
||||
typedef struct _ATOM_Polaris10_PCIE_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Polaris10_PCIE_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Polaris10_PCIE_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Polaris10_PCIE_Table;
|
||||
|
||||
|
||||
@ -252,7 +252,7 @@ typedef struct _ATOM_Tonga_MM_Dependency_Record {
|
||||
typedef struct _ATOM_Tonga_MM_Dependency_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_MM_Dependency_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_MM_Dependency_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_MM_Dependency_Table;
|
||||
|
||||
typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
|
||||
@ -265,7 +265,7 @@ typedef struct _ATOM_Tonga_Voltage_Lookup_Record {
|
||||
typedef struct _ATOM_Tonga_Voltage_Lookup_Table {
|
||||
UCHAR ucRevId;
|
||||
UCHAR ucNumEntries; /* Number of entries. */
|
||||
ATOM_Tonga_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries. */
|
||||
ATOM_Tonga_Voltage_Lookup_Record entries[]; /* Dynamically allocate entries. */
|
||||
} ATOM_Tonga_Voltage_Lookup_Table;
|
||||
|
||||
typedef struct _ATOM_Tonga_Fan_Table {
|
||||
|
@ -1223,7 +1223,7 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
|
||||
struct komeda_pipeline_state *new)
|
||||
{
|
||||
@ -1243,8 +1243,12 @@ komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
|
||||
c = komeda_pipeline_get_component(pipe, id);
|
||||
c_st = komeda_component_get_state_and_set_user(c,
|
||||
drm_st, NULL, new->crtc);
|
||||
if (PTR_ERR(c_st) == -EDEADLK)
|
||||
return -EDEADLK;
|
||||
WARN_ON(IS_ERR(c_st));
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* release unclaimed pipeline resource */
|
||||
@ -1266,9 +1270,8 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
|
||||
if (WARN_ON(IS_ERR_OR_NULL(st)))
|
||||
return -EINVAL;
|
||||
|
||||
komeda_pipeline_unbound_components(pipe, st);
|
||||
return komeda_pipeline_unbound_components(pipe, st);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Since standalong disabled components must be disabled separately and in the
|
||||
|
@ -264,26 +264,9 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
|
||||
|
||||
static u8 dp_panel_get_edid_checksum(struct edid *edid)
|
||||
{
|
||||
struct edid *last_block;
|
||||
u8 *raw_edid;
|
||||
bool is_edid_corrupt = false;
|
||||
edid += edid->extensions;
|
||||
|
||||
if (!edid) {
|
||||
DRM_ERROR("invalid edid input\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
raw_edid = (u8 *)edid;
|
||||
raw_edid += (edid->extensions * EDID_LENGTH);
|
||||
last_block = (struct edid *)raw_edid;
|
||||
|
||||
/* block type extension */
|
||||
drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
|
||||
if (!is_edid_corrupt)
|
||||
return last_block->checksum;
|
||||
|
||||
DRM_ERROR("Invalid block, no checksum\n");
|
||||
return 0;
|
||||
return edid->checksum;
|
||||
}
|
||||
|
||||
void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
|
||||
|
@ -267,6 +267,8 @@ static int versatile_panel_get_modes(struct drm_panel *panel,
|
||||
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, &vpanel->panel_type->mode);
|
||||
if (!mode)
|
||||
return -ENOMEM;
|
||||
drm_mode_set_name(mode);
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
|
||||
|
@ -428,29 +428,30 @@ static int st7703_prepare(struct drm_panel *panel)
|
||||
return 0;
|
||||
|
||||
dev_dbg(ctx->dev, "Resetting the panel\n");
|
||||
ret = regulator_enable(ctx->vcc);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
|
||||
ret = regulator_enable(ctx->iovcc);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
|
||||
goto disable_vcc;
|
||||
return ret;
|
||||
}
|
||||
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
|
||||
usleep_range(20, 40);
|
||||
ret = regulator_enable(ctx->vcc);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
|
||||
regulator_disable(ctx->iovcc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Give power supplies time to stabilize before deasserting reset. */
|
||||
usleep_range(10000, 20000);
|
||||
|
||||
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
|
||||
msleep(20);
|
||||
usleep_range(15000, 20000);
|
||||
|
||||
ctx->prepared = true;
|
||||
|
||||
return 0;
|
||||
|
||||
disable_vcc:
|
||||
regulator_disable(ctx->vcc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int st7703_get_modes(struct drm_panel *panel,
|
||||
|
@ -378,6 +378,8 @@ static int tpg110_get_modes(struct drm_panel *panel,
|
||||
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
|
||||
|
||||
mode = drm_mode_duplicate(connector->dev, &tpg->panel_mode->mode);
|
||||
if (!mode)
|
||||
return -ENOMEM;
|
||||
drm_mode_set_name(mode);
|
||||
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
|
||||
|
||||
|
@ -345,6 +345,7 @@
|
||||
|
||||
#define USB_VENDOR_ID_DELL 0x413c
|
||||
#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
|
||||
#define USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W 0x4503
|
||||
|
||||
#define USB_VENDOR_ID_DELORME 0x1163
|
||||
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
|
||||
|
@ -50,7 +50,12 @@ struct lenovo_drvdata {
|
||||
int select_right;
|
||||
int sensitivity;
|
||||
int press_speed;
|
||||
u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */
|
||||
/* 0: Up
|
||||
* 1: Down (undecided)
|
||||
* 2: Scrolling
|
||||
* 3: Patched firmware, disable workaround
|
||||
*/
|
||||
u8 middlebutton_state;
|
||||
bool fn_lock;
|
||||
};
|
||||
|
||||
@ -478,31 +483,48 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
|
||||
{
|
||||
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
|
||||
|
||||
/* "wheel" scroll events */
|
||||
if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
|
||||
usage->code == REL_HWHEEL)) {
|
||||
/* Scroll events disable middle-click event */
|
||||
cptkbd_data->middlebutton_state = 2;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Middle click events */
|
||||
if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
|
||||
if (value == 1) {
|
||||
cptkbd_data->middlebutton_state = 1;
|
||||
} else if (value == 0) {
|
||||
if (cptkbd_data->middlebutton_state == 1) {
|
||||
/* No scrolling inbetween, send middle-click */
|
||||
input_event(field->hidinput->input,
|
||||
EV_KEY, BTN_MIDDLE, 1);
|
||||
input_sync(field->hidinput->input);
|
||||
input_event(field->hidinput->input,
|
||||
EV_KEY, BTN_MIDDLE, 0);
|
||||
input_sync(field->hidinput->input);
|
||||
}
|
||||
cptkbd_data->middlebutton_state = 0;
|
||||
if (cptkbd_data->middlebutton_state != 3) {
|
||||
/* REL_X and REL_Y events during middle button pressed
|
||||
* are only possible on patched, bug-free firmware
|
||||
* so set middlebutton_state to 3
|
||||
* to never apply workaround anymore
|
||||
*/
|
||||
if (cptkbd_data->middlebutton_state == 1 &&
|
||||
usage->type == EV_REL &&
|
||||
(usage->code == REL_X || usage->code == REL_Y)) {
|
||||
cptkbd_data->middlebutton_state = 3;
|
||||
/* send middle button press which was hold before */
|
||||
input_event(field->hidinput->input,
|
||||
EV_KEY, BTN_MIDDLE, 1);
|
||||
input_sync(field->hidinput->input);
|
||||
}
|
||||
|
||||
/* "wheel" scroll events */
|
||||
if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
|
||||
usage->code == REL_HWHEEL)) {
|
||||
/* Scroll events disable middle-click event */
|
||||
cptkbd_data->middlebutton_state = 2;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Middle click events */
|
||||
if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) {
|
||||
if (value == 1) {
|
||||
cptkbd_data->middlebutton_state = 1;
|
||||
} else if (value == 0) {
|
||||
if (cptkbd_data->middlebutton_state == 1) {
|
||||
/* No scrolling inbetween, send middle-click */
|
||||
input_event(field->hidinput->input,
|
||||
EV_KEY, BTN_MIDDLE, 1);
|
||||
input_sync(field->hidinput->input);
|
||||
input_event(field->hidinput->input,
|
||||
EV_KEY, BTN_MIDDLE, 0);
|
||||
input_sync(field->hidinput->input);
|
||||
}
|
||||
cptkbd_data->middlebutton_state = 0;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -66,6 +66,7 @@ static const struct hid_device_id hid_quirks[] = {
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51), HID_QUIRK_NOGET },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PRO_WIRELESS_KM5221W), HID_QUIRK_ALWAYS_POLL },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
|
||||
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
|
||||
|
@ -362,10 +362,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
|
||||
|
||||
/*
|
||||
* Because we don't know the buffer length in the
|
||||
* I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
|
||||
* the transaction here.
|
||||
* I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
|
||||
* transaction here. Also disable the TX_EMPTY IRQ
|
||||
* while waiting for the data length byte to avoid the
|
||||
* bogus interrupts flood.
|
||||
*/
|
||||
if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
|
||||
if (flags & I2C_M_RECV_LEN) {
|
||||
dev->status |= STATUS_WRITE_IN_PROGRESS;
|
||||
intr_mask &= ~DW_IC_INTR_TX_EMPTY;
|
||||
break;
|
||||
} else if (buf_len > 0) {
|
||||
/* more bytes to be written */
|
||||
dev->status |= STATUS_WRITE_IN_PROGRESS;
|
||||
break;
|
||||
@ -401,6 +407,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
|
||||
msgs[dev->msg_read_idx].len = len;
|
||||
msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
|
||||
|
||||
/*
|
||||
* Received buffer length, re-enable TX_EMPTY interrupt
|
||||
* to resume the SMBUS transaction.
|
||||
*/
|
||||
regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
|
||||
DW_IC_INTR_TX_EMPTY);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
|
@ -735,15 +735,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
|
||||
return i801_check_post(priv, status);
|
||||
}
|
||||
|
||||
if (len == 1 && read_write == I2C_SMBUS_READ)
|
||||
smbcmd |= SMBHSTCNT_LAST_BYTE;
|
||||
outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
|
||||
|
||||
for (i = 1; i <= len; i++) {
|
||||
if (i == len && read_write == I2C_SMBUS_READ)
|
||||
smbcmd |= SMBHSTCNT_LAST_BYTE;
|
||||
outb_p(smbcmd, SMBHSTCNT(priv));
|
||||
|
||||
if (i == 1)
|
||||
outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
|
||||
SMBHSTCNT(priv));
|
||||
|
||||
status = i801_wait_byte_done(priv);
|
||||
if (status)
|
||||
goto exit;
|
||||
@ -766,9 +762,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
|
||||
data->block[0] = len;
|
||||
}
|
||||
|
||||
/* Retrieve/store value in SMBBLKDAT */
|
||||
if (read_write == I2C_SMBUS_READ)
|
||||
if (read_write == I2C_SMBUS_READ) {
|
||||
data->block[i] = inb_p(SMBBLKDAT(priv));
|
||||
if (i == len - 1)
|
||||
outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
|
||||
}
|
||||
|
||||
if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
|
||||
outb_p(data->block[i+1], SMBBLKDAT(priv));
|
||||
|
||||
|
@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (clk_freq == 0) {
|
||||
dev_err(dev, "clock-frequency is set to 0 in DT\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (of_get_child_count(np) > 1) {
|
||||
dev_err(dev, "P2WI only supports one slave device\n");
|
||||
return -EINVAL;
|
||||
|
@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
|
||||
*/
|
||||
static inline bool i2c_in_atomic_xfer_mode(void)
|
||||
{
|
||||
return system_state > SYSTEM_RUNNING && irqs_disabled();
|
||||
return system_state > SYSTEM_RUNNING && !preemptible();
|
||||
}
|
||||
|
||||
static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap)
|
||||
|
@@ -192,7 +192,7 @@
#define SLV_STATUS1_HJ_DIS BIT(18)
#define SLV_STATUS1_MR_DIS BIT(17)
#define SLV_STATUS1_PROT_ERR BIT(16)
#define SLV_STATUS1_DA(x) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_DA(s) (((s) & GENMASK(15, 9)) >> 9)
#define SLV_STATUS1_HAS_DA BIT(8)
#define SLV_STATUS1_DDR_RX_FULL BIT(7)
#define SLV_STATUS1_DDR_TX_FULL BIT(6)
@@ -1622,13 +1622,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
/* Device ID0 is reserved to describe this master. */
master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
master->free_rr_slots = GENMASK(master->maxdevs, 1);
master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

val = readl(master->regs + CONF_STATUS1);
master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);

spin_lock_init(&master->ibi.lock);
master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);

@@ -45,6 +45,7 @@
*
*/

#include <linux/bitfield.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
@@ -261,12 +262,6 @@ static u32 extract_speed(u16 linkstat)
return speed;
}

/* return the PCIe link speed from the given link status */
static u32 extract_width(u16 linkstat)
{
return (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
}

/* read the link status and set dd->{lbus_width,lbus_speed,lbus_info} */
static void update_lbus_info(struct hfi1_devdata *dd)
{
@@ -279,7 +274,7 @@ static void update_lbus_info(struct hfi1_devdata *dd)
return;
}

dd->lbus_width = extract_width(linkstat);
dd->lbus_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, linkstat);
dd->lbus_speed = extract_speed(linkstat);
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u", dd->lbus_speed, dd->lbus_width);

@@ -248,6 +248,7 @@ int mcb_device_register(struct mcb_bus *bus, struct mcb_device *dev)
return 0;

out:
put_device(&dev->dev);

return ret;
}

@@ -106,7 +106,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
return 0;

err:
put_device(&mdev->dev);
mcb_free_dev(mdev);

return ret;
}

@@ -8,6 +8,7 @@
* All rights reserved.
*/

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <media/i2c/adv7604.h>
#include <media/i2c/adv7842.h>
@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
(capa & PCI_EXP_LNKCAP_MLW) >> 4);
FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
cobalt_info("PCIe link control 0x%04x\n", ctrl);
cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
stat, get_link_speed(stat),
(stat & PCI_EXP_LNKSTA_NLW) >> 4);
FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));

/* Bus */
pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
capa, get_link_speed(capa),
(capa & PCI_EXP_LNKCAP_MLW) >> 4);
FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));

/* Slot */
pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
return (link & PCI_EXP_LNKSTA_NLW) >> 4;
return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
}

static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
if (!pci_is_pcie(pci_dev))
return 0;
pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
return (link & PCI_EXP_LNKCAP_MLW) >> 4;
return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
}

static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)

@@ -1282,7 +1282,7 @@ static int vfe_get(struct vfe_device *vfe)
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
goto error_pm_runtime_get;
goto error_pm_domain;
}
vfe->power_count++;

@@ -351,7 +351,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
idx++;

if (idx > HFI_BUFFER_TYPE_MAX)
if (idx >= HFI_BUFFER_TYPE_MAX)
return HFI_ERR_SESSION_INVALID_PARAMETER;

req_bytes -= sizeof(struct hfi_buffer_requirements);

@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
struct venus_caps *caps = core->caps, *cap;
unsigned long bit;

if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
return;

for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
cap = &caps[core->codecs_count++];
cap->codec = BIT(bit);
@@ -86,6 +89,9 @@ static void fill_profile_level(struct venus_caps *cap, const void *data,
{
const struct hfi_profile_level *pl = data;

if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
return;

memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
cap->num_pl += num;
}
@@ -111,6 +117,9 @@ fill_caps(struct venus_caps *cap, const void *data, unsigned int num)
{
const struct hfi_capability *caps = data;

if (cap->num_caps + num >= MAX_CAP_ENTRIES)
return;

memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
cap->num_caps += num;
}
@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct venus_caps *cap, const void *fmts,
{
const struct raw_formats *formats = fmts;

if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
return;

memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
cap->num_fmts += num_fmts;
}
@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
rawfmts[i].buftype = fmt->buffer_type;
i++;

if (i >= MAX_FMT_ENTRIES)
return;

if (pinfo->num_planes > MAX_PLANES)
break;

@@ -206,6 +206,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,

new_wr_idx = wr_idx + dwords;
wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));

if (wr_ptr < (u32 *)queue->qmem.kva ||
wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
return -EINVAL;

if (new_wr_idx < qsize) {
memcpy(wr_ptr, packet, dwords << 2);
} else {
@@ -273,6 +278,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
}

rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));

if (rd_ptr < (u32 *)queue->qmem.kva ||
rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
return -EINVAL;

dwords = *rd_ptr >> 2;
if (!dwords)
return -EINVAL;

@@ -2430,6 +2430,12 @@ static int imon_probe(struct usb_interface *interface,
goto fail;
}

if (first_if->dev.driver != interface->dev.driver) {
dev_err(&interface->dev, "inconsistent driver matching\n");
ret = -EINVAL;
goto fail;
}

if (ifnum == 0) {
ictx = imon_init_intf0(interface, id);
if (!ictx) {

@@ -15,7 +15,9 @@
#define SHARP_UNIT 40 /* us */
#define SHARP_BIT_PULSE (8 * SHARP_UNIT) /* 320us */
#define SHARP_BIT_0_PERIOD (25 * SHARP_UNIT) /* 1ms (680us space) */
#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680ms space) */
#define SHARP_BIT_1_PERIOD (50 * SHARP_UNIT) /* 2ms (1680us space) */
#define SHARP_BIT_0_SPACE (17 * SHARP_UNIT) /* 680us space */
#define SHARP_BIT_1_SPACE (42 * SHARP_UNIT) /* 1680us space */
#define SHARP_ECHO_SPACE (1000 * SHARP_UNIT) /* 40 ms */
#define SHARP_TRAILER_SPACE (125 * SHARP_UNIT) /* 5 ms (even longer) */

@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
.header_pulse = 0,
.header_space = 0,
.bit_pulse = SHARP_BIT_PULSE,
.bit_space[0] = SHARP_BIT_0_PERIOD,
.bit_space[1] = SHARP_BIT_1_PERIOD,
.bit_space[0] = SHARP_BIT_0_SPACE,
.bit_space[1] = SHARP_BIT_1_SPACE,
.trailer_pulse = SHARP_BIT_PULSE,
.trailer_space = SHARP_ECHO_SPACE,
.msb_first = 1,

@@ -286,7 +286,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
if (ret < 0)
goto out_kfree_raw;

count = ret;
/* drop trailing space */
if (!(ret % 2))
count = ret - 1;
else
count = ret;

txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
if (!txbuf) {

@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
rds->ta = alt;
rds->ms = true;
snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
freq / 16, ((freq & 0xf) * 10) / 16);
(freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
if (alt)
strscpy(rds->radiotext,
" The Radio Data System can switch between different Radio Texts ",

@@ -18,6 +18,7 @@

#include <linux/input.h>
#include <linux/sched/signal.h>
#include <linux/bitops.h>

#include "gspca.h"

@@ -1027,6 +1028,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
sd->params.exposure.expMode = 2;
sd->exposure_status = EXPOSURE_NORMAL;
}
if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
return -EINVAL;
currentexp = currentexp << sd->params.exposure.gain;
sd->params.exposure.gain = 0;
/* round down current exposure to nearest value */

@@ -79,6 +79,7 @@
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

static DEFINE_IDA(pci_endpoint_test_ida);

@@ -993,6 +994,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},

@@ -800,7 +800,6 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)

cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */
cmd_cfg |= CMD_CFG_ERROR; /* stop in case of error */

meson_mmc_set_response_bits(cmd, &cmd_cfg);

@@ -600,7 +600,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
return 0;
}

for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {

ret = device_property_read_u32(dev, td[i].otap_binding,
&sdhci_am654->otap_del_sel[i]);

@@ -2311,6 +2311,7 @@ static int vub300_probe(struct usb_interface *interface,
vub300->read_only =
(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
} else {
retval = -EINVAL;
goto error5;
}
usb_set_intfdata(interface, vub300);

@@ -420,9 +420,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
extra_size = 0;

/* Protection Register info */
if (extp->NumProtectionFields)
if (extp->NumProtectionFields) {
struct cfi_intelext_otpinfo *otp =
(struct cfi_intelext_otpinfo *)&extp->extra[0];

extra_size += (extp->NumProtectionFields - 1) *
sizeof(struct cfi_intelext_otpinfo);
sizeof(struct cfi_intelext_otpinfo);

if (extp_size >= sizeof(*extp) + extra_size) {
int i;

/* Do some byteswapping if necessary */
for (i = 0; i < extp->NumProtectionFields - 1; i++) {
otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
otp->FactGroups = le16_to_cpu(otp->FactGroups);
otp->UserGroups = le16_to_cpu(otp->UserGroups);
otp++;
}
}
}
}

if (extp->MinorVersion >= '1') {

@@ -1433,6 +1433,10 @@ static void bond_compute_features(struct bonding *bond)
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
bool was_up = !!(bond_dev->flags & IFF_UP);

dev_close(bond_dev);

bond_dev->header_ops = slave_dev->header_ops;

bond_dev->type = slave_dev->type;
@@ -1447,6 +1451,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
}
if (was_up)
dev_open(bond_dev, NULL);
}

/* On bonding slaves other than the currently active slave, suppress

@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;

reg <<= 2; /* reg num to offset */
mutex_lock(&sw_dev->device->bus->mdio_lock);
mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;

reg <<= 2; /* reg num to offset */
mutex_lock(&sw_dev->device->bus->mdio_lock);
mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
*val = lan9303_mdio_real_read(sw_dev->device, reg);
*val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
mutex_unlock(&sw_dev->device->bus->mdio_lock);

@@ -433,8 +433,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
.val = CONFIG0_MAXLEN_1536,
},
{
.max_l3_len = 1542,
.val = CONFIG0_MAXLEN_1542,
.max_l3_len = 1548,
.val = CONFIG0_MAXLEN_1548,
},
{
.max_l3_len = 9212,
@@ -1146,6 +1146,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
dma_addr_t mapping;
unsigned short mtu;
void *buffer;
int ret;

mtu = ETH_HLEN;
mtu += netdev->mtu;
@@ -1160,9 +1161,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word3 |= mtu;
}

if (skb->ip_summed != CHECKSUM_NONE) {
if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
* overwritten by the FCS.
*
* Just use software checksumming and bypass on bigger frames.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ret = skb_checksum_help(skb);
if (ret)
return ret;
}
word1 |= TSS_BYPASS_BIT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;

/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
if (skb->protocol == htons(ETH_P_IP)) {
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
@@ -1979,15 +2001,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}

static netdev_features_t gmac_fix_features(struct net_device *netdev,
netdev_features_t features)
{
if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
features &= ~GMAC_OFFLOAD_FEATURES;

return features;
}

static int gmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -2205,7 +2218,6 @@ static const struct net_device_ops gmac_351x_ops = {
.ndo_set_mac_address = gmac_set_mac_address,
.ndo_get_stats64 = gmac_get_stats64,
.ndo_change_mtu = gmac_change_mtu,
.ndo_fix_features = gmac_fix_features,
.ndo_set_features = gmac_set_features,
};

@@ -2463,11 +2475,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)

netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
/* We can handle jumbo frames up to 10236 bytes so, let's accept
* payloads of 10236 bytes minus VLAN and ethernet header
/* We can receive jumbo frames up to 10236 bytes but only
* transmit 2047 bytes so, let's accept payloads of 2047
* bytes minus VLAN and ethernet header
*/
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;

port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll,

@@ -502,7 +502,7 @@ union gmac_txdesc_3 {
#define SOF_BIT 0x80000000
#define EOF_BIT 0x40000000
#define EOFIE_BIT BIT(29)
#define MTU_SIZE_BIT_MASK 0x1fff
#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */

/* GMAC Tx Descriptor */
struct gmac_txdesc {
@@ -787,7 +787,7 @@ union gmac_config0 {
#define CONFIG0_MAXLEN_1536 0
#define CONFIG0_MAXLEN_1518 1
#define CONFIG0_MAXLEN_1522 2
#define CONFIG0_MAXLEN_1542 3
#define CONFIG0_MAXLEN_1548 3
#define CONFIG0_MAXLEN_9k 4 /* 9212 */
#define CONFIG0_MAXLEN_10k 5 /* 10236 */
#define CONFIG0_MAXLEN_1518__6 6

@@ -3974,7 +3974,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
u8 mac_addr_temp[ETH_ALEN] = {0};
int ret = 0;

if (h->ae_algo->ops->get_mac_addr)

@@ -2363,8 +2363,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}

static void hclgevf_reset_timer(struct timer_list *t)
{
struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);

hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
}

static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
#define HCLGEVF_RESET_DELAY 5

enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
@@ -2376,7 +2386,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)

switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
hclgevf_reset_task_schedule(hdev);
mod_timer(&hdev->reset_timer,
jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
break;
case HCLGEVF_VECTOR0_EVENT_MBX:
hclgevf_mbx_handler(hdev);
@@ -3269,6 +3280,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);

hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);

return 0;

@@ -274,6 +274,7 @@ struct hclgevf_dev {
enum hnae3_reset_type reset_level;
unsigned long reset_pending;
enum hnae3_reset_type reset_type;
struct timer_list reset_timer;

#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1

@@ -52,7 +52,7 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
u32 running_fw, stored_fw;
int err;

err = devlink_info_driver_name_put(req, DRIVER_NAME);
err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err)
return err;

@@ -267,9 +267,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;

e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;

if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -286,6 +283,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}

e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
mlx5e_route_lookup_ipv4_put(route_dev, n);
@@ -431,9 +430,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;

e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;

if (!(nud_state & NUD_VALID)) {
neigh_event_send(n, NULL);
/* the encap entry will be made valid on neigh update event
@@ -451,6 +447,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}

e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
mlx5e_route_lookup_ipv6_put(route_dev, n);

@@ -40,9 +40,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
{
struct mlx5_core_dev *mdev = priv->mdev;

strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, DRIVER_VERSION,
sizeof(drvinfo->version));
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),

@@ -61,14 +61,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
int count;

strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev),
fw_rev_sub(mdev), mdev->board_id);
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
if (count == sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,

@@ -39,7 +39,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev,
struct mlx5e_priv *priv = mlx5i_epriv(dev);

mlx5e_ethtool_get_drvinfo(priv, drvinfo);
strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]",
strlcpy(drvinfo->driver, KBUILD_MODNAME "[ib_ipoib]",
sizeof(drvinfo->driver));
}

@@ -77,7 +77,6 @@
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
@@ -228,7 +227,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
strncat(string, ",", remaining_size);

remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, DRIVER_NAME, remaining_size);
strncat(string, KBUILD_MODNAME, remaining_size);

remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
strncat(string, ",", remaining_size);
@@ -313,7 +312,7 @@ static int request_bar(struct pci_dev *pdev)
return -ENODEV;
}

err = pci_request_regions(pdev, DRIVER_NAME);
err = pci_request_regions(pdev, KBUILD_MODNAME);
if (err)
dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

@@ -1620,7 +1619,7 @@ void mlx5_recover_device(struct mlx5_core_dev *dev)
}

static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.name = KBUILD_MODNAME,
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
@@ -1646,6 +1645,9 @@ static int __init mlx5_init(void)
{
int err;

WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
"mlx5_core name not in sync with kernel module name");

get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

mlx5_core_verify_params();

@@ -42,9 +42,6 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>

#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"

extern uint mlx5_core_debug_mask;

#define mlx5_core_dbg(__dev, format, ...) \

@@ -2581,9 +2581,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
rx_mode &= ~AcceptMulticast;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||
tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_46 ||
tp->mac_version == RTL_GIGA_MAC_VER_48) {
tp->mac_version == RTL_GIGA_MAC_VER_35) {
/* accept all multicasts */
} else if (netdev_mc_empty(dev)) {
rx_mode &= ~AcceptMulticast;

@@ -3858,10 +3858,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}

read_again:
if (count >= limit)
break;

read_again:
buf1_len = 0;
buf2_len = 0;
entry = next_entry;

@@ -412,7 +412,7 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
return addr;
}

static int ipvlan_process_v4_outbound(struct sk_buff *skb)
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
@@ -454,13 +454,11 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
struct dst_entry *dst;
int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
.flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
@@ -470,27 +468,38 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
.flowi6_mark = skb->mark,
.flowi6_proto = ip6h->nexthdr,
};
struct dst_entry *dst;
int err;

dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
ret = dst->error;
dst = ip6_route_output(dev_net(dev), NULL, &fl6);
err = dst->error;
if (err) {
dst_release(dst);
goto err;
return err;
}
skb_dst_set(skb, dst);
return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;

err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
kfree_skb(skb);
return err;
}

memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

err = ip6_local_out(net, skb->sk, skb);
err = ip6_local_out(dev_net(dev), skb->sk, skb);
if (unlikely(net_xmit_eval(err)))
dev->stats.tx_errors++;
DEV_STATS_INC(dev, tx_errors);
else
ret = NET_XMIT_SUCCESS;
goto out;
err:
dev->stats.tx_errors++;
kfree_skb(skb);
out:
return ret;
}
#else

@@ -765,7 +765,7 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
if (!macvlan_passthru(vlan->port) && change & IFF_PROMISC)
dev_set_promiscuity(lowerdev,
dev->flags & IFF_PROMISC ? 1 : -1);

@@ -838,6 +838,7 @@ struct phylink *phylink_create(struct phylink_config *config,
pl->config = config;
if (config->type == PHYLINK_NETDEV) {
pl->netdev = to_net_dev(config->dev);
netif_carrier_off(pl->netdev);
} else if (config->type == PHYLINK_DEV) {
pl->dev = config->dev;
} else {

@@ -464,6 +464,10 @@ ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
case PPPIOCSMRU:
if (get_user(val, (int __user *) argp))
break;
if (val > U16_MAX) {
err = -EINVAL;
break;
}
if (val < PPP_MRU)
val = PPP_MRU;
ap->mru = val;
@@ -699,7 +703,7 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,

/* strip address/control field if present */
p = skb->data;
if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
if (skb->len >= 2 && p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
/* chop off address/control */
if (skb->len < 3)
goto err;

@@ -1139,7 +1139,7 @@ void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
memcpy(data, *ath10k_gstrings_stats,
memcpy(data, ath10k_gstrings_stats,
sizeof(ath10k_gstrings_stats));
}

@@ -827,12 +827,20 @@ static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,

static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int id;

for (id = 0; id < CE_COUNT_MAX; id++)
disable_irq(ar_snoc->ce_irqs[id].irq_line);
}

static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int id;

for (id = 0; id < CE_COUNT_MAX; id++)
enable_irq(ar_snoc->ce_irqs[id].irq_line);
}

static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
@@ -1048,6 +1056,8 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
goto err_free_rri;
}

ath10k_ce_enable_interrupts(ar);

return 0;

err_free_rri:
@@ -1209,8 +1219,8 @@ static int ath10k_snoc_request_irq(struct ath10k *ar)

for (id = 0; id < CE_COUNT_MAX; id++) {
ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
ath10k_snoc_per_engine_handler, 0,
ce_name[id], ar);
ath10k_snoc_per_engine_handler,
IRQF_NO_AUTOEN, ce_name[id], ar);
if (ret) {
ath10k_err(ar,
"failed to register IRQ handler for CE %d: %d\n",

@@ -1578,14 +1578,20 @@ static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
u8 pdev_id;

pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);

rcu_read_lock();

ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
return;
goto out;
}

trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
ar->ab->pktlog_defs_checksum);

out:
rcu_read_unlock();
}

static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,

@@ -6355,6 +6355,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
ev->freq_offset, ev->sidx);

rcu_read_lock();

ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);

if (!ar) {
@@ -6372,6 +6374,8 @@ ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff
ieee80211_radar_detected(ar->hw);

exit:
rcu_read_unlock();

kfree(tb);
}

@@ -6401,15 +6405,19 @@ ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
ath11k_dbg(ab, ATH11K_DBG_WMI,
"pdev temperature ev temp %d pdev_id %d\n", ev->temp, ev->pdev_id);

rcu_read_lock();

ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
if (!ar) {
ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
kfree(tb);
return;
goto exit;
}

ath11k_thermal_event_temperature(ar, ev->temp);

exit:
rcu_read_unlock();

kfree(tb);
}

@@ -1284,7 +1284,7 @@ void ath9k_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
memcpy(data, *ath9k_gstrings_stats,
memcpy(data, ath9k_gstrings_stats,
sizeof(ath9k_gstrings_stats));
}

@@ -428,7 +428,7 @@ void ath9k_htc_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
memcpy(data, *ath9k_htc_gstrings_stats,
memcpy(data, ath9k_htc_gstrings_stats,
sizeof(ath9k_htc_gstrings_stats));
}

@@ -532,16 +532,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

/*
* For data packets rate info comes from the fw. Only
* set rate/antenna during connection establishment or in case
* no station is given.
* For data and mgmt packets rate info comes from the fw. Only
* set rate/antenna for injected frames with fixed rate, or
* when no sta is given.
*/
if (!sta || !ieee80211_is_data(hdr->frame_control) ||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
if (unlikely(!sta ||
info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags =
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
hdr->frame_control);
} else if (!ieee80211_is_data(hdr->frame_control) ||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
/* These are important frames */
flags |= IWL_TX_FLAGS_HIGH_PRI;
}

if (mvm->trans->trans_cfg->device_family >=

@@ -2547,7 +2547,7 @@ static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
memcpy(data, *mac80211_hwsim_gstrings_stats,
memcpy(data, mac80211_hwsim_gstrings_stats,
sizeof(mac80211_hwsim_gstrings_stats));
}

@@ -503,7 +503,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
return ret;
}

static int __exit exynos_pcie_remove(struct platform_device *pdev)
static int exynos_pcie_remove(struct platform_device *pdev)
{
struct exynos_pcie *ep = platform_get_drvdata(pdev);

@@ -522,7 +522,7 @@ static const struct of_device_id exynos_pcie_of_match[] = {
};

static struct platform_driver exynos_pcie_driver = {
.remove = __exit_p(exynos_pcie_remove),
.remove = exynos_pcie_remove,
.driver = {
.name = "exynos-pcie",
.of_match_table = exynos_pcie_of_match,

@@ -1142,7 +1142,7 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};

static int __init ks_pcie_probe(struct platform_device *pdev)
static int ks_pcie_probe(struct platform_device *pdev)
{
const struct dw_pcie_host_ops *host_ops;
const struct dw_pcie_ep_ops *ep_ops;
@@ -1338,7 +1338,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
return ret;
}

static int __exit ks_pcie_remove(struct platform_device *pdev)
static int ks_pcie_remove(struct platform_device *pdev)
{
struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
struct device_link **link = ks_pcie->link;
@@ -1354,9 +1354,9 @@ static int __exit ks_pcie_remove(struct platform_device *pdev)
return 0;
}

static struct platform_driver ks_pcie_driver __refdata = {
static struct platform_driver ks_pcie_driver = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
.remove = ks_pcie_remove,
.driver = {
.name = "keystone-pcie",
.of_match_table = of_match_ptr(ks_pcie_of_match),

@@ -7,6 +7,7 @@
* Author: Vidya Sagar <vidyas@nvidia.com>
*/

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
@@ -346,8 +347,7 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
*/
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
if (val & PCI_EXP_LNKSTA_LBMS) {
current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
PCI_EXP_LNKSTA_NLW_SHIFT;
current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
if (pcie->init_link_width > current_link_width) {
dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
@@ -731,8 +731,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)

val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKSTA);
pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
PCI_EXP_LNKSTA_NLW_SHIFT;
pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);

val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
PCI_EXP_LNKCTL);
@@ -885,7 +884,7 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
/* Configure Max lane width from DT */
val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
val &= ~PCI_EXP_LNKCAP_MLW;
val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

config_gen3_gen4_eq_presets(pcie);

@@ -909,7 +909,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
int acpi_state, d_max;

if (pdev->no_d3cold)
if (pdev->no_d3cold || !pdev->d3cold_allowed)
d_max = ACPI_STATE_D3_HOT;
else
d_max = ACPI_STATE_D3_COLD;

@@ -500,10 +500,7 @@ static ssize_t d3cold_allowed_store(struct device *dev,
return -EINVAL;

pdev->d3cold_allowed = !!val;
if (pdev->d3cold_allowed)
pci_d3cold_enable(pdev);
else
pci_d3cold_disable(pdev);
pci_bridge_d3_update(pdev);

pm_runtime_resume(dev);