Merge 6.1.8 into android14-6.1

Changes in 6.1.8
	dma-buf: fix dma_buf_export init order v2
	btrfs: fix trace event name typo for FLUSH_DELAYED_REFS
	wifi: iwlwifi: fw: skip PPAG for JF
	pNFS/filelayout: Fix coalescing test for single DS
	selftests/bpf: check null propagation only neither reg is PTR_TO_BTF_ID
	net: ethernet: marvell: octeontx2: Fix uninitialized variable warning
	tools/virtio: initialize spinlocks in vring_test.c
	vdpa/mlx5: Return error on vlan ctrl commands if not supported
	vdpa/mlx5: Avoid using reslock in event_handler
	vdpa/mlx5: Avoid overwriting CVQ iotlb
	virtio_pci: modify ENOENT to EINVAL
	vduse: Validate vq_num in vduse_validate_config()
	vdpa_sim_net: should not drop the multicast/broadcast packet
	net/ethtool/ioctl: return -EOPNOTSUPP if we have no phy stats
	r8169: move rtl_wol_enable_rx() and rtl_prepare_power_down()
	r8169: fix dmar pte write access is not set error
	bpf: keep a reference to the mm, in case the task is dead.
	RDMA/srp: Move large values to a new enum for gcc13
	selftests: net: fix cmsg_so_mark.sh test hang
	btrfs: always report error in run_one_delayed_ref()
	x86/asm: Fix an assembler warning with current binutils
	f2fs: let's avoid panic if extent_tree is not created
	perf/x86/rapl: Treat Tigerlake like Icelake
	cifs: fix race in assemble_neg_contexts()
	memblock tests: Fix compilation error.
	perf/x86/rapl: Add support for Intel Meteor Lake
	perf/x86/rapl: Add support for Intel Emerald Rapids
	of: fdt: Honor CONFIG_CMDLINE* even without /chosen node, take 2
	fbdev: omapfb: avoid stack overflow warning
	Bluetooth: hci_sync: Fix use HCI_OP_LE_READ_BUFFER_SIZE_V2
	Bluetooth: hci_qca: Fix driver shutdown on closed serdev
	wifi: brcmfmac: fix regression for Broadcom PCIe wifi devices
	wifi: mac80211: fix MLO + AP_VLAN check
	wifi: mac80211: reset multiple BSSID options in stop_ap()
	wifi: mac80211: sdata can be NULL during AMPDU start
	wifi: mac80211: fix initialization of rx->link and rx->link_sta
	nommu: fix memory leak in do_mmap() error path
	nommu: fix do_munmap() error path
	nommu: fix split_vma() map_count error
	proc: fix PIE proc-empty-vm, proc-pid-vm tests
	Add exception protection processing for vd in axi_chan_handle_err function
	LoongArch: Add HWCAP_LOONGARCH_CPUCFG to elf_hwcap
	zonefs: Detect append writes at invalid locations
	nilfs2: fix general protection fault in nilfs_btree_insert()
	mm/shmem: restore SHMEM_HUGE_DENY precedence over MADV_COLLAPSE
	hugetlb: unshare some PMDs when splitting VMAs
	mm/khugepaged: fix collapse_pte_mapped_thp() to allow anon_vma
	serial: stm32: Merge hard IRQ and threaded IRQ handling into single IRQ handler
	Revert "serial: stm32: Merge hard IRQ and threaded IRQ handling into single IRQ handler"
	xhci-pci: set the dma max_seg_size
	usb: xhci: Check endpoint is valid before dereferencing it
	xhci: Fix null pointer dereference when host dies
	xhci: Add update_hub_device override for PCI xHCI hosts
	xhci: Add a flag to disable USB3 lpm on a xhci root port level.
	usb: acpi: add helper to check port lpm capability using acpi _DSM
	xhci: Detect lpm incapable xHC USB3 roothub ports from ACPI tables
	prlimit: do_prlimit needs to have a speculation check
	USB: serial: option: add Quectel EM05-G (GR) modem
	USB: serial: option: add Quectel EM05-G (CS) modem
	USB: serial: option: add Quectel EM05-G (RS) modem
	USB: serial: option: add Quectel EC200U modem
	USB: serial: option: add Quectel EM05CN (SG) modem
	USB: serial: option: add Quectel EM05CN modem
	staging: vchiq_arm: fix enum vchiq_status return types
	USB: misc: iowarrior: fix up header size for USB_DEVICE_ID_CODEMERCS_IOW100
	usb: misc: onboard_hub: Invert driver registration order
	usb: misc: onboard_hub: Move 'attach' work to the driver
	misc: fastrpc: Fix use-after-free and race in fastrpc_map_find
	misc: fastrpc: Don't remove map on creater_process and device_release
	misc: fastrpc: Fix use-after-free race condition for maps
	usb: core: hub: disable autosuspend for TI TUSB8041
	comedi: adv_pci1760: Fix PWM instruction handling
	ACPI: PRM: Check whether EFI runtime is available
	mmc: sunxi-mmc: Fix clock refcount imbalance during unbind
	mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting
	mm/hugetlb: fix PTE marker handling in hugetlb_change_protection()
	mm/hugetlb: fix uffd-wp handling for migration entries in hugetlb_change_protection()
	mm/hugetlb: pre-allocate pgtable pages for uffd wr-protects
	mm/userfaultfd: enable writenotify while userfaultfd-wp is enabled for a VMA
	mm/MADV_COLLAPSE: don't expand collapse when vm_end is past requested end
	btrfs: add extra error messages to cover non-ENOMEM errors from device_add_list()
	btrfs: fix missing error handling when logging directory items
	btrfs: fix directory logging due to race with concurrent index key deletion
	btrfs: add missing setup of log for full commit at add_conflicting_inode()
	btrfs: do not abort transaction on failure to write log tree when syncing log
	btrfs: do not abort transaction on failure to update log root
	btrfs: qgroup: do not warn on record without old_roots populated
	btrfs: fix invalid leaf access due to inline extent during lseek
	btrfs: fix race between quota rescan and disable leading to NULL pointer deref
	cifs: do not include page data when checking signature
	thunderbolt: Disable XDomain lane 1 only in software connection manager
	thunderbolt: Use correct function to calculate maximum USB3 link rate
	thunderbolt: Do not report errors if on-board retimers are found
	thunderbolt: Do not call PM runtime functions in tb_retimer_scan()
	riscv: dts: sifive: fu740: fix size of pcie 32bit memory
	bpf: restore the ebpf program ID for BPF_AUDIT_UNLOAD and PERF_BPF_EVENT_PROG_UNLOAD
	tty: serial: qcom-geni-serial: fix slab-out-of-bounds on RX FIFO buffer
	tty: fix possible null-ptr-defer in spk_ttyio_release
	pktcdvd: check for NULL return after calling bio_split_to_limits()
	io_uring/poll: don't reissue in case of poll race on multishot request
	mptcp: explicitly specify sock family at subflow creation time
	mptcp: netlink: respect v4/v6-only sockets
	selftests: mptcp: userspace: validate v4-v6 subflows mix
	USB: gadgetfs: Fix race between mounting and unmounting
	USB: serial: cp210x: add SCALANCE LPE-9000 device id
	usb: cdns3: remove fetched trb from cache before dequeuing
	usb: host: ehci-fsl: Fix module alias
	usb: musb: fix error return code in omap2430_probe()
	usb: typec: tcpm: Fix altmode re-registration causes sysfs create fail
	usb: typec: altmodes/displayport: Add pin assignment helper
	usb: typec: altmodes/displayport: Fix pin assignment calculation
	usb: gadget: g_webcam: Send color matching descriptor per frame
	USB: gadget: Add ID numbers to configfs-gadget driver names
	usb: gadget: f_ncm: fix potential NULL ptr deref in ncm_bitrate()
	usb-storage: apply IGNORE_UAS only for HIKSEMI MD202 on RTL9210
	arm64: dts: imx8mp: correct usb clocks
	dt-bindings: phy: g12a-usb2-phy: fix compatible string documentation
	dt-bindings: phy: g12a-usb3-pcie-phy: fix compatible string documentation
	serial: pch_uart: Pass correct sg to dma_unmap_sg()
	dmaengine: lgm: Move DT parsing after initialization
	dmaengine: tegra210-adma: fix global intr clear
	dmaengine: idxd: Let probe fail when workqueue cannot be enabled
	dmaengine: idxd: Prevent use after free on completion memory
	dmaengine: idxd: Do not call DMX TX callbacks during workqueue disable
	serial: amba-pl011: fix high priority character transmission in rs486 mode
	serial: atmel: fix incorrect baudrate setup
	serial: exar: Add support for Sealevel 7xxxC serial cards
	gsmi: fix null-deref in gsmi_get_variable
	mei: bus: fix unlink on bus in error path
	mei: me: add meteor lake point M DID
	VMCI: Use threaded irqs instead of tasklets
	ARM: dts: qcom: apq8084-ifc6540: fix overriding SDHCI
	ARM: omap1: fix !ARCH_OMAP1_ANY link failures
	drm/amdgpu: fix amdgpu_job_free_resources v2
	drm/amdgpu: allow multipipe policy on ASICs with one MEC
	drm/amdgpu: Correct the power calcultion for Renior/Cezanne.
	drm/i915: re-disable RC6p on Sandy Bridge
	drm/i915/display: Check source height is > 0
	drm/i915: Allow switching away via vga-switcheroo if uninitialized
	drm/i915: Remove unused variable
	drm/amd/display: Fix set scaling doesn't work
	drm/amd/display: Calculate output_color_space after pixel encoding adjustment
	drm/amd/display: Fix COLOR_SPACE_YCBCR2020_TYPE matrix
	drm/amd/display: disable S/G display on DCN 3.1.5
	drm/amd/display: disable S/G display on DCN 3.1.4
	cifs: reduce roundtrips on create/qinfo requests
	fs/ntfs3: Fix attr_punch_hole() null pointer derenference
	arm64: efi: Execute runtime services from a dedicated stack
	efi: rt-wrapper: Add missing include
	panic: Separate sysctl logic from CONFIG_SMP
	exit: Put an upper limit on how often we can oops
	exit: Expose "oops_count" to sysfs
	exit: Allow oops_limit to be disabled
	panic: Consolidate open-coded panic_on_warn checks
	panic: Introduce warn_limit
	panic: Expose "warn_count" to sysfs
	docs: Fix path paste-o for /sys/kernel/warn_count
	exit: Use READ_ONCE() for all oops/warn limit reads
	x86/fpu: Use _Alignof to avoid undefined behavior in TYPE_ALIGN
	drm/amdgpu/discovery: enable soc21 common for GC 11.0.4
	drm/amdgpu/discovery: enable gmc v11 for GC 11.0.4
	drm/amdgpu/discovery: enable gfx v11 for GC 11.0.4
	drm/amdgpu/discovery: enable mes support for GC v11.0.4
	drm/amdgpu: set GC 11.0.4 family
	drm/amdgpu/discovery: set the APU flag for GC 11.0.4
	drm/amdgpu: add gfx support for GC 11.0.4
	drm/amdgpu: add gmc v11 support for GC 11.0.4
	drm/amdgpu/discovery: add PSP IP v13.0.11 support
	drm/amdgpu/pm: enable swsmu for SMU IP v13.0.11
	drm/amdgpu: add smu 13 support for smu 13.0.11
	drm/amdgpu/pm: add GFXOFF control IP version check for SMU IP v13.0.11
	drm/amdgpu/soc21: add mode2 asic reset for SMU IP v13.0.11
	drm/amdgpu/pm: use the specific mailbox registers only for SMU IP v13.0.4
	drm/amdgpu/discovery: enable nbio support for NBIO v7.7.1
	drm/amdgpu: enable PSP IP v13.0.11 support
	drm/amdgpu: enable GFX IP v11.0.4 CG support
	drm/amdgpu: enable GFX Power Gating for GC IP v11.0.4
	drm/amdgpu: enable GFX Clock Gating control for GC IP v11.0.4
	drm/amdgpu: add tmz support for GC 11.0.1
	drm/amdgpu: add tmz support for GC IP v11.0.4
	drm/amdgpu: correct MEC number for gfx11 APUs
	octeontx2-pf: Avoid use of GFP_KERNEL in atomic context
	net/ulp: use consistent error code when blocking ULP
	octeontx2-pf: Fix the use of GFP_KERNEL in atomic context on rt
	net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
	block: mq-deadline: Rename deadline_is_seq_writes()
	Revert "wifi: mac80211: fix memory leak in ieee80211_if_add()"
	soc: qcom: apr: Make qcom,protection-domain optional again
	Linux 6.1.8

Change-Id: I35d5b5a1ed4822eddb2fc8b29b323b36f7d11926
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Committed by Greg Kroah-Hartman on 2023-01-25 16:02:49 +00:00
commit ee921ef7b4
172 changed files with 1634 additions and 858 deletions


@ -0,0 +1,6 @@
What: /sys/kernel/oops_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Oopsed since last boot.


@ -0,0 +1,6 @@
What: /sys/kernel/warn_count
Date: November 2022
KernelVersion: 6.2.0
Contact: Linux Kernel Hardening List <linux-hardening@vger.kernel.org>
Description:
Shows how many times the system has Warned since last boot.


@ -667,6 +667,15 @@ This is the default behavior.
an oops event is detected.
oops_limit
==========
Number of kernel oopses after which the kernel should panic when
``panic_on_oops`` is not set. Setting this to 0 disables checking
the count. Setting this to 1 has the same effect as setting
``panic_on_oops=1``. The default value is 10000.
osrelease, ostype & version
===========================
@ -1523,6 +1532,16 @@ entry will default to 2 instead of 0.
2 Unprivileged calls to ``bpf()`` are disabled
= =============================================================
warn_limit
==========
Number of kernel warnings after which the kernel should panic when
``panic_on_warn`` is not set. Setting this to 0 disables checking
the warning count. Setting this to 1 has the same effect as setting
``panic_on_warn=1``. The default value is 0.
watchdog
========
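
(For reference only, not part of this merge: a minimal user-space C sketch of how the counters and limits documented above could be inspected, assuming the /sys/kernel files from the ABI entries and the standard /proc/sys/kernel sysctl paths.)

#include <stdio.h>

/* Read a single integer knob from sysfs/procfs; returns -1 on any error. */
static long read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("oops_count: %ld\n", read_knob("/sys/kernel/oops_count"));
	printf("warn_count: %ld\n", read_knob("/sys/kernel/warn_count"));
	printf("oops_limit: %ld\n", read_knob("/proc/sys/kernel/oops_limit"));
	printf("warn_limit: %ld\n", read_knob("/proc/sys/kernel/warn_limit"));
	return 0;
}

(Each value prints as -1 on kernels that do not expose the corresponding file.)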


@ -2,7 +2,7 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#"
$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB2 PHY
@ -13,8 +13,8 @@ maintainers:
properties:
compatible:
enum:
- amlogic,meson-g12a-usb2-phy
- amlogic,meson-a1-usb2-phy
- amlogic,g12a-usb2-phy
- amlogic,a1-usb2-phy
reg:
maxItems: 1
@ -68,7 +68,7 @@ additionalProperties: false
examples:
- |
phy@36000 {
compatible = "amlogic,meson-g12a-usb2-phy";
compatible = "amlogic,g12a-usb2-phy";
reg = <0x36000 0x2000>;
clocks = <&xtal>;
clock-names = "xtal";


@ -2,7 +2,7 @@
# Copyright 2019 BayLibre, SAS
%YAML 1.2
---
$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB3 + PCIE Combo PHY
@ -13,7 +13,7 @@ maintainers:
properties:
compatible:
enum:
- amlogic,meson-g12a-usb3-pcie-phy
- amlogic,g12a-usb3-pcie-phy
reg:
maxItems: 1
@ -49,7 +49,7 @@ additionalProperties: false
examples:
- |
phy@46000 {
compatible = "amlogic,meson-g12a-usb3-pcie-phy";
compatible = "amlogic,g12a-usb3-pcie-phy";
reg = <0x46000 0x2000>;
clocks = <&ref_clk>;
clock-names = "ref_clk";


@ -11120,6 +11120,8 @@ M: Kees Cook <keescook@chromium.org>
L: linux-hardening@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/hardening
F: Documentation/ABI/testing/sysfs-kernel-oops_count
F: Documentation/ABI/testing/sysfs-kernel-warn_count
F: include/linux/overflow.h
F: include/linux/randomize_kstack.h
F: mm/usercopy.c


@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
SUBLEVEL = 7
SUBLEVEL = 8
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth


@ -19,16 +19,16 @@ soc {
serial@f995e000 {
status = "okay";
};
sdhci@f9824900 {
bus-width = <8>;
non-removable;
status = "okay";
};
sdhci@f98a4900 {
cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
bus-width = <4>;
};
};
};
&sdhc_1 {
bus-width = <8>;
non-removable;
status = "okay";
};
&sdhc_2 {
cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
bus-width = <4>;
};


@ -419,7 +419,7 @@ blsp2_uart2: serial@f995e000 {
status = "disabled";
};
mmc@f9824900 {
sdhc_1: mmc@f9824900 {
compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
reg-names = "hc", "core";
@ -432,7 +432,7 @@ mmc@f9824900 {
status = "disabled";
};
mmc@f98a4900 {
sdhc_2: mmc@f98a4900 {
compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
reg-names = "hc", "core";


@ -4,6 +4,7 @@ menuconfig ARCH_OMAP1
depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
depends on CPU_LITTLE_ENDIAN
depends on ATAGS
select ARCH_OMAP
select ARCH_HAS_HOLES_MEMORYMODEL
select ARCH_OMAP
select CLKSRC_MMIO
@ -45,10 +46,6 @@ config ARCH_OMAP16XX
select CPU_ARM926T
select OMAP_DM_TIMER
config ARCH_OMAP1_ANY
select ARCH_OMAP
def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX
config ARCH_OMAP
bool


@ -3,8 +3,6 @@
# Makefile for the linux kernel.
#
ifdef CONFIG_ARCH_OMAP1_ANY
# Common support
obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
serial.o devices.o dma.o omap-dma.o fb.o
@ -59,5 +57,3 @@ obj-$(CONFIG_ARCH_OMAP730) += gpio7xx.o
obj-$(CONFIG_ARCH_OMAP850) += gpio7xx.o
obj-$(CONFIG_ARCH_OMAP15XX) += gpio15xx.o
obj-$(CONFIG_ARCH_OMAP16XX) += gpio16xx.o
endif


@ -22,17 +22,14 @@
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
*/
static struct map_desc omap_io_desc[] __initdata = {
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
static struct map_desc omap7xx_io_desc[] __initdata = {
{
.virtual = OMAP1_IO_VIRT,
.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
.length = OMAP1_IO_SIZE,
.type = MT_DEVICE
}
};
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
static struct map_desc omap7xx_io_desc[] __initdata = {
},
{
.virtual = OMAP7XX_DSP_BASE,
.pfn = __phys_to_pfn(OMAP7XX_DSP_START),
@ -49,6 +46,12 @@ static struct map_desc omap7xx_io_desc[] __initdata = {
#ifdef CONFIG_ARCH_OMAP15XX
static struct map_desc omap1510_io_desc[] __initdata = {
{
.virtual = OMAP1_IO_VIRT,
.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
.length = OMAP1_IO_SIZE,
.type = MT_DEVICE
},
{
.virtual = OMAP1510_DSP_BASE,
.pfn = __phys_to_pfn(OMAP1510_DSP_START),
@ -65,6 +68,12 @@ static struct map_desc omap1510_io_desc[] __initdata = {
#if defined(CONFIG_ARCH_OMAP16XX)
static struct map_desc omap16xx_io_desc[] __initdata = {
{
.virtual = OMAP1_IO_VIRT,
.pfn = __phys_to_pfn(OMAP1_IO_PHYS),
.length = OMAP1_IO_SIZE,
.type = MT_DEVICE
},
{
.virtual = OMAP16XX_DSP_BASE,
.pfn = __phys_to_pfn(OMAP16XX_DSP_START),
@ -79,18 +88,9 @@ static struct map_desc omap16xx_io_desc[] __initdata = {
};
#endif
/*
* Maps common IO regions for omap1
*/
static void __init omap1_map_common_io(void)
{
iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
}
#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
void __init omap7xx_map_io(void)
{
omap1_map_common_io();
iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
}
#endif
@ -98,7 +98,6 @@ void __init omap7xx_map_io(void)
#ifdef CONFIG_ARCH_OMAP15XX
void __init omap15xx_map_io(void)
{
omap1_map_common_io();
iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
}
#endif
@ -106,7 +105,6 @@ void __init omap15xx_map_io(void)
#if defined(CONFIG_ARCH_OMAP16XX)
void __init omap16xx_map_io(void)
{
omap1_map_common_io();
iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
}
#endif


@ -89,7 +89,6 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
#define OMAP1610_MCBSP2_BASE 0xfffb1000
#define OMAP1610_MCBSP3_BASE 0xe1017000
#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
struct resource omap7xx_mcbsp_res[][6] = {
{
{
@ -159,14 +158,7 @@ static struct omap_mcbsp_platform_data omap7xx_mcbsp_pdata[] = {
};
#define OMAP7XX_MCBSP_RES_SZ ARRAY_SIZE(omap7xx_mcbsp_res[1])
#define OMAP7XX_MCBSP_COUNT ARRAY_SIZE(omap7xx_mcbsp_res)
#else
#define omap7xx_mcbsp_res_0 NULL
#define omap7xx_mcbsp_pdata NULL
#define OMAP7XX_MCBSP_RES_SZ 0
#define OMAP7XX_MCBSP_COUNT 0
#endif
#ifdef CONFIG_ARCH_OMAP15XX
struct resource omap15xx_mcbsp_res[][6] = {
{
{
@ -266,14 +258,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
};
#define OMAP15XX_MCBSP_RES_SZ ARRAY_SIZE(omap15xx_mcbsp_res[1])
#define OMAP15XX_MCBSP_COUNT ARRAY_SIZE(omap15xx_mcbsp_res)
#else
#define omap15xx_mcbsp_res_0 NULL
#define omap15xx_mcbsp_pdata NULL
#define OMAP15XX_MCBSP_RES_SZ 0
#define OMAP15XX_MCBSP_COUNT 0
#endif
#ifdef CONFIG_ARCH_OMAP16XX
struct resource omap16xx_mcbsp_res[][6] = {
{
{
@ -373,12 +358,6 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
};
#define OMAP16XX_MCBSP_RES_SZ ARRAY_SIZE(omap16xx_mcbsp_res[1])
#define OMAP16XX_MCBSP_COUNT ARRAY_SIZE(omap16xx_mcbsp_res)
#else
#define omap16xx_mcbsp_res_0 NULL
#define omap16xx_mcbsp_pdata NULL
#define OMAP16XX_MCBSP_RES_SZ 0
#define OMAP16XX_MCBSP_COUNT 0
#endif
static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
struct omap_mcbsp_platform_data *config, int size)


@ -106,13 +106,6 @@
#define OMAP7XX_IDLECT3 0xfffece24
#define OMAP7XX_IDLE_LOOP_REQUEST 0x0C00
#if !defined(CONFIG_ARCH_OMAP730) && \
!defined(CONFIG_ARCH_OMAP850) && \
!defined(CONFIG_ARCH_OMAP15XX) && \
!defined(CONFIG_ARCH_OMAP16XX)
#warning "Power management for this processor not implemented yet"
#endif
#ifndef __ASSEMBLER__
#include <linux/clk.h>


@ -1279,7 +1279,7 @@ usb3_0: usb@32f10100 {
reg = <0x32f10100 0x8>,
<0x381f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
<&clk IMX8MP_CLK_USB_ROOT>;
<&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
@ -1292,9 +1292,9 @@ usb3_0: usb@32f10100 {
usb_dwc3_0: usb@38100000 {
compatible = "snps,dwc3";
reg = <0x38100000 0x10000>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
<&clk IMX8MP_CLK_USB_ROOT>;
<&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy0>, <&usb3_phy0>;
@ -1321,7 +1321,7 @@ usb3_1: usb@32f10108 {
reg = <0x32f10108 0x8>,
<0x382f0000 0x20>;
clocks = <&clk IMX8MP_CLK_HSIO_ROOT>,
<&clk IMX8MP_CLK_USB_ROOT>;
<&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "hsio", "suspend";
interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&hsio_blk_ctrl IMX8MP_HSIOBLK_PD_USB>;
@ -1334,9 +1334,9 @@ usb3_1: usb@32f10108 {
usb_dwc3_1: usb@38200000 {
compatible = "snps,dwc3";
reg = <0x38200000 0x10000>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
clocks = <&clk IMX8MP_CLK_USB_ROOT>,
<&clk IMX8MP_CLK_USB_CORE_REF>,
<&clk IMX8MP_CLK_USB_ROOT>;
<&clk IMX8MP_CLK_USB_SUSP>;
clock-names = "bus_early", "ref", "suspend";
interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usb3_phy1>, <&usb3_phy1>;


@ -25,6 +25,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
({ \
efi_virtmap_load(); \
__efi_fpsimd_begin(); \
spin_lock(&efi_rt_lock); \
})
#undef arch_efi_call_virt
@ -33,10 +34,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
#define arch_efi_call_virt_teardown() \
({ \
spin_unlock(&efi_rt_lock); \
__efi_fpsimd_end(); \
efi_virtmap_unload(); \
})
extern spinlock_t efi_rt_lock;
efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...);
#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)


@ -4,6 +4,7 @@
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
SYM_FUNC_START(__efi_rt_asm_wrapper)
stp x29, x30, [sp, #-32]!
@ -16,6 +17,12 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
*/
stp x1, x18, [sp, #16]
ldr_l x16, efi_rt_stack_top
mov sp, x16
#ifdef CONFIG_SHADOW_CALL_STACK
str x18, [sp, #-16]!
#endif
/*
* We are lucky enough that no EFI runtime services take more than
* 5 arguments, so all are passed in registers rather than via the
@ -29,6 +36,7 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
mov x4, x6
blr x8
mov sp, x29
ldp x1, x2, [sp, #16]
cmp x2, x18
ldp x29, x30, [sp], #32
@ -42,6 +50,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper)
* called with preemption disabled and a separate shadow stack is used
* for interrupts.
*/
mov x18, x2
#ifdef CONFIG_SHADOW_CALL_STACK
ldr_l x18, efi_rt_stack_top
ldr x18, [x18, #-16]
#endif
b efi_handle_corrupted_x18 // tail call
SYM_FUNC_END(__efi_rt_asm_wrapper)


@ -144,3 +144,30 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
return s;
}
DEFINE_SPINLOCK(efi_rt_lock);
asmlinkage u64 *efi_rt_stack_top __ro_after_init;
/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);
static int __init arm64_efi_rt_init(void)
{
void *p;
if (!efi_enabled(EFI_RUNTIME_SERVICES))
return 0;
p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
NUMA_NO_NODE, &&l);
l: if (!p) {
pr_warn("Failed to allocate EFI runtime stack\n");
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
return -ENOMEM;
}
efi_rt_stack_top = p + THREAD_SIZE;
return 0;
}
core_initcall(arm64_efi_rt_init);


@ -94,7 +94,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
elf_hwcap |= HWCAP_LOONGARCH_CRC32;
elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
config = read_cpucfg(LOONGARCH_CPUCFG1);
if (config & CPUCFG1_UAL) {


@ -328,7 +328,7 @@ pcie@e00000000 {
bus-range = <0x0 0xff>;
ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */
<0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */
<0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */
<0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x10000000>, /* mem */
<0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */
num-lanes = <0x8>;
interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;


@ -800,13 +800,18 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &model_spr),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &model_skl),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);


@ -133,9 +133,6 @@ static void __init fpu__init_system_generic(void)
fpu__init_system_mxcsr();
}
/* Get alignment of the TYPE. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
/*
* Enforce that 'MEMBER' is the last field of 'TYPE'.
*
@ -143,8 +140,8 @@ static void __init fpu__init_system_generic(void)
* because that's how C aligns structs.
*/
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
BUILD_BUG_ON(sizeof(TYPE) != ALIGN(offsetofend(TYPE, MEMBER), \
TYPE_ALIGN(TYPE)))
BUILD_BUG_ON(sizeof(TYPE) != \
ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
/*
* We append the 'struct fpu' to the task_struct:
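
(Aside, not part of this diff: a standalone C11 sketch using a hypothetical struct example, showing that _Alignof() yields the same alignment the removed offsetof()-based TYPE_ALIGN() macro computed, without defining a type inside offsetof(), which is undefined behavior in C11.)

#include <stddef.h>
#include <stdio.h>

/* The old trick: infer TYPE's alignment from the padding after a char. */
#define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)

struct example {	/* hypothetical type, for illustration only */
	char c;
	double d;
};

int main(void)
{
	/* Both lines print the same value (8 on common ABIs); only the
	 * _Alignof form is well-defined standard C. */
	printf("TYPE_ALIGN: %zu\n", TYPE_ALIGN(struct example));
	printf("_Alignof:   %zu\n", (size_t)_Alignof(struct example));
	return 0;
}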


@ -10,6 +10,6 @@
*/
SYM_FUNC_START(__iowrite32_copy)
movl %edx,%ecx
rep movsd
rep movsl
RET
SYM_FUNC_END(__iowrite32_copy)


@ -294,7 +294,7 @@ static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
/*
* Check if rq has a sequential request preceding it.
*/
static bool deadline_is_seq_writes(struct deadline_data *dd, struct request *rq)
static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
{
struct request *prev = deadline_earlier_request(rq);
@ -353,7 +353,7 @@ deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
if (blk_req_can_dispatch_to_zone(rq) &&
(blk_queue_nonrot(rq->q) ||
!deadline_is_seq_writes(dd, rq)))
!deadline_is_seq_write(dd, rq)))
goto out;
}
rq = NULL;


@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth)
{
struct tty_struct *tty = in_synth->dev;
if (tty == NULL)
return;
tty_lock(tty);
if (tty->ops->close)


@ -236,6 +236,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
efi_status_t status;
struct prm_context_buffer context;
if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
return AE_NO_HANDLER;
}
/*
* The returned acpi_status will always be AE_OK. Error values will be
* saved in the first byte of the PRM message buffer to be used by ASL.
@ -325,6 +330,11 @@ void __init init_prmt(void)
pr_info("PRM: found %u modules\n", mc);
if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
pr_err("PRM: EFI runtime services unavailable\n");
return;
}
status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
ACPI_ADR_SPACE_PLATFORM_RT,
&acpi_platformrt_space_handler,


@ -2400,6 +2400,8 @@ static void pkt_submit_bio(struct bio *bio)
struct bio *split;
bio = bio_split_to_limits(bio);
if (!bio)
return;
pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
(unsigned long long)bio->bi_iter.bi_sector,


@ -2157,10 +2157,17 @@ static void qca_serdev_shutdown(struct device *dev)
int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
struct serdev_device *serdev = to_serdev_device(dev);
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
struct hci_uart *hu = &qcadev->serdev_hu;
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
const u8 ibs_wake_cmd[] = { 0xFD };
const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
if (qcadev->btsoc_type == QCA_QCA6390) {
if (test_bit(QCA_BT_OFF, &qca->flags) ||
!test_bit(HCI_RUNNING, &hdev->flags))
return;
serdev_device_write_flush(serdev);
ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
sizeof(ibs_wake_cmd));


@ -58,7 +58,7 @@
#define PCI1760_CMD_CLR_IMB2 0x00 /* Clears IMB2 */
#define PCI1760_CMD_SET_DO 0x01 /* Set output state */
#define PCI1760_CMD_GET_DO 0x02 /* Read output status */
#define PCI1760_CMD_GET_STATUS 0x03 /* Read current status */
#define PCI1760_CMD_GET_STATUS 0x07 /* Read current status */
#define PCI1760_CMD_GET_FW_VER 0x0e /* Read firmware version */
#define PCI1760_CMD_GET_HW_VER 0x0f /* Read hardware version */
#define PCI1760_CMD_SET_PWM_HI(x) (0x10 + (x) * 2) /* Set "hi" period */


@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
kset_unregister(dma_buf_stats_kset);
}
int dma_buf_stats_setup(struct dma_buf *dmabuf)
int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
if (!dmabuf->exp_name) {
pr_err("exporter name must not be empty if stats needed\n");
return -EINVAL;
@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
/* create the directory for buffer stats */
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
"%lu", file_inode(dmabuf->file)->i_ino);
"%lu", file_inode(file)->i_ino);
if (ret)
goto err_sysfs_dmabuf;


@ -13,7 +13,7 @@
int dma_buf_init_sysfs_statistics(void);
void dma_buf_uninit_sysfs_statistics(void);
int dma_buf_stats_setup(struct dma_buf *dmabuf);
int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
void dma_buf_stats_teardown(struct dma_buf *dmabuf);
#else
@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
static inline void dma_buf_uninit_sysfs_statistics(void) {}
static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
{
return 0;
}


@ -96,10 +96,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
return -EINVAL;
dmabuf = file->private_data;
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
if (dmabuf) {
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
}
return 0;
}
@ -527,17 +528,17 @@ static inline int is_dma_buf_file(struct file *file)
return file->f_op == &dma_buf_fops;
}
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
static struct file *dma_buf_getfile(size_t size, int flags)
{
static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
struct file *file;
struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
struct file *file;
if (IS_ERR(inode))
return ERR_CAST(inode);
inode->i_size = dmabuf->size;
inode_set_bytes(inode, dmabuf->size);
inode->i_size = size;
inode_set_bytes(inode, size);
/*
* The ->i_ino acquired from get_next_ino() is not unique thus
@ -551,8 +552,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
flags, &dma_buf_fops);
if (IS_ERR(file))
goto err_alloc_file;
file->private_data = dmabuf;
file->f_path.dentry->d_fsdata = dmabuf;
return file;
@ -618,19 +617,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
size_t alloc_size = sizeof(struct dma_buf);
int ret;
if (!exp_info->resv)
alloc_size += sizeof(struct dma_resv);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
if (WARN_ON(!exp_info->priv
|| !exp_info->ops
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release)) {
if (WARN_ON(!exp_info->priv || !exp_info->ops
|| !exp_info->ops->map_dma_buf
|| !exp_info->ops->unmap_dma_buf
|| !exp_info->ops->release))
return ERR_PTR(-EINVAL);
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
(exp_info->ops->pin || exp_info->ops->unpin)))
@ -642,10 +633,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
if (!try_module_get(exp_info->owner))
return ERR_PTR(-ENOENT);
file = dma_buf_getfile(exp_info->size, exp_info->flags);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err_module;
}
if (!exp_info->resv)
alloc_size += sizeof(struct dma_resv);
else
/* prevent &dma_buf[1] == dma_buf->resv */
alloc_size += 1;
dmabuf = kzalloc(alloc_size, GFP_KERNEL);
if (!dmabuf) {
ret = -ENOMEM;
goto err_module;
goto err_file;
}
dmabuf->priv = exp_info->priv;
@ -657,44 +659,36 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
if (!resv) {
resv = (struct dma_resv *)&dmabuf[1];
dma_resv_init(resv);
}
dmabuf->resv = resv;
file = dma_buf_getfile(dmabuf, exp_info->flags);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err_dmabuf;
}
dmabuf->file = file;
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
if (!resv) {
dmabuf->resv = (struct dma_resv *)&dmabuf[1];
dma_resv_init(dmabuf->resv);
} else {
dmabuf->resv = resv;
}
ret = dma_buf_stats_setup(dmabuf, file);
if (ret)
goto err_dmabuf;
file->private_data = dmabuf;
file->f_path.dentry->d_fsdata = dmabuf;
dmabuf->file = file;
mutex_lock(&db_list.lock);
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
ret = dma_buf_stats_setup(dmabuf);
if (ret)
goto err_sysfs;
return dmabuf;
err_sysfs:
/*
* Set file->f_path.dentry->d_fsdata to NULL so that when
* dma_buf_release() gets invoked by dentry_ops, it exits
* early before calling the release() dma_buf op.
*/
file->f_path.dentry->d_fsdata = NULL;
fput(file);
err_dmabuf:
if (!resv)
dma_resv_fini(dmabuf->resv);
kfree(dmabuf);
err_file:
fput(file);
err_module:
module_put(exp_info->owner);
return ERR_PTR(ret);


@ -1018,6 +1018,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* The bad descriptor currently is in the head of vc list */
vd = vchan_next_desc(&chan->vc);
if (!vd) {
dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
axi_chan_name(chan));
goto out;
}
/* Remove the completed descriptor from issued list */
list_del(&vd->node);
@ -1032,6 +1037,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
/* Try to restart the controller */
axi_chan_start_first_queued(chan);
out:
spin_unlock_irqrestore(&chan->vc.lock, flags);
}


@ -1173,8 +1173,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
spin_unlock(&ie->list_lock);
list_for_each_entry_safe(desc, itr, &flist, list) {
struct dma_async_tx_descriptor *tx;
list_del(&desc->list);
ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
/*
* wq is being disabled. Any remaining descriptors are
* likely to be stuck and can be dropped. callback could
* point to code that is no longer accessible, for example
* if dmatest module has been unloaded.
*/
tx = &desc->txd;
tx->callback = NULL;
tx->callback_result = NULL;
idxd_dma_complete_txd(desc, ctype, true);
}
}
@ -1391,8 +1402,7 @@ int drv_enable_wq(struct idxd_wq *wq)
err_irq:
idxd_wq_unmap_portal(wq);
err_map_portal:
rc = idxd_wq_disable(wq, false);
if (rc < 0)
if (idxd_wq_disable(wq, false))
dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
err:
return rc;
@ -1409,11 +1419,11 @@ void drv_disable_wq(struct idxd_wq *wq)
dev_warn(dev, "Clients has claim on wq %d: %d\n",
wq->id, idxd_wq_refcount(wq));
idxd_wq_free_resources(wq);
idxd_wq_unmap_portal(wq);
idxd_wq_drain(wq);
idxd_wq_free_irq(wq);
idxd_wq_reset(wq);
idxd_wq_free_resources(wq);
percpu_ref_exit(&wq->wq_active);
wq->type = IDXD_WQT_NONE;
wq->client_count = 0;


@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
}
}
static int ldma_cfg_init(struct ldma_dev *d)
static int ldma_parse_dt(struct ldma_dev *d)
{
struct fwnode_handle *fwnode = dev_fwnode(d->dev);
struct ldma_port *p;
@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
p->ldev = d;
}
ret = ldma_cfg_init(d);
if (ret)
return ret;
dma_dev->dev = &pdev->dev;
ch_mask = (unsigned long)d->channels_mask;
@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
ldma_dma_init_v3X(j, d);
}
ret = ldma_parse_dt(d);
if (ret)
return ret;
dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
dma_dev->device_free_chan_resources = ldma_free_chan_resources;
dma_dev->device_terminate_all = ldma_terminate_all;


@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
int ret;
/* Clear any interrupts */
tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
/* Assert soft reset */
tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);

View File

@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(data, gsmi_dev.data_buf->start, *data_size);
/* All variables are have the following attributes */
*attr = EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
if (attr)
*attr = EFI_VARIABLE_NON_VOLATILE |
EFI_VARIABLE_BOOTSERVICE_ACCESS |
EFI_VARIABLE_RUNTIME_ACCESS;
}
spin_unlock_irqrestore(&gsmi_dev.lock, flags);


@ -1507,6 +1507,7 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
break;
default:
@ -1551,6 +1552,7 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
break;
default:
@ -1636,6 +1638,7 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
case IP_VERSION(13, 0, 4):
@ -1686,6 +1689,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
@ -1785,6 +1789,7 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
break;
default:
@ -1948,6 +1953,7 @@ static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
adev->enable_mes = true;
adev->enable_mes_kiq = true;
@ -2177,6 +2183,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->family = AMDGPU_FAMILY_GC_11_0_0;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->family = AMDGPU_FAMILY_GC_11_0_1;
break;
default:
@ -2194,6 +2201,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 6):
case IP_VERSION(10, 3, 7):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->flags |= AMD_IS_APU;
break;
default:
@ -2250,6 +2258,7 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
break;
case IP_VERSION(7, 7, 0):
case IP_VERSION(7, 7, 1):
adev->nbio.funcs = &nbio_v7_7_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
break;


@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
return amdgpu_compute_multipipe == 1;
}
if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
return true;
/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)


@ -548,6 +548,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
case IP_VERSION(10, 3, 1):
/* YELLOW_CARP*/
case IP_VERSION(10, 3, 3):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
/* Don't enable it by default yet.
*/
if (amdgpu_tmz < 1) {


@ -154,8 +154,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
/* Check if any fences where initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
else if (job->hw_fence.ops)
f = &job->hw_fence;
else
f = NULL;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}


@ -139,6 +139,7 @@ static int psp_early_init(void *handle)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;


@ -77,6 +77,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_3_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_3_rlc.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_pfp.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_me.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_11_0_4_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
{
@ -262,6 +266,7 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
soc15_program_register_sequence(adev,
golden_settings_gc_11_0_1,
(const u32)ARRAY_SIZE(golden_settings_gc_11_0_1));
@ -856,6 +861,7 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@ -1282,7 +1288,6 @@ static int gfx_v11_0_sw_init(void *handle)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
adev->gfx.me.num_me = 1;
@ -1292,6 +1297,15 @@ static int gfx_v11_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 1;
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 4;
break;
default:
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
@ -2486,7 +2500,8 @@ static int gfx_v11_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
for (i = 0; i < adev->usec_timeout; i++) {
cp_status = RREG32_SOC15(GC, 0, regCP_STAT);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1))
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 1) ||
adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 4))
bootload_status = RREG32_SOC15(GC, 0,
regRLC_RLCS_BOOTLOAD_STATUS_gc_11_0_1);
else
@ -5022,6 +5037,7 @@ static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
break;
default:
@ -5055,6 +5071,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
amdgpu_gfx_off_ctrl(adev, enable);
break;
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 4):
gfx_v11_cntl_pg(adev, enable);
amdgpu_gfx_off_ctrl(adev, enable);
break;
@ -5078,6 +5095,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
gfx_v11_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;


@ -749,6 +749,7 @@ static int gmc_v11_0_sw_init(void *handle)
case IP_VERSION(11, 0, 1):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
case IP_VERSION(11, 0, 4):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,

View File

@ -46,6 +46,8 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
/* For large FW files the time to complete can be very long */
#define USBC_PD_POLLING_LIMIT_S 240
@ -102,6 +104,7 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 3):
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
err = psp_init_toc_microcode(psp, chip_name);
if (err)
return err;


@ -325,6 +325,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
case IP_VERSION(13, 0, 10):
return AMD_RESET_METHOD_MODE1;
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
return AMD_RESET_METHOD_MODE2;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@ -654,7 +655,23 @@ static int soc21_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x20;
break;
case IP_VERSION(11, 0, 4):
adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG |
adev->cg_flags =
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_FGCG |
AMD_CG_SUPPORT_REPEATER_FGCG |
AMD_CG_SUPPORT_GFX_PERF_CLK |
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_HDP_MGCG |
AMD_CG_SUPPORT_HDP_LS |
AMD_CG_SUPPORT_ATHUB_MGCG |
AMD_CG_SUPPORT_ATHUB_LS |
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_BIF_MGCG |
AMD_CG_SUPPORT_BIF_LS |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |


@ -1512,8 +1512,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 4):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@ -5283,8 +5281,6 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
stream->output_color_space = get_output_color_space(timing_out);
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@ -5295,6 +5291,8 @@ static void fill_stream_properties_from_drm_display_mode(
adjust_colour_depth_from_display_info(timing_out, info);
}
}
stream->output_color_space = get_output_color_space(timing_out);
}
static void fill_audio_info(struct audio_info *audio_info,
@ -9433,8 +9431,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
if (dm_old_con_state->abm_level !=
dm_new_con_state->abm_level)
if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
dm_old_con_state->scaling != dm_new_con_state->scaling)
new_crtc_state->connectors_changed = true;
}


@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
{ 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
{ COLOR_SPACE_YCBCR2020_TYPE,
{ 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
{ 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
{ COLOR_SPACE_YCBCR709_BLACK_TYPE,
{ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },


@ -585,6 +585,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
yellow_carp_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
smu_v13_0_4_set_ppt_funcs(smu);
break;
case IP_VERSION(13, 0, 5):

View File

@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
int ret = 0;
uint32_t apu_percent = 0;
uint32_t dgpu_percent = 0;
struct amdgpu_device *adev = smu->adev;
ret = smu_cmn_get_metrics_table(smu,
@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->AverageUvdActivity / 100;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = (metrics->CurrentSocketPower << 8) / 1000;
if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
*value = metrics->CurrentSocketPower << 8;
else
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = (metrics->GfxTemperature / 100) *


@ -250,6 +250,7 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
mp1_fw_flags = RREG32_PCIE(MP1_Public |
(smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
break;
@ -303,6 +304,7 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
case IP_VERSION(13, 0, 4):
case IP_VERSION(13, 0, 11):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_4;
break;
case IP_VERSION(13, 0, 5):
@ -843,6 +845,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 10):
case IP_VERSION(13, 0, 11):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)


@ -1026,6 +1026,15 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};
static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@ -1035,7 +1044,9 @@ void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = smu_v13_0_4_feature_mask_map;
smu->table_map = smu_v13_0_4_table_map;
smu->is_apu = true;
smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
smu_v13_0_4_set_smu_mailbox_registers(smu);
else
smu_v13_0_set_smu_mailbox_registers(smu);
}


@ -1620,7 +1620,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
if (w > max_width || w < min_width || h > max_height) {
if (w > max_width || w < min_width || h > max_height || h < 1) {
drm_dbg_kms(&dev_priv->drm,
"requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
w, h, min_width, max_width, max_height);

View File

@ -1070,12 +1070,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
*/
static void i915_driver_lastclose(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
intel_fbdev_restore_mode(dev);
if (HAS_DISPLAY(i915))
vga_switcheroo_process_delayed_switch();
vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)


@ -423,7 +423,8 @@ static const struct intel_device_info ilk_m_info = {
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
/* snb does support rc6p, but enabling it causes various issues */ \
.has_rc6p = 0, \
.has_rps = true, \
.dma_mask_size = 40, \
.__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \


@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
if (!HAS_DISPLAY(i915)) {
dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
return;
}
if (state == VGA_SWITCHEROO_ON) {
drm_info(&i915->drm, "switched on\n");
@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
return i915 && atomic_read(&i915->drm.open_count) == 0;
return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {


@ -62,9 +62,6 @@ enum {
SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
SRP_TSK_MGMT_SQ_SIZE,
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = 1U << 31,
SRP_MAX_PAGES_PER_MR = 512,
SRP_MAX_ADD_CDB_LEN = 16,
@ -79,6 +76,11 @@ enum {
sizeof(struct srp_imm_buf),
};
enum {
SRP_TAG_NO_REQ = ~0U,
SRP_TAG_TSK_MGMT = BIT(31),
};
enum srp_target_state {
SRP_TARGET_SCANNING,
SRP_TARGET_LIVE,


@ -316,6 +316,13 @@ static void fastrpc_free_map(struct kref *ref)
dma_buf_put(map->buf);
}
if (map->fl) {
spin_lock(&map->fl->lock);
list_del(&map->node);
spin_unlock(&map->fl->lock);
map->fl = NULL;
}
kfree(map);
}
@ -325,38 +332,41 @@ static void fastrpc_map_put(struct fastrpc_map *map)
kref_put(&map->refcount, fastrpc_free_map);
}
static void fastrpc_map_get(struct fastrpc_map *map)
static int fastrpc_map_get(struct fastrpc_map *map)
{
if (map)
kref_get(&map->refcount);
if (!map)
return -ENOENT;
return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
struct fastrpc_map **ppmap)
struct fastrpc_map **ppmap, bool take_ref)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
int ret = -ENOENT;
mutex_lock(&fl->mutex);
spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
if (map->fd == fd) {
*ppmap = map;
mutex_unlock(&fl->mutex);
return 0;
if (map->fd != fd)
continue;
if (take_ref) {
ret = fastrpc_map_get(map);
if (ret) {
dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
__func__, fd, ret);
break;
}
}
*ppmap = map;
ret = 0;
break;
}
mutex_unlock(&fl->mutex);
return -ENOENT;
}
static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
struct fastrpc_map **ppmap)
{
int ret = fastrpc_map_lookup(fl, fd, ppmap);
if (!ret)
fastrpc_map_get(*ppmap);
spin_unlock(&fl->lock);
return ret;
}
@ -703,7 +713,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
struct fastrpc_map *map = NULL;
int err = 0;
if (!fastrpc_map_find(fl, fd, ppmap))
if (!fastrpc_map_lookup(fl, fd, ppmap, true))
return 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@ -1026,7 +1036,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
fastrpc_map_put(mmap);
}
@ -1265,12 +1275,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
fl->init_mem = NULL;
fastrpc_buf_free(imem);
err_alloc:
if (map) {
spin_lock(&fl->lock);
list_del(&map->node);
spin_unlock(&fl->lock);
fastrpc_map_put(map);
}
fastrpc_map_put(map);
err:
kfree(args);
@ -1346,10 +1351,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
fastrpc_context_put(ctx);
}
list_for_each_entry_safe(map, m, &fl->maps, node) {
list_del(&map->node);
list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
}
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);

View File

@ -665,13 +665,15 @@ void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
goto out;
goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
out:
if (ret)
mei_cl_unlink(cl);
notlinked:
mutex_unlock(&bus->device_lock);
if (ret)
return ERR_PTR(ret);
@ -721,7 +723,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
if (cl->state == MEI_FILE_UNINITIALIZED) {
ret = mei_cl_link(cl);
if (ret)
goto out;
goto notlinked;
/* update pointers */
cl->cldev = cldev;
}
@ -748,6 +750,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
}
out:
if (ret)
mei_cl_unlink(cl);
notlinked:
mutex_unlock(&bus->device_lock);
return ret;
@ -1115,7 +1120,6 @@ static void mei_cl_bus_dev_release(struct device *dev)
mei_cl_flush_queues(cldev->cl, NULL);
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
mei_cl_unlink(cldev->cl);
kfree(cldev->cl);
kfree(cldev);
}


@ -111,6 +111,8 @@
#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
/*
* MEI HW Section
*/


@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
};


@ -56,8 +56,6 @@ struct vmci_guest_device {
bool exclusive_vectors;
struct tasklet_struct datagram_tasklet;
struct tasklet_struct bm_tasklet;
struct wait_queue_head inout_wq;
void *data_buffer;
@ -304,9 +302,8 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
* This function assumes that it has exclusive access to the data
* in register(s) for the duration of the call.
*/
static void vmci_dispatch_dgs(unsigned long data)
static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
{
struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
u8 *dg_in_buffer = vmci_dev->data_buffer;
struct vmci_datagram *dg;
size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
@ -465,10 +462,8 @@ static void vmci_dispatch_dgs(unsigned long data)
* Scans the notification bitmap for raised flags, clears them
* and handles the notifications.
*/
static void vmci_process_bitmap(unsigned long data)
static void vmci_process_bitmap(struct vmci_guest_device *dev)
{
struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
if (!dev->notification_bitmap) {
dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
return;
@ -486,13 +481,13 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
struct vmci_guest_device *dev = _dev;
/*
* If we are using MSI-X with exclusive vectors then we simply schedule
* the datagram tasklet, since we know the interrupt was meant for us.
* If we are using MSI-X with exclusive vectors then we simply call
* vmci_dispatch_dgs(), since we know the interrupt was meant for us.
* Otherwise we must read the ICR to determine what to do.
*/
if (dev->exclusive_vectors) {
tasklet_schedule(&dev->datagram_tasklet);
vmci_dispatch_dgs(dev);
} else {
unsigned int icr;
@ -502,12 +497,12 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
return IRQ_NONE;
if (icr & VMCI_ICR_DATAGRAM) {
tasklet_schedule(&dev->datagram_tasklet);
vmci_dispatch_dgs(dev);
icr &= ~VMCI_ICR_DATAGRAM;
}
if (icr & VMCI_ICR_NOTIFICATION) {
tasklet_schedule(&dev->bm_tasklet);
vmci_process_bitmap(dev);
icr &= ~VMCI_ICR_NOTIFICATION;
}
@ -536,7 +531,7 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
struct vmci_guest_device *dev = _dev;
/* For MSI-X we can just assume it was meant for us. */
tasklet_schedule(&dev->bm_tasklet);
vmci_process_bitmap(dev);
return IRQ_HANDLED;
}
@ -638,10 +633,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->iobase = iobase;
vmci_dev->mmio_base = mmio_base;
tasklet_init(&vmci_dev->datagram_tasklet,
vmci_dispatch_dgs, (unsigned long)vmci_dev);
tasklet_init(&vmci_dev->bm_tasklet,
vmci_process_bitmap, (unsigned long)vmci_dev);
init_waitqueue_head(&vmci_dev->inout_wq);
if (mmio_base != NULL) {
@ -808,8 +799,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* Request IRQ for legacy or MSI interrupts, or for first
* MSI-X vector.
*/
error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
vmci_interrupt, IRQF_SHARED,
KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
pci_irq_vector(pdev, 0), error);
@ -823,9 +815,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* between the vectors.
*/
if (vmci_dev->exclusive_vectors) {
error = request_irq(pci_irq_vector(pdev, 1),
vmci_interrupt_bm, 0, KBUILD_MODNAME,
vmci_dev);
error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
vmci_interrupt_bm, 0,
KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
@ -833,9 +825,11 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
goto err_free_irq;
}
if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
error = request_irq(pci_irq_vector(pdev, 2),
vmci_interrupt_dma_datagram,
0, KBUILD_MODNAME, vmci_dev);
error = request_threaded_irq(pci_irq_vector(pdev, 2),
NULL,
vmci_interrupt_dma_datagram,
0, KBUILD_MODNAME,
vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
@ -871,8 +865,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
err_free_irq:
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
err_disable_msi:
pci_free_irq_vectors(pdev);
@ -943,9 +935,6 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
pci_free_irq_vectors(pdev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);
if (vmci_dev->notification_bitmap) {
/*
* The device reset above cleared the bitmap state of the


@ -107,6 +107,7 @@
#define ESDHC_TUNING_START_TAP_DEFAULT 0x1
#define ESDHC_TUNING_START_TAP_MASK 0x7f
#define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE (1 << 7)
#define ESDHC_TUNING_STEP_DEFAULT 0x1
#define ESDHC_TUNING_STEP_MASK 0x00070000
#define ESDHC_TUNING_STEP_SHIFT 16
@ -1361,7 +1362,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
struct cqhci_host *cq_host = host->mmc->cqe_private;
int tmp;
u32 tmp;
if (esdhc_is_usdhc(imx_data)) {
/*
@ -1416,17 +1417,24 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
tmp |= ESDHC_STD_TUNING_EN |
ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_start_tap) {
tmp &= ~ESDHC_TUNING_START_TAP_MASK;
tmp |= ESDHC_STD_TUNING_EN;
/*
* ROM code or bootloader may config the start tap
* and step, unmask them first.
*/
tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
if (imx_data->boarddata.tuning_start_tap)
tmp |= imx_data->boarddata.tuning_start_tap;
}
else
tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
if (imx_data->boarddata.tuning_step) {
tmp &= ~ESDHC_TUNING_STEP_MASK;
tmp |= imx_data->boarddata.tuning_step
<< ESDHC_TUNING_STEP_SHIFT;
} else {
tmp |= ESDHC_TUNING_STEP_DEFAULT
<< ESDHC_TUNING_STEP_SHIFT;
}
/* Disable the CMD CRC check for tuning, if not, need to


@ -1492,9 +1492,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
struct sunxi_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc);
pm_runtime_force_suspend(&pdev->dev);
disable_irq(host->irq);
sunxi_mmc_disable(host);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev)) {
disable_irq(host->irq);
sunxi_mmc_disable(host);
}
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
mmc_free_host(mmc);


@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
u16 pcifunc = req->hdr.pcifunc;
struct mcs_rsrc_map *map;
struct mcs *mcs;
int rc;
int rc = 0;
if (req->mcs_id >= rvu->mcs_blk_cnt)
return MCS_AF_ERR_INVALID_MCSID;


@ -1012,7 +1012,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
rbpool = cq->rbpool;
free_ptrs = cq->pool_ptrs;
get_cpu();
while (cq->pool_ptrs) {
if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
@ -1032,7 +1031,6 @@ static void otx2_pool_refill_task(struct work_struct *work)
pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
put_cpu();
cq->refill_task_sched = false;
}
@ -1370,7 +1368,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
if (err)
goto fail;
get_cpu();
/* Allocate pointers and free them to aura/pool */
for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
@ -1394,7 +1391,6 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
}
err_mem:
put_cpu();
return err ? -ENOMEM : 0;
fail:
@ -1435,21 +1431,18 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
if (err)
goto fail;
get_cpu();
/* Allocate pointers and free them to aura/pool */
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
if (err)
goto err_mem;
return -ENOMEM;
pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
bufptr + OTX2_HEAD_ROOM);
}
}
err_mem:
put_cpu();
return err ? -ENOMEM : 0;
return 0;
fail:
otx2_mbox_reset(&pfvf->mbox.mbox, 0);
otx2_aura_pool_free(pfvf);


@ -733,8 +733,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
u64 ptrs[2];
ptrs[1] = buf;
get_cpu();
/* Free only one buffer at time during init and teardown */
__cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
put_cpu();
}
/* Alloc pointer from pool/aura */


@ -677,6 +677,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
mlx5_core_err(dev, "health works are not permitted at this stage\n");
mutex_unlock(&dev->intf_state_mutex);
return;
}
mutex_unlock(&dev->intf_state_mutex);


@ -2207,28 +2207,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
}
static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
rtl_wol_enable_rx(tp);
}
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
@ -2452,6 +2430,31 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
rtl_wait_txrx_fifo_empty(tp);
}
static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
rtl_disable_rxdvgate(tp);
}
static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
rtl_ephy_write(tp, 0x19, 0xff64);
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
rtl_wol_enable_rx(tp);
}
}
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
u32 val = TX_DMA_BURST << TxDMAShift |
@ -3869,7 +3872,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
netdev_reset_queue(tp->dev);
}
static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
static void rtl8169_cleanup(struct rtl8169_private *tp)
{
napi_disable(&tp->napi);
@ -3881,9 +3884,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
rtl_rx_close(tp);
if (going_down && tp->dev->wol_enabled)
goto no_reset;
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
@ -3904,7 +3904,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
}
rtl_hw_reset(tp);
no_reset:
rtl8169_tx_clear(tp);
rtl8169_init_ring_indexes(tp);
}
@ -3915,7 +3915,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
netif_stop_queue(tp->dev);
rtl8169_cleanup(tp, false);
rtl8169_cleanup(tp);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i);
@ -4601,7 +4601,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
pci_clear_master(tp->pci_dev);
rtl_pci_commit(tp);
rtl8169_cleanup(tp, true);
rtl8169_cleanup(tp);
rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
}


@ -1218,7 +1218,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_NROF_H2D_COMMON_MSGRINGS;
max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
}
if (max_flowrings > 256) {
if (max_flowrings > 512) {
brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
return -EIO;
}


@ -1106,6 +1106,11 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c
int i, j, num_sub_bands;
s8 *gain;
/* many firmware images for JF lie about this */
if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
return -EOPNOTSUPP;
if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(fwrt,
"PPAG capability not supported by FW, command not sent.\n");


@ -1185,13 +1185,25 @@ int __init early_init_dt_scan_chosen(char *cmdline)
if (node < 0)
node = fdt_path_offset(fdt, "/chosen@0");
if (node < 0)
return -ENOENT;
/* Handle the cmdline config options even if no /chosen node */
goto handle_cmdline;
chosen_node_offset = node;
early_init_dt_check_for_initrd(node);
early_init_dt_check_for_elfcorehdr(node);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
add_bootloader_randomness(rng_seed, l);
/* try to clear seed so it won't be found. */
fdt_nop_property(initial_boot_params, node, "rng-seed");
/* update CRC check value */
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
}
/* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */
if (overwrite_incoming_cmdline || !cmdline[0])
@ -1215,20 +1227,9 @@ int __init early_init_dt_scan_chosen(char *cmdline)
}
}
handle_cmdline:
pr_debug("Command line is: %s\n", (char *)cmdline);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
add_bootloader_randomness(rng_seed, l);
/* try to clear seed so it won't be found. */
fdt_nop_property(initial_boot_params, node, "rng-seed");
/* update CRC check value */
of_fdt_crc32 = crc32_be(~0, initial_boot_params,
fdt_totalsize(initial_boot_params));
}
return 0;
}


@ -461,9 +461,10 @@ static int apr_add_device(struct device *dev, struct device_node *np,
goto out;
}
/* Protection domain is optional, it does not exist on older platforms */
ret = of_property_read_string_index(np, "qcom,protection-domain",
1, &adev->service_path);
if (ret < 0) {
if (ret < 0 && ret != -EINVAL) {
dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
goto out;
}


@ -86,7 +86,7 @@ struct vchiq_service_params_kernel {
struct vchiq_instance;
extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
extern int vchiq_initialise(struct vchiq_instance **pinstance);
extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,


@ -100,10 +100,10 @@ vchiq_dump_platform_use_state(struct vchiq_state *state);
extern void
vchiq_dump_service_use_state(struct vchiq_state *state);
extern enum vchiq_status
extern int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
extern enum vchiq_status
extern int
vchiq_release_internal(struct vchiq_state *state,
struct vchiq_service *service);


@ -427,13 +427,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
struct usb4_port *usb4;
usb4 = port->usb4;
if (!usb4)
return 0;
pm_runtime_get_sync(&usb4->dev);
/*
* Send broadcast RT to make sure retimer indices facing this
@ -441,7 +434,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
ret = usb4_port_enumerate_retimers(port);
if (ret)
goto out;
return ret;
/*
* Enable sideband channel for each retimer. We can do this
@ -471,12 +464,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
break;
}
if (!last_idx) {
ret = 0;
goto out;
}
if (!last_idx)
return 0;
/* Add on-board retimers if they do not exist already */
ret = 0;
for (i = 1; i <= last_idx; i++) {
struct tb_retimer *rt;
@ -490,10 +482,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
}
}
out:
pm_runtime_mark_last_busy(&usb4->dev);
pm_runtime_put_autosuspend(&usb4->dev);
return ret;
}


@ -628,11 +628,15 @@ static void tb_scan_port(struct tb_port *port)
* Downstream switch is reachable through two ports.
* Only scan on the primary port (link_nr == 0).
*/
if (port->usb4)
pm_runtime_get_sync(&port->usb4->dev);
if (tb_wait_for_port(port, false) <= 0)
return;
goto out_rpm_put;
if (port->remote) {
tb_port_dbg(port, "port already has a remote\n");
return;
goto out_rpm_put;
}
tb_retimer_scan(port, true);
@ -647,12 +651,12 @@ static void tb_scan_port(struct tb_port *port)
*/
if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
tb_scan_xdomain(port);
return;
goto out_rpm_put;
}
if (tb_switch_configure(sw)) {
tb_switch_put(sw);
return;
goto out_rpm_put;
}
/*
@ -681,7 +685,7 @@ static void tb_scan_port(struct tb_port *port)
if (tb_switch_add(sw)) {
tb_switch_put(sw);
return;
goto out_rpm_put;
}
/* Link the switches using both links if available */
@ -733,6 +737,12 @@ static void tb_scan_port(struct tb_port *port)
tb_add_dp_resources(sw);
tb_scan_switch(sw);
out_rpm_put:
if (port->usb4) {
pm_runtime_mark_last_busy(&port->usb4->dev);
pm_runtime_put_autosuspend(&port->usb4->dev);
}
}
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)


@ -1275,7 +1275,7 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
return;
} else if (!ret) {
/* Use maximum link rate if the link valid is not set */
ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;


@ -1419,12 +1419,19 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
* registered, we notify the userspace that it has changed.
*/
if (!update) {
struct tb_port *port;
/*
* Now disable lane 1 if bonding was not enabled. Do
* this only if bonding was possible at the beginning
* (that is we are the connection manager and there are
* two lanes).
*/
if (xd->bonding_possible) {
struct tb_port *port;
/* Now disable lane 1 if bonding was not enabled */
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
if (!port->bonded)
tb_port_disable(port->dual_link_port);
port = tb_port_at(xd->route, tb_xdomain_parent(xd));
if (!port->bonded)
tb_port_disable(port->dual_link_port);
}
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");


@ -43,6 +43,12 @@
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
#define PCI_DEVICE_ID_SEALEVEL_710xC 0x1001
#define PCI_DEVICE_ID_SEALEVEL_720xC 0x1002
#define PCI_DEVICE_ID_SEALEVEL_740xC 0x1004
#define PCI_DEVICE_ID_SEALEVEL_780xC 0x1008
#define PCI_DEVICE_ID_SEALEVEL_716xC 0x1010
#define UART_EXAR_INT0 0x80
#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
#define UART_EXAR_SLEEP 0x8b /* Sleep mode */
@ -638,6 +644,8 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
else if (board->num_ports)
nr_ports = board->num_ports;
else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
nr_ports = pcidev->device & 0xff;
else
nr_ports = pcidev->device & 0x0f;
@ -864,6 +872,12 @@ static const struct pci_device_id exar_pci_tbl[] = {
EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, exar_pci_tbl);


@ -1467,6 +1467,10 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
struct circ_buf *xmit = &uap->port.state->xmit;
int count = uap->fifosize >> 1;
if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
!uap->rs485_tx_started)
pl011_rs485_tx_start(uap);
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return true;
@ -1478,10 +1482,6 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
return false;
}
if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
!uap->rs485_tx_started)
pl011_rs485_tx_start(uap);
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
return true;


@ -2673,13 +2673,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
else if (mr == ATMEL_US_PAR_ODD)
*parity = 'o';
/*
* The serial core only rounds down when matching this to a
* supported baud rate. Make sure we don't end up slightly
* lower than one of those, as it would make us fall through
* to a much lower baud rate than we really want.
*/
*baud = port->uartclk / (16 * (quot - 1));
*baud = port->uartclk / (16 * quot);
}
static int __init atmel_console_setup(struct console *co, char *options)


@ -752,7 +752,7 @@ static void pch_dma_tx_complete(void *arg)
}
xmit->tail &= UART_XMIT_SIZE - 1;
async_tx_ack(priv->desc_tx);
dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
priv->tx_dma_use = 0;
priv->nent = 0;
priv->orig_nent = 0;


@ -864,9 +864,10 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
return IRQ_HANDLED;
}
static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
static int setup_fifos(struct qcom_geni_serial_port *port)
{
struct uart_port *uport;
u32 old_rx_fifo_depth = port->rx_fifo_depth;
uport = &port->uport;
port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
@ -874,6 +875,16 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
uport->fifosize =
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
port->rx_fifo_depth * sizeof(u32),
GFP_KERNEL);
if (!port->rx_fifo)
return -ENOMEM;
}
return 0;
}
@ -888,6 +899,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
u32 pin_swap;
int ret;
proto = geni_se_read_proto(&port->se);
if (proto != GENI_SE_UART) {
@ -897,7 +909,9 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
qcom_geni_serial_stop_rx(uport);
get_tx_fifo_size(port);
ret = setup_fifos(port);
if (ret)
return ret;
writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);


@ -2614,6 +2614,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
u8 req_on_hw_ring = 0;
unsigned long flags;
int ret = 0;
int val;
if (!ep || !request || !ep->desc)
return -EINVAL;
@ -2649,6 +2650,13 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring && link_trb) {
/* Stop DMA */
writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
/* wait for DFLUSH cleared */
readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
!(val & EP_CMD_DFLUSH), 1, 1000);
link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
((priv_req->end_trb + 1) * TRB_SIZE)));
link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
@ -2660,6 +2668,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
req = cdns3_next_request(&priv_ep->pending_req_list);
if (req)
cdns3_rearm_transfer(priv_ep, 1);
not_found:
spin_unlock_irqrestore(&priv_dev->lock, flags);
return ret;


@ -44,6 +44,9 @@
#define USB_PRODUCT_USB5534B 0x5534
#define USB_VENDOR_CYPRESS 0x04b4
#define USB_PRODUCT_CY7C65632 0x6570
#define USB_VENDOR_TEXAS_INSTRUMENTS 0x0451
#define USB_PRODUCT_TUSB8041_USB3 0x8140
#define USB_PRODUCT_TUSB8041_USB2 0x8142
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
@ -5798,6 +5801,16 @@ static const struct usb_device_id hub_id_table[] = {
.idVendor = USB_VENDOR_GENESYS_LOGIC,
.bInterfaceClass = USB_CLASS_HUB,
.driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT,
.idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
.idProduct = USB_PRODUCT_TUSB8041_USB2,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_PRODUCT,
.idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
.idProduct = USB_PRODUCT_TUSB8041_USB3,
.driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,


@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
}
EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
/**
* usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
* @hdev: USB device belonging to the usb hub
* @index: zero based port index
*
* Some USB3 ports may not support USB3 link power management U1/U2 states
* due to different retimer setup. ACPI provides _DSM method which returns 0x01
* if U1 and U2 states should be disabled. Evaluate _DSM with:
* Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
* Arg1: Revision ID = 0
* Arg2: Function Index = 5
* Arg3: (empty)
*
* Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
*/
int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
{
union acpi_object *obj;
acpi_handle port_handle;
int port1 = index + 1;
guid_t guid;
int ret;
ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
if (ret)
return ret;
port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
if (!port_handle) {
dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
return -ENODEV;
}
if (!acpi_check_dsm(port_handle, &guid, 0,
BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
return -ENODEV;
}
obj = acpi_evaluate_dsm(port_handle, &guid, 0,
USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
if (!obj)
return -ENODEV;
if (obj->type != ACPI_TYPE_INTEGER) {
dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
ACPI_FREE(obj);
return -EINVAL;
}
if (obj->integer.value == 0x01)
ret = 1;
ACPI_FREE(obj);
return ret;
}
EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
/**
* usb_acpi_set_power_state - control usb port's power via acpi power
* resource


@ -424,6 +424,7 @@ static void gadget_info_attr_release(struct config_item *item)
WARN_ON(!list_empty(&gi->string_list));
WARN_ON(!list_empty(&gi->available_func));
kfree(gi->composite.gadget_driver.function);
kfree(gi->composite.gadget_driver.driver.name);
kfree(gi);
}
@ -1715,7 +1716,6 @@ static const struct usb_gadget_driver configfs_driver_template = {
.max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
},
.match_existing_only = 1,
};
@ -1851,17 +1851,24 @@ static struct config_group *gadgets_make(
gi->composite.gadget_driver = configfs_driver_template;
gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
"configfs-gadget.%s", name);
if (!gi->composite.gadget_driver.driver.name)
goto err;
gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
gi->composite.name = gi->composite.gadget_driver.function;
if (!gi->composite.gadget_driver.function)
goto err;
goto out_free_driver_name;
if (android_device_create(gi) < 0)
goto err;
goto out_free_driver_name;
return &gi->group;
out_free_driver_name:
kfree(gi->composite.gadget_driver.driver.name);
err:
kfree(gi);
return ERR_PTR(-ENOMEM);


@ -83,7 +83,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
/* peak (theoretical) bulk transfer rate in bits-per-second */
static inline unsigned ncm_bitrate(struct usb_gadget *g)
{
if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
if (!g)
return 0;
else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
return 4250000000U;
else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
return 3750000000U;


@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data)
*/
static const char *CHIP;
static DEFINE_MUTEX(sb_mutex); /* Serialize superblock operations */
/*----------------------------------------------------------------------*/
@ -2010,13 +2011,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct dev_data *dev;
int rc;
if (the_device)
return -ESRCH;
mutex_lock(&sb_mutex);
if (the_device) {
rc = -ESRCH;
goto Done;
}
CHIP = usb_get_gadget_udc_name();
if (!CHIP)
return -ENODEV;
if (!CHIP) {
rc = -ENODEV;
goto Done;
}
/* superblock */
sb->s_blocksize = PAGE_SIZE;
@ -2053,13 +2061,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
* from binding to a controller.
*/
the_device = dev;
return 0;
rc = 0;
goto Done;
Enomem:
Enomem:
kfree(CHIP);
CHIP = NULL;
rc = -ENOMEM;
return -ENOMEM;
Done:
mutex_unlock(&sb_mutex);
return rc;
}
/* "mount -t gadgetfs path /dev/gadget" ends up here */
@ -2081,6 +2093,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc)
static void
gadgetfs_kill_sb (struct super_block *sb)
{
mutex_lock(&sb_mutex);
kill_litter_super (sb);
if (the_device) {
put_dev (the_device);
@ -2088,6 +2101,7 @@ gadgetfs_kill_sb (struct super_block *sb)
}
kfree(CHIP);
CHIP = NULL;
mutex_unlock(&sb_mutex);
}
/*----------------------------------------------------------------------*/


@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
(const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
(const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
(const struct uvc_descriptor_header *) &uvc_format_yuv,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
(const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
(const struct uvc_descriptor_header *) &uvc_color_matching,
(const struct uvc_descriptor_header *) &uvc_format_mjpg,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
(const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,


@ -29,7 +29,7 @@
#include "ehci-fsl.h"
#define DRIVER_DESC "Freescale EHCI Host controller driver"
#define DRV_NAME "ehci-fsl"
#define DRV_NAME "fsl-ehci"
static struct hc_driver __read_mostly fsl_ehci_hc_driver;


@ -78,9 +78,12 @@ static const char hcd_name[] = "xhci_hcd";
static struct hc_driver __read_mostly xhci_pci_hc_driver;
static int xhci_pci_setup(struct usb_hcd *hcd);
static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
.reset = xhci_pci_setup,
.update_hub_device = xhci_pci_update_hub_device,
};
/* called after powerup, by probe or system-pm "wakeup" */
@ -352,8 +355,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
NULL);
ACPI_FREE(obj);
}
static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_hub *rhub = &xhci->usb3_rhub;
int ret;
int i;
/* This is not the usb3 roothub we are looking for */
if (hcd != rhub->hcd)
return;
if (hdev->maxchild > rhub->num_ports) {
dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
return;
}
for (i = 0; i < hdev->maxchild; i++) {
ret = usb_acpi_port_lpm_incapable(hdev, i);
dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
if (ret >= 0) {
rhub->ports[i]->lpm_incapable = ret;
continue;
}
}
}
#else
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
#endif /* CONFIG_ACPI */
/* called during probe() after chip reset completes */
@ -386,6 +419,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
return xhci_pci_reinit(xhci, pdev);
}
static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
/* Check if acpi claims some USB3 roothub ports are lpm incapable */
if (!hdev->parent)
xhci_find_lpm_incapable_ports(hcd, hdev);
return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
}
/*
* We need to register our own PCI probe function (instead of the USB core's
* function) in order to create a second roothub under xHCI.
@ -455,6 +498,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
pm_runtime_allow(&dev->dev);
dma_set_max_seg_size(&dev->dev, UINT_MAX);
return 0;
put_usb3_hcd:


@ -1170,7 +1170,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep;
struct xhci_ring *ring;
ep = &xhci->devs[slot_id]->eps[ep_index];
ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
if (!ep)
return;
if ((ep->ep_state & EP_HAS_STREAMS) ||
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;


@ -3974,6 +3974,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *virt_dev;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
int i, ret;
/*
@ -4000,7 +4001,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
virt_dev->udev = NULL;
xhci_disable_slot(xhci, udev->slot_id);
spin_lock_irqsave(&xhci->lock, flags);
xhci_free_virt_device(xhci, udev->slot_id);
spin_unlock_irqrestore(&xhci->lock, flags);
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@ -5044,6 +5049,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
struct usb_device *udev, enum usb3_link_state state)
{
struct xhci_hcd *xhci;
struct xhci_port *port;
u16 hub_encoded_timeout;
int mel;
int ret;
@ -5060,6 +5066,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
if (xhci_check_tier_policy(xhci, udev, state) < 0)
return USB3_LPM_DISABLED;
/* If connected to root port then check port can handle lpm */
if (udev->parent && !udev->parent->parent) {
port = xhci->usb3_rhub.ports[udev->portnum - 1];
if (port->lpm_incapable)
return USB3_LPM_DISABLED;
}
hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
if (mel < 0) {
@ -5119,7 +5132,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@ -5219,6 +5232,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_free_command(xhci, config_cmd);
return ret;
}
EXPORT_SYMBOL_GPL(xhci_update_hub_device);
static int xhci_get_frame(struct usb_hcd *hcd)
{
@ -5502,6 +5516,8 @@ void xhci_init_driver(struct hc_driver *drv,
drv->check_bandwidth = over->check_bandwidth;
if (over->reset_bandwidth)
drv->reset_bandwidth = over->reset_bandwidth;
if (over->update_hub_device)
drv->update_hub_device = over->update_hub_device;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);


@ -1735,6 +1735,7 @@ struct xhci_port {
int hcd_portnum;
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
unsigned int lpm_incapable:1;
};
struct xhci_hub {
@ -1943,6 +1944,8 @@ struct xhci_driver_overrides {
struct usb_host_endpoint *ep);
int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
};
#define XHCI_CFC_DELAY 10
@ -2122,6 +2125,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);


@ -814,7 +814,7 @@ static int iowarrior_probe(struct usb_interface *interface,
break;
case USB_DEVICE_ID_CODEMERCS_IOW100:
dev->report_size = 13;
dev->report_size = 12;
break;
}
}


@ -27,7 +27,10 @@
#include "onboard_usb_hub.h"
static void onboard_hub_attach_usb_driver(struct work_struct *work);
static struct usb_device_driver onboard_hub_usbdev_driver;
static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
/************************** Platform driver **************************/
@ -45,7 +48,6 @@ struct onboard_hub {
bool is_powered_on;
bool going_away;
struct list_head udev_list;
struct work_struct attach_usb_driver_work;
struct mutex lock;
};
@ -271,8 +273,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
* This needs to be done deferred to avoid self-deadlocks on systems
* with nested onboard hubs.
*/
INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
schedule_work(&hub->attach_usb_driver_work);
schedule_work(&attach_usb_driver_work);
return 0;
}
@ -285,9 +286,6 @@ static int onboard_hub_remove(struct platform_device *pdev)
hub->going_away = true;
if (&hub->attach_usb_driver_work != current_work())
cancel_work_sync(&hub->attach_usb_driver_work);
mutex_lock(&hub->lock);
/* unbind the USB devices to avoid dangling references to this device */
@ -431,13 +429,13 @@ static int __init onboard_hub_init(void)
{
int ret;
ret = platform_driver_register(&onboard_hub_driver);
ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
if (ret)
return ret;
ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
ret = platform_driver_register(&onboard_hub_driver);
if (ret)
platform_driver_unregister(&onboard_hub_driver);
usb_deregister_device_driver(&onboard_hub_usbdev_driver);
return ret;
}
@ -447,6 +445,8 @@ static void __exit onboard_hub_exit(void)
{
usb_deregister_device_driver(&onboard_hub_usbdev_driver);
platform_driver_unregister(&onboard_hub_driver);
cancel_work_sync(&attach_usb_driver_work);
}
module_exit(onboard_hub_exit);


@ -411,8 +411,10 @@ static int omap2430_probe(struct platform_device *pdev)
memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
if (!res) {
ret = -EINVAL;
goto err2;
}
musb_res[i].start = res->start;
musb_res[i].end = res->end;


@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */

Some files were not shown because too many files have changed in this diff.