diff --git a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml index f333ee2288e7..11ae8ec3c739 100644 --- a/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml +++ b/Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml @@ -126,7 +126,7 @@ examples: - | #include - gpio@e000a000 { + gpio@a0020000 { compatible = "xlnx,xps-gpio-1.00.a"; reg = <0xa0020000 0x10000>; #gpio-cells = <2>; diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml index 9cfc0c7d23e0..46730687c662 100644 --- a/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml +++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-rdma.yaml @@ -61,6 +61,9 @@ properties: - description: used for 1st data pipe from RDMA - description: used for 2nd data pipe from RDMA + '#dma-cells': + const: 1 + required: - compatible - reg @@ -70,6 +73,7 @@ required: - clocks - iommus - mboxes + - '#dma-cells' additionalProperties: false @@ -80,16 +84,17 @@ examples: #include #include - mdp3_rdma0: mdp3-rdma0@14001000 { - compatible = "mediatek,mt8183-mdp3-rdma"; - reg = <0x14001000 0x1000>; - mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>; - mediatek,gce-events = , - ; - power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; - clocks = <&mmsys CLK_MM_MDP_RDMA0>, - <&mmsys CLK_MM_MDP_RSZ1>; - iommus = <&iommu>; - mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>, - <&gce 21 CMDQ_THR_PRIO_LOWEST>; + dma-controller@14001000 { + compatible = "mediatek,mt8183-mdp3-rdma"; + reg = <0x14001000 0x1000>; + mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>; + mediatek,gce-events = , + ; + power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; + clocks = <&mmsys CLK_MM_MDP_RDMA0>, + <&mmsys CLK_MM_MDP_RSZ1>; + iommus = <&iommu>; + mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST>, + <&gce 21 CMDQ_THR_PRIO_LOWEST>; + #dma-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml index 0baa77198fa2..64ea98aa0592 100644 --- a/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml +++ b/Documentation/devicetree/bindings/media/mediatek,mdp3-wrot.yaml @@ -50,6 +50,9 @@ properties: iommus: maxItems: 1 + '#dma-cells': + const: 1 + required: - compatible - reg @@ -58,6 +61,7 @@ required: - power-domains - clocks - iommus + - '#dma-cells' additionalProperties: false @@ -68,13 +72,14 @@ examples: #include #include - mdp3_wrot0: mdp3-wrot0@14005000 { - compatible = "mediatek,mt8183-mdp3-wrot"; - reg = <0x14005000 0x1000>; - mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>; - mediatek,gce-events = , - ; - power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; - clocks = <&mmsys CLK_MM_MDP_WROT0>; - iommus = <&iommu>; + dma-controller@14005000 { + compatible = "mediatek,mt8183-mdp3-wrot"; + reg = <0x14005000 0x1000>; + mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>; + mediatek,gce-events = , + ; + power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; + clocks = <&mmsys CLK_MM_MDP_WROT0>; + iommus = <&iommu>; + #dma-cells = <1>; }; diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml index b3661d7d4357..2a0ad332f5ce 100644 --- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml +++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml @@ -90,15 +90,16 @@ properties: 
description: connection point for input on the parallel interface properties: - bus-type: - enum: [5, 6] - endpoint: $ref: video-interfaces.yaml# unevaluatedProperties: false - required: - - bus-type + properties: + bus-type: + enum: [5, 6] + + required: + - bus-type anyOf: - required: diff --git a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml index ff317fd7c15b..2e1fcff3c280 100644 --- a/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml +++ b/Documentation/devicetree/bindings/nvmem/mxs-ocotp.yaml @@ -14,9 +14,11 @@ allOf: properties: compatible: - enum: - - fsl,imx23-ocotp - - fsl,imx28-ocotp + items: + - enum: + - fsl,imx23-ocotp + - fsl,imx28-ocotp + - const: fsl,ocotp "#address-cells": const: 1 @@ -40,7 +42,7 @@ additionalProperties: false examples: - | ocotp: efuse@8002c000 { - compatible = "fsl,imx28-ocotp"; + compatible = "fsl,imx28-ocotp", "fsl,ocotp"; #address-cells = <1>; #size-cells = <1>; reg = <0x8002c000 0x2000>; diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst index 44deb52beeb4..d0b241628cf1 100644 --- a/Documentation/driver-api/pci/p2pdma.rst +++ b/Documentation/driver-api/pci/p2pdma.rst @@ -83,19 +83,9 @@ this to include other types of resources like doorbells. Client Drivers -------------- -A client driver typically only has to conditionally change its DMA map -routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead -of the usual :c:func:`dma_map_sg()` function. Memory mapped in this -way does not need to be unmapped. - -The client may also, optionally, make use of -:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping -functions and when to use the regular mapping functions. In some -situations, it may be more appropriate to use a flag to indicate a -given request is P2P memory and map appropriately. It is important to -ensure that struct pages that back P2P memory stay out of code that -does not have support for them as other code may treat the pages as -regular memory which may not be appropriate. +A client driver only has to use the mapping API :c:func:`dma_map_sg()` +and :c:func:`dma_unmap_sg()` functions as usual, and the implementation +will do the right thing for the P2P capable memory. 
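[Editor's illustration, not part of the patch] The simplified client-driver guidance above can be shown with a minimal C sketch. The helper name client_do_transfer() and the device/scatterlist setup are assumptions for illustration; only dma_map_sg()/dma_unmap_sg() come from the documentation text, and with the updated implementation they handle P2P capable memory without any P2P-specific mapping call.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/*
 * Illustrative sketch only: map a scatterlist that may contain P2PDMA
 * pages using the ordinary DMA API, as described in the paragraph above.
 */
static int client_do_transfer(struct device *dev, struct scatterlist *sgl,
			      int nents, enum dma_data_direction dir)
{
	int mapped;

	/* Works for both regular memory and P2P capable memory. */
	mapped = dma_map_sg(dev, sgl, nents, dir);
	if (!mapped)
		return -ENOMEM;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, nents, dir);
	return 0;
}
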
Orchestrator Drivers diff --git a/MAINTAINERS b/MAINTAINERS index cfbf50390ac5..b1149b2acebc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -10845,6 +10845,8 @@ L: linux-kernel@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: kernel/irq/ +F: include/linux/group_cpus.h +F: lib/group_cpus.c IRQCHIP DRIVERS M: Thomas Gleixner diff --git a/Makefile b/Makefile index 4492c78f9178..7ab870eb945a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 68 +SUBLEVEL = 75 EXTRAVERSION = NAME = Curry Ramen diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 7010ebbcb440..f9ec60375ec5 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -39134,6 +39134,10 @@ member { id: 0x230f36dc type_id: 0x0312ab60 } +member { + id: 0x23531e28 + type_id: 0x026208b2 +} member { id: 0x2380a48f type_id: 0x012ce22f @@ -40048,6 +40052,11 @@ member { type_id: 0x5d86aa37 offset: 96 } +member { + id: 0x34aaae1a + type_id: 0x5d84d1ff + offset: 24 +} member { id: 0x34be27ed type_id: 0x5dd6efa4 @@ -51111,10 +51120,10 @@ member { offset: 160 } member { - id: 0xb2dd57f2 + id: 0xb2dd5b41 name: "autoconf" type_id: 0xb3e7bac9 - offset: 30 + offset: 6 bitsize: 1 } member { @@ -141062,10 +141071,10 @@ member { offset: 9920 } member { - id: 0x7adb50ba + id: 0x7adb5caf name: "onlink" type_id: 0xb3e7bac9 - offset: 31 + offset: 7 bitsize: 1 } member { @@ -164807,10 +164816,9 @@ member { offset: 1024 } member { - id: 0x688b9047 + id: 0x688b9626 name: "reserved" type_id: 0xb3e7bac9 - offset: 24 bitsize: 6 } member { @@ -209482,6 +209490,16 @@ struct_union { member_id: 0x2d8a4e32 } } +struct_union { + id: 0x026208b2 + kind: STRUCT + definition { + bytesize: 1 + member_id: 0x688b9626 + member_id: 0xb2dd5b41 + member_id: 0x7adb5caf + } +} struct_union { id: 0x02c70092 kind: STRUCT @@ -214638,6 +214656,15 @@ struct_union { member_id: 0x36752b74 } } +struct_union { + id: 0x5d84d1ff + kind: UNION + definition { + bytesize: 1 + member_id: 0x2ddb63e4 + member_id: 0x23531e28 + } +} struct_union { id: 0x5d86aa37 kind: UNION @@ -249206,9 +249233,7 @@ struct_union { member_id: 0x5ce532c4 member_id: 0xb5de8e04 member_id: 0x2165da89 - member_id: 0x688b9047 - member_id: 0xb2dd57f2 - member_id: 0x7adb50ba + member_id: 0x34aaae1a member_id: 0xe91a1d71 member_id: 0xbada6e7d member_id: 0x08cabd3c diff --git a/arch/Kconfig b/arch/Kconfig index 322105079929..dff499ac1fd0 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -34,6 +34,9 @@ config ARCH_HAS_SUBPAGE_FAULTS config HOTPLUG_SMT bool +config SMT_NUM_THREADS_DYNAMIC + bool + config GENERIC_ENTRY bool diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 3c1590c27fae..723abcb10c80 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c @@ -61,7 +61,7 @@ struct rt_sigframe { unsigned int sigret_magic; }; -static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) +static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs) { int err = 0; #ifndef CONFIG_ISA_ARCOMPACT @@ -74,12 +74,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) #else v2abi.r58 = v2abi.r59 = 0; #endif - err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi)); + err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi)); #endif return err; } -static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs) +static int restore_arcv2_regs(struct 
sigcontext __user *mctx, struct pt_regs *regs) { int err = 0; #ifndef CONFIG_ISA_ARCOMPACT diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 32d397b3950b..b2e7f6a71074 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -349,6 +349,7 @@ usb: target-module@47400000 { , , ; + ti,sysc-delay-us = <2>; clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 97ce0c4f1df7..a79920ec461f 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -144,7 +144,7 @@ ocp: ocp { l3-noc@44000000 { compatible = "ti,dra7-l3-noc"; - reg = <0x44000000 0x1000>, + reg = <0x44000000 0x1000000>, <0x45000000 0x1000>; interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi index 4b57e9f5bc64..2b3927a829b7 100644 --- a/arch/arm/boot/dts/qcom-apq8064.dtsi +++ b/arch/arm/boot/dts/qcom-apq8064.dtsi @@ -750,7 +750,7 @@ pwrkey@1c { xoadc: xoadc@197 { compatible = "qcom,pm8921-adc"; - reg = <197>; + reg = <0x197>; interrupts-extended = <&pmicintc 78 IRQ_TYPE_EDGE_RISING>; #address-cells = <2>; #size-cells = <0>; diff --git a/arch/arm/boot/dts/qcom-sdx65.dtsi b/arch/arm/boot/dts/qcom-sdx65.dtsi index ecb9171e4da5..ebb78b489e63 100644 --- a/arch/arm/boot/dts/qcom-sdx65.dtsi +++ b/arch/arm/boot/dts/qcom-sdx65.dtsi @@ -401,7 +401,7 @@ restart@c264000 { reg = <0x0c264000 0x1000>; }; - spmi_bus: qcom,spmi@c440000 { + spmi_bus: spmi@c440000 { compatible = "qcom,spmi-pmic-arb"; reg = <0xc440000 0xd00>, <0xc600000 0x2000000>, diff --git a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts index e539cc80bef8..942a6ca38d97 100644 --- a/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157a-dk1-scmi.dts @@ -11,7 +11,7 @@ / { model = "STMicroelectronics STM32MP157A-DK1 SCMI Discovery Board"; - compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157a-dk1", "st,stm32mp157"; + compatible = "st,stm32mp157a-dk1-scmi", "st,stm32mp157"; reserved-memory { optee@de000000 { diff --git a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts index 97e4f94b0a24..99c4ff1f5c21 100644 --- a/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157c-dk2-scmi.dts @@ -11,7 +11,7 @@ / { model = "STMicroelectronics STM32MP157C-DK2 SCMI Discovery Board"; - compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157c-dk2", "st,stm32mp157"; + compatible = "st,stm32mp157c-dk2-scmi", "st,stm32mp157"; reserved-memory { optee@de000000 { diff --git a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts index 9cf0a44d2f47..21010458b36f 100644 --- a/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157c-ed1-scmi.dts @@ -11,7 +11,7 @@ / { model = "STMicroelectronics STM32MP157C-ED1 SCMI eval daughter"; - compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157"; + compatible = "st,stm32mp157c-ed1-scmi", "st,stm32mp157"; reserved-memory { optee@fe000000 { diff --git a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts index 3b9dd6f4ccc9..d37637149919 100644 --- a/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts +++ b/arch/arm/boot/dts/stm32mp157c-ev1-scmi.dts @@ -11,8 +11,7 @@ / { model = "STMicroelectronics 
STM32MP157C-EV1 SCMI eval daughter on eval mother"; - compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ev1", "st,stm32mp157c-ed1", - "st,stm32mp157"; + compatible = "st,stm32mp157c-ev1-scmi", "st,stm32mp157c-ed1", "st,stm32mp157"; reserved-memory { optee@fe000000 { diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index c8cbd9a30791..672ffb0b5f3a 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -4,12 +4,14 @@ menuconfig ARCH_DAVINCI bool "TI DaVinci" depends on ARCH_MULTI_V5 depends on CPU_LITTLE_ENDIAN + select CPU_ARM926T select DAVINCI_TIMER select ZONE_DMA select PM_GENERIC_DOMAINS if PM select PM_GENERIC_DOMAINS_OF if PM && OF select REGMAP_MMIO select RESET_CONTROLLER + select PINCTRL select PINCTRL_SINGLE if ARCH_DAVINCI diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 59755b5a1ad7..75091aa7269a 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -793,11 +793,16 @@ void __init omap_soc_device_init(void) soc_dev_attr->machine = soc_name; soc_dev_attr->family = omap_get_family(); + if (!soc_dev_attr->family) { + kfree(soc_dev_attr); + return; + } soc_dev_attr->revision = soc_rev; soc_dev_attr->custom_attr_group = omap_soc_groups[0]; soc_dev = soc_device_register(soc_dev_attr); if (IS_ERR(soc_dev)) { + kfree(soc_dev_attr->family); kfree(soc_dev_attr); return; } diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c index 26cbce135338..f779e386b6e7 100644 --- a/arch/arm/mach-sunxi/mc_smp.c +++ b/arch/arm/mach-sunxi/mc_smp.c @@ -804,16 +804,16 @@ static int __init sunxi_mc_smp_init(void) for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) { ret = of_property_match_string(node, "enable-method", sunxi_mc_smp_data[i].enable_method); - if (!ret) + if (ret >= 0) break; } - is_a83t = sunxi_mc_smp_data[i].is_a83t; - of_node_put(node); - if (ret) + if (ret < 0) return -ENODEV; + is_a83t = sunxi_mc_smp_data[i].is_a83t; + if (!sunxi_mc_smp_cpu_table_init()) return -EINVAL; diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 908eaf9f3028..740d3bcf7f20 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -171,7 +171,7 @@ ifndef KBUILD_MIXED_TREE all: $(notdir $(KBUILD_IMAGE)) endif - +vmlinuz.efi: Image Image vmlinuz.efi: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi index d583db18f74c..7a410d73600b 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi @@ -1303,7 +1303,7 @@ gpu_3d: gpu@38000000 { assigned-clocks = <&clk IMX8MM_CLK_GPU3D_CORE>, <&clk IMX8MM_GPU_PLL_OUT>; assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>; - assigned-clock-rates = <0>, <1000000000>; + assigned-clock-rates = <0>, <800000000>; power-domains = <&pgc_gpu>; }; @@ -1318,7 +1318,7 @@ gpu_2d: gpu@38008000 { assigned-clocks = <&clk IMX8MM_CLK_GPU2D_CORE>, <&clk IMX8MM_GPU_PLL_OUT>; assigned-clock-parents = <&clk IMX8MM_GPU_PLL_OUT>; - assigned-clock-rates = <0>, <1000000000>; + assigned-clock-rates = <0>, <800000000>; power-domains = <&pgc_gpu>; }; diff --git a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi index 970047f2dabd..c06e011a6c3f 100644 --- a/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hikey970-pmic.dtsi @@ -25,9 +25,6 @@ pmic: pmic@0 { gpios = <&gpio28 0 0>; regulators { - #address-cells = <1>; - #size-cells = <0>; - ldo3: ldo3 { /* 
HDMI */ regulator-name = "ldo3"; regulator-min-microvolt = <1500000>; diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts index 200f97e1c4c9..37350e5fa253 100644 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts @@ -130,7 +130,7 @@ rtc@6f { compatible = "microchip,mcp7940x"; reg = <0x6f>; interrupt-parent = <&gpiosb>; - interrupts = <5 0>; /* GPIO2_5 */ + interrupts = <5 IRQ_TYPE_EDGE_FALLING>; /* GPIO2_5 */ }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 10779a9947fe..d5d9b954c449 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1586,7 +1586,7 @@ mmsys: syscon@14000000 { mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>; }; - mdp3-rdma0@14001000 { + dma-controller0@14001000 { compatible = "mediatek,mt8183-mdp3-rdma"; reg = <0 0x14001000 0 0x1000>; mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>; @@ -1598,6 +1598,7 @@ mdp3-rdma0@14001000 { iommus = <&iommu M4U_PORT_MDP_RDMA0>; mboxes = <&gce 20 CMDQ_THR_PRIO_LOWEST 0>, <&gce 21 CMDQ_THR_PRIO_LOWEST 0>; + #dma-cells = <1>; }; mdp3-rsz0@14003000 { @@ -1618,7 +1619,7 @@ mdp3-rsz1@14004000 { clocks = <&mmsys CLK_MM_MDP_RSZ1>; }; - mdp3-wrot0@14005000 { + dma-controller@14005000 { compatible = "mediatek,mt8183-mdp3-wrot"; reg = <0 0x14005000 0 0x1000>; mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x5000 0x1000>; @@ -1627,6 +1628,7 @@ mdp3-wrot0@14005000 { power-domains = <&spm MT8183_POWER_DOMAIN_DISP>; clocks = <&mmsys CLK_MM_MDP_WROT0>; iommus = <&iommu M4U_PORT_MDP_WROT0>; + #dma-cells = <1>; }; mdp3-wdma@14006000 { diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi index 43ff8f1f1475..1533c61cb106 100644 --- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi +++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi @@ -146,7 +146,7 @@ reserved-memory { ranges; rpm_msg_ram: memory@60000 { - reg = <0x0 0x60000 0x0 0x6000>; + reg = <0x0 0x00060000 0x0 0x6000>; no-map; }; @@ -181,7 +181,7 @@ soc: soc { prng: qrng@e1000 { compatible = "qcom,prng-ee"; - reg = <0x0 0xe3000 0x0 0x1000>; + reg = <0x0 0x000e3000 0x0 0x1000>; clocks = <&gcc GCC_PRNG_AHB_CLK>; clock-names = "core"; }; @@ -201,8 +201,8 @@ crypto: crypto@73a000 { compatible = "qcom,crypto-v5.1"; reg = <0x0 0x0073a000 0x0 0x6000>; clocks = <&gcc GCC_CRYPTO_AHB_CLK>, - <&gcc GCC_CRYPTO_AXI_CLK>, - <&gcc GCC_CRYPTO_CLK>; + <&gcc GCC_CRYPTO_AXI_CLK>, + <&gcc GCC_CRYPTO_CLK>; clock-names = "iface", "bus", "core"; dmas = <&cryptobam 2>, <&cryptobam 3>; dma-names = "rx", "tx"; @@ -272,7 +272,7 @@ blsp1_uart3: serial@78b1000 { reg = <0x0 0x078b1000 0x0 0x200>; interrupts = ; clocks = <&gcc GCC_BLSP1_UART3_APPS_CLK>, - <&gcc GCC_BLSP1_AHB_CLK>; + <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; status = "disabled"; }; @@ -285,7 +285,7 @@ blsp1_spi1: spi@78b5000 { interrupts = ; spi-max-frequency = <50000000>; clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>, - <&gcc GCC_BLSP1_AHB_CLK>; + <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp_dma 12>, <&blsp_dma 13>; dma-names = "tx", "rx"; @@ -300,7 +300,7 @@ blsp1_spi2: spi@78b6000 { interrupts = ; spi-max-frequency = <50000000>; clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>, - <&gcc GCC_BLSP1_AHB_CLK>; + <&gcc GCC_BLSP1_AHB_CLK>; clock-names = "core", "iface"; dmas = <&blsp_dma 14>, <&blsp_dma 15>; dma-names = "tx", "rx"; @@ 
-358,8 +358,8 @@ qpic_nand: nand@79b0000 { clock-names = "core", "aon"; dmas = <&qpic_bam 0>, - <&qpic_bam 1>, - <&qpic_bam 2>; + <&qpic_bam 1>, + <&qpic_bam 2>; dma-names = "tx", "rx", "cmd"; pinctrl-0 = <&qpic_pins>; pinctrl-names = "default"; @@ -372,10 +372,10 @@ intc: interrupt-controller@b000000 { #size-cells = <2>; interrupt-controller; #interrupt-cells = <0x3>; - reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/ - <0x0 0x0b002000 0x0 0x1000>, /*GICC*/ - <0x0 0x0b001000 0x0 0x1000>, /*GICH*/ - <0x0 0x0b004000 0x0 0x1000>; /*GICV*/ + reg = <0x0 0x0b000000 0x0 0x1000>, /*GICD*/ + <0x0 0x0b002000 0x0 0x1000>, /*GICC*/ + <0x0 0x0b001000 0x0 0x1000>, /*GICH*/ + <0x0 0x0b004000 0x0 0x1000>; /*GICV*/ interrupts = ; ranges = <0 0 0 0xb00a000 0 0xffd>; @@ -388,7 +388,7 @@ v2m@0 { pcie_phy: phy@84000 { compatible = "qcom,ipq6018-qmp-pcie-phy"; - reg = <0x0 0x84000 0x0 0x1bc>; /* Serdes PLL */ + reg = <0x0 0x00084000 0x0 0x1bc>; /* Serdes PLL */ status = "disabled"; #address-cells = <2>; #size-cells = <2>; @@ -404,9 +404,10 @@ pcie_phy: phy@84000 { "common"; pcie_phy0: phy@84200 { - reg = <0x0 0x84200 0x0 0x16c>, /* Serdes Tx */ - <0x0 0x84400 0x0 0x200>, /* Serdes Rx */ - <0x0 0x84800 0x0 0x4f4>; /* PCS: Lane0, COM, PCIE */ + reg = <0x0 0x00084200 0x0 0x16c>, /* Serdes Tx */ + <0x0 0x00084400 0x0 0x200>, /* Serdes Rx */ + <0x0 0x00084800 0x0 0x1f0>, /* PCS: Lane0, COM, PCIE */ + <0x0 0x00084c00 0x0 0xf4>; /* pcs_misc */ #phy-cells = <0>; clocks = <&gcc GCC_PCIE0_PIPE_CLK>; @@ -628,7 +629,7 @@ mdio: mdio@90000 { #address-cells = <1>; #size-cells = <0>; compatible = "qcom,ipq6018-mdio", "qcom,ipq4019-mdio"; - reg = <0x0 0x90000 0x0 0x64>; + reg = <0x0 0x00090000 0x0 0x64>; clocks = <&gcc GCC_MDIO_AHB_CLK>; clock-names = "gcc_mdio_ahb_clk"; status = "disabled"; @@ -636,7 +637,7 @@ mdio: mdio@90000 { qusb_phy_1: qusb@59000 { compatible = "qcom,ipq6018-qusb2-phy"; - reg = <0x0 0x059000 0x0 0x180>; + reg = <0x0 0x00059000 0x0 0x180>; #phy-cells = <0>; clocks = <&gcc GCC_USB1_PHY_CFG_AHB_CLK>, @@ -668,23 +669,23 @@ usb2: usb@70f8800 { status = "disabled"; dwc_1: usb@7000000 { - compatible = "snps,dwc3"; - reg = <0x0 0x7000000 0x0 0xcd00>; - interrupts = ; - phys = <&qusb_phy_1>; - phy-names = "usb2-phy"; - tx-fifo-resize; - snps,is-utmi-l1-suspend; - snps,hird-threshold = /bits/ 8 <0x0>; - snps,dis_u2_susphy_quirk; - snps,dis_u3_susphy_quirk; - dr_mode = "host"; + compatible = "snps,dwc3"; + reg = <0x0 0x07000000 0x0 0xcd00>; + interrupts = ; + phys = <&qusb_phy_1>; + phy-names = "usb2-phy"; + tx-fifo-resize; + snps,is-utmi-l1-suspend; + snps,hird-threshold = /bits/ 8 <0x0>; + snps,dis_u2_susphy_quirk; + snps,dis_u3_susphy_quirk; + dr_mode = "host"; }; }; ssphy_0: ssphy@78000 { compatible = "qcom,ipq6018-qmp-usb3-phy"; - reg = <0x0 0x78000 0x0 0x1C4>; + reg = <0x0 0x00078000 0x0 0x1c4>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -701,7 +702,7 @@ ssphy_0: ssphy@78000 { usb0_ssphy: phy@78200 { reg = <0x0 0x00078200 0x0 0x130>, /* Tx */ <0x0 0x00078400 0x0 0x200>, /* Rx */ - <0x0 0x00078800 0x0 0x1F8>, /* PCS */ + <0x0 0x00078800 0x0 0x1f8>, /* PCS */ <0x0 0x00078600 0x0 0x044>; /* PCS misc */ #phy-cells = <0>; #clock-cells = <0>; @@ -713,7 +714,7 @@ usb0_ssphy: phy@78200 { qusb_phy_0: qusb@79000 { compatible = "qcom,ipq6018-qusb2-phy"; - reg = <0x0 0x079000 0x0 0x180>; + reg = <0x0 0x00079000 0x0 0x180>; #phy-cells = <0>; clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>, @@ -726,7 +727,7 @@ qusb_phy_0: qusb@79000 { usb3: usb@8af8800 { compatible = "qcom,ipq6018-dwc3", "qcom,dwc3"; - reg = <0x0 0x8AF8800 
0x0 0x400>; + reg = <0x0 0x8af8800 0x0 0x400>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -745,14 +746,14 @@ usb3: usb@8af8800 { <&gcc GCC_USB0_MOCK_UTMI_CLK>; assigned-clock-rates = <133330000>, <133330000>, - <20000000>; + <24000000>; resets = <&gcc GCC_USB0_BCR>; status = "disabled"; dwc_0: usb@8a00000 { compatible = "snps,dwc3"; - reg = <0x0 0x8A00000 0x0 0xcd00>; + reg = <0x0 0x8a00000 0x0 0xcd00>; interrupts = ; phys = <&qusb_phy_0>, <&usb0_ssphy>; phy-names = "usb2-phy", "usb3-phy"; diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 9731a7c63d53..1defbe0404e2 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -63,8 +63,8 @@ led-user4 { function = LED_FUNCTION_INDICATOR; color = ; gpios = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "panic-indicator"; default-state = "off"; + panic-indicator; }; led-wlan { diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index 6f0ee4e13ef1..78e537f1d796 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -3378,7 +3378,7 @@ watchdog@17c10000 { compatible = "qcom,apss-wdt-sc7180", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; timer@17c20000{ diff --git a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi index 25f31c81b2b7..efe6ea538ad2 100644 --- a/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280-chrome-common.dtsi @@ -56,6 +56,26 @@ mba_mem: memory@9c700000 { }; }; +&lpass_aon { + status = "okay"; +}; + +&lpass_core { + status = "okay"; +}; + +&lpass_hm { + status = "okay"; +}; + +&lpasscc { + status = "okay"; +}; + +&pdc_reset { + status = "okay"; +}; + /* The PMIC PON code isn't compatible w/ how Chrome EC/BIOS handle things. 
*/ &pmk8350_pon { status = "disabled"; @@ -93,6 +113,10 @@ &rmtfs_mem { reg = <0x0 0x9c900000 0x0 0x800000>; }; +&watchdog { + status = "okay"; +}; + &wifi { status = "okay"; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index aea356c63b9a..7fc8c2045022 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -888,6 +888,7 @@ sdhc_1: mmc@7c4000 { bus-width = <8>; supports-cqe; + dma-coherent; qcom,dll-config = <0x0007642c>; qcom,ddr-config = <0x80040868>; @@ -2187,6 +2188,7 @@ lpasscc: lpasscc@3000000 { clocks = <&gcc GCC_CFG_NOC_LPASS_CLK>; clock-names = "iface"; #clock-cells = <1>; + status = "reserved"; /* Owned by ADSP firmware */ }; lpass_rx_macro: codec@3200000 { @@ -2339,6 +2341,7 @@ lpass_aon: clock-controller@3380000 { clock-names = "bi_tcxo", "bi_tcxo_ao", "iface"; #clock-cells = <1>; #power-domain-cells = <1>; + status = "reserved"; /* Owned by ADSP firmware */ }; lpass_core: clock-controller@3900000 { @@ -2349,6 +2352,7 @@ lpass_core: clock-controller@3900000 { power-domains = <&lpass_hm LPASS_CORE_CC_LPASS_CORE_HM_GDSC>; #clock-cells = <1>; #power-domain-cells = <1>; + status = "reserved"; /* Owned by ADSP firmware */ }; lpass_cpu: audio@3987000 { @@ -2419,6 +2423,7 @@ lpass_hm: clock-controller@3c00000 { clock-names = "bi_tcxo"; #clock-cells = <1>; #power-domain-cells = <1>; + status = "reserved"; /* Owned by ADSP firmware */ }; lpass_ag_noc: interconnect@3c40000 { @@ -2529,7 +2534,8 @@ gpu: gpu@3d00000 { "cx_mem", "cx_dbgc"; interrupts = ; - iommus = <&adreno_smmu 0 0x401>; + iommus = <&adreno_smmu 0 0x400>, + <&adreno_smmu 1 0x400>; operating-points-v2 = <&gpu_opp_table>; qcom,gmu = <&gmu>; interconnects = <&gem_noc MASTER_GFX3D 0 &mc_virt SLAVE_EBI1 0>; @@ -2696,6 +2702,7 @@ adreno_smmu: iommu@3da0000 { "gpu_cc_hub_aon_clk"; power-domains = <&gpucc GPU_CC_CX_GDSC>; + dma-coherent; }; remoteproc_mpss: remoteproc@4080000 { @@ -3265,6 +3272,7 @@ sdhc_2: mmc@8804000 { operating-points-v2 = <&sdhc2_opp_table>; bus-width = <4>; + dma-coherent; qcom,dll-config = <0x0007642c>; @@ -3386,8 +3394,8 @@ usb_2: usb@8cf8800 { assigned-clock-rates = <19200000>, <200000000>; interrupts-extended = <&intc GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>, - <&pdc 12 IRQ_TYPE_EDGE_RISING>, - <&pdc 13 IRQ_TYPE_EDGE_RISING>; + <&pdc 12 IRQ_TYPE_EDGE_BOTH>, + <&pdc 13 IRQ_TYPE_EDGE_BOTH>; interrupt-names = "hs_phy_irq", "dp_hs_phy_irq", "dm_hs_phy_irq"; @@ -4195,6 +4203,7 @@ pdc_reset: reset-controller@b5e0000 { compatible = "qcom,sc7280-pdc-global"; reg = <0 0x0b5e0000 0 0x20000>; #reset-cells = <1>; + status = "reserved"; /* Owned by firmware */ }; tsens0: thermal-sensor@c263000 { @@ -5186,11 +5195,12 @@ gic-its@17a40000 { }; }; - watchdog@17c10000 { + watchdog: watchdog@17c10000 { compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; + status = "reserved"; /* Owned by Gunyah hyp */ }; timer@17c20000 { diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index 405835ad28bc..7e3aaf5de3f5 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -1653,7 +1653,7 @@ watchdog@17c10000 { compatible = "qcom,apss-wdt-sc8280xp", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; timer@17c20000 { diff --git a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi index 
a5c0c788969f..43ee28db61aa 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-cheza.dtsi @@ -150,15 +150,15 @@ &cpufreq_hw { }; &psci { - /delete-node/ cpu0; - /delete-node/ cpu1; - /delete-node/ cpu2; - /delete-node/ cpu3; - /delete-node/ cpu4; - /delete-node/ cpu5; - /delete-node/ cpu6; - /delete-node/ cpu7; - /delete-node/ cpu-cluster0; + /delete-node/ power-domain-cpu0; + /delete-node/ power-domain-cpu1; + /delete-node/ power-domain-cpu2; + /delete-node/ power-domain-cpu3; + /delete-node/ power-domain-cpu4; + /delete-node/ power-domain-cpu5; + /delete-node/ power-domain-cpu6; + /delete-node/ power-domain-cpu7; + /delete-node/ power-domain-cluster; }; &cpus { @@ -351,7 +351,9 @@ flash@0 { &apps_rsc { - pm8998-rpmh-regulators { + /delete-property/ power-domains; + + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -633,7 +635,7 @@ src_pp1800_lvs2: lvs2 { }; }; - pm8005-rpmh-regulators { + regulators-1 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index c9efcb894a52..135ff4368c4a 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -66,8 +66,8 @@ led-0 { function = LED_FUNCTION_INDICATOR; color = ; gpios = <&pm8998_gpio 13 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "panic-indicator"; default-state = "off"; + panic-indicator; }; led-1 { @@ -271,7 +271,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; vdd-s1-supply = <&vph_pwr>; @@ -396,7 +396,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi index 20f275f8694d..e2921640880a 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-lg-common.dtsi @@ -166,7 +166,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -419,7 +419,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -433,7 +433,7 @@ vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index 64958dee17d8..b47e333aa351 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts @@ -117,7 +117,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -382,7 +382,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -396,7 +396,7 @@ vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi index 392461c29e76..0713b774a97b 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi @@ -144,7 +144,7 
@@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -280,7 +280,7 @@ vreg_l28a_3p0: ldo28 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -294,7 +294,7 @@ vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts index 83261c9bb4f2..b65c35865dab 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts @@ -110,7 +110,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -375,7 +375,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -389,7 +389,7 @@ vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi index d6918e6d1979..249a715d5aae 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845-sony-xperia-tama.dtsi @@ -78,7 +78,7 @@ ramoops@ffc00000 { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -308,7 +308,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -319,7 +319,7 @@ src_vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts index 0f470cf1ed1c..6d6b3dd69947 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts @@ -125,7 +125,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts index 093b04359ec3..ffbe45a99b74 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-polaris.dts @@ -143,7 +143,7 @@ vreg_s4a_1p8: vreg-s4a-1p8 { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; @@ -343,7 +343,7 @@ vreg_lvs2a_1p8: lvs2 { }; }; - pmi8998-rpmh-regulators { + regulators-1 { compatible = "qcom,pmi8998-rpmh-regulators"; qcom,pmic-id = "b"; @@ -355,7 +355,7 @@ vreg_bob: bob { }; }; - pm8005-rpmh-regulators { + regulators-2 { compatible = "qcom,pm8005-rpmh-regulators"; qcom,pmic-id = "c"; diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 52c9f5639f8a..1e6841902900 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -5019,7 +5019,7 @@ watchdog@17980000 { compatible = "qcom,apss-wdt-sdm845", "qcom,kpss-wdt"; reg = <0 0x17980000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; apss_shared: mailbox@17990000 { 
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts index 74f43da51fa5..48a41ace8fc5 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts @@ -99,7 +99,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; diff --git a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts index d028a7eb364a..c169d2870bdf 100644 --- a/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts +++ b/arch/arm64/boot/dts/qcom/sdm850-samsung-w737.dts @@ -129,7 +129,7 @@ &adsp_pas { }; &apps_rsc { - pm8998-rpmh-regulators { + regulators-0 { compatible = "qcom,pm8998-rpmh-regulators"; qcom,pmic-id = "a"; diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi index cea7ca3f326f..9da373090593 100644 --- a/arch/arm64/boot/dts/qcom/sm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi @@ -1462,7 +1462,7 @@ watchdog@17c10000 { compatible = "qcom,apss-wdt-sm6350", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; timer@17c20000 { diff --git a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts index 3331ee957d64..368da4c7f41b 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-hdk.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-hdk.dts @@ -126,8 +126,6 @@ vdda_qrefs_0p875_5: vdda_sp_sensor: vdda_ufs_2ln_core_1: vdda_ufs_2ln_core_2: - vdda_usb_ss_dp_core_1: - vdda_usb_ss_dp_core_2: vdda_qlink_lv: vdda_qlink_lv_ck: vreg_l5a_0p875: ldo5 { @@ -209,6 +207,12 @@ vreg_l17a_3p0: ldo17 { regulator-max-microvolt = <3008000>; regulator-initial-mode = ; }; + + vreg_l18a_0p8: ldo18 { + regulator-min-microvolt = <880000>; + regulator-max-microvolt = <880000>; + regulator-initial-mode = ; + }; }; pm8150l-rpmh-regulators { @@ -439,13 +443,13 @@ &usb_2_hsphy { &usb_1_qmpphy { status = "okay"; vdda-phy-supply = <&vreg_l3c_1p2>; - vdda-pll-supply = <&vdda_usb_ss_dp_core_1>; + vdda-pll-supply = <&vreg_l18a_0p8>; }; &usb_2_qmpphy { status = "okay"; vdda-phy-supply = <&vreg_l3c_1p2>; - vdda-pll-supply = <&vdda_usb_ss_dp_core_1>; + vdda-pll-supply = <&vreg_l5a_0p875>; }; &usb_1 { diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index c586378fc6bc..c3c12b0cd416 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -3940,7 +3940,7 @@ watchdog@17c10000 { compatible = "qcom,apss-wdt-sm8150", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; timer@17c20000 { diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index 4d9b30f0b284..3d02adbc0b62 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -4879,7 +4879,7 @@ watchdog@17c10000 { compatible = "qcom,apss-wdt-sm8250", "qcom,kpss-wdt"; reg = <0 0x17c10000 0 0x1000>; clocks = <&sleep_clk>; - interrupts = ; + interrupts = ; }; timer@17c20000 { diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 793768a2c9e1..888bf4cd73c3 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -903,9 +903,9 @@ spi19: spi@894000 { }; }; - gpi_dma0: dma-controller@9800000 { + gpi_dma0: dma-controller@900000 { compatible = "qcom,sm8350-gpi-dma", 
"qcom,sm6350-gpi-dma"; - reg = <0 0x09800000 0 0x60000>; + reg = <0 0x00900000 0 0x60000>; interrupts = , , , diff --git a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi index 895f0bd9f754..541b1e73b65e 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779g0-white-hawk-cpu.dtsi @@ -125,6 +125,9 @@ &extalr_clk { }; &hscif0 { + pinctrl-0 = <&hscif0_pins>; + pinctrl-names = "default"; + status = "okay"; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi index ee6095baba4d..7d9b8064ad2e 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-chromebook.dtsi @@ -510,8 +510,7 @@ wacky_spi_audio: spi2@0 { &pci_rootport { mvl_wifi: wifi@0,0 { compatible = "pci1b4b,2b42"; - reg = <0x83010000 0x0 0x00000000 0x0 0x00100000 - 0x83010000 0x0 0x00100000 0x0 0x00100000>; + reg = <0x0000 0x0 0x0 0x0 0x0>; interrupt-parent = <&gpio0>; interrupts = <8 IRQ_TYPE_LEVEL_LOW>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts index 853e88455e75..9e4b12ed62cb 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-dumo.dts @@ -34,8 +34,8 @@ &mipi_panel { &pci_rootport { wifi@0,0 { compatible = "qcom,ath10k"; - reg = <0x00010000 0x0 0x00000000 0x0 0x00000000>, - <0x03010010 0x0 0x00000000 0x0 0x00200000>; + reg = <0x00000000 0x0 0x00000000 0x0 0x00000000>, + <0x03000010 0x0 0x00000000 0x0 0x00200000>; qcom,ath10k-calibration-variant = "GO_DUMO"; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi index 23bfba86daab..7ba25315dd9a 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi @@ -489,6 +489,7 @@ pci_rootport: pcie@0,0 { #address-cells = <3>; #size-cells = <2>; ranges; + device_type = "pci"; }; }; diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi index 234b5bbda120..f4d6dbbbddcd 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi @@ -958,7 +958,7 @@ pcie2x1: pcie@fe260000 { , , ; - interrupt-names = "sys", "pmc", "msi", "legacy", "err"; + interrupt-names = "sys", "pmc", "msg", "legacy", "err"; bus-range = <0x0 0xf>; clocks = <&cru ACLK_PCIE20_MST>, <&cru ACLK_PCIE20_SLV>, <&cru ACLK_PCIE20_DBI>, <&cru PCLK_PCIE20>, diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index bc4b50bcd177..9301ea388802 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -245,7 +245,7 @@ main_gpio0: gpio@600000 { <193>, <194>, <195>; interrupt-controller; #interrupt-cells = <2>; - ti,ngpio = <87>; + ti,ngpio = <92>; ti,davinci-gpio-unbanked = <0>; power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 77 0>; @@ -263,7 +263,7 @@ main_gpio1: gpio@601000 { <183>, <184>, <185>; interrupt-controller; #interrupt-cells = <2>; - ti,ngpio = <88>; + ti,ngpio = <52>; ti,davinci-gpio-unbanked = <0>; power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>; clocks = <&k3_clks 78 0>; diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi index 
ebb1c5ce7aec..83dd8993027a 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi @@ -856,7 +856,7 @@ dss: dss@4a00000 { assigned-clocks = <&k3_clks 67 2>; assigned-clock-parents = <&k3_clks 67 5>; - interrupts = ; + interrupts = ; dma-coherent; diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 60df8459a337..97df104913fb 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -826,6 +826,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) if (pte_hw_dirty(pte)) pte = pte_mkdirty(pte); pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); + /* + * If we end up clearing hw dirtiness for a sw-dirty PTE, set hardware + * dirtiness again. + */ + if (pte_sw_dirty(pte)) + pte = pte_mkdirty(pte); return pte; } diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index dd187094ab40..dbf748a10265 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -489,7 +489,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_timer_vcpu_terminate(vcpu); kvm_pmu_vcpu_destroy(vcpu); - + kvm_vgic_vcpu_destroy(vcpu); kvm_arm_vcpu_destroy(vcpu); } diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c index f2f3bf4a04b0..0919e3b8f46e 100644 --- a/arch/arm64/kvm/vgic/vgic-init.c +++ b/arch/arm64/kvm/vgic/vgic-init.c @@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm) vgic_v4_teardown(kvm); } -void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) +static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; @@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) vgic_flush_pending_lpis(vcpu); INIT_LIST_HEAD(&vgic_cpu->ap_list_head); - vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; + if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) { + vgic_unregister_redist_iodev(vcpu); + vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF; + } } -static void __kvm_vgic_destroy(struct kvm *kvm) +void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu; - unsigned long i; + struct kvm *kvm = vcpu->kvm; - lockdep_assert_held(&kvm->arch.config_lock); - - vgic_debug_destroy(kvm); - - kvm_for_each_vcpu(i, vcpu, kvm) - kvm_vgic_vcpu_destroy(vcpu); - - kvm_vgic_dist_destroy(kvm); + mutex_lock(&kvm->slots_lock); + __kvm_vgic_vcpu_destroy(vcpu); + mutex_unlock(&kvm->slots_lock); } void kvm_vgic_destroy(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + + mutex_lock(&kvm->slots_lock); + + vgic_debug_destroy(kvm); + + kvm_for_each_vcpu(i, vcpu, kvm) + __kvm_vgic_vcpu_destroy(vcpu); + mutex_lock(&kvm->arch.config_lock); - __kvm_vgic_destroy(kvm); + + kvm_vgic_dist_destroy(kvm); + mutex_unlock(&kvm->arch.config_lock); + mutex_unlock(&kvm->slots_lock); } /** @@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm) type = VGIC_V3; } - if (ret) { - __kvm_vgic_destroy(kvm); + if (ret) goto out; - } + dist->ready = true; dist_base = dist->vgic_dist_base; mutex_unlock(&kvm->arch.config_lock); ret = vgic_register_dist_iodev(kvm, dist_base, type); - if (ret) { + if (ret) kvm_err("Unable to register VGIC dist MMIO regions\n"); - kvm_vgic_destroy(kvm); - } - mutex_unlock(&kvm->slots_lock); - return ret; + goto out_slots; out: mutex_unlock(&kvm->arch.config_lock); +out_slots: mutex_unlock(&kvm->slots_lock); + + if (ret) + kvm_vgic_destroy(kvm); + return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 
46ac071f71e1..ee7bc0b233b0 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -584,7 +584,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db, unsigned long flags; raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); + irq = __vgic_its_check_cache(dist, db, devid, eventid); + if (irq) + vgic_get_irq_kref(irq); + raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); return irq; @@ -763,6 +767,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi) raw_spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; vgic_queue_irq_unlock(kvm, irq, flags); + vgic_put_irq(kvm, irq); return 0; } diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 188d2187eede..ae5a3a717655 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -365,19 +365,26 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); raw_spin_lock_irqsave(&irq->irq_lock, flags); - if (test_bit(i, &val)) { - /* - * pending_latch is set irrespective of irq type - * (level or edge) to avoid dependency that VM should - * restore irq config before pending info. - */ - irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq, flags); - } else { + + /* + * pending_latch is set irrespective of irq type + * (level or edge) to avoid dependency that VM should + * restore irq config before pending info. + */ + irq->pending_latch = test_bit(i, &val); + + if (irq->hw && vgic_irq_is_sgi(irq->intid)) { + irq_set_irqchip_state(irq->host_irq, + IRQCHIP_STATE_PENDING, + irq->pending_latch); irq->pending_latch = false; - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); } + if (irq->pending_latch) + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); + else + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + vgic_put_irq(vcpu->kvm, irq); } @@ -820,7 +827,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu) return ret; } -static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) +void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu) { struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev; diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 4973c8803cab..2a0da1a42854 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -239,6 +239,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq); int vgic_v3_save_pending_tables(struct kvm *kvm); int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count); int vgic_register_redist_iodev(struct kvm_vcpu *vcpu); +void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu); bool vgic_v3_check_base(struct kvm *kvm); void vgic_v3_load(struct kvm_vcpu *vcpu); diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h index d488ba6084bc..98a3f4b168bd 100644 --- a/arch/csky/include/asm/jump_label.h +++ b/arch/csky/include/asm/jump_label.h @@ -43,5 +43,10 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, return true; } +enum jump_label_type; +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type); +#define arch_jump_label_transform_static arch_jump_label_transform_static + #endif /* __ASSEMBLY__ */ #endif /* __ASM_CSKY_JUMP_LABEL_H */ diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index 01b57b726322..ed47a3a87768 100644 --- a/arch/loongarch/Makefile +++ 
b/arch/loongarch/Makefile @@ -116,6 +116,8 @@ vdso_install: all: $(notdir $(KBUILD_IMAGE)) +vmlinuz.efi: vmlinux.efi + vmlinux.elf vmlinux.efi vmlinuz.efi: vmlinux $(Q)$(MAKE) $(build)=$(boot) $(bootvars-y) $(boot)/$@ diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h index b9a4ab54285c..f16bd42456e4 100644 --- a/arch/loongarch/include/asm/elf.h +++ b/arch/loongarch/include/asm/elf.h @@ -241,8 +241,6 @@ void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs); do { \ current->thread.vdso = &vdso_info; \ \ - loongarch_set_personality_fcsr(state); \ - \ if (personality(current->personality) != PER_LINUX) \ set_personality(PER_LINUX); \ } while (0) @@ -259,7 +257,6 @@ do { \ clear_thread_flag(TIF_32BIT_ADDR); \ \ current->thread.vdso = &vdso_info; \ - loongarch_set_personality_fcsr(state); \ \ p = personality(current->personality); \ if (p != PER_LINUX32 && p != PER_LINUX) \ @@ -293,7 +290,7 @@ extern const char *__elf_platform; #define ELF_PLAT_INIT(_r, load_addr) do { \ _r->regs[1] = _r->regs[2] = _r->regs[3] = _r->regs[4] = 0; \ _r->regs[5] = _r->regs[6] = _r->regs[7] = _r->regs[8] = 0; \ - _r->regs[9] = _r->regs[10] = _r->regs[11] = _r->regs[12] = 0; \ + _r->regs[9] = _r->regs[10] /* syscall n */ = _r->regs[12] = 0; \ _r->regs[13] = _r->regs[14] = _r->regs[15] = _r->regs[16] = 0; \ _r->regs[17] = _r->regs[18] = _r->regs[19] = _r->regs[20] = 0; \ _r->regs[21] = _r->regs[22] = _r->regs[23] = _r->regs[24] = 0; \ @@ -340,6 +337,4 @@ extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr, struct arch_elf_state *state); -extern void loongarch_set_personality_fcsr(struct arch_elf_state *state); - #endif /* _ASM_ELF_H */ diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c index 183e94fc9c69..0fa81ced28dc 100644 --- a/arch/loongarch/kernel/elf.c +++ b/arch/loongarch/kernel/elf.c @@ -23,8 +23,3 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, { return 0; } - -void loongarch_set_personality_fcsr(struct arch_elf_state *state) -{ - current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0; -} diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c index 90a5de746332..1259bc312979 100644 --- a/arch/loongarch/kernel/process.c +++ b/arch/loongarch/kernel/process.c @@ -82,6 +82,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) euen = regs->csr_euen & ~(CSR_EUEN_FPEN); regs->csr_euen = euen; lose_fpu(0); + current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0; clear_thread_flag(TIF_LSX_CTX_LIVE); clear_thread_flag(TIF_LASX_CTX_LIVE); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index d2b7d5df132a..150df6e17bb6 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -58,21 +58,6 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) return 0; } -static int constant_set_state_oneshot_stopped(struct clock_event_device *evt) -{ - unsigned long timer_config; - - raw_spin_lock(&state_lock); - - timer_config = csr_read64(LOONGARCH_CSR_TCFG); - timer_config &= ~CSR_TCFG_EN; - csr_write64(timer_config, LOONGARCH_CSR_TCFG); - - raw_spin_unlock(&state_lock); - - return 0; -} - static int constant_set_state_periodic(struct clock_event_device *evt) { unsigned long period; @@ -92,6 +77,16 @@ static int constant_set_state_periodic(struct clock_event_device *evt) static int constant_set_state_shutdown(struct clock_event_device *evt) { 
+ unsigned long timer_config; + + raw_spin_lock(&state_lock); + + timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config &= ~CSR_TCFG_EN; + csr_write64(timer_config, LOONGARCH_CSR_TCFG); + + raw_spin_unlock(&state_lock); + return 0; } @@ -156,7 +151,7 @@ int constant_clockevent_init(void) cd->rating = 320; cd->cpumask = cpumask_of(cpu); cd->set_state_oneshot = constant_set_state_oneshot; - cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped; + cd->set_state_oneshot_stopped = constant_set_state_shutdown; cd->set_state_periodic = constant_set_state_periodic; cd->set_state_shutdown = constant_set_state_shutdown; cd->set_next_event = constant_timer_next_event; diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 40ed49d9adff..4e86441e6319 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -402,7 +402,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext const u8 dst = regmap[insn->dst_reg]; const s16 off = insn->off; const s32 imm = insn->imm; - const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32; switch (code) { @@ -806,8 +805,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext /* dst = imm64 */ case BPF_LD | BPF_IMM | BPF_DW: + { + const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; + move_imm(ctx, dst, imm64, is32); return 1; + } /* dst = *(size *)(src + off) */ case BPF_LDX | BPF_MEM | BPF_B: diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c index f521874ebb07..67f067706af2 100644 --- a/arch/mips/alchemy/devboards/db1200.c +++ b/arch/mips/alchemy/devboards/db1200.c @@ -847,7 +847,7 @@ int __init db1200_dev_setup(void) i2c_register_board_info(0, db1200_i2c_devs, ARRAY_SIZE(db1200_i2c_devs)); spi_register_board_info(db1200_spi_devs, - ARRAY_SIZE(db1200_i2c_devs)); + ARRAY_SIZE(db1200_spi_devs)); /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI) * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S) diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index fd91d9c9a252..6c6837181f55 100644 --- a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -589,7 +589,7 @@ int __init db1550_dev_setup(void) i2c_register_board_info(0, db1550_i2c_devs, ARRAY_SIZE(db1550_i2c_devs)); spi_register_board_info(db1550_spi_devs, - ARRAY_SIZE(db1550_i2c_devs)); + ARRAY_SIZE(db1550_spi_devs)); c = clk_get(NULL, "psc0_intclk"); if (!IS_ERR(c)) { diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi index 8143a61111e3..c16b521308cb 100644 --- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi +++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi @@ -123,8 +123,7 @@ gmac@3,0 { compatible = "pci0014,7a03.0", "pci0014,7a03", "pciclass0c0320", - "pciclass0c03", - "loongson, pci-gmac"; + "pciclass0c03"; reg = <0x1800 0x0 0x0 0x0 0x0>; interrupts = <12 IRQ_TYPE_LEVEL_LOW>, diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi index 2f45fce2cdc4..ed99ee316feb 100644 --- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi +++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi @@ -186,8 +186,7 @@ gmac@3,0 { compatible = "pci0014,7a03.0", "pci0014,7a03", "pciclass020000", - "pciclass0200", - "loongson, pci-gmac"; + "pciclass0200"; reg = <0x1800 0x0 0x0 0x0 0x0>; interrupts = <12 
IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/mips/include/asm/dmi.h b/arch/mips/include/asm/dmi.h index 27415a288adf..dc397f630c66 100644 --- a/arch/mips/include/asm/dmi.h +++ b/arch/mips/include/asm/dmi.h @@ -5,7 +5,7 @@ #include #include -#define dmi_early_remap(x, l) ioremap_cache(x, l) +#define dmi_early_remap(x, l) ioremap(x, l) #define dmi_early_unmap(x, l) iounmap(x) #define dmi_remap(x, l) ioremap_cache(x, l) #define dmi_unmap(x) iounmap(x) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 7c540572f1f7..e46e7ec76b4f 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -326,11 +326,11 @@ static void __init bootmem_init(void) panic("Incorrect memory mapping !!!"); if (max_pfn > PFN_DOWN(HIGHMEM_START)) { + max_low_pfn = PFN_DOWN(HIGHMEM_START); #ifdef CONFIG_HIGHMEM - highstart_pfn = PFN_DOWN(HIGHMEM_START); + highstart_pfn = max_low_pfn; highend_pfn = max_pfn; #else - max_low_pfn = PFN_DOWN(HIGHMEM_START); max_pfn = max_low_pfn; #endif } diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 1d93b85271ba..002c91fcb842 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -333,10 +333,11 @@ early_initcall(mips_smp_ipi_init); */ asmlinkage void start_secondary(void) { - unsigned int cpu; + unsigned int cpu = raw_smp_processor_id(); cpu_probe(); per_cpu_trap_init(false); + rcu_cpu_starting(cpu); mips_clockevent_init(); mp_ops->init_secondary(); cpu_report(); @@ -348,7 +349,6 @@ asmlinkage void start_secondary(void) */ calibrate_delay(); - cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; set_cpu_sibling_map(cpu); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 6050e6e10d32..2c94f9cf1ce0 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -806,6 +806,7 @@ config THREAD_SHIFT int "Thread shift" if EXPERT range 13 15 default "15" if PPC_256K_PAGES + default "15" if PPC_PSERIES || PPC_POWERNV default "14" if PPC64 default "13" help diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 054844153b1f..487e4967b60d 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -42,18 +42,13 @@ machine-$(CONFIG_PPC64) += 64 machine-$(CONFIG_CPU_LITTLE_ENDIAN) += le UTS_MACHINE := $(subst $(space),,$(machine-y)) -# XXX This needs to be before we override LD below -ifdef CONFIG_PPC32 -KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o -else -ifeq ($(call ld-ifversion, -ge, 22500, y),y) +ifeq ($(CONFIG_PPC64)$(CONFIG_LD_IS_BFD),yy) # Have the linker provide sfpr if possible. # There is a corresponding test in arch/powerpc/lib/Makefile KBUILD_LDFLAGS_MODULE += --save-restore-funcs else KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o endif -endif ifdef CONFIG_CPU_LITTLE_ENDIAN KBUILD_CFLAGS += -mlittle-endian @@ -391,17 +386,7 @@ endif endif PHONY += checkbin -# Check toolchain versions: -# - gcc-4.6 is the minimum kernel-wide version so nothing required. checkbin: - @if test "x${CONFIG_LD_IS_LLD}" != "xy" -a \ - "x$(call ld-ifversion, -le, 22400, y)" = "xy" ; then \ - echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \ - echo 'in some circumstances.' ; \ - echo '*** binutils 2.23 do not define the TOC symbol ' ; \ - echo -n '*** Please use a different binutils version.' 
; \ - false ; \ - fi @if test "x${CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT}" = "xy" -a \ "x${CONFIG_LD_IS_BFD}" = "xy" -a \ "${CONFIG_LD_VERSION}" = "23700" ; then \ diff --git a/arch/powerpc/kernel/trace/ftrace_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S index 6f9c2dea905b..f4a72b38488f 100644 --- a/arch/powerpc/kernel/trace/ftrace_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S @@ -62,7 +62,7 @@ .endif /* Save previous stack pointer (r1) */ - addi r8, r1, SWITCH_FRAME_SIZE + addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE PPC_STL r8, GPR1(r1) .if \allregs == 1 @@ -182,7 +182,7 @@ ftrace_no_trace: mflr r3 mtctr r3 REST_GPR(3, r1) - addi r1, r1, SWITCH_FRAME_SIZE + addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE mtlr r0 bctr #endif diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 8560c912186d..9b394bab17eb 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -42,8 +42,8 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o # 64-bit linker creates .sfpr on demand for final link (vmlinux), # so it is only needed for modules, and only for older linkers which # do not support --save-restore-funcs -ifeq ($(call ld-ifversion, -lt, 22500, y),y) -extra-$(CONFIG_PPC64) += crtsavres.o +ifndef CONFIG_LD_IS_BFD +always-$(CONFIG_PPC64) += crtsavres.o endif obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \ diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index ada817c49b72..56d82f7f9734 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c @@ -299,6 +299,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) attr_group->attrs = attrs; do { ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value); + if (!ev_val_str) + continue; dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str); if (!dev_str) continue; @@ -306,6 +308,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) attrs[j++] = dev_str; if (pmu->events[i].scale) { ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name); + if (!ev_scale_str) + continue; dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale); if (!dev_str) continue; @@ -315,6 +319,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu) if (pmu->events[i].unit) { ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name); + if (!ev_unit_str) + continue; dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit); if (!dev_str) continue; diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 25b80cd558f8..fc79f8466933 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig @@ -173,6 +173,7 @@ config ISS4xx config CURRITUCK bool "IBM Currituck (476fpe) Support" depends on PPC_47x + select I2C select SWIOTLB select 476FPE select FORCE_PCI diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index d55652b5f6fa..391f50535200 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c @@ -275,6 +275,8 @@ int __init opal_event_init(void) else name = kasprintf(GFP_KERNEL, "opal"); + if (!name) + continue; /* Install interrupt handler */ rc = request_irq(r->start, opal_interrupt, r->flags & IRQD_TRIGGER_MASK, name, NULL); diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c 
b/arch/powerpc/platforms/powernv/opal-powercap.c index 7bfe4cbeb35a..ea917266aa17 100644 --- a/arch/powerpc/platforms/powernv/opal-powercap.c +++ b/arch/powerpc/platforms/powernv/opal-powercap.c @@ -196,6 +196,12 @@ void __init opal_powercap_init(void) j = 0; pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node); + if (!pcaps[i].pg.name) { + kfree(pcaps[i].pattrs); + kfree(pcaps[i].pg.attrs); + goto out_pcaps_pattrs; + } + if (has_min) { powercap_add_attr(min, "powercap-min", &pcaps[i].pattrs[j]); diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 6b4eed2ef4fa..f67235d1ba2c 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c @@ -165,6 +165,11 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn, ent->chip = chip; snprintf(ent->name, 16, "%08x", chip); ent->path.data = (void *)kasprintf(GFP_KERNEL, "%pOF", dn); + if (!ent->path.data) { + kfree(ent); + return -ENOMEM; + } + ent->path.size = strlen((char *)ent->path.data); dir = debugfs_create_dir(ent->name, root); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index 2e3a317722a8..051a777ba1b2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -500,14 +500,15 @@ static int dlpar_memory_remove_by_index(u32 drc_index) } } - if (!lmb_found) + if (!lmb_found) { + pr_debug("Failed to look up LMB for drc index %x\n", drc_index); rc = -EINVAL; - - if (rc) + } else if (rc) { pr_debug("Failed to hot-remove memory at %llx\n", lmb->base_addr); - else + } else { pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr); + } return rc; } diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h index 32336e8a17cb..a393d5035c54 100644 --- a/arch/riscv/include/asm/sections.h +++ b/arch/riscv/include/asm/sections.h @@ -13,6 +13,7 @@ extern char _start_kernel[]; extern char __init_data_begin[], __init_data_end[]; extern char __init_text_begin[], __init_text_end[]; extern char __alt_start[], __alt_end[]; +extern char __exittext_begin[], __exittext_end[]; static inline bool is_va_kernel_text(uintptr_t va) { diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h index 532c29ef0376..956ae0a01bad 100644 --- a/arch/riscv/include/asm/signal.h +++ b/arch/riscv/include/asm/signal.h @@ -7,6 +7,6 @@ #include asmlinkage __visible -void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); +void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags); #endif diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h index d4ffc3c37649..b65bf6306f69 100644 --- a/arch/riscv/include/asm/xip_fixup.h +++ b/arch/riscv/include/asm/xip_fixup.h @@ -13,7 +13,7 @@ add \reg, \reg, t0 .endm .macro XIP_FIXUP_FLASH_OFFSET reg - la t1, __data_loc + la t0, __data_loc REG_L t1, _xip_phys_offset sub \reg, \reg, t1 add \reg, \reg, t0 diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 91fe16bfaa07..a331001e33e6 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -424,7 +424,8 @@ void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, - PAGE_KERNEL, 0, NUMA_NO_NODE, + PAGE_KERNEL, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); } #endif diff --git a/arch/riscv/kernel/patch.c 
b/arch/riscv/kernel/patch.c index e099961453cc..160e5c1caa9c 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -13,6 +13,7 @@ #include #include #include +#include struct patch_insn { void *addr; @@ -23,6 +24,14 @@ struct patch_insn { int riscv_patch_in_stop_machine = false; #ifdef CONFIG_MMU + +static inline bool is_kernel_exittext(uintptr_t addr) +{ + return system_state < SYSTEM_RUNNING && + addr >= (uintptr_t)__exittext_begin && + addr < (uintptr_t)__exittext_end; +} + /* * The fix_to_virt(, idx) needs a const value (not a dynamic variable of * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses". @@ -33,7 +42,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap) uintptr_t uintaddr = (uintptr_t) addr; struct page *page; - if (core_kernel_text(uintaddr)) + if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) page = phys_to_page(__pa_symbol(addr)); else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) page = vmalloc_to_page(addr); diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S index 75e0fa8a700a..24a2fdd3be6a 100644 --- a/arch/riscv/kernel/vmlinux-xip.lds.S +++ b/arch/riscv/kernel/vmlinux-xip.lds.S @@ -29,10 +29,12 @@ SECTIONS HEAD_TEXT_SECTION INIT_TEXT_SECTION(PAGE_SIZE) /* we have to discard exit text and such at runtime, not link time */ + __exittext_begin = .; .exit.text : { EXIT_TEXT } + __exittext_end = .; .text : { _text = .; diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 4e6c88aa4d87..d478e063b878 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S @@ -72,10 +72,12 @@ SECTIONS __soc_builtin_dtb_table_end = .; } /* we have to discard exit text and such at runtime, not link time */ + __exittext_begin = .; .exit.text : { EXIT_TEXT } + __exittext_end = .; __init_text_end = .; . 
= ALIGN(SECTION_ALIGN); diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index 161d0b34c2cb..01398fee5cf8 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk) return new_val; } -static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr, - unsigned long next, struct mm_walk *walk) -{ - pgd_t val = READ_ONCE(*pgd); - - if (pgd_leaf(val)) { - val = __pgd(set_pageattr_masks(pgd_val(val), walk)); - set_pgd(pgd, val); - } - - return 0; -} - static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr, unsigned long next, struct mm_walk *walk) { @@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next, } static const struct mm_walk_ops pageattr_ops = { - .pgd_entry = pageattr_pgd_entry, .p4d_entry = pageattr_p4d_entry, .pud_entry = pageattr_pud_entry, .pmd_entry = pageattr_pmd_entry, @@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = { .walk_lock = PGWALK_RDLOCK, }; +#ifdef CONFIG_64BIT +static int __split_linear_mapping_pmd(pud_t *pudp, + unsigned long vaddr, unsigned long end) +{ + pmd_t *pmdp; + unsigned long next; + + pmdp = pmd_offset(pudp, vaddr); + + do { + next = pmd_addr_end(vaddr, end); + + if (next - vaddr >= PMD_SIZE && + vaddr <= (vaddr & PMD_MASK) && end >= next) + continue; + + if (pmd_leaf(*pmdp)) { + struct page *pte_page; + unsigned long pfn = _pmd_pfn(*pmdp); + pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK); + pte_t *ptep_new; + int i; + + pte_page = alloc_page(GFP_KERNEL); + if (!pte_page) + return -ENOMEM; + + ptep_new = (pte_t *)page_address(pte_page); + for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new) + set_pte(ptep_new, pfn_pte(pfn + i, prot)); + + smp_wmb(); + + set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE)); + } + } while (pmdp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_pud(p4d_t *p4dp, + unsigned long vaddr, unsigned long end) +{ + pud_t *pudp; + unsigned long next; + int ret; + + pudp = pud_offset(p4dp, vaddr); + + do { + next = pud_addr_end(vaddr, end); + + if (next - vaddr >= PUD_SIZE && + vaddr <= (vaddr & PUD_MASK) && end >= next) + continue; + + if (pud_leaf(*pudp)) { + struct page *pmd_page; + unsigned long pfn = _pud_pfn(*pudp); + pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK); + pmd_t *pmdp_new; + int i; + + pmd_page = alloc_page(GFP_KERNEL); + if (!pmd_page) + return -ENOMEM; + + pmdp_new = (pmd_t *)page_address(pmd_page); + for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new) + set_pmd(pmdp_new, + pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot)); + + smp_wmb(); + + set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE)); + } + + ret = __split_linear_mapping_pmd(pudp, vaddr, next); + if (ret) + return ret; + } while (pudp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_p4d(pgd_t *pgdp, + unsigned long vaddr, unsigned long end) +{ + p4d_t *p4dp; + unsigned long next; + int ret; + + p4dp = p4d_offset(pgdp, vaddr); + + do { + next = p4d_addr_end(vaddr, end); + + /* + * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't + * need to split, we'll change the protections on the whole P4D. 
+ */ + if (next - vaddr >= P4D_SIZE && + vaddr <= (vaddr & P4D_MASK) && end >= next) + continue; + + if (p4d_leaf(*p4dp)) { + struct page *pud_page; + unsigned long pfn = _p4d_pfn(*p4dp); + pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK); + pud_t *pudp_new; + int i; + + pud_page = alloc_page(GFP_KERNEL); + if (!pud_page) + return -ENOMEM; + + /* + * Fill the pud level with leaf puds that have the same + * protections as the leaf p4d. + */ + pudp_new = (pud_t *)page_address(pud_page); + for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new) + set_pud(pudp_new, + pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot)); + + /* + * Make sure the pud filling is not reordered with the + * p4d store which could result in seeing a partially + * filled pud level. + */ + smp_wmb(); + + set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE)); + } + + ret = __split_linear_mapping_pud(p4dp, vaddr, next); + if (ret) + return ret; + } while (p4dp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_pgd(pgd_t *pgdp, + unsigned long vaddr, + unsigned long end) +{ + unsigned long next; + int ret; + + do { + next = pgd_addr_end(vaddr, end); + /* We never use PGD mappings for the linear mapping */ + ret = __split_linear_mapping_p4d(pgdp, vaddr, next); + if (ret) + return ret; + } while (pgdp++, vaddr = next, vaddr != end); + + return 0; +} + +static int split_linear_mapping(unsigned long start, unsigned long end) +{ + return __split_linear_mapping_pgd(pgd_offset_k(start), start, end); +} +#endif /* CONFIG_64BIT */ + static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask) { int ret; unsigned long start = addr; unsigned long end = start + PAGE_SIZE * numpages; + unsigned long __maybe_unused lm_start; + unsigned long __maybe_unused lm_end; struct pageattr_masks masks = { .set_mask = set_mask, .clear_mask = clear_mask @@ -120,11 +276,72 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, return 0; mmap_write_lock(&init_mm); + +#ifdef CONFIG_64BIT + /* + * We are about to change the permissions of a kernel mapping, we must + * apply the same changes to its linear mapping alias, which may imply + * splitting a huge mapping. + */ + + if (is_vmalloc_or_module_addr((void *)start)) { + struct vm_struct *area = NULL; + int i, page_start; + + area = find_vm_area((void *)start); + page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT; + + for (i = page_start; i < page_start + numpages; ++i) { + lm_start = (unsigned long)page_address(area->pages[i]); + lm_end = lm_start + PAGE_SIZE; + + ret = split_linear_mapping(lm_start, lm_end); + if (ret) + goto unlock; + + ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + &pageattr_ops, NULL, &masks); + if (ret) + goto unlock; + } + } else if (is_kernel_mapping(start) || is_linear_mapping(start)) { + if (is_kernel_mapping(start)) { + lm_start = (unsigned long)lm_alias(start); + lm_end = (unsigned long)lm_alias(end); + } else { + lm_start = start; + lm_end = end; + } + + ret = split_linear_mapping(lm_start, lm_end); + if (ret) + goto unlock; + + ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + &pageattr_ops, NULL, &masks); + if (ret) + goto unlock; + } + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); + +unlock: + mmap_write_unlock(&init_mm); + + /* + * We can't use flush_tlb_kernel_range() here as we may have split a + * hugepage that is larger than that, so let's flush everything. 
+ */ + flush_tlb_all(); +#else + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, + &masks); + mmap_write_unlock(&init_mm); flush_tlb_kernel_range(start, end); +#endif return ret; } @@ -159,36 +376,14 @@ int set_memory_nx(unsigned long addr, int numpages) int set_direct_map_invalid_noflush(struct page *page) { - int ret; - unsigned long start = (unsigned long)page_address(page); - unsigned long end = start + PAGE_SIZE; - struct pageattr_masks masks = { - .set_mask = __pgprot(0), - .clear_mask = __pgprot(_PAGE_PRESENT) - }; - - mmap_read_lock(&init_mm); - ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); - mmap_read_unlock(&init_mm); - - return ret; + return __set_memory((unsigned long)page_address(page), 1, + __pgprot(0), __pgprot(_PAGE_PRESENT)); } int set_direct_map_default_noflush(struct page *page) { - int ret; - unsigned long start = (unsigned long)page_address(page); - unsigned long end = start + PAGE_SIZE; - struct pageattr_masks masks = { - .set_mask = PAGE_KERNEL, - .clear_mask = __pgprot(0) - }; - - mmap_read_lock(&init_mm); - ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); - mmap_read_unlock(&init_mm); - - return ret; + return __set_memory((unsigned long)page_address(page), 1, + PAGE_KERNEL, __pgprot(_PAGE_EXEC)); } #ifdef CONFIG_DEBUG_PAGEALLOC diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h index b714ed0ef688..9acf48e53a87 100644 --- a/arch/s390/include/asm/fpu/api.h +++ b/arch/s390/include/asm/fpu/api.h @@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc) #define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31) #define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH) -#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7) +#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW) struct kernel_fpu; diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 287bb88f7698..2686bee800e3 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -11,6 +11,8 @@ /* I/O size constraints */ #define ZPCI_MAX_READ_SIZE 8 #define ZPCI_MAX_WRITE_SIZE 128 +#define ZPCI_BOUNDARY_SIZE (1 << 12) +#define ZPCI_BOUNDARY_MASK (ZPCI_BOUNDARY_SIZE - 1) /* I/O Map */ #define ZPCI_IOMAP_SHIFT 48 @@ -125,16 +127,18 @@ static inline int zpci_read_single(void *dst, const volatile void __iomem *src, int zpci_write_block(volatile void __iomem *dst, const void *src, unsigned long len); -static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) +static inline int zpci_get_max_io_size(u64 src, u64 dst, int len, int max) { - int count = len > max ? 
max : len, size = 1; + int offset = dst & ZPCI_BOUNDARY_MASK; + int size; - while (!(src & 0x1) && !(dst & 0x1) && ((size << 1) <= count)) { - dst = dst >> 1; - src = src >> 1; - size = size << 1; - } - return size; + size = min3(len, ZPCI_BOUNDARY_SIZE - offset, max); + if (IS_ALIGNED(src, 8) && IS_ALIGNED(dst, 8) && IS_ALIGNED(size, 8)) + return size; + + if (size >= 8) + return 8; + return rounddown_pow_of_two(size); } static inline int zpci_memcpy_fromio(void *dst, @@ -144,9 +148,9 @@ static inline int zpci_memcpy_fromio(void *dst, int size, rc = 0; while (n > 0) { - size = zpci_get_max_write_size((u64 __force) src, - (u64) dst, n, - ZPCI_MAX_READ_SIZE); + size = zpci_get_max_io_size((u64 __force) src, + (u64) dst, n, + ZPCI_MAX_READ_SIZE); rc = zpci_read_single(dst, src, size); if (rc) break; @@ -166,9 +170,9 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst, return -EINVAL; while (n > 0) { - size = zpci_get_max_write_size((u64 __force) dst, - (u64) src, n, - ZPCI_MAX_WRITE_SIZE); + size = zpci_get_max_io_size((u64 __force) dst, + (u64) src, n, + ZPCI_MAX_WRITE_SIZE); if (size > 8) /* main path */ rc = zpci_write_block(dst, src, size); else diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index f043a7ff220b..28fa80fd69fa 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -2,7 +2,7 @@ /* * Performance event support for s390x - CPU-measurement Counter Facility * - * Copyright IBM Corp. 2012, 2021 + * Copyright IBM Corp. 2012, 2022 * Author(s): Hendrik Brueckner * Thomas Richter */ @@ -434,6 +434,12 @@ static void cpumf_hw_inuse(void) mutex_unlock(&pmc_reserve_mutex); } +static int is_userspace_event(u64 ev) +{ + return cpumf_generic_events_user[PERF_COUNT_HW_CPU_CYCLES] == ev || + cpumf_generic_events_user[PERF_COUNT_HW_INSTRUCTIONS] == ev; +} + static int __hw_perf_event_init(struct perf_event *event, unsigned int type) { struct perf_event_attr *attr = &event->attr; @@ -456,19 +462,26 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type) if (is_sampling_event(event)) /* No sampling support */ return -ENOENT; ev = attr->config; - /* Count user space (problem-state) only */ if (!attr->exclude_user && attr->exclude_kernel) { - if (ev >= ARRAY_SIZE(cpumf_generic_events_user)) - return -EOPNOTSUPP; - ev = cpumf_generic_events_user[ev]; - - /* No support for kernel space counters only */ + /* + * Count user space (problem-state) only + * Handle events 32 and 33 as 0:u and 1:u + */ + if (!is_userspace_event(ev)) { + if (ev >= ARRAY_SIZE(cpumf_generic_events_user)) + return -EOPNOTSUPP; + ev = cpumf_generic_events_user[ev]; + } } else if (!attr->exclude_kernel && attr->exclude_user) { + /* No support for kernel space counters only */ return -EOPNOTSUPP; - } else { /* Count user and kernel space */ - if (ev >= ARRAY_SIZE(cpumf_generic_events_basic)) - return -EOPNOTSUPP; - ev = cpumf_generic_events_basic[ev]; + } else { + /* Count user and kernel space, incl. 
events 32 + 33 */ + if (!is_userspace_event(ev)) { + if (ev >= ARRAY_SIZE(cpumf_generic_events_basic)) + return -EOPNOTSUPP; + ev = cpumf_generic_events_basic[ev]; + } } break; diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 9a0ce5315f36..3cbb46182066 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -44,8 +45,11 @@ void *vmem_crst_alloc(unsigned long val) unsigned long *table; table = vmem_alloc_pages(CRST_ALLOC_ORDER); - if (table) - crst_table_init(table, val); + if (!table) + return NULL; + crst_table_init(table, val); + if (slab_is_available()) + arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER); return table; } diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 588089332931..a90499c087f0 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -97,9 +97,9 @@ static inline int __memcpy_toio_inuser(void __iomem *dst, return -EINVAL; while (n > 0) { - size = zpci_get_max_write_size((u64 __force) dst, - (u64 __force) src, n, - ZPCI_MAX_WRITE_SIZE); + size = zpci_get_max_io_size((u64 __force) dst, + (u64 __force) src, n, + ZPCI_MAX_WRITE_SIZE); if (size > 8) /* main path */ rc = __pcistb_mio_inuser(dst, src, size, &status); else @@ -242,9 +242,9 @@ static inline int __memcpy_fromio_inuser(void __user *dst, u8 status; while (n > 0) { - size = zpci_get_max_write_size((u64 __force) src, - (u64 __force) dst, n, - ZPCI_MAX_READ_SIZE); + size = zpci_get_max_io_size((u64 __force) src, + (u64 __force) dst, n, + ZPCI_MAX_READ_SIZE); rc = __pcilg_mio_inuser(dst, src, size, &status); if (rc) break; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 2fb5e1541efc..949129443b1c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4033,12 +4033,17 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; int global_ctrl, pebs_enable; + /* + * In addition to obeying exclude_guest/exclude_host, remove bits being + * used for PEBS when running a guest, because PEBS writes to virtual + * addresses (not physical addresses). 
+ */ *nr = 0; global_ctrl = (*nr)++; arr[global_ctrl] = (struct perf_guest_switch_msr){ .msr = MSR_CORE_PERF_GLOBAL_CTRL, .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, - .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask), + .guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask & ~pebs_mask, }; if (!x86_pmu.pebs) diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index 5fd72d4b8bbb..7d454141433c 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -140,13 +140,21 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, unsigned int *box_offset, *ids; int i; - if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset)) + if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { + pr_info("Invalid address is detected for uncore type %d box %d, " + "Disable the uncore unit.\n", + unit->box_type, unit->box_id); return; + } if (parsed) { type = search_uncore_discovery_type(unit->box_type); - if (WARN_ON_ONCE(!type)) + if (!type) { + pr_info("A spurious uncore type %d is detected, " + "Disable the uncore type.\n", + unit->box_type); return; + } /* Store the first box of each die */ if (!type->box_ctrl_die[die]) type->box_ctrl_die[die] = unit->ctl; @@ -181,8 +189,12 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, ids[i] = type->ids[i]; box_offset[i] = type->box_offset[i]; - if (WARN_ON_ONCE(unit->box_id == ids[i])) + if (unit->box_id == ids[i]) { + pr_info("Duplicate uncore type %d box ID %d is detected, " + "Drop the duplicate uncore unit.\n", + unit->box_type, unit->box_id); goto free_ids; + } } ids[i] = unit->box_id; box_offset[i] = unit->ctl - type->box_ctrl; diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 189ae92de4d0..c18e5c764643 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -267,15 +268,31 @@ static int hv_cpu_die(unsigned int cpu) static int __init hv_pci_init(void) { - int gen2vm = efi_enabled(EFI_BOOT); + bool gen2vm = efi_enabled(EFI_BOOT); /* - * For Generation-2 VM, we exit from pci_arch_init() by returning 0. - * The purpose is to suppress the harmless warning: + * A Generation-2 VM doesn't support legacy PCI/PCIe, so both + * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() -> + * pcibios_init() doesn't call pcibios_resource_survey() -> + * e820__reserve_resources_late(); as a result, any emulated persistent + * memory of E820_TYPE_PRAM (12) via the kernel parameter + * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be + * detected by register_e820_pmem(). Fix this by directly calling + * e820__reserve_resources_late() here: e820__reserve_resources_late() + * depends on e820__reserve_resources(), which has been called earlier + * from setup_arch(). Note: e820__reserve_resources_late() also adds + * any memory of E820_TYPE_PMEM (7) into iomem_resource, and + * acpi_nfit_register_region() -> acpi_nfit_insert_resource() -> + * region_intersects() returns REGION_INTERSECTS, so the memory of + * E820_TYPE_PMEM won't get added twice. + * + * We return 0 here so that pci_arch_init() won't print the warning: * "PCI: Fatal: No config space access function found" */ - if (gen2vm) + if (gen2vm) { + e820__reserve_resources_late(); return 0; + } /* For Generation-1 VM, we'll proceed in pci_arch_init(). 
*/ return 1; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 46b7ee0ab01a..6b8c93989aa3 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -1015,8 +1015,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode, } else { local_irq_save(flags); memcpy(addr, opcode, len); - local_irq_restore(flags); sync_core(); + local_irq_restore(flags); /* * Could also do a CLFLUSH here to speed up CPU recovery; but diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c index 12cf2e7ca33c..87c15ab89651 100644 --- a/arch/x86/kernel/cpu/mce/inject.c +++ b/arch/x86/kernel/cpu/mce/inject.c @@ -747,6 +747,7 @@ static void check_hw_inj_possible(void) wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), status); rdmsrl_safe(mca_msr_reg(bank, MCA_STATUS), &status); + wrmsrl_safe(mca_msr_reg(bank, MCA_STATUS), 0); if (!status) { hw_injection_possible = false; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index ea155f0cf545..6120f25b0d5c 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -549,7 +549,8 @@ static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs) { unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg]; - int3_emulate_call(regs, regs_get_register(regs, offs)); + int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + p->ainsn.size); + int3_emulate_jmp(regs, regs_get_register(regs, offs)); } NOKPROBE_SYMBOL(kprobe_emulate_call_indirect); diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 16333ba1904b..c067887d42df 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -24,8 +24,8 @@ static int kvmclock __initdata = 1; static int kvmclock_vsyscall __initdata = 1; -static int msr_kvm_system_time __ro_after_init = MSR_KVM_SYSTEM_TIME; -static int msr_kvm_wall_clock __ro_after_init = MSR_KVM_WALL_CLOCK; +static int msr_kvm_system_time __ro_after_init; +static int msr_kvm_wall_clock __ro_after_init; static u64 kvm_sched_clock_offset __ro_after_init; static int __init parse_no_kvmclock(char *arg) @@ -195,7 +195,8 @@ static void kvm_setup_secondary_clock(void) void kvmclock_disable(void) { - native_write_msr(msr_kvm_system_time, 0, 0); + if (msr_kvm_system_time) + native_write_msr(msr_kvm_system_time, 0, 0); } static void __init kvmclock_init_mem(void) @@ -294,7 +295,10 @@ void __init kvmclock_init(void) if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE2)) { msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW; msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW; - } else if (!kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { + } else if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) { + msr_kvm_system_time = MSR_KVM_SYSTEM_TIME; + msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK; + } else { return; } diff --git a/arch/x86/lib/misc.c b/arch/x86/lib/misc.c index a018ec4fba53..c97be9a1430a 100644 --- a/arch/x86/lib/misc.c +++ b/arch/x86/lib/misc.c @@ -6,7 +6,7 @@ */ int num_digits(int val) { - int m = 10; + long long m = 10; int d = 1; if (val < 0) { diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 4686c1d9d0cf..84c695ae1940 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -893,6 +893,10 @@ static void emit_nops(u8 **pprog, int len) #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp))) +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ +#define RESTORE_TAIL_CALL_CNT(stack) \ + EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) + 
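/*
 * Illustrative sketch only (not taken from the patch): a tiny userspace
 * program showing the displacement that RESTORE_TAIL_CALL_CNT() above
 * encodes. The JIT keeps the tail-call counter in the 8-byte slot just
 * below the BPF program's stack area, which is rounded up to 8 bytes, so
 * the emitted instruction is
 *     mov rax, qword ptr [rbp - round_up(stack_depth, 8) - 8]
 * The helper names and sample depths below are made up for illustration.
 */
#include <stdio.h>

/* Same rounding the kernel's round_up() performs for a power-of-two step. */
static unsigned int round_up_pow2(unsigned int x, unsigned int step)
{
	return (x + step - 1) & ~(step - 1);
}

/* Signed displacement from rbp to the tail-call counter slot. */
static int tail_call_cnt_disp(unsigned int stack_depth)
{
	return -(int)round_up_pow2(stack_depth, 8) - 8;
}

int main(void)
{
	const unsigned int depths[] = { 0, 12, 64, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("stack_depth=%3u -> mov rax, [rbp%+d]\n",
		       depths[i], tail_call_cnt_disp(depths[i]));
	return 0;
}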
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) { @@ -1436,9 +1440,7 @@ st: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; if (tail_call_reachable) { - /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */ - EMIT3_off32(0x48, 0x8B, 0x85, - -round_up(bpf_prog->aux->stack_depth, 8) - 8); + RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth); if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7)) return -EINVAL; } else { @@ -1750,63 +1752,37 @@ st: if (is_imm8(insn->off)) return proglen; } -static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args, +static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_regs, int stack_size) { - int i, j, arg_size, nr_regs; + int i; + /* Store function arguments to stack. * For a function that accepts two pointers the sequence will be: * mov QWORD PTR [rbp-0x10],rdi * mov QWORD PTR [rbp-0x8],rsi */ - for (i = 0, j = 0; i < min(nr_args, 6); i++) { - if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) { - nr_regs = (m->arg_size[i] + 7) / 8; - arg_size = 8; - } else { - nr_regs = 1; - arg_size = m->arg_size[i]; - } - - while (nr_regs) { - emit_stx(prog, bytes_to_bpf_size(arg_size), - BPF_REG_FP, - j == 5 ? X86_REG_R9 : BPF_REG_1 + j, - -(stack_size - j * 8)); - nr_regs--; - j++; - } - } + for (i = 0; i < min(nr_regs, 6); i++) + emit_stx(prog, BPF_DW, BPF_REG_FP, + i == 5 ? X86_REG_R9 : BPF_REG_1 + i, + -(stack_size - i * 8)); } -static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args, +static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_regs, int stack_size) { - int i, j, arg_size, nr_regs; + int i; /* Restore function arguments from stack. * For a function that accepts two pointers the sequence will be: * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10] * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8] */ - for (i = 0, j = 0; i < min(nr_args, 6); i++) { - if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) { - nr_regs = (m->arg_size[i] + 7) / 8; - arg_size = 8; - } else { - nr_regs = 1; - arg_size = m->arg_size[i]; - } - - while (nr_regs) { - emit_ldx(prog, bytes_to_bpf_size(arg_size), - j == 5 ? X86_REG_R9 : BPF_REG_1 + j, - BPF_REG_FP, - -(stack_size - j * 8)); - nr_regs--; - j++; - } - } + for (i = 0; i < min(nr_regs, 6); i++) + emit_ldx(prog, BPF_DW, + i == 5 ? X86_REG_R9 : BPF_REG_1 + i, + BPF_REG_FP, + -(stack_size - i * 8)); } static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog, @@ -2031,8 +2007,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i struct bpf_tramp_links *tlinks, void *func_addr) { - int ret, i, nr_args = m->nr_args, extra_nregs = 0; - int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off; + int i, ret, nr_regs = m->nr_args, stack_size = 0; + int regs_off, nregs_off, ip_off, run_ctx_off; struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -2041,17 +2017,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i u8 *prog; bool save_ret; - /* x86-64 supports up to 6 arguments. 
7+ can be added in the future */ - if (nr_args > 6) - return -ENOTSUPP; - - for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) { + /* extra registers for struct arguments */ + for (i = 0; i < m->nr_args; i++) if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) - extra_nregs += (m->arg_size[i] + 7) / 8 - 1; - } - if (nr_args + extra_nregs > 6) + nr_regs += (m->arg_size[i] + 7) / 8 - 1; + + /* x86-64 supports up to 6 arguments. 7+ can be added in the future */ + if (nr_regs > 6) return -ENOTSUPP; - stack_size += extra_nregs * 8; /* Generated trampoline stack layout: * @@ -2065,11 +2038,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i * [ ... ] * RBP - regs_off [ reg_arg1 ] program's ctx pointer * - * RBP - args_off [ arg regs count ] always + * RBP - nregs_off [ regs count ] always * * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag * * RBP - run_ctx_off [ bpf_tramp_run_ctx ] + * RSP [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX */ /* room for return value of orig_call or fentry prog */ @@ -2077,11 +2051,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i if (save_ret) stack_size += 8; + stack_size += nr_regs * 8; regs_off = stack_size; - /* args count */ + /* regs count */ stack_size += 8; - args_off = stack_size; + nregs_off = stack_size; if (flags & BPF_TRAMP_F_IP_ARG) stack_size += 8; /* room for IP address argument */ @@ -2106,14 +2081,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i EMIT1(0x55); /* push rbp */ EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */ EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + EMIT1(0x50); /* push rax */ EMIT1(0x53); /* push rbx */ /* Store number of argument registers of the traced function: - * mov rax, nr_args + extra_nregs - * mov QWORD PTR [rbp - args_off], rax + * mov rax, nr_regs + * mov QWORD PTR [rbp - nregs_off], rax */ - emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs); - emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off); + emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_regs); + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -nregs_off); if (flags & BPF_TRAMP_F_IP_ARG) { /* Store IP address of the traced function: @@ -2124,7 +2101,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off); } - save_regs(m, &prog, nr_args, regs_off); + save_regs(m, &prog, nr_regs, regs_off); if (flags & BPF_TRAMP_F_CALL_ORIG) { /* arg1: mov rdi, im */ @@ -2154,11 +2131,17 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_regs(m, &prog, nr_args, regs_off); + restore_regs(m, &prog, nr_regs, regs_off); + + if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + /* Before calling the original function, restore the + * tail_call_cnt from stack to rax. + */ + RESTORE_TAIL_CALL_CNT(stack_size); if (flags & BPF_TRAMP_F_ORIG_STACK) { - emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); - EMIT2(0xff, 0xd0); /* call *rax */ + emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8); + EMIT2(0xff, 0xd3); /* call *rbx */ } else { /* call original function */ if (emit_call(&prog, orig_call, prog)) { @@ -2195,7 +2178,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_regs(m, &prog, nr_args, regs_off); + restore_regs(m, &prog, nr_regs, regs_off); /* This needs to be done regardless. 
If there were fmod_ret programs, * the return value is only updated on the stack and still needs to be @@ -2209,7 +2192,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ret = -EINVAL; goto cleanup; } - } + } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX) + /* Before running the original function, restore the + * tail_call_cnt from stack to rax. + */ + RESTORE_TAIL_CALL_CNT(stack_size); + /* restore return value of orig_call or fentry prog back into RAX */ if (save_ret) emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8); diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 9b1ec5d8c99c..a65fc2ae15b4 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -9,6 +9,7 @@ config XEN select PARAVIRT_CLOCK select X86_HV_CALLBACK_VECTOR depends on X86_64 || (X86_32 && X86_PAE) + depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8) depends on X86_LOCAL_APIC && X86_TSC help This is the Linux Xen port. Enabling this will allow the diff --git a/block/bdev.c b/block/bdev.c index 87d66d16b666..ef60d9912717 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -512,6 +512,8 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) void bdev_add(struct block_device *bdev, dev_t dev) { + if (bdev_stable_writes(bdev)) + mapping_set_stable_writes(bdev->bd_inode->i_mapping); bdev->bd_dev = dev; bdev->bd_inode->i_rdev = dev; bdev->bd_inode->i_ino = dev; diff --git a/block/bio.c b/block/bio.c index 311f3b3456a1..ec7cf6b332a3 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1112,13 +1112,22 @@ bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, void __bio_release_pages(struct bio *bio, bool mark_dirty) { - struct bvec_iter_all iter_all; - struct bio_vec *bvec; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) { - if (mark_dirty && !PageCompound(bvec->bv_page)) - set_page_dirty_lock(bvec->bv_page); - put_page(bvec->bv_page); + bio_for_each_folio_all(fi, bio) { + struct page *page; + size_t done = 0; + + if (mark_dirty) { + folio_lock(fi.folio); + folio_mark_dirty(fi.folio); + folio_unlock(fi.folio); + } + page = folio_page(fi.folio, fi.offset / PAGE_SIZE); + do { + folio_put(fi.folio); + done += PAGE_SIZE; + } while (done < fi.length); } } EXPORT_SYMBOL_GPL(__bio_release_pages); @@ -1417,12 +1426,12 @@ EXPORT_SYMBOL(bio_free_pages); */ void bio_set_pages_dirty(struct bio *bio) { - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) { - if (!PageCompound(bvec->bv_page)) - set_page_dirty_lock(bvec->bv_page); + bio_for_each_folio_all(fi, bio) { + folio_lock(fi.folio); + folio_mark_dirty(fi.folio); + folio_unlock(fi.folio); } } @@ -1465,12 +1474,11 @@ static void bio_dirty_fn(struct work_struct *work) void bio_check_pages_dirty(struct bio *bio) { - struct bio_vec *bvec; + struct folio_iter fi; unsigned long flags; - struct bvec_iter_all iter_all; - bio_for_each_segment_all(bvec, bio, iter_all) { - if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) + bio_for_each_folio_all(fi, bio) { + if (!folio_test_dirty(fi.folio)) goto defer; } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 60f366f98fa2..1b7fd1fc2f33 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -462,6 +462,7 @@ static void blkg_destroy_all(struct gendisk *disk) struct request_queue *q = disk->queue; struct blkcg_gq *blkg, *n; int count = BLKG_DESTROY_BATCH_SIZE; + int i; restart: spin_lock_irq(&q->queue_lock); @@ -487,6 +488,18 @@ static 
void blkg_destroy_all(struct gendisk *disk) } } + /* + * Mark policy deactivated since policy offline has been done, and + * the free is scheduled, so future blkcg_deactivate_policy() can + * be bypassed + */ + for (i = 0; i < BLKCG_MAX_POLS; i++) { + struct blkcg_policy *pol = blkcg_policy[i]; + + if (pol) + __clear_bit(pol->plid, q->blkcg_pols); + } + q->root_blkg = NULL; spin_unlock_irq(&q->queue_lock); } diff --git a/block/blk-mq.c b/block/blk-mq.c index c425c31e5a19..0e56e3e73019 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1501,14 +1501,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q, } EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list); +static bool blk_is_flush_data_rq(struct request *rq) +{ + return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq); +} + static bool blk_mq_rq_inflight(struct request *rq, void *priv) { /* * If we find a request that isn't idle we know the queue is busy * as it's checked in the iter. * Return false to stop the iteration. + * + * In case of queue quiesce, if one flush data request is completed, + * don't count it as inflight given the flush sequence is suspended, + * and the original flush data request is invisible to driver, just + * like other pending requests because of quiesce */ - if (blk_mq_request_started(rq)) { + if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) && + blk_is_flush_data_rq(rq) && + blk_mq_request_completed(rq))) { bool *busy = priv; *busy = true; @@ -2853,11 +2865,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, }; struct request *rq; - if (unlikely(bio_queue_enter(bio))) - return NULL; - if (blk_mq_attempt_bio_merge(q, bio, nsegs)) - goto queue_exit; + return NULL; rq_qos_throttle(q, bio); @@ -2873,35 +2882,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q, rq_qos_cleanup(q, bio); if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); -queue_exit: - blk_queue_exit(q); return NULL; } -static inline struct request *blk_mq_get_cached_request(struct request_queue *q, - struct blk_plug *plug, struct bio **bio, unsigned int nsegs) +/* return true if this @rq can be used for @bio */ +static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug, + struct bio *bio) { - struct request *rq; - enum hctx_type type, hctx_type; + enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf); + enum hctx_type hctx_type = rq->mq_hctx->type; - if (!plug) - return NULL; - rq = rq_list_peek(&plug->cached_rq); - if (!rq || rq->q != q) - return NULL; + WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq); - if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) { - *bio = NULL; - return NULL; - } - - type = blk_mq_get_hctx_type((*bio)->bi_opf); - hctx_type = rq->mq_hctx->type; if (type != hctx_type && !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT)) - return NULL; - if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf)) - return NULL; + return false; + if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf)) + return false; /* * If any qos ->throttle() end up blocking, we will have flushed the @@ -2909,11 +2906,11 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q, * before we throttle. 
*/ plug->cached_rq = rq_list_next(rq); - rq_qos_throttle(q, *bio); + rq_qos_throttle(rq->q, bio); - rq->cmd_flags = (*bio)->bi_opf; + rq->cmd_flags = bio->bi_opf; INIT_LIST_HEAD(&rq->queuelist); - return rq; + return true; } static void bio_set_ioprio(struct bio *bio) @@ -2942,31 +2939,51 @@ void blk_mq_submit_bio(struct bio *bio) struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blk_plug *plug = blk_mq_plug(bio); const int is_sync = op_is_sync(bio->bi_opf); - struct request *rq; + struct request *rq = NULL; unsigned int nr_segs = 1; blk_status_t ret; bio = blk_queue_bounce(bio, q); - if (bio_may_exceed_limits(bio, &q->limits)) { - bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); - if (!bio) - return; - } - - if (!bio_integrity_prep(bio)) - return; - bio_set_ioprio(bio); - rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs); - if (!rq) { - if (!bio) + if (plug) { + rq = rq_list_peek(&plug->cached_rq); + if (rq && rq->q != q) + rq = NULL; + } + if (rq) { + if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { + bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); + if (!bio) + return; + } + if (!bio_integrity_prep(bio)) return; - rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); - if (unlikely(!rq)) + if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) return; + if (blk_mq_can_use_cached_rq(rq, plug, bio)) + goto done; + percpu_ref_get(&q->q_usage_counter); + } else { + if (unlikely(bio_queue_enter(bio))) + return; + if (unlikely(bio_may_exceed_limits(bio, &q->limits))) { + bio = __bio_split_to_limits(bio, &q->limits, &nr_segs); + if (!bio) + goto fail; + } + if (!bio_integrity_prep(bio)) + goto fail; } + rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); + if (unlikely(!rq)) { +fail: + blk_queue_exit(q); + return; + } + +done: trace_block_getrq(bio); rq_qos_track(q, rq, bio); diff --git a/block/blk-settings.c b/block/blk-settings.c index b99ad9892c06..9940199b7916 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -201,7 +201,7 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto limits->max_hw_sectors = max_hw_sectors; max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); - max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS); + max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS); max_sectors = round_down(max_sectors, limits->logical_block_size >> SECTOR_SHIFT); limits->max_sectors = max_sectors; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 009b0d76bf03..62a3f62316df 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1333,6 +1333,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); + rcu_read_lock(); /* * Update has_rules[] flags for the updated tg's subtree. A tg is * considered to have rules if either the tg itself or any of its @@ -1360,6 +1361,7 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global) this_tg->latency_target = max(this_tg->latency_target, parent_tg->latency_target); } + rcu_read_unlock(); /* * We're already holding queue_lock and know @tg is valid. Let's diff --git a/block/fops.c b/block/fops.c index 6197d1c41652..01cb6260fa24 100644 --- a/block/fops.c +++ b/block/fops.c @@ -655,24 +655,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, filemap_invalidate_lock(inode->i_mapping); - /* Invalidate the page cache, including dirty pages. 
*/ - error = truncate_bdev_range(bdev, file->f_mode, start, end); - if (error) - goto fail; - + /* + * Invalidate the page cache, including dirty pages, for valid + * de-allocate mode calls to fallocate(). + */ switch (mode) { case FALLOC_FL_ZERO_RANGE: case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: + error = truncate_bdev_range(bdev, file->f_mode, start, end); + if (error) + goto fail; + error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT, len >> SECTOR_SHIFT, GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); break; case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: + error = truncate_bdev_range(bdev, file->f_mode, start, end); + if (error) + goto fail; + error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT, len >> SECTOR_SHIFT, GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK); break; case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: + error = truncate_bdev_range(bdev, file->f_mode, start, end); + if (error) + goto fail; + error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT, len >> SECTOR_SHIFT, GFP_KERNEL); break; diff --git a/block/genhd.c b/block/genhd.c index 0c00489b9731..e04ff2f08f39 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -446,7 +446,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, DISK_MAX_PARTS); disk->minors = DISK_MAX_PARTS; } - if (disk->first_minor + disk->minors > MINORMASK + 1) + if (disk->first_minor > MINORMASK || + disk->minors > MINORMASK + 1 || + disk->first_minor + disk->minors > MINORMASK + 1) goto out_exit_elevator; } else { if (WARN_ON(disk->minors)) @@ -569,6 +571,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, out_del_block_link: if (!sysfs_deprecated) sysfs_remove_link(block_depr, dev_name(ddev)); + pm_runtime_set_memalloc_noio(ddev, false); out_device_del: device_del(ddev); out_free_ext_minor: diff --git a/block/ioctl.c b/block/ioctl.c index 3c475e4166e9..ebe4a2653622 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -18,7 +18,7 @@ static int blkpg_do_ioctl(struct block_device *bdev, { struct gendisk *disk = bdev->bd_disk; struct blkpg_partition p; - long long start, length; + sector_t start, length; if (disk->flags & GENHD_FL_NO_PART) return -EINVAL; @@ -35,14 +35,17 @@ static int blkpg_do_ioctl(struct block_device *bdev, if (op == BLKPG_DEL_PARTITION) return bdev_del_partition(disk, p.pno); + if (p.start < 0 || p.length <= 0 || p.start + p.length < 0) + return -EINVAL; + /* Check that the partition is aligned to the block size */ + if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev))) + return -EINVAL; + start = p.start >> SECTOR_SHIFT; length = p.length >> SECTOR_SHIFT; switch (op) { case BLKPG_ADD_PARTITION: - /* check if partition is aligned to blocksize */ - if (p.start & (bdev_logical_block_size(bdev) - 1)) - return -EINVAL; return bdev_add_partition(disk, p.pno, start, length); case BLKPG_RESIZE_PARTITION: return bdev_resize_partition(disk, p.pno, start, length); diff --git a/build.config.db845c b/build.config.db845c index 8ad7ae362abc..f3b2a23a52a6 100644 --- a/build.config.db845c +++ b/build.config.db845c @@ -7,6 +7,8 @@ FRAGMENT_CONFIG=${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${FRAGMENT_CONFIG}" POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}" +DTC_FLAGS="${DTC_FLAGS} -@" + 
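# Illustrative sketch only: "-@" asks dtc to emit a __symbols__ node in the
# compiled dtb so labels remain resolvable and device tree overlays can be
# applied on top of the base tree. The file names below are hypothetical.
dtc -@ -I dts -O dtb -o base.dtb base.dts          # base tree with symbols
dtc -@ -I dts -O dtb -o overlay.dtbo overlay.dts   # overlay referencing labels
fdtoverlay -i base.dtb -o merged.dtb overlay.dtbo  # apply the overlay offline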
FILES=" arch/arm64/boot/dts/qcom/sdm845-db845c.dtb arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb diff --git a/crypto/af_alg.c b/crypto/af_alg.c index e893c0f6c879..fef69d2a6b18 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -1045,9 +1045,13 @@ EXPORT_SYMBOL_GPL(af_alg_sendpage); void af_alg_free_resources(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; + struct af_alg_ctx *ctx; af_alg_free_areq_sgls(areq); sock_kfree_s(sk, areq, areq->areqlen); + + ctx = alg_sk(sk)->private; + ctx->inflight = false; } EXPORT_SYMBOL_GPL(af_alg_free_resources); @@ -1117,11 +1121,19 @@ EXPORT_SYMBOL_GPL(af_alg_poll); struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) { - struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); + struct af_alg_ctx *ctx = alg_sk(sk)->private; + struct af_alg_async_req *areq; + /* Only one AIO request can be in flight. */ + if (ctx->inflight) + return ERR_PTR(-EBUSY); + + areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); if (unlikely(!areq)) return ERR_PTR(-ENOMEM); + ctx->inflight = true; + areq->areqlen = areqlen; areq->sk = sk; areq->last_rsgl = NULL; diff --git a/crypto/scompress.c b/crypto/scompress.c index 738f4f8f0f41..4d6366a44400 100644 --- a/crypto/scompress.c +++ b/crypto/scompress.c @@ -124,6 +124,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) struct crypto_scomp *scomp = *tfm_ctx; void **ctx = acomp_request_ctx(req); struct scomp_scratch *scratch; + unsigned int dlen; int ret; if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) @@ -135,6 +136,8 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE) req->dlen = SCOMP_SCRATCH_SIZE; + dlen = req->dlen; + scratch = raw_cpu_ptr(&scomp_scratch); spin_lock(&scratch->lock); @@ -152,6 +155,9 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir) ret = -ENOMEM; goto out; } + } else if (req->dlen > dlen) { + ret = -ENOSPC; + goto out; } scatterwalk_map_and_copy(scratch->dst, req->dst, 0, req->dlen, 1); diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index e648158368a7..088db2356998 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -145,9 +145,14 @@ static int extlog_print(struct notifier_block *nb, unsigned long val, static u32 err_seq; estatus = extlog_elog_entry_check(cpu, bank); - if (estatus == NULL || (mce->kflags & MCE_HANDLED_CEC)) + if (!estatus) return NOTIFY_DONE; + if (mce->kflags & MCE_HANDLED_CEC) { + estatus->block_status = 0; + return NOTIFY_DONE; + } + memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); /* clear record status to enable BIOS to update it again */ estatus->block_status = 0; diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c index 50540d4d4948..2c015ecf7185 100644 --- a/drivers/acpi/acpi_lpit.c +++ b/drivers/acpi/acpi_lpit.c @@ -98,7 +98,7 @@ static void lpit_update_residency(struct lpit_residency_info *info, struct acpi_lpit_native *lpit_native) { info->frequency = lpit_native->counter_frequency ? 
- lpit_native->counter_frequency : tsc_khz * 1000; + lpit_native->counter_frequency : mul_u32_u32(tsc_khz, 1000U); if (!info->frequency) info->frequency = 1; diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index f08ffa75f4a7..8b44743945c8 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -450,8 +450,9 @@ static int register_device_clock(struct acpi_device *adev, if (!clk_name) return -ENOMEM; clk = clk_register_fractional_divider(NULL, clk_name, parent, + 0, prv_base, 1, 15, 16, 15, CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, - prv_base, 1, 15, 16, 15, 0, NULL); + NULL); parent = clk_name; clk_name = kasprintf(GFP_KERNEL, "%s-update", devname); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index ed318485eb19..f7852fb75ab3 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -1726,12 +1726,12 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device) return; count++; - acpi_get_parent(device->dev->handle, &acpi_parent); - - pdev = acpi_get_pci_dev(acpi_parent); - if (pdev) { - parent = &pdev->dev; - pci_dev_put(pdev); + if (ACPI_SUCCESS(acpi_get_parent(device->dev->handle, &acpi_parent))) { + pdev = acpi_get_pci_dev(acpi_parent); + if (pdev) { + parent = &pdev->dev; + pci_dev_put(pdev); + } } memset(&props, 0, sizeof(struct backlight_properties)); diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 0565c18c2ee3..62aee900af3d 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -851,6 +851,7 @@ static int acpi_get_ref_args(struct fwnode_reference_args *args, * @index: Index of the reference to return * @num_args: Maximum number of arguments after each reference * @args: Location to store the returned reference with optional arguments + * (may be NULL) * * Find property with @name, verifify that it is a package containing at least * one object reference and if so, store the ACPI device object pointer to the @@ -907,6 +908,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, if (!device) return -EINVAL; + if (!args) + return 0; + args->fwnode = acpi_fwnode_handle(device); args->nargs = 0; return 0; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index c297e40c5bdc..5ebeb0d7b6be 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -512,6 +512,13 @@ static const struct dmi_system_id maingear_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxXGxx"), }, }, + { + /* TongFang GMxXGxx sold as Eluktronics Inc. 
RP-15 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "RP-15"), + }, + }, { /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ .matches = { diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a62fce8c272a..dc2a8ba27b4c 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5284,7 +5284,7 @@ static __poll_t binder_poll(struct file *filp, thread = binder_get_thread(proc); if (!thread) - return POLLERR; + return EPOLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index d49de5c714ee..c7ebf14587a6 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -272,7 +272,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, } if (mm) { mmap_write_unlock(mm); - mmput(mm); + mmput_async(mm); } return 0; @@ -305,7 +305,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, err_no_vma: if (mm) { mmap_write_unlock(mm); - mmput(mm); + mmput_async(mm); } return vma ? -ENOMEM : -ESRCH; } @@ -345,8 +345,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) continue; if (!buffer->async_transaction) continue; - total_alloc_size += binder_alloc_buffer_size(alloc, buffer) - + sizeof(struct binder_buffer); + total_alloc_size += binder_alloc_buffer_size(alloc, buffer); num_buffers++; } @@ -409,26 +408,27 @@ static struct binder_buffer *binder_alloc_new_buf_locked( alloc->pid, extra_buffers_size); return ERR_PTR(-EINVAL); } + trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async); trace_android_vh_binder_detect_low_async_space(is_async, &alloc->free_async_space, pid, &should_fail); if (should_fail) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd failed, not allowed to alloc more async space\n", - alloc->pid, size); + "%d: binder_alloc_buf size %zd failed, not allowed to alloc more async space\n", + alloc->pid, size); return ERR_PTR(-EPERM); } - if (is_async && - alloc->free_async_space < size + sizeof(struct binder_buffer)) { + + /* Pad 0-size buffers so they get assigned unique addresses */ + size = max(size, sizeof(void *)); + + if (is_async && alloc->free_async_space < size) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", alloc->pid, size); return ERR_PTR(-ENOSPC); } - /* Pad 0-size buffers so they get assigned unique addresses */ - size = max(size, sizeof(void *)); - while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); @@ -530,7 +530,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( buffer->pid = pid; buffer->oneway_spam_suspect = false; if (is_async) { - alloc->free_async_space -= size + sizeof(struct binder_buffer); + alloc->free_async_space -= size; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); @@ -568,7 +568,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( * is the sum of the three given sizes (each rounded up to * pointer-sized boundary) * - * Return: The allocated buffer or %NULL if error + * Return: The allocated buffer or %ERR_PTR(-errno) if error */ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, @@ -668,8 +668,7 @@ static void binder_free_buf_locked(struct binder_alloc 
*alloc, BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { - alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); - + alloc->free_async_space += buffer_size; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); @@ -717,7 +716,7 @@ void binder_alloc_free_buf(struct binder_alloc *alloc, /* * We could eliminate the call to binder_alloc_clear_buf() * from binder_alloc_deferred_release() by moving this to - * binder_alloc_free_buf_locked(). However, that could + * binder_free_buf_locked(). However, that could * increase contention for the alloc mutex if clear_on_free * is used frequently for large buffers. The mutex is not * needed for correctness here. @@ -1016,7 +1015,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, goto err_mmget; if (!mmap_read_trylock(mm)) goto err_mmap_read_lock_failed; - vma = binder_alloc_get_vma(alloc); + vma = vma_lookup(mm, page_addr); + if (vma && vma != binder_alloc_get_vma(alloc)) + goto err_invalid_vma; list_lru_isolate(lru, item); spin_unlock(lock); @@ -1042,6 +1043,8 @@ enum lru_status binder_alloc_free_page(struct list_head *item, mutex_unlock(&alloc->mutex); return LRU_REMOVED_RETRY; +err_invalid_vma: + mmap_read_unlock(mm); err_mmap_read_lock_failed: mmput_async(mm); err_mmget: diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 94fbc3abe60e..d3c30a28c410 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -449,9 +449,9 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, struct sk_buff *skb; unsigned int len; - spin_lock(&card->cli_queue_lock); + spin_lock_bh(&card->cli_queue_lock); skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); - spin_unlock(&card->cli_queue_lock); + spin_unlock_bh(&card->cli_queue_lock); if(skb == NULL) return sprintf(buf, "No data.\n"); @@ -956,14 +956,14 @@ static void pclose(struct atm_vcc *vcc) struct pkt_hdr *header; /* Remove any yet-to-be-transmitted packets from the pending queue */ - spin_lock(&card->tx_queue_lock); + spin_lock_bh(&card->tx_queue_lock); skb_queue_walk_safe(&card->tx_queue[port], skb, tmpskb) { if (SKB_CB(skb)->vcc == vcc) { skb_unlink(skb, &card->tx_queue[port]); solos_pop(vcc, skb); } } - spin_unlock(&card->tx_queue_lock); + spin_unlock_bh(&card->tx_queue_lock); skb = alloc_skb(sizeof(*header), GFP_KERNEL); if (!skb) { diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 9aa0da991cfb..5d39f3e374da 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -175,6 +175,9 @@ int memory_notify(unsigned long val, void *v) return blocking_notifier_call_chain(&memory_chain, val, v); } +/* + * Must acquire mem_hotplug_lock in write mode. + */ static int memory_block_online(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); @@ -193,10 +196,11 @@ static int memory_block_online(struct memory_block *mem) * stage helps to keep accounting easier to follow - e.g vmemmaps * belong to the same zone as the memory they backed. 
*/ + mem_hotplug_begin(); if (nr_vmemmap_pages) { ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone); if (ret) - return ret; + goto out; } ret = online_pages(start_pfn + nr_vmemmap_pages, @@ -204,7 +208,7 @@ static int memory_block_online(struct memory_block *mem) if (ret) { if (nr_vmemmap_pages) mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); - return ret; + goto out; } /* @@ -216,9 +220,14 @@ static int memory_block_online(struct memory_block *mem) nr_vmemmap_pages); mem->zone = zone; +out: + mem_hotplug_done(); return ret; } +/* + * Must acquire mem_hotplug_lock in write mode. + */ static int memory_block_offline(struct memory_block *mem) { unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr); @@ -233,6 +242,7 @@ static int memory_block_offline(struct memory_block *mem) * Unaccount before offlining, such that unpopulated zone and kthreads * can properly be torn down in offline_pages(). */ + mem_hotplug_begin(); if (nr_vmemmap_pages) adjust_present_page_count(pfn_to_page(start_pfn), mem->group, -nr_vmemmap_pages); @@ -244,13 +254,15 @@ static int memory_block_offline(struct memory_block *mem) if (nr_vmemmap_pages) adjust_present_page_count(pfn_to_page(start_pfn), mem->group, nr_vmemmap_pages); - return ret; + goto out; } if (nr_vmemmap_pages) mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages); mem->zone = NULL; +out: + mem_hotplug_done(); return ret; } diff --git a/drivers/base/node.c b/drivers/base/node.c index faf3597a96da..a4141b57b147 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -859,11 +859,15 @@ int __register_one_node(int nid) { int error; int cpu; + struct node *node; - node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL); - if (!node_devices[nid]) + node = kzalloc(sizeof(struct node), GFP_KERNEL); + if (!node) return -ENOMEM; + INIT_LIST_HEAD(&node->access_list); + node_devices[nid] = node; + error = register_node(node_devices[nid], nid); /* link cpu under this node */ @@ -872,7 +876,6 @@ int __register_one_node(int nid) register_cpu_under_node(cpu, nid); } - INIT_LIST_HEAD(&node_devices[nid]->access_list); node_init_caches(nid); return error; diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c index 0a482212c7e8..44153caa893a 100644 --- a/drivers/base/swnode.c +++ b/drivers/base/swnode.c @@ -541,6 +541,9 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode, if (nargs > NR_FWNODE_REFERENCE_ARGS) return -EINVAL; + if (!args) + return 0; + args->fwnode = software_node_get(refnode); args->nargs = nargs; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 426d0b42685a..12ff6f58b8a9 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -165,39 +165,37 @@ static loff_t get_loop_size(struct loop_device *lo, struct file *file) return get_size(lo->lo_offset, lo->lo_sizelimit, file); } +/* + * We support direct I/O only if lo_offset is aligned with the logical I/O size + * of backing device, and the logical block size of loop is bigger than that of + * the backing device. 
+ */ +static bool lo_bdev_can_use_dio(struct loop_device *lo, + struct block_device *backing_bdev) +{ + unsigned short sb_bsize = bdev_logical_block_size(backing_bdev); + + if (queue_logical_block_size(lo->lo_queue) < sb_bsize) + return false; + if (lo->lo_offset & (sb_bsize - 1)) + return false; + return true; +} + static void __loop_update_dio(struct loop_device *lo, bool dio) { struct file *file = lo->lo_backing_file; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - unsigned short sb_bsize = 0; - unsigned dio_align = 0; + struct inode *inode = file->f_mapping->host; + struct block_device *backing_bdev = NULL; bool use_dio; - if (inode->i_sb->s_bdev) { - sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev); - dio_align = sb_bsize - 1; - } + if (S_ISBLK(inode->i_mode)) + backing_bdev = I_BDEV(inode); + else if (inode->i_sb->s_bdev) + backing_bdev = inode->i_sb->s_bdev; - /* - * We support direct I/O only if lo_offset is aligned with the - * logical I/O size of backing device, and the logical block - * size of loop is bigger than the backing device's. - * - * TODO: the above condition may be loosed in the future, and - * direct I/O may be switched runtime at that time because most - * of requests in sane applications should be PAGE_SIZE aligned - */ - if (dio) { - if (queue_logical_block_size(lo->lo_queue) >= sb_bsize && - !(lo->lo_offset & dio_align) && - (file->f_mode & FMODE_CAN_ODIRECT)) - use_dio = true; - else - use_dio = false; - } else { - use_dio = false; - } + use_dio = dio && (file->f_mode & FMODE_CAN_ODIRECT) && + (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev)); if (lo->use_dio == use_dio) return; @@ -1777,14 +1775,43 @@ static const struct block_device_operations lo_fops = { /* * If max_loop is specified, create that many devices upfront. * This also becomes a hard limit. If max_loop is not specified, + * the default isn't a hard limit (as before commit 85c50197716c + * changed the default value from 0 for max_loop=0 reasons), just * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module * init time. Loop devices can be requested on-demand with the * /dev/loop-control interface, or be instantiated by accessing * a 'dead' device node. 
*/ static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT; -module_param(max_loop, int, 0444); + +#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD +static bool max_loop_specified; + +static int max_loop_param_set_int(const char *val, + const struct kernel_param *kp) +{ + int ret; + + ret = param_set_int(val, kp); + if (ret < 0) + return ret; + + max_loop_specified = true; + return 0; +} + +static const struct kernel_param_ops max_loop_param_ops = { + .set = max_loop_param_set_int, + .get = param_get_int, +}; + +module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444); MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); +#else +module_param(max_loop, int, 0444); +MODULE_PARM_DESC(max_loop, "Initial number of loop devices"); +#endif + module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); @@ -2089,14 +2116,18 @@ static void loop_remove(struct loop_device *lo) put_disk(lo->lo_disk); } +#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD static void loop_probe(dev_t dev) { int idx = MINOR(dev) >> part_shift; - if (max_loop && idx >= max_loop) + if (max_loop_specified && max_loop && idx >= max_loop) return; loop_add(idx); } +#else +#define loop_probe NULL +#endif /* !CONFIG_BLOCK_LEGACY_AUTOLOAD */ static int loop_control_remove(int idx) { @@ -2277,6 +2308,9 @@ module_exit(loop_exit); static int __init max_loop_setup(char *str) { max_loop = simple_strtol(str, NULL, 0); +#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD + max_loop_specified = true; +#endif return 1; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index e94d2ff6b122..8037aaefeb2e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -67,6 +67,7 @@ struct nbd_sock { struct recv_thread_args { struct work_struct work; struct nbd_device *nbd; + struct nbd_sock *nsock; int index; }; @@ -489,15 +490,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req) return BLK_EH_DONE; } -/* - * Send or receive packet. Return a positive value on success and - * negtive value on failue, and never return 0. - */ -static int sock_xmit(struct nbd_device *nbd, int index, int send, - struct iov_iter *iter, int msg_flags, int *sent) +static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send, + struct iov_iter *iter, int msg_flags, int *sent) { - struct nbd_config *config = nbd->config; - struct socket *sock = config->socks[index]->sock; int result; struct msghdr msg; unsigned int noreclaim_flag; @@ -539,6 +534,19 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, return result; } +/* + * Send or receive packet. Return a positive value on success and + * negtive value on failure, and never return 0. + */ +static int sock_xmit(struct nbd_device *nbd, int index, int send, + struct iov_iter *iter, int msg_flags, int *sent) +{ + struct nbd_config *config = nbd->config; + struct socket *sock = config->socks[index]->sock; + + return __sock_xmit(nbd, sock, send, iter, msg_flags, sent); +} + /* * Different settings for sk->sk_sndtimeo can result in different return values * if there is a signal pending when we enter sendmsg, because reasons? 
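/*
 * Minimal usage sketch of the helper split above (the function name here is
 * illustrative, not part of this patch): a caller that already holds the
 * nbd_sock it was started for can read a reply header through __sock_xmit()
 * directly, without re-indexing config->socks[], which is exactly what the
 * reworked nbd_read_reply() in the next hunk does.
 */
static int example_read_reply_hdr(struct nbd_device *nbd,
				  struct nbd_sock *nsock,
				  struct nbd_reply *reply)
{
	struct kvec iov = { .iov_base = reply, .iov_len = sizeof(*reply) };
	struct iov_iter to;

	iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
	return __sock_xmit(nbd, nsock->sock, 0, &to, MSG_WAITALL, NULL);
}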
@@ -695,7 +703,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) return 0; } -static int nbd_read_reply(struct nbd_device *nbd, int index, +static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock, struct nbd_reply *reply) { struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)}; @@ -704,7 +712,7 @@ static int nbd_read_reply(struct nbd_device *nbd, int index, reply->magic = 0; iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply)); - result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL); + result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL); if (result < 0) { if (!nbd_disconnected(nbd->config)) dev_err(disk_to_dev(nbd->disk), @@ -828,14 +836,14 @@ static void recv_work(struct work_struct *work) struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; struct request_queue *q = nbd->disk->queue; - struct nbd_sock *nsock; + struct nbd_sock *nsock = args->nsock; struct nbd_cmd *cmd; struct request *rq; while (1) { struct nbd_reply reply; - if (nbd_read_reply(nbd, args->index, &reply)) + if (nbd_read_reply(nbd, nsock->sock, &reply)) break; /* @@ -870,7 +878,6 @@ static void recv_work(struct work_struct *work) percpu_ref_put(&q->q_usage_counter); } - nsock = config->socks[args->index]; mutex_lock(&nsock->tx_lock); nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); @@ -1214,6 +1221,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) INIT_WORK(&args->work, recv_work); args->index = i; args->nbd = nbd; + args->nsock = nsock; nsock->cookie++; mutex_unlock(&nsock->tx_lock); sockfd_put(old); @@ -1396,6 +1404,7 @@ static int nbd_start_device(struct nbd_device *nbd) refcount_inc(&nbd->config_refs); INIT_WORK(&args->work, recv_work); args->nbd = nbd; + args->nsock = config->socks[i]; args->index = i; queue_work(nbd->recv_workq, &args->work); } @@ -1530,17 +1539,20 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, return error; } -static struct nbd_config *nbd_alloc_config(void) +static int nbd_alloc_and_init_config(struct nbd_device *nbd) { struct nbd_config *config; + if (WARN_ON(nbd->config)) + return -EINVAL; + if (!try_module_get(THIS_MODULE)) - return ERR_PTR(-ENODEV); + return -ENODEV; config = kzalloc(sizeof(struct nbd_config), GFP_NOFS); if (!config) { module_put(THIS_MODULE); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } atomic_set(&config->recv_threads, 0); @@ -1548,7 +1560,10 @@ static struct nbd_config *nbd_alloc_config(void) init_waitqueue_head(&config->conn_wait); config->blksize_bits = NBD_DEF_BLKSIZE_BITS; atomic_set(&config->live_connections, 0); - return config; + nbd->config = config; + refcount_set(&nbd->config_refs, 1); + + return 0; } static int nbd_open(struct block_device *bdev, fmode_t mode) @@ -1567,21 +1582,17 @@ static int nbd_open(struct block_device *bdev, fmode_t mode) goto out; } if (!refcount_inc_not_zero(&nbd->config_refs)) { - struct nbd_config *config; - mutex_lock(&nbd->config_lock); if (refcount_inc_not_zero(&nbd->config_refs)) { mutex_unlock(&nbd->config_lock); goto out; } - config = nbd_alloc_config(); - if (IS_ERR(config)) { - ret = PTR_ERR(config); + ret = nbd_alloc_and_init_config(nbd); + if (ret) { mutex_unlock(&nbd->config_lock); goto out; } - nbd->config = config; - refcount_set(&nbd->config_refs, 1); + refcount_inc(&nbd->refs); mutex_unlock(&nbd->config_lock); if (max_part) @@ -1990,22 +2001,17 @@ static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info) pr_err("nbd%d already in use\n", index); 
return -EBUSY; } - if (WARN_ON(nbd->config)) { - mutex_unlock(&nbd->config_lock); - nbd_put(nbd); - return -EINVAL; - } - config = nbd_alloc_config(); - if (IS_ERR(config)) { + + ret = nbd_alloc_and_init_config(nbd); + if (ret) { mutex_unlock(&nbd->config_lock); nbd_put(nbd); pr_err("couldn't allocate config\n"); - return PTR_ERR(config); + return ret; } - nbd->config = config; - refcount_set(&nbd->config_refs, 1); - set_bit(NBD_RT_BOUND, &config->runtime_flags); + config = nbd->config; + set_bit(NBD_RT_BOUND, &config->runtime_flags); ret = nbd_genl_size_set(info, nbd); if (ret) goto out; diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index e9f38eba2f13..959952e8ede3 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -2114,11 +2114,8 @@ static int null_add_dev(struct nullb_device *dev) blk_queue_logical_block_size(nullb->q, dev->blocksize); blk_queue_physical_block_size(nullb->q, dev->blocksize); - if (!dev->max_sectors) - dev->max_sectors = queue_max_hw_sectors(nullb->q); - dev->max_sectors = min_t(unsigned int, dev->max_sectors, - BLK_DEF_MAX_SECTORS); - blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); + if (dev->max_sectors) + blk_queue_max_hw_sectors(nullb->q, dev->max_sectors); if (dev->virt_boundary) blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1); @@ -2218,12 +2215,6 @@ static int __init null_init(void) g_bs = PAGE_SIZE; } - if (g_max_sectors > BLK_DEF_MAX_SECTORS) { - pr_warn("invalid max sectors\n"); - pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS); - g_max_sectors = BLK_DEF_MAX_SECTORS; - } - if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { pr_err("invalid home_node value\n"); g_home_node = NUMA_NO_NODE; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index c2f0f74193f0..3fa74051f31b 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -103,6 +103,9 @@ struct ublk_uring_cmd_pdu { */ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08 +/* atomic RW with ubq->cancel_lock */ +#define UBLK_IO_FLAG_CANCELED 0x80000000 + struct ublk_io { /* userspace buffer address from io cmd */ __u64 addr; @@ -126,6 +129,7 @@ struct ublk_queue { unsigned int max_io_sz; bool force_abort; unsigned short nr_io_ready; /* how many ios setup */ + spinlock_t cancel_lock; struct ublk_device *dev; struct ublk_io ios[]; }; @@ -1045,28 +1049,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq) return ubq->nr_io_ready == ubq->q_depth; } -static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags) -{ - io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags); -} - static void ublk_cancel_queue(struct ublk_queue *ubq) { int i; - if (!ublk_queue_ready(ubq)) - return; - for (i = 0; i < ubq->q_depth; i++) { struct ublk_io *io = &ubq->ios[i]; - if (io->flags & UBLK_IO_FLAG_ACTIVE) - io_uring_cmd_complete_in_task(io->cmd, - ublk_cmd_cancel_cb); - } + if (io->flags & UBLK_IO_FLAG_ACTIVE) { + bool done; - /* all io commands are canceled */ - ubq->nr_io_ready = 0; + spin_lock(&ubq->cancel_lock); + done = !!(io->flags & UBLK_IO_FLAG_CANCELED); + if (!done) + io->flags |= UBLK_IO_FLAG_CANCELED; + spin_unlock(&ubq->cancel_lock); + + if (!done) + io_uring_cmd_done(io->cmd, + UBLK_IO_RES_ABORT, 0, + IO_URING_F_UNLOCKED); + } + } } /* Cancel all pending commands, must be called after del_gendisk() returns */ @@ -1113,7 +1117,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub) blk_mq_quiesce_queue(ub->ub_disk->queue); ublk_wait_tagset_rqs_idle(ub); ub->dev_info.state 
= UBLK_S_DEV_QUIESCED; - ublk_cancel_dev(ub); /* we are going to release task_struct of ubq_daemon and resets * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF. * Besides, monitor_work is not necessary in QUIESCED state since we have @@ -1136,6 +1139,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work) __ublk_quiesce_dev(ub); unlock: mutex_unlock(&ub->mutex); + ublk_cancel_dev(ub); } static void ublk_unquiesce_dev(struct ublk_device *ub) @@ -1175,8 +1179,8 @@ static void ublk_stop_dev(struct ublk_device *ub) put_disk(ub->ub_disk); ub->ub_disk = NULL; unlock: - ublk_cancel_dev(ub); mutex_unlock(&ub->mutex); + ublk_cancel_dev(ub); cancel_delayed_work_sync(&ub->monitor_work); } @@ -1353,6 +1357,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id) void *ptr; int size; + spin_lock_init(&ubq->cancel_lock); ubq->flags = ub->dev_info.flags; ubq->q_id = q_id; ubq->q_depth = ub->dev_info.queue_depth; @@ -1882,8 +1887,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq) int i; WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq))); + /* All old ioucmds have to be completed */ - WARN_ON_ONCE(ubq->nr_io_ready); + ubq->nr_io_ready = 0; /* old daemon is PF_EXITING, put it now */ put_task_struct(ubq->ubq_daemon); /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */ diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index efa5535a8e1d..3124837aa406 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -609,12 +609,12 @@ static void virtblk_config_changed(struct virtio_device *vdev) static int init_vq(struct virtio_blk *vblk) { int err; - int i; + unsigned short i; vq_callback_t **callbacks; const char **names; struct virtqueue **vqs; unsigned short num_vqs; - unsigned int num_poll_vqs; + unsigned short num_poll_vqs; struct virtio_device *vdev = vblk->vdev; struct irq_affinity desc = { 0, }; @@ -658,13 +658,13 @@ static int init_vq(struct virtio_blk *vblk) for (i = 0; i < num_vqs - num_poll_vqs; i++) { callbacks[i] = virtblk_done; - snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); + snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%u", i); names[i] = vblk->vqs[i].name; } for (; i < num_vqs; i++) { callbacks[i] = NULL; - snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i); + snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%u", i); names[i] = vblk->vqs[i].name; } diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index c98691cdbbd5..04b72394dda5 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -337,7 +337,7 @@ mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count, return data; } -static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) +static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); const unsigned char *p_left = data, *p_h4; @@ -376,25 +376,20 @@ static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) bt_dev_err(bdev->hdev, "Frame reassembly failed (%d)", err); bdev->rx_skb = NULL; - return err; + return; } sz_left -= sz_h4; p_left += sz_h4; } - - return 0; } static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data, size_t count) { struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); - int err; - err = btmtkuart_recv(bdev->hdev, data, count); - if (err < 0) - return err; + btmtkuart_recv(bdev->hdev, data, count); 
bdev->hdev->stat.byte_rx += count; diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 4415d850d698..44dc91555aa0 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -44,6 +45,7 @@ struct vhci_data { bool wakeup; __u16 msft_opcode; bool aosp_capable; + atomic_t initialized; }; static int vhci_open_dev(struct hci_dev *hdev) @@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); - mutex_lock(&data->open_mutex); skb_queue_tail(&data->readq, skb); - mutex_unlock(&data->open_mutex); - wake_up_interruptible(&data->read_wait); + if (atomic_read(&data->initialized)) + wake_up_interruptible(&data->read_wait); return 0; } @@ -363,7 +364,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) skb_put_u8(skb, 0xff); skb_put_u8(skb, opcode); put_unaligned_le16(hdev->id, skb_put(skb, 2)); - skb_queue_tail(&data->readq, skb); + skb_queue_head(&data->readq, skb); + atomic_inc(&data->initialized); wake_up_interruptible(&data->read_wait); return 0; diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 59a2fe2448f1..15c6b85b125d 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2174,13 +2174,23 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); - /* Flush posted write */ + + /* + * Some devices need a delay before reading registers + * after reset. Presumably a srst_udelay is not needed + * for devices that use a rstctrl register reset. + */ + if (ddata->cfg.srst_udelay) + fsleep(ddata->cfg.srst_udelay); + + /* + * Flush posted write. For devices needing srst_udelay + * this should trigger an interconnect error if the + * srst_udelay value is needed but not configured. + */ sysc_val = sysc_read_sysconfig(ddata); } - if (ddata->cfg.srst_udelay) - fsleep(ddata->cfg.srst_udelay); - if (ddata->post_reset_quirk) ddata->post_reset_quirk(ddata); diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index c7d8cbd22bac..5acb35236c58 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -892,10 +892,8 @@ static int si5341_output_clk_set_rate(struct clk_hw *hw, unsigned long rate, r[0] = r_div ? 
(r_div & 0xff) : 1; r[1] = (r_div >> 8) & 0xff; r[2] = (r_div >> 16) & 0xff; - err = regmap_bulk_write(output->data->regmap, + return regmap_bulk_write(output->data->regmap, SI5341_OUT_R_REG(output), r, 3); - - return 0; } static int si5341_output_reparent(struct clk_si5341_output *output, u8 index) diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c index 8422fd047493..c89a5b59ddb7 100644 --- a/drivers/clk/qcom/gpucc-sm8150.c +++ b/drivers/clk/qcom/gpucc-sm8150.c @@ -37,8 +37,8 @@ static struct alpha_pll_config gpu_cc_pll1_config = { .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, .test_ctl_val = 0x00000000, - .test_ctl_hi_val = 0x00000002, - .test_ctl_hi1_val = 0x00000000, + .test_ctl_hi_val = 0x00000000, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000d0, diff --git a/drivers/clk/qcom/videocc-sm8150.c b/drivers/clk/qcom/videocc-sm8150.c index 1afdbe4a249d..52a9a453a143 100644 --- a/drivers/clk/qcom/videocc-sm8150.c +++ b/drivers/clk/qcom/videocc-sm8150.c @@ -33,6 +33,7 @@ static struct alpha_pll_config video_pll0_config = { .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00002267, .config_ctl_hi1_val = 0x00000024, + .test_ctl_hi1_val = 0x00000020, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000805, .user_ctl_hi1_val = 0x000000D0, @@ -214,6 +215,10 @@ static const struct regmap_config video_cc_sm8150_regmap_config = { static const struct qcom_reset_map video_cc_sm8150_resets[] = { [VIDEO_CC_MVSC_CORE_CLK_BCR] = { 0x850, 2 }, + [VIDEO_CC_INTERFACE_BCR] = { 0x8f0 }, + [VIDEO_CC_MVS0_BCR] = { 0x870 }, + [VIDEO_CC_MVS1_BCR] = { 0x8b0 }, + [VIDEO_CC_MVSC_BCR] = { 0x810 }, }; static const struct qcom_cc_desc video_cc_sm8150_desc = { diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 84767cfc1e73..473feb36a38f 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -1115,41 +1115,33 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, #define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev) -static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev, - unsigned long id) -{ - struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); - const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->resets[id].off; - u32 dis = BIT(info->resets[id].bit); - u32 we = dis << 16; - - dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); - - /* Reset module */ - writel(we, priv->base + CLK_RST_R(reg)); - - /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ - udelay(35); - - /* Release module from reset state */ - writel(we | dis, priv->base + CLK_RST_R(reg)); - - return 0; -} - static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev, unsigned long id) { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; unsigned int reg = info->resets[id].off; - u32 value = BIT(info->resets[id].bit) << 16; + u32 mask = BIT(info->resets[id].bit); + s8 monbit = info->resets[id].monbit; + u32 value = mask << 16; dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); - return 0; + + if (info->has_clk_mon_regs) { + reg = CLK_MRST_R(reg); + } else if (monbit >= 0) { + reg = CPG_RST_MON; + mask = BIT(monbit); + } else { + /* Wait for at least one cycle of the RCLK clock (@ ca. 
32 kHz) */ + udelay(35); + return 0; + } + + return readl_poll_timeout_atomic(priv->base + reg, value, + value & mask, 10, 200); } static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev, @@ -1158,14 +1150,40 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev, struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; unsigned int reg = info->resets[id].off; - u32 dis = BIT(info->resets[id].bit); - u32 value = (dis << 16) | dis; + u32 mask = BIT(info->resets[id].bit); + s8 monbit = info->resets[id].monbit; + u32 value = (mask << 16) | mask; dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); - return 0; + + if (info->has_clk_mon_regs) { + reg = CLK_MRST_R(reg); + } else if (monbit >= 0) { + reg = CPG_RST_MON; + mask = BIT(monbit); + } else { + /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */ + udelay(35); + return 0; + } + + return readl_poll_timeout_atomic(priv->base + reg, value, + !(value & mask), 10, 200); +} + +static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = rzg2l_cpg_assert(rcdev, id); + if (ret) + return ret; + + return rzg2l_cpg_deassert(rcdev, id); } static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, @@ -1173,18 +1191,21 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->resets[id].off; - u32 bitmask = BIT(info->resets[id].bit); s8 monbit = info->resets[id].monbit; + unsigned int reg; + u32 bitmask; if (info->has_clk_mon_regs) { - return !!(readl(priv->base + CLK_MRST_R(reg)) & bitmask); + reg = CLK_MRST_R(info->resets[id].off); + bitmask = BIT(info->resets[id].bit); } else if (monbit >= 0) { - u32 monbitmask = BIT(monbit); - - return !!(readl(priv->base + CPG_RST_MON) & monbitmask); + reg = CPG_RST_MON; + bitmask = BIT(monbit); + } else { + return -ENOTSUPP; } - return -ENOTSUPP; + + return !!(readl(priv->base + reg) & bitmask); } static const struct reset_control_ops rzg2l_cpg_reset_ops = { diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c index aa53797dbfc1..7782785a86e6 100644 --- a/drivers/clk/rockchip/clk-rk3128.c +++ b/drivers/clk/rockchip/clk-rk3128.c @@ -490,7 +490,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { GATE(HCLK_I2S_2CH, "hclk_i2s_2ch", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS), GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 13, GFLAGS), GATE(HCLK_HOST2, "hclk_host2", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), - GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 13, GFLAGS), + GATE(HCLK_OTG, "hclk_otg", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS), GATE(0, "hclk_peri_ahb", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 14, GFLAGS), GATE(HCLK_SPDIF, "hclk_spdif", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 9, GFLAGS), GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK2928_CLKGATE_CON(10), 12, GFLAGS), diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c index 2f54f630c8b6..1ffb755feea4 100644 --- a/drivers/clk/rockchip/clk-rk3568.c +++ b/drivers/clk/rockchip/clk-rk3568.c @@ -72,6 +72,7 @@ static struct rockchip_pll_rate_table rk3568_pll_rates[] = { RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0), RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0), 
RK3036_PLL_RATE(297000000, 2, 99, 4, 1, 1, 0), + RK3036_PLL_RATE(292500000, 1, 195, 4, 4, 1, 0), RK3036_PLL_RATE(241500000, 2, 161, 4, 2, 1, 0), RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0), RK3036_PLL_RATE(200000000, 1, 100, 3, 4, 1, 0), diff --git a/drivers/clk/zynqmp/clk-mux-zynqmp.c b/drivers/clk/zynqmp/clk-mux-zynqmp.c index 60359333f26d..9b5d3050b742 100644 --- a/drivers/clk/zynqmp/clk-mux-zynqmp.c +++ b/drivers/clk/zynqmp/clk-mux-zynqmp.c @@ -89,7 +89,7 @@ static int zynqmp_clk_mux_set_parent(struct clk_hw *hw, u8 index) static const struct clk_ops zynqmp_clk_mux_ops = { .get_parent = zynqmp_clk_mux_get_parent, .set_parent = zynqmp_clk_mux_set_parent, - .determine_rate = __clk_mux_determine_rate, + .determine_rate = __clk_mux_determine_rate_closest, }; static const struct clk_ops zynqmp_clk_mux_ro_ops = { diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c index 33a3b2a22659..5a00487ae408 100644 --- a/drivers/clk/zynqmp/divider.c +++ b/drivers/clk/zynqmp/divider.c @@ -110,52 +110,6 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw, return DIV_ROUND_UP_ULL(parent_rate, value); } -static void zynqmp_get_divider2_val(struct clk_hw *hw, - unsigned long rate, - struct zynqmp_clk_divider *divider, - u32 *bestdiv) -{ - int div1; - int div2; - long error = LONG_MAX; - unsigned long div1_prate; - struct clk_hw *div1_parent_hw; - struct zynqmp_clk_divider *pdivider; - struct clk_hw *div2_parent_hw = clk_hw_get_parent(hw); - - if (!div2_parent_hw) - return; - - pdivider = to_zynqmp_clk_divider(div2_parent_hw); - if (!pdivider) - return; - - div1_parent_hw = clk_hw_get_parent(div2_parent_hw); - if (!div1_parent_hw) - return; - - div1_prate = clk_hw_get_rate(div1_parent_hw); - *bestdiv = 1; - for (div1 = 1; div1 <= pdivider->max_div;) { - for (div2 = 1; div2 <= divider->max_div;) { - long new_error = ((div1_prate / div1) / div2) - rate; - - if (abs(new_error) < abs(error)) { - *bestdiv = div2; - error = new_error; - } - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) - div2 = div2 << 1; - else - div2++; - } - if (pdivider->flags & CLK_DIVIDER_POWER_OF_TWO) - div1 = div1 << 1; - else - div1++; - } -} - /** * zynqmp_clk_divider_round_rate() - Round rate of divider clock * @hw: handle between common and hardware-specific interfaces @@ -174,6 +128,7 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, u32 div_type = divider->div_type; u32 bestdiv; int ret; + u8 width; /* if read only, just return current value */ if (divider->flags & CLK_DIVIDER_READ_ONLY) { @@ -193,23 +148,12 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw, return DIV_ROUND_UP_ULL((u64)*prate, bestdiv); } - bestdiv = zynqmp_divider_get_val(*prate, rate, divider->flags); + width = fls(divider->max_div); - /* - * In case of two divisors, compute best divider values and return - * divider2 value based on compute value. div1 will be automatically - * set to optimum based on required total divider value. - */ - if (div_type == TYPE_DIV2 && - (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { - zynqmp_get_divider2_val(hw, rate, divider, &bestdiv); - } + rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags); - if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && divider->is_frac) - bestdiv = rate % *prate ? 
1 : bestdiv; - - bestdiv = min_t(u32, bestdiv, divider->max_div); - *prate = rate * bestdiv; + if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate)) + *prate = rate; return rate; } diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index ec86aecb748f..7f9c1f58a947 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c @@ -184,7 +184,7 @@ static inline u32 dmtimer_read(struct dmtimer *timer, u32 reg) * dmtimer_write - write timer registers in posted and non-posted mode * @timer: timer pointer over which write operation is to perform * @reg: lowest byte holds the register offset - * @value: data to write into the register + * @val: data to write into the register * * The posted mode bit is encoded in reg. Note that in posted mode, the write * pending bit must be checked. Otherwise a write on a register which has a @@ -937,7 +937,7 @@ static int omap_dm_timer_set_int_enable(struct omap_dm_timer *cookie, /** * omap_dm_timer_set_int_disable - disable timer interrupts - * @timer: pointer to timer handle + * @cookie: pointer to timer cookie * @mask: bit mask of interrupts to be disabled * * Disables the specified timer interrupts for a timer. diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index 69a8742c0a7a..8514bb62dd10 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -176,7 +176,7 @@ static bool __init cpu0_node_has_opp_v2_prop(void) struct device_node *np = of_cpu_device_node_get(0); bool ret = false; - if (of_get_property(np, "operating-points-v2", NULL)) + if (of_property_present(np, "operating-points-v2")) ret = true; of_node_put(np); diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c index 76e553af2071..535867a7dfdd 100644 --- a/drivers/cpufreq/imx-cpufreq-dt.c +++ b/drivers/cpufreq/imx-cpufreq-dt.c @@ -89,7 +89,7 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev) cpu_dev = get_cpu_device(0); - if (!of_find_property(cpu_dev->of_node, "cpu-supply", NULL)) + if (!of_property_present(cpu_dev->of_node, "cpu-supply")) return -ENODEV; if (of_machine_is_compatible("fsl,imx7ulp")) { diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 925fc17eaacb..39b0362a3b9a 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -230,7 +230,7 @@ static int imx6q_opp_check_speed_grading(struct device *dev) u32 val; int ret; - if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { + if (of_property_present(dev->of_node, "nvmem-cells")) { ret = nvmem_cell_read_u32(dev, "speed_grade", &val); if (ret) return ret; @@ -285,7 +285,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev) u32 val; int ret = 0; - if (of_find_property(dev->of_node, "nvmem-cells", NULL)) { + if (of_property_present(dev->of_node, "nvmem-cells")) { ret = nvmem_cell_read_u32(dev, "speed_grade", &val); if (ret) return ret; diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 513a071845c2..028df8a5f537 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -310,8 +310,11 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev) #ifdef CONFIG_COMMON_CLK /* dummy clock provider as needed by OPP if clocks property is used */ - if (of_find_property(dev->of_node, "#clock-cells", NULL)) - devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (of_property_present(dev->of_node, 
"#clock-cells")) { + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL); + if (ret) + return dev_err_probe(dev, ret, "%s: registering clock provider failed\n", __func__); + } #endif ret = cpufreq_register_driver(&scmi_cpufreq_driver); diff --git a/drivers/cpufreq/tegra20-cpufreq.c b/drivers/cpufreq/tegra20-cpufreq.c index ab7ac7df9e62..dfd2de4f8e07 100644 --- a/drivers/cpufreq/tegra20-cpufreq.c +++ b/drivers/cpufreq/tegra20-cpufreq.c @@ -25,7 +25,7 @@ static bool cpu0_node_has_opp_v2_prop(void) struct device_node *np = of_cpu_device_node_get(0); bool ret = false; - if (of_get_property(np, "operating-points-v2", NULL)) + if (of_property_present(np, "operating-points-v2")) ret = true; of_node_put(np); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index aa4e1a500691..cb8e99936abb 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -179,8 +179,11 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); - if (dma_mapping_error(wa->dev, wa->dma.address)) + if (dma_mapping_error(wa->dev, wa->dma.address)) { + kfree(wa->address); + wa->address = NULL; return -ENOMEM; + } wa->dma.length = len; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index ff8a5f20a5df..269df4ec148b 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -118,8 +118,6 @@ #define HPRE_DFX_COMMON2_LEN 0xE #define HPRE_DFX_CORE_LEN 0x43 -#define HPRE_DEV_ALG_MAX_LEN 256 - static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { @@ -135,12 +133,7 @@ struct hpre_hw_error { const char *msg; }; -struct hpre_dev_alg { - u32 alg_msk; - const char *alg; -}; - -static const struct hpre_dev_alg hpre_dev_algs[] = { +static const struct qm_dev_alg hpre_dev_algs[] = { { .alg_msk = BIT(0), .alg = "rsa\n" @@ -233,6 +226,20 @@ static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10} }; +enum hpre_pre_store_cap_idx { + HPRE_CLUSTER_NUM_CAP_IDX = 0x0, + HPRE_CORE_ENABLE_BITMAP_CAP_IDX, + HPRE_DRV_ALG_BITMAP_CAP_IDX, + HPRE_DEV_ALG_BITMAP_CAP_IDX, +}; + +static const u32 hpre_pre_store_caps[] = { + HPRE_CLUSTER_NUM_CAP, + HPRE_CORE_ENABLE_BITMAP_CAP, + HPRE_DRV_ALG_BITMAP_CAP, + HPRE_DEV_ALG_BITMAP_CAP, +}; + static const struct hpre_hw_error hpre_hw_errors[] = { { .int_msk = BIT(0), @@ -352,42 +359,13 @@ bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; - cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver); + cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP_CAP_IDX].cap_val; if (alg & cap_val) return true; return false; } -static int hpre_set_qm_algs(struct hisi_qm *qm) -{ - struct device *dev = &qm->pdev->dev; - char *algs, *ptr; - u32 alg_msk; - int i; - - if (!qm->use_sva) - return 0; - - algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); - if (!algs) - return -ENOMEM; - - alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver); - - for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++) - if (alg_msk & hpre_dev_algs[i].alg_msk) - strcat(algs, hpre_dev_algs[i].alg); - - ptr = strrchr(algs, '\n'); - if (ptr) - *ptr = '\0'; - - qm->uacce->algs = algs; - - return 0; -} - static int hpre_diff_regs_show(struct seq_file *s, void 
*unused) { struct hisi_qm *qm = s->private; @@ -457,16 +435,6 @@ static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); -static inline int hpre_cluster_num(struct hisi_qm *qm) -{ - return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver); -} - -static inline int hpre_cluster_core_mask(struct hisi_qm *qm) -{ - return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver); -} - struct hisi_qp *hpre_create_qp(u8 type) { int node = cpu_to_node(smp_processor_id()); @@ -533,13 +501,15 @@ static int hpre_cfg_by_dsm(struct hisi_qm *qm) static int hpre_set_cluster(struct hisi_qm *qm) { - u32 cluster_core_mask = hpre_cluster_core_mask(qm); - u8 clusters_num = hpre_cluster_num(qm); struct device *dev = &qm->pdev->dev; unsigned long offset; + u32 cluster_core_mask; + u8 clusters_num; u32 val = 0; int ret, i; + cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_ENABLE_BITMAP_CAP_IDX].cap_val; + clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; for (i = 0; i < clusters_num; i++) { offset = i * HPRE_CLSTR_ADDR_INTRVL; @@ -734,11 +704,12 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) static void hpre_cnt_regs_clear(struct hisi_qm *qm) { - u8 clusters_num = hpre_cluster_num(qm); unsigned long offset; + u8 clusters_num; int i; /* clear clusterX/cluster_ctrl */ + clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; for (i = 0; i < clusters_num; i++) { offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); @@ -1025,13 +996,14 @@ static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) static int hpre_cluster_debugfs_init(struct hisi_qm *qm) { - u8 clusters_num = hpre_cluster_num(qm); struct device *dev = &qm->pdev->dev; char buf[HPRE_DBGFS_VAL_MAX_LEN]; struct debugfs_regset32 *regset; struct dentry *tmp_d; + u8 clusters_num; int i, ret; + clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; for (i = 0; i < clusters_num; i++) { ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); if (ret >= HPRE_DBGFS_VAL_MAX_LEN) @@ -1136,8 +1108,37 @@ static void hpre_debugfs_exit(struct hisi_qm *qm) debugfs_remove_recursive(qm->debug.debug_root); } +static int hpre_pre_store_cap_reg(struct hisi_qm *qm) +{ + struct hisi_qm_cap_record *hpre_cap; + struct device *dev = &qm->pdev->dev; + size_t i, size; + + size = ARRAY_SIZE(hpre_pre_store_caps); + hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL); + if (!hpre_cap) + return -ENOMEM; + + for (i = 0; i < size; i++) { + hpre_cap[i].type = hpre_pre_store_caps[i]; + hpre_cap[i].cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, + hpre_pre_store_caps[i], qm->cap_ver); + } + + if (hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val > HPRE_CLUSTERS_NUM_MAX) { + dev_err(dev, "Device cluster num %u is out of range for driver supports %d!\n", + hpre_cap[HPRE_CLUSTER_NUM_CAP_IDX].cap_val, HPRE_CLUSTERS_NUM_MAX); + return -EINVAL; + } + + qm->cap_tables.dev_cap_table = hpre_cap; + + return 0; +} + static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + u64 alg_msk; int ret; if (pdev->revision == QM_HW_V1) { @@ -1168,7 +1169,16 @@ static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } - ret = hpre_set_qm_algs(qm); + /* Fetch and save the value of capability registers */ + ret = hpre_pre_store_cap_reg(qm); + if (ret) { + 
pci_err(pdev, "Failed to pre-store capability registers!\n"); + hisi_qm_uninit(qm); + return ret; + } + + alg_msk = qm->cap_tables.dev_cap_table[HPRE_DEV_ALG_BITMAP_CAP_IDX].cap_val; + ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs)); if (ret) { pci_err(pdev, "Failed to set hpre algs!\n"); hisi_qm_uninit(qm); @@ -1181,11 +1191,12 @@ static int hpre_show_last_regs_init(struct hisi_qm *qm) { int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); - u8 clusters_num = hpre_cluster_num(qm); struct qm_debug *debug = &qm->debug; void __iomem *io_base; + u8 clusters_num; int i, j, idx; + clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); if (!debug->last_words) @@ -1222,10 +1233,10 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm) { int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); - u8 clusters_num = hpre_cluster_num(qm); struct qm_debug *debug = &qm->debug; struct pci_dev *pdev = qm->pdev; void __iomem *io_base; + u8 clusters_num; int i, j, idx; u32 val; @@ -1240,6 +1251,7 @@ static void hpre_show_last_dfx_regs(struct hisi_qm *qm) hpre_com_dfx_regs[i].name, debug->last_words[i], val); } + clusters_num = qm->cap_tables.dev_cap_table[HPRE_CLUSTER_NUM_CAP_IDX].cap_val; for (i = 0; i < clusters_num; i++) { io_base = qm->io_base + hpre_cluster_offsets[i]; for (j = 0; j < cluster_dfx_regs_num; j++) { diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index 3284ca59a3b3..1e947146ca3d 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -237,6 +237,8 @@ #define QM_QOS_MAX_CIR_S 11 #define QM_AUTOSUSPEND_DELAY 3000 +#define QM_DEV_ALG_MAX_LEN 256 + #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ @@ -315,6 +317,13 @@ enum qm_basic_type { QM_VF_IRQ_NUM_CAP, }; +enum qm_pre_store_cap_idx { + QM_EQ_IRQ_TYPE_CAP_IDX = 0x0, + QM_AEQ_IRQ_TYPE_CAP_IDX, + QM_ABN_IRQ_TYPE_CAP_IDX, + QM_PF2VF_IRQ_TYPE_CAP_IDX, +}; + static const struct hisi_qm_cap_info qm_cap_info_comm[] = { {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, @@ -344,6 +353,13 @@ static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, }; +static const u32 qm_pre_store_caps[] = { + QM_EQ_IRQ_TYPE_CAP, + QM_AEQ_IRQ_TYPE_CAP, + QM_ABN_IRQ_TYPE_CAP, + QM_PF2VF_IRQ_TYPE_CAP, +}; + struct qm_mailbox { __le16 w0; __le16 queue_num; @@ -781,6 +797,40 @@ static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; } +int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, + u32 dev_algs_size) +{ + struct device *dev = &qm->pdev->dev; + char *algs, *ptr; + int i; + + if (!qm->uacce) + return 0; + + if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) { + dev_err(dev, "algs size %u is equal or larger than %d.\n", + dev_algs_size, QM_DEV_ALG_MAX_LEN); + return -EINVAL; + } + + algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); + if (!algs) + return -ENOMEM; + + for (i = 0; i < dev_algs_size; i++) + if (alg_msk & dev_algs[i].alg_msk) + strcat(algs, dev_algs[i].alg); + + ptr = 
strrchr(algs, '\n'); + if (ptr) { + *ptr = '\0'; + qm->uacce->algs = algs; + } + + return 0; +} +EXPORT_SYMBOL_GPL(hisi_qm_set_algs); + static u32 qm_get_irq_num(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) @@ -4804,7 +4854,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return; @@ -4821,7 +4871,7 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm) if (qm->fun_type == QM_HW_VF) return 0; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return 0; @@ -4838,7 +4888,7 @@ static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; @@ -4852,7 +4902,7 @@ static int qm_register_mb_cmd_irq(struct hisi_qm *qm) u32 irq_vector, val; int ret; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; @@ -4869,7 +4919,7 @@ static void qm_unregister_aeq_irq(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; @@ -4883,7 +4933,7 @@ static int qm_register_aeq_irq(struct hisi_qm *qm) u32 irq_vector, val; int ret; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; @@ -4901,7 +4951,7 @@ static void qm_unregister_eq_irq(struct hisi_qm *qm) struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; @@ -4915,7 +4965,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm) u32 irq_vector, val; int ret; - val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); + val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; @@ -5003,7 +5053,29 @@ static int qm_get_qp_num(struct hisi_qm *qm) return 0; } -static void qm_get_hw_caps(struct hisi_qm *qm) +static int qm_pre_store_irq_type_caps(struct hisi_qm *qm) +{ + struct hisi_qm_cap_record *qm_cap; + struct pci_dev *pdev = qm->pdev; + size_t i, size; + + size = ARRAY_SIZE(qm_pre_store_caps); + qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); + if (!qm_cap) + return -ENOMEM; + + for (i = 0; i < size; i++) { + qm_cap[i].type = qm_pre_store_caps[i]; + qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info, + qm_pre_store_caps[i], qm->cap_ver); + } + + qm->cap_tables.qm_cap_table = qm_cap; + + return 0; +} + 
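/*
 * Usage sketch only (the wrapper name is illustrative, not part of this
 * patch): once qm_pre_store_irq_type_caps() has filled qm_cap_table, the
 * IRQ registration/unregistration paths in the hunks below read the cached
 * value instead of calling hisi_qm_get_hw_info() against the hardware again.
 */
static u32 example_abn_irq_type_cap(struct hisi_qm *qm)
{
	return qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
}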
+static int qm_get_hw_caps(struct hisi_qm *qm) { const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? qm_cap_info_pf : qm_cap_info_vf; @@ -5034,6 +5106,9 @@ static void qm_get_hw_caps(struct hisi_qm *qm) if (val) set_bit(cap_info[i].type, &qm->caps); } + + /* Fetch and save the value of irq type related capability registers */ + return qm_pre_store_irq_type_caps(qm); } static int qm_get_pci_res(struct hisi_qm *qm) @@ -5055,7 +5130,10 @@ static int qm_get_pci_res(struct hisi_qm *qm) goto err_request_mem_regions; } - qm_get_hw_caps(qm); + ret = qm_get_hw_caps(qm); + if (ret) + goto err_ioremap; + if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { qm->db_interval = QM_QP_DB_INTERVAL; qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 3e57fc04b377..410c83712e28 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -220,6 +220,13 @@ enum sec_cap_type { SEC_CORE4_ALG_BITMAP_HIGH, }; +enum sec_cap_reg_record_idx { + SEC_DRV_ALG_BITMAP_LOW_IDX = 0x0, + SEC_DRV_ALG_BITMAP_HIGH_IDX, + SEC_DEV_ALG_BITMAP_LOW_IDX, + SEC_DEV_ALG_BITMAP_HIGH_IDX, +}; + void sec_destroy_qps(struct hisi_qp **qps, int qp_num); struct hisi_qp **sec_create_qps(void); int sec_register_to_crypto(struct hisi_qm *qm); diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 84ae8ddd1a13..cae7c414bdaf 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -2546,8 +2546,12 @@ static int sec_register_aead(u64 alg_mask) int sec_register_to_crypto(struct hisi_qm *qm) { - u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); - int ret; + u64 alg_mask; + int ret = 0; + + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, + SEC_DRV_ALG_BITMAP_LOW_IDX); + ret = sec_register_skcipher(alg_mask); if (ret) @@ -2562,7 +2566,10 @@ int sec_register_to_crypto(struct hisi_qm *qm) void sec_unregister_from_crypto(struct hisi_qm *qm) { - u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); + u64 alg_mask; + + alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, + SEC_DRV_ALG_BITMAP_LOW_IDX); sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index e384988bda91..4bab5000a13e 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -121,7 +121,6 @@ GENMASK_ULL(42, 25)) #define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \ GENMASK_ULL(45, 43)) -#define SEC_DEV_ALG_MAX_LEN 256 struct sec_hw_error { u32 int_msk; @@ -133,11 +132,6 @@ struct sec_dfx_item { u32 offset; }; -struct sec_dev_alg { - u64 alg_msk; - const char *algs; -}; - static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; @@ -174,15 +168,22 @@ static const struct hisi_qm_cap_info sec_basic_info[] = { {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, }; -static const struct sec_dev_alg sec_dev_algs[] = { { +static const u32 sec_pre_store_caps[] = { + SEC_DRV_ALG_BITMAP_LOW, + SEC_DRV_ALG_BITMAP_HIGH, + SEC_DEV_ALG_BITMAP_LOW, + SEC_DEV_ALG_BITMAP_HIGH, +}; + +static const struct qm_dev_alg sec_dev_algs[] = { { .alg_msk = SEC_CIPHER_BITMAP, - .algs = "cipher\n", + .alg = 
"cipher\n", }, { .alg_msk = SEC_DIGEST_BITMAP, - .algs = "digest\n", + .alg = "digest\n", }, { .alg_msk = SEC_AEAD_BITMAP, - .algs = "aead\n", + .alg = "aead\n", }, }; @@ -395,8 +396,8 @@ u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low) { u32 cap_val_h, cap_val_l; - cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver); - cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver); + cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val; + cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val; return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l; } @@ -1079,37 +1080,31 @@ static int sec_pf_probe_init(struct sec_dev *sec) return ret; } -static int sec_set_qm_algs(struct hisi_qm *qm) +static int sec_pre_store_cap_reg(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - char *algs, *ptr; - u64 alg_mask; - int i; + struct hisi_qm_cap_record *sec_cap; + struct pci_dev *pdev = qm->pdev; + size_t i, size; - if (!qm->use_sva) - return 0; - - algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); - if (!algs) + size = ARRAY_SIZE(sec_pre_store_caps); + sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL); + if (!sec_cap) return -ENOMEM; - alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW); + for (i = 0; i < size; i++) { + sec_cap[i].type = sec_pre_store_caps[i]; + sec_cap[i].cap_val = hisi_qm_get_hw_info(qm, sec_basic_info, + sec_pre_store_caps[i], qm->cap_ver); + } - for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++) - if (alg_mask & sec_dev_algs[i].alg_msk) - strcat(algs, sec_dev_algs[i].algs); - - ptr = strrchr(algs, '\n'); - if (ptr) - *ptr = '\0'; - - qm->uacce->algs = algs; + qm->cap_tables.dev_cap_table = sec_cap; return 0; } static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + u64 alg_msk; int ret; qm->pdev = pdev; @@ -1144,7 +1139,16 @@ static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } - ret = sec_set_qm_algs(qm); + /* Fetch and save the value of capability registers */ + ret = sec_pre_store_cap_reg(qm); + if (ret) { + pci_err(qm->pdev, "Failed to pre-store capability registers!\n"); + hisi_qm_uninit(qm); + return ret; + } + + alg_msk = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH_IDX, SEC_DEV_ALG_BITMAP_LOW_IDX); + ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs)); if (ret) { pci_err(qm->pdev, "Failed to set sec algs!\n"); hisi_qm_uninit(qm); diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index 190b4fecfc74..9e3f5bca27de 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -74,7 +74,6 @@ #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) #define HZIP_WR_PORT BIT(11) -#define HZIP_DEV_ALG_MAX_LEN 256 #define HZIP_ALG_ZLIB_BIT GENMASK(1, 0) #define HZIP_ALG_GZIP_BIT GENMASK(3, 2) #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4) @@ -107,6 +106,14 @@ #define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \ HZIP_CORE_GATED_OOO_EN) +/* zip comp high performance */ +#define HZIP_HIGH_PERF_OFFSET 0x301208 + +enum { + HZIP_HIGH_COMP_RATE, + HZIP_HIGH_COMP_PERF, +}; + static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; @@ -120,23 +127,18 @@ struct zip_dfx_item { u32 offset; }; -struct zip_dev_alg { - u32 alg_msk; - const char *algs; -}; - -static const struct zip_dev_alg zip_dev_algs[] = { { +static const struct qm_dev_alg zip_dev_algs[] = { { .alg_msk = HZIP_ALG_ZLIB_BIT, - .algs = 
"zlib\n", + .alg = "zlib\n", }, { .alg_msk = HZIP_ALG_GZIP_BIT, - .algs = "gzip\n", + .alg = "gzip\n", }, { .alg_msk = HZIP_ALG_DEFLATE_BIT, - .algs = "deflate\n", + .alg = "deflate\n", }, { .alg_msk = HZIP_ALG_LZ77_BIT, - .algs = "lz77_zstd\n", + .alg = "lz77_zstd\n", }, }; @@ -247,6 +249,26 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = { {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0} }; +enum zip_pre_store_cap_idx { + ZIP_CORE_NUM_CAP_IDX = 0x0, + ZIP_CLUSTER_COMP_NUM_CAP_IDX, + ZIP_CLUSTER_DECOMP_NUM_CAP_IDX, + ZIP_DECOMP_ENABLE_BITMAP_IDX, + ZIP_COMP_ENABLE_BITMAP_IDX, + ZIP_DRV_ALG_BITMAP_IDX, + ZIP_DEV_ALG_BITMAP_IDX, +}; + +static const u32 zip_pre_store_caps[] = { + ZIP_CORE_NUM_CAP, + ZIP_CLUSTER_COMP_NUM_CAP, + ZIP_CLUSTER_DECOMP_NUM_CAP, + ZIP_DECOMP_ENABLE_BITMAP, + ZIP_COMP_ENABLE_BITMAP, + ZIP_DRV_ALG_BITMAP, + ZIP_DEV_ALG_BITMAP, +}; + enum { HZIP_COMP_CORE0, HZIP_COMP_CORE1, @@ -352,6 +374,37 @@ static int hzip_diff_regs_show(struct seq_file *s, void *unused) return 0; } DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs); + +static int perf_mode_set(const char *val, const struct kernel_param *kp) +{ + int ret; + u32 n; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || (n != HZIP_HIGH_COMP_PERF && + n != HZIP_HIGH_COMP_RATE)) + return -EINVAL; + + return param_set_int(val, kp); +} + +static const struct kernel_param_ops zip_com_perf_ops = { + .set = perf_mode_set, + .get = param_get_int, +}; + +/* + * perf_mode = 0 means enable high compression rate mode, + * perf_mode = 1 means enable high compression performance mode. + * These two modes only apply to the compression direction. + */ +static u32 perf_mode = HZIP_HIGH_COMP_RATE; +module_param_cb(perf_mode, &zip_com_perf_ops, &perf_mode, 0444); +MODULE_PARM_DESC(perf_mode, "ZIP high perf mode 0(default), 1(enable)"); + static const struct kernel_param_ops zip_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, @@ -410,40 +463,33 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; - cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver); + cap_val = qm->cap_tables.dev_cap_table[ZIP_DRV_ALG_BITMAP_IDX].cap_val; if ((alg & cap_val) == alg) return true; return false; } -static int hisi_zip_set_qm_algs(struct hisi_qm *qm) +static int hisi_zip_set_high_perf(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - char *algs, *ptr; - u32 alg_mask; - int i; + u32 val; + int ret; - if (!qm->use_sva) - return 0; + val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET); + if (perf_mode == HZIP_HIGH_COMP_PERF) + val |= HZIP_HIGH_COMP_PERF; + else + val &= ~HZIP_HIGH_COMP_PERF; - algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); - if (!algs) - return -ENOMEM; + /* Set perf mode */ + writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET); + ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET, + val, val == perf_mode, HZIP_DELAY_1_US, + HZIP_POLL_TIMEOUT_US); + if (ret) + pci_err(qm->pdev, "failed to set perf mode\n"); - alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver); - - for (i = 0; i < ARRAY_SIZE(zip_dev_algs); i++) - if (alg_mask & zip_dev_algs[i].alg_msk) - strcat(algs, zip_dev_algs[i].algs); - - ptr = strrchr(algs, '\n'); - if (ptr) - *ptr = '\0'; - - qm->uacce->algs = algs; - - return 0; + return ret; } static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) @@ -542,10 +588,8 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) } 
/* let's open all compression/decompression cores */ - dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver); - comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, - ZIP_COMP_ENABLE_BITMAP, qm->cap_ver); + dcomp_bm = qm->cap_tables.dev_cap_table[ZIP_DECOMP_ENABLE_BITMAP_IDX].cap_val; + comp_bm = qm->cap_tables.dev_cap_table[ZIP_COMP_ENABLE_BITMAP_IDX].cap_val; writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL); /* enable sqc,cqc writeback */ @@ -772,9 +816,8 @@ static int hisi_zip_core_debug_init(struct hisi_qm *qm) char buf[HZIP_BUF_SIZE]; int i; - zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); - zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, - qm->cap_ver); + zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; + zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; for (i = 0; i < zip_core_num; i++) { if (i < zip_comp_core_num) @@ -916,7 +959,7 @@ static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) u32 zip_core_num; int i, j, idx; - zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); + zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); @@ -972,9 +1015,9 @@ static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) hzip_com_dfx_regs[i].name, debug->last_words[i], val); } - zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); - zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, - qm->cap_ver); + zip_core_num = qm->cap_tables.dev_cap_table[ZIP_CORE_NUM_CAP_IDX].cap_val; + zip_comp_core_num = qm->cap_tables.dev_cap_table[ZIP_CLUSTER_COMP_NUM_CAP_IDX].cap_val; + for (i = 0; i < zip_core_num; i++) { if (i < zip_comp_core_num) scnprintf(buf, sizeof(buf), "Comp_core-%d", i); @@ -1115,6 +1158,10 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) if (ret) return ret; + ret = hisi_zip_set_high_perf(qm); + if (ret) + return ret; + hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); @@ -1126,8 +1173,31 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) return ret; } +static int zip_pre_store_cap_reg(struct hisi_qm *qm) +{ + struct hisi_qm_cap_record *zip_cap; + struct pci_dev *pdev = qm->pdev; + size_t i, size; + + size = ARRAY_SIZE(zip_pre_store_caps); + zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL); + if (!zip_cap) + return -ENOMEM; + + for (i = 0; i < size; i++) { + zip_cap[i].type = zip_pre_store_caps[i]; + zip_cap[i].cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, + zip_pre_store_caps[i], qm->cap_ver); + } + + qm->cap_tables.dev_cap_table = zip_cap; + + return 0; +} + static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { + u64 alg_msk; int ret; qm->pdev = pdev; @@ -1163,7 +1233,16 @@ static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) return ret; } - ret = hisi_zip_set_qm_algs(qm); + /* Fetch and save the value of capability registers */ + ret = zip_pre_store_cap_reg(qm); + if (ret) { + pci_err(qm->pdev, "Failed to pre-store capability registers!\n"); + hisi_qm_uninit(qm); + return ret; + } + + alg_msk = qm->cap_tables.dev_cap_table[ZIP_DEV_ALG_BITMAP_IDX].cap_val; + ret = 
hisi_qm_set_algs(qm, alg_msk, zip_dev_algs, ARRAY_SIZE(zip_dev_algs)); if (ret) { pci_err(qm->pdev, "Failed to set zip algs!\n"); hisi_qm_uninit(qm); diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c index 32a37e3850c5..f59e32115268 100644 --- a/drivers/crypto/inside-secure/safexcel_cipher.c +++ b/drivers/crypto/inside-secure/safexcel_cipher.c @@ -742,9 +742,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, max(totlen_src, totlen_dst)); return -EINVAL; } - if (sreq->nr_src > 0) - dma_map_sg(priv->dev, src, sreq->nr_src, - DMA_BIDIRECTIONAL); + if (sreq->nr_src > 0 && + !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL)) + return -EIO; } else { if (unlikely(totlen_src && (sreq->nr_src <= 0))) { dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!", @@ -752,8 +752,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, return -EINVAL; } - if (sreq->nr_src > 0) - dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE); + if (sreq->nr_src > 0 && + !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE)) + return -EIO; if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) { dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!", @@ -762,9 +763,11 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring, goto unmap; } - if (sreq->nr_dst > 0) - dma_map_sg(priv->dev, dst, sreq->nr_dst, - DMA_FROM_DEVICE); + if (sreq->nr_dst > 0 && + !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) { + ret = -EIO; + goto unmap; + } } memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len); diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c index f4bc06c24ad8..e7efebf8127f 100644 --- a/drivers/crypto/sa2ul.c +++ b/drivers/crypto/sa2ul.c @@ -1868,9 +1868,8 @@ static int sa_aead_setkey(struct crypto_aead *authenc, crypto_aead_set_flags(ctx->fallback.aead, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); - crypto_aead_setkey(ctx->fallback.aead, key, keylen); - return 0; + return crypto_aead_setkey(ctx->fallback.aead, key, keylen); } static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize) diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c index 7ab20fb95166..3b946f1313ed 100644 --- a/drivers/crypto/sahara.c +++ b/drivers/crypto/sahara.c @@ -44,7 +44,6 @@ #define FLAGS_MODE_MASK 0x000f #define FLAGS_ENCRYPT BIT(0) #define FLAGS_CBC BIT(1) -#define FLAGS_NEW_KEY BIT(3) #define SAHARA_HDR_BASE 0x00800000 #define SAHARA_HDR_SKHA_ALG_AES 0 @@ -142,8 +141,6 @@ struct sahara_hw_link { }; struct sahara_ctx { - unsigned long flags; - /* AES-specific context */ int keylen; u8 key[AES_KEYSIZE_128]; @@ -152,6 +149,7 @@ struct sahara_ctx { struct sahara_aes_reqctx { unsigned long mode; + u8 iv_out[AES_BLOCK_SIZE]; struct skcipher_request fallback_req; // keep at the end }; @@ -447,27 +445,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) int ret; int i, j; int idx = 0; + u32 len; - /* Copy new key if necessary */ - if (ctx->flags & FLAGS_NEW_KEY) { - memcpy(dev->key_base, ctx->key, ctx->keylen); - ctx->flags &= ~FLAGS_NEW_KEY; + memcpy(dev->key_base, ctx->key, ctx->keylen); - if (dev->flags & FLAGS_CBC) { - dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE; - dev->hw_desc[idx]->p1 = dev->iv_phys_base; - } else { - dev->hw_desc[idx]->len1 = 0; - dev->hw_desc[idx]->p1 = 0; - } - dev->hw_desc[idx]->len2 = ctx->keylen; - dev->hw_desc[idx]->p2 = dev->key_phys_base; - dev->hw_desc[idx]->next = 
dev->hw_phys_desc[1]; - - dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev); - - idx++; + if (dev->flags & FLAGS_CBC) { + dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE; + dev->hw_desc[idx]->p1 = dev->iv_phys_base; + } else { + dev->hw_desc[idx]->len1 = 0; + dev->hw_desc[idx]->p1 = 0; } + dev->hw_desc[idx]->len2 = ctx->keylen; + dev->hw_desc[idx]->p2 = dev->key_phys_base; + dev->hw_desc[idx]->next = dev->hw_phys_desc[1]; + dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev); + + idx++; + dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total); if (dev->nb_in_sg < 0) { @@ -489,24 +484,27 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) DMA_TO_DEVICE); if (!ret) { dev_err(dev->device, "couldn't map in sg\n"); - goto unmap_in; + return -EINVAL; } + ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); if (!ret) { dev_err(dev->device, "couldn't map out sg\n"); - goto unmap_out; + goto unmap_in; } /* Create input links */ dev->hw_desc[idx]->p1 = dev->hw_phys_link[0]; sg = dev->in_sg; + len = dev->total; for (i = 0; i < dev->nb_in_sg; i++) { - dev->hw_link[i]->len = sg->length; + dev->hw_link[i]->len = min(len, sg->length); dev->hw_link[i]->p = sg->dma_address; if (i == (dev->nb_in_sg - 1)) { dev->hw_link[i]->next = 0; } else { + len -= min(len, sg->length); dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; sg = sg_next(sg); } @@ -515,12 +513,14 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) /* Create output links */ dev->hw_desc[idx]->p2 = dev->hw_phys_link[i]; sg = dev->out_sg; + len = dev->total; for (j = i; j < dev->nb_out_sg + i; j++) { - dev->hw_link[j]->len = sg->length; + dev->hw_link[j]->len = min(len, sg->length); dev->hw_link[j]->p = sg->dma_address; if (j == (dev->nb_out_sg + i - 1)) { dev->hw_link[j]->next = 0; } else { + len -= min(len, sg->length); dev->hw_link[j]->next = dev->hw_phys_link[j + 1]; sg = sg_next(sg); } @@ -539,9 +539,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) return 0; -unmap_out: - dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, - DMA_FROM_DEVICE); unmap_in: dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); @@ -549,8 +546,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev) return -EINVAL; } +static void sahara_aes_cbc_update_iv(struct skcipher_request *req) +{ + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + /* Update IV buffer to contain the last ciphertext block */ + if (rctx->mode & FLAGS_ENCRYPT) { + sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv, + ivsize, req->cryptlen - ivsize); + } else { + memcpy(req->iv, rctx->iv_out, ivsize); + } +} + static int sahara_aes_process(struct skcipher_request *req) { + struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct sahara_dev *dev = dev_ptr; struct sahara_ctx *ctx; struct sahara_aes_reqctx *rctx; @@ -572,8 +585,17 @@ static int sahara_aes_process(struct skcipher_request *req) rctx->mode &= FLAGS_MODE_MASK; dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode; - if ((dev->flags & FLAGS_CBC) && req->iv) - memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128); + if ((dev->flags & FLAGS_CBC) && req->iv) { + unsigned int ivsize = crypto_skcipher_ivsize(skcipher); + + memcpy(dev->iv_base, req->iv, ivsize); + + if (!(dev->flags & FLAGS_ENCRYPT)) { + sg_pcopy_to_buffer(req->src, sg_nents(req->src), + rctx->iv_out, ivsize, + req->cryptlen - ivsize); 
+ } + } /* assign new context to device */ dev->ctx = ctx; @@ -586,16 +608,20 @@ static int sahara_aes_process(struct skcipher_request *req) timeout = wait_for_completion_timeout(&dev->dma_completion, msecs_to_jiffies(SAHARA_TIMEOUT_MS)); - if (!timeout) { - dev_err(dev->device, "AES timeout\n"); - return -ETIMEDOUT; - } dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg, DMA_FROM_DEVICE); dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); + if (!timeout) { + dev_err(dev->device, "AES timeout\n"); + return -ETIMEDOUT; + } + + if ((dev->flags & FLAGS_CBC) && req->iv) + sahara_aes_cbc_update_iv(req); + return 0; } @@ -609,7 +635,6 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, /* SAHARA only supports 128bit keys */ if (keylen == AES_KEYSIZE_128) { memcpy(ctx->key, key, keylen); - ctx->flags |= FLAGS_NEW_KEY; return 0; } @@ -625,12 +650,40 @@ static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, return crypto_skcipher_setkey(ctx->fallback, key, keylen); } +static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode) +{ + struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); + struct sahara_ctx *ctx = crypto_skcipher_ctx( + crypto_skcipher_reqtfm(req)); + + skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); + skcipher_request_set_callback(&rctx->fallback_req, + req->base.flags, + req->base.complete, + req->base.data); + skcipher_request_set_crypt(&rctx->fallback_req, req->src, + req->dst, req->cryptlen, req->iv); + + if (mode & FLAGS_ENCRYPT) + return crypto_skcipher_encrypt(&rctx->fallback_req); + + return crypto_skcipher_decrypt(&rctx->fallback_req); +} + static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) { struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); + struct sahara_ctx *ctx = crypto_skcipher_ctx( + crypto_skcipher_reqtfm(req)); struct sahara_dev *dev = dev_ptr; int err = 0; + if (!req->cryptlen) + return 0; + + if (unlikely(ctx->keylen != AES_KEYSIZE_128)) + return sahara_aes_fallback(req, mode); + dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n", req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); @@ -653,81 +706,21 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode) static int sahara_aes_ecb_encrypt(struct skcipher_request *req) { - struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); - struct sahara_ctx *ctx = crypto_skcipher_ctx( - crypto_skcipher_reqtfm(req)); - - if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); - skcipher_request_set_callback(&rctx->fallback_req, - req->base.flags, - req->base.complete, - req->base.data); - skcipher_request_set_crypt(&rctx->fallback_req, req->src, - req->dst, req->cryptlen, req->iv); - return crypto_skcipher_encrypt(&rctx->fallback_req); - } - return sahara_aes_crypt(req, FLAGS_ENCRYPT); } static int sahara_aes_ecb_decrypt(struct skcipher_request *req) { - struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); - struct sahara_ctx *ctx = crypto_skcipher_ctx( - crypto_skcipher_reqtfm(req)); - - if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); - skcipher_request_set_callback(&rctx->fallback_req, - req->base.flags, - req->base.complete, - req->base.data); - skcipher_request_set_crypt(&rctx->fallback_req, req->src, - req->dst, req->cryptlen, req->iv); - return crypto_skcipher_decrypt(&rctx->fallback_req); - } - return 
sahara_aes_crypt(req, 0); } static int sahara_aes_cbc_encrypt(struct skcipher_request *req) { - struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); - struct sahara_ctx *ctx = crypto_skcipher_ctx( - crypto_skcipher_reqtfm(req)); - - if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); - skcipher_request_set_callback(&rctx->fallback_req, - req->base.flags, - req->base.complete, - req->base.data); - skcipher_request_set_crypt(&rctx->fallback_req, req->src, - req->dst, req->cryptlen, req->iv); - return crypto_skcipher_encrypt(&rctx->fallback_req); - } - return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); } static int sahara_aes_cbc_decrypt(struct skcipher_request *req) { - struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req); - struct sahara_ctx *ctx = crypto_skcipher_ctx( - crypto_skcipher_reqtfm(req)); - - if (unlikely(ctx->keylen != AES_KEYSIZE_128)) { - skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); - skcipher_request_set_callback(&rctx->fallback_req, - req->base.flags, - req->base.complete, - req->base.data); - skcipher_request_set_crypt(&rctx->fallback_req, req->src, - req->dst, req->cryptlen, req->iv); - return crypto_skcipher_decrypt(&rctx->fallback_req); - } - return sahara_aes_crypt(req, FLAGS_CBC); } @@ -784,6 +777,7 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev, int start) { struct scatterlist *sg; + unsigned int len; unsigned int i; int ret; @@ -805,12 +799,14 @@ static int sahara_sha_hw_links_create(struct sahara_dev *dev, if (!ret) return -EFAULT; + len = rctx->total; for (i = start; i < dev->nb_in_sg + start; i++) { - dev->hw_link[i]->len = sg->length; + dev->hw_link[i]->len = min(len, sg->length); dev->hw_link[i]->p = sg->dma_address; if (i == (dev->nb_in_sg + start - 1)) { dev->hw_link[i]->next = 0; } else { + len -= min(len, sg->length); dev->hw_link[i]->next = dev->hw_phys_link[i + 1]; sg = sg_next(sg); } @@ -891,24 +887,6 @@ static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev, return 0; } -static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes) -{ - if (!sg || !sg->length) - return nbytes; - - while (nbytes && sg) { - if (nbytes <= sg->length) { - sg->length = nbytes; - sg_mark_end(sg); - break; - } - nbytes -= sg->length; - sg = sg_next(sg); - } - - return nbytes; -} - static int sahara_sha_prepare_request(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); @@ -945,36 +923,20 @@ static int sahara_sha_prepare_request(struct ahash_request *req) hash_later, 0); } - /* nbytes should now be multiple of blocksize */ - req->nbytes = req->nbytes - hash_later; - - sahara_walk_and_recalc(req->src, req->nbytes); - + rctx->total = len - hash_later; /* have data from previous operation and current */ if (rctx->buf_cnt && req->nbytes) { sg_init_table(rctx->in_sg_chain, 2); sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt); - sg_chain(rctx->in_sg_chain, 2, req->src); - - rctx->total = req->nbytes + rctx->buf_cnt; rctx->in_sg = rctx->in_sg_chain; - - req->src = rctx->in_sg_chain; /* only data from previous operation */ } else if (rctx->buf_cnt) { - if (req->src) - rctx->in_sg = req->src; - else - rctx->in_sg = rctx->in_sg_chain; - /* buf was copied into rembuf above */ + rctx->in_sg = rctx->in_sg_chain; sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt); - rctx->total = rctx->buf_cnt; /* no data from previous operation */ } else { rctx->in_sg = req->src; - rctx->total = req->nbytes; - req->src 
= rctx->in_sg; } /* on next call, we only have the remaining data in the buffer */ @@ -995,7 +957,10 @@ static int sahara_sha_process(struct ahash_request *req) return ret; if (rctx->first) { - sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0); + ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0); + if (ret) + return ret; + dev->hw_desc[0]->next = 0; rctx->first = 0; } else { @@ -1003,7 +968,10 @@ static int sahara_sha_process(struct ahash_request *req) sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0); dev->hw_desc[0]->next = dev->hw_phys_desc[1]; - sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1); + ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1); + if (ret) + return ret; + dev->hw_desc[1]->next = 0; } @@ -1016,18 +984,19 @@ static int sahara_sha_process(struct ahash_request *req) timeout = wait_for_completion_timeout(&dev->dma_completion, msecs_to_jiffies(SAHARA_TIMEOUT_MS)); - if (!timeout) { - dev_err(dev->device, "SHA timeout\n"); - return -ETIMEDOUT; - } if (rctx->sg_in_idx) dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE); + if (!timeout) { + dev_err(dev->device, "SHA timeout\n"); + return -ETIMEDOUT; + } + memcpy(rctx->context, dev->context_base, rctx->context_size); - if (req->result) + if (req->result && rctx->last) memcpy(req->result, rctx->context, rctx->digest_size); return 0; @@ -1171,8 +1140,7 @@ static int sahara_sha_import(struct ahash_request *req, const void *in) static int sahara_sha_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct sahara_sha_reqctx) + - SHA_BUFFER_LEN + SHA256_BLOCK_SIZE); + sizeof(struct sahara_sha_reqctx)); return 0; } diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h index 154590e1f764..7059bbe5a2eb 100644 --- a/drivers/crypto/virtio/virtio_crypto_common.h +++ b/drivers/crypto/virtio/virtio_crypto_common.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ struct data_queue { char name[32]; struct crypto_engine *engine; + struct tasklet_struct done_task; }; struct virtio_crypto { diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 3842915ea743..56dc0935c774 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -72,27 +72,28 @@ int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterl return 0; } -static void virtcrypto_dataq_callback(struct virtqueue *vq) +static void virtcrypto_done_task(unsigned long data) { - struct virtio_crypto *vcrypto = vq->vdev->priv; + struct data_queue *data_vq = (struct data_queue *)data; + struct virtqueue *vq = data_vq->vq; struct virtio_crypto_request *vc_req; - unsigned long flags; unsigned int len; - unsigned int qid = vq->index; - spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags); do { virtqueue_disable_cb(vq); while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) { - spin_unlock_irqrestore( - &vcrypto->data_vq[qid].lock, flags); if (vc_req->alg_cb) vc_req->alg_cb(vc_req, len); - spin_lock_irqsave( - &vcrypto->data_vq[qid].lock, flags); } } while (!virtqueue_enable_cb(vq)); - spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags); +} + +static void virtcrypto_dataq_callback(struct virtqueue *vq) +{ + struct virtio_crypto *vcrypto = vq->vdev->priv; + struct data_queue *dq = &vcrypto->data_vq[vq->index]; + + tasklet_schedule(&dq->done_task); } static int 
virtcrypto_find_vqs(struct virtio_crypto *vi) @@ -150,6 +151,8 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi) ret = -ENOMEM; goto err_engine; } + tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task, + (unsigned long)&vi->data_vq[i]); } kfree(names); @@ -496,12 +499,15 @@ static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto) static void virtcrypto_remove(struct virtio_device *vdev) { struct virtio_crypto *vcrypto = vdev->priv; + int i; dev_info(&vdev->dev, "Start virtcrypto_remove.\n"); flush_work(&vcrypto->config_work); if (virtcrypto_dev_started(vcrypto)) virtcrypto_dev_stop(vcrypto); + for (i = 0; i < vcrypto->max_data_queues; i++) + tasklet_kill(&vcrypto->data_vq[i].done_task); virtio_reset_device(vdev); virtcrypto_free_unused_reqs(vcrypto); virtcrypto_clear_crypto_engines(vcrypto); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index bd4142431980..1f1483a9e525 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1403,7 +1403,7 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, return -EINVAL; write_seqlock(&cxlsd->target_lock); - for (i = 0; i < cxlsd->nr_targets; i++) { + for (i = 0; i < cxlsd->cxld.interleave_ways; i++) { struct cxl_dport *dport = find_dport(port, target_map[i]); if (!dport) { diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index ebc1b028555c..2f7187dbfa2d 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -331,7 +331,7 @@ static ssize_t interleave_ways_store(struct device *dev, return rc; /* - * Even for x3, x9, and x12 interleaves the region interleave must be a + * Even for x3, x6, and x12 interleaves the region interleave must be a * power of 2 multiple of the host bridge interleave. */ if (!is_power_of_2(val / cxld->interleave_ways) || diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 592d48ecf241..7abcd7f2848e 100644 --- a/drivers/dma/stm32-dma.c +++ b/drivers/dma/stm32-dma.c @@ -1249,8 +1249,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( enum dma_slave_buswidth max_width; struct stm32_dma_desc *desc; size_t xfer_count, offset; - u32 num_sgs, best_burst, dma_burst, threshold; - int i; + u32 num_sgs, best_burst, threshold; + int dma_burst, i; num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS); desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT); @@ -1268,6 +1268,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST, threshold, max_width); dma_burst = stm32_dma_get_burst(chan, best_burst); + if (dma_burst < 0) { + kfree(desc); + return NULL; + } stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); desc->sg_req[i].chan_reg.dma_scr = diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index f13674081cb6..4dca21b39bf7 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -1133,7 +1133,7 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) decode_register(other, OCX_OTHER_SIZE, ocx_com_errors, ctx->reg_com_int); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); for (lane = 0; lane < OCX_RX_LANES; lane++) if (ctx->reg_com_int & BIT(lane)) { @@ -1142,12 +1142,12 @@ static irqreturn_t thunderx_ocx_com_threaded_isr(int irq, void *irq_id) lane, ctx->reg_lane_int[lane], lane, ctx->reg_lane_stat11[lane]); - strncat(msg, other, OCX_MESSAGE_SIZE); + strlcat(msg, other, OCX_MESSAGE_SIZE); 
 			decode_register(other, OCX_OTHER_SIZE, ocx_lane_errors, ctx->reg_lane_int[lane]);
-			strncat(msg, other, OCX_MESSAGE_SIZE);
+			strlcat(msg, other, OCX_MESSAGE_SIZE);
 		}
 	if (ctx->reg_com_int & OCX_COM_INT_CE)
@@ -1217,7 +1217,7 @@ static irqreturn_t thunderx_ocx_lnk_threaded_isr(int irq, void *irq_id)
 	decode_register(other, OCX_OTHER_SIZE, ocx_com_link_errors, ctx->reg_com_link_int);
-	strncat(msg, other, OCX_MESSAGE_SIZE);
+	strlcat(msg, other, OCX_MESSAGE_SIZE);
 	if (ctx->reg_com_link_int & OCX_COM_LINK_INT_UE)
 		edac_device_handle_ue(ocx->edac_dev, 0, 0, msg);
@@ -1896,7 +1896,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
 	decode_register(other, L2C_OTHER_SIZE, l2_errors, ctx->reg_int);
-	strncat(msg, other, L2C_MESSAGE_SIZE);
+	strlcat(msg, other, L2C_MESSAGE_SIZE);
 	if (ctx->reg_int & mask_ue)
 		edac_device_handle_ue(l2c->edac_dev, 0, 0, msg);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 17c9d825188b..667ff40f3935 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -279,6 +279,51 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
 #define QUIRK_TI_SLLZ059	0x20
 #define QUIRK_IR_WAKE		0x40
+// On the PCI Express Root Complex of any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind
+// an Asmedia ASM1083/1085 PCIe-to-PCI bridge has an inconvenient quirk: read accesses to the
+// 'Isochronous Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected
+// system reboot. The exact mechanism is not clear, since read access to the other registers
+// (e.g. the 'Node ID' register) is safe; it is probably due to detection of some type of PCIe error.
+#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
+
+#if IS_ENABLED(CONFIG_X86)
+
+static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
+{
+	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
+}
+
+#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080
+
+static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
+{
+	const struct pci_dev *pcie_to_pci_bridge;
+
+	// Detect any type of AMD Ryzen machine.
+	if (!static_cpu_has(X86_FEATURE_ZEN))
+		return false;
+
+	// Detect VIA VT6306/6307/6308.
+	if (pdev->vendor != PCI_VENDOR_ID_VIA)
+		return false;
+	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
+		return false;
+
+	// Detect Asmedia ASM1083/1085.
+	pcie_to_pci_bridge = pdev->bus->self;
+	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
+		return false;
+	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
+		return false;
+
+	return true;
+}
+
+#else
+#define has_reboot_by_cycle_timer_read_quirk(ohci)	false
+#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev)	false
+#endif
+
 /* In case of multiple matches in ohci_quirks[], only the first one is used.
*/ static const struct { unsigned short vendor, device, revision, flags; @@ -1713,6 +1758,9 @@ static u32 get_cycle_time(struct fw_ohci *ohci) s32 diff01, diff12; int i; + if (has_reboot_by_cycle_timer_read_quirk(ohci)) + return 0; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); if (ohci->quirks & QUIRK_CYCLE_TIMER) { @@ -3615,6 +3663,9 @@ static int pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; + if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev)) + ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; + /* * Because dma_alloc_coherent() allocates at least one page, * we save space by using a common buffer for the AR request/ diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 431bda9165c3..2775bcafe40f 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -131,7 +131,7 @@ struct perf_dom_info { u32 opp_count; u32 sustained_freq_khz; u32 sustained_perf_level; - u32 mult_factor; + unsigned long mult_factor; char name[SCMI_MAX_STR_SIZE]; struct scmi_opp opp[MAX_OPPS]; struct scmi_fc_info *fc_info; @@ -223,8 +223,8 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->mult_factor = 1000; else dom_info->mult_factor = - (dom_info->sustained_freq_khz * 1000) / - dom_info->sustained_perf_level; + (dom_info->sustained_freq_khz * 1000UL) + / dom_info->sustained_perf_level; strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c index d081a6312627..bf19dd66c213 100644 --- a/drivers/firmware/meson/meson_sm.c +++ b/drivers/firmware/meson/meson_sm.c @@ -313,11 +313,14 @@ static int __init meson_sm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fw); - pr_info("secure-monitor enabled\n"); + if (devm_of_platform_populate(dev)) + goto out_in_base; if (sysfs_create_group(&pdev->dev.kobj, &meson_sm_sysfs_attr_group)) goto out_in_base; + pr_info("secure-monitor enabled\n"); + return 0; out_in_base: diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c index 597d1a367d96..6231c98ba291 100644 --- a/drivers/firmware/ti_sci.c +++ b/drivers/firmware/ti_sci.c @@ -161,7 +161,7 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, { struct device *dev = &pdev->dev; struct resource *res; - char debug_name[50] = "ti_sci_debug@"; + char debug_name[50]; /* Debug region is optional */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -178,10 +178,10 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, /* Setup NULL termination */ info->debug_buffer[info->debug_region_size] = 0; - info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), - sizeof(debug_name) - - sizeof("ti_sci_debug@")), - 0444, NULL, info, &ti_sci_debug_fops); + snprintf(debug_name, sizeof(debug_name), "ti_sci_debug@%s", + dev_name(dev)); + info->d = debugfs_create_file(debug_name, 0444, NULL, info, + &ti_sci_debug_fops); if (IS_ERR(info->d)) return PTR_ERR(info->d); diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index c22fcaa44a61..6b7d47a52b10 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -283,13 +283,15 @@ static void dwapb_irq_enable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, 
GPIO_INTEN); - val |= BIT(irqd_to_hwirq(d)); + val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq); dwapb_write(gpio, GPIO_INTEN, val); + val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq); + dwapb_write(gpio, GPIO_INTMASK, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } @@ -297,12 +299,14 @@ static void dwapb_irq_disable(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct dwapb_gpio *gpio = to_dwapb_gpio(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(d); unsigned long flags; u32 val; raw_spin_lock_irqsave(&gc->bgpio_lock, flags); - val = dwapb_read(gpio, GPIO_INTEN); - val &= ~BIT(irqd_to_hwirq(d)); + val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq); + dwapb_write(gpio, GPIO_INTMASK, val); + val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq); dwapb_write(gpio, GPIO_INTEN, val); raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags); } diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 6ab1cf489d03..e40c93f0960b 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -2444,10 +2444,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip) return 0; } -/* - * gpio_ioctl() - ioctl handler for the GPIO chardev - */ -static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg) { struct gpio_chardev_data *cdev = file->private_data; struct gpio_device *gdev = cdev->gdev; @@ -2484,6 +2481,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } } +/* + * gpio_ioctl() - ioctl handler for the GPIO chardev + */ +static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct gpio_chardev_data *cdev = file->private_data; + + return call_ioctl_locked(file, cmd, arg, cdev->gdev, + gpio_ioctl_unlocked); +} + #ifdef CONFIG_COMPAT static long gpio_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index fd796574f87a..06ab6066da61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -479,6 +479,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (!adev->didt_rreg) + return -EOPNOTSUPP; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -535,6 +538,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user if (size & 0x3 || *pos & 0x3) return -EINVAL; + if (!adev->didt_wreg) + return -EOPNOTSUPP; + r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -590,7 +596,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, int r; if (!adev->smc_rreg) - return -EPERM; + return -EOPNOTSUPP; if (size & 0x3 || *pos & 0x3) return -EINVAL; @@ -649,7 +655,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * int r; if (!adev->smc_wreg) - return -EPERM; + return -EOPNOTSUPP; if (size & 0x3 || *pos & 0x3) return -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8a1b84aaaf71..a5352e5e2bd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1976,15 +1976,8 @@ static int 
amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	adev->firmware.gpu_info_fw = NULL;
-	if (adev->mman.discovery_bin) {
-		/*
-		 * FIXME: The bounding box is still needed by Navi12, so
-		 * temporarily read it from gpu_info firmware. Should be dropped
-		 * when DAL no longer needs it.
-		 */
-		if (adev->asic_type != CHIP_NAVI12)
-			return 0;
-	}
+	if (adev->mman.discovery_bin)
+		return 0;
 	switch (adev->asic_type) {
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b9983ca99eb7..a8e1f2cfe12d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2202,6 +2202,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
+	pci_wake_from_d3(pdev, TRUE);
+
 	/*
 	 * For runpm implemented via BACO, PMFW will handle the
 	 * timing for BACO in and out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 4642cff0e1a4..69b3829bbe53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -631,13 +631,14 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
 	if (!entry->bo)
 		return;
+
+	entry->bo->vm_bo = NULL;
 	shadow = amdgpu_bo_shadowed(entry->bo);
 	if (shadow) {
 		ttm_bo_set_bulk_move(&shadow->tbo, NULL);
 		amdgpu_bo_unref(&shadow);
 	}
 	ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
-	entry->bo->vm_bo = NULL;
 	spin_lock(&entry->vm->status_lock);
 	list_del(&entry->vm_status);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 809eca54fc61..856db876af14 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1690,6 +1690,32 @@ static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
 		*flags |= AMD_CG_SUPPORT_SDMA_LS;
 }
+static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+	 * disallow GFXOFF in some cases leading to
+	 * hangs in SDMA. Disallow GFXOFF while SDMA is active.
+	 * We can probably just limit this to 5.2.3,
+	 * but it shouldn't hurt for other parts since
+	 * this GFXOFF will be disallowed anyway when SDMA is
+	 * active, this just makes it explicit.
+	 */
+	amdgpu_gfx_off_ctrl(adev, false);
+}
+
+static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
+	 * disallow GFXOFF in some cases leading to
+	 * hangs in SDMA. Allow GFXOFF when SDMA is complete.
+ */ + amdgpu_gfx_off_ctrl(adev, true); +} + const struct amd_ip_funcs sdma_v5_2_ip_funcs = { .name = "sdma_v5_2", .early_init = sdma_v5_2_early_init, @@ -1738,6 +1764,8 @@ static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = { .test_ib = sdma_v5_2_ring_test_ib, .insert_nop = sdma_v5_2_ring_insert_nop, .pad_ib = sdma_v5_2_ring_pad_ib, + .begin_use = sdma_v5_2_ring_begin_use, + .end_use = sdma_v5_2_ring_end_use, .emit_wreg = sdma_v5_2_ring_emit_wreg, .emit_reg_wait = sdma_v5_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2eddd7f6cd41..811dd3ea6362 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1411,9 +1411,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio.funcs->get_clockgating_state(adev, flags); + if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state) + adev->nbio.funcs->get_clockgating_state(adev, flags); - adev->hdp.funcs->get_clock_gating_state(adev, flags); + if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state) + adev->hdp.funcs->get_clock_gating_state(adev, flags); if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) { @@ -1429,9 +1431,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags) } /* AMD_CG_SUPPORT_ROM_MGCG */ - adev->smuio.funcs->get_clock_gating_state(adev, flags); + if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state) + adev->smuio.funcs->get_clock_gating_state(adev, flags); - adev->df.funcs->get_clockgating_state(adev, flags); + if (adev->df.funcs && adev->df.funcs->get_clockgating_state) + adev->df.funcs->get_clockgating_state(adev, flags); } static int soc15_common_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 88bf6221d4be..8a7705db0b9a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -1019,7 +1019,7 @@ int svm_migrate_init(struct amdgpu_device *adev) } else { res = devm_request_free_mem_region(adev->dev, &iomem_resource, size); if (IS_ERR(res)) - return -ENOMEM; + return PTR_ERR(res); pgmap->range.start = res->start; pgmap->range.end = res->end; pgmap->type = MEMORY_DEVICE_PRIVATE; @@ -1035,11 +1035,10 @@ int svm_migrate_init(struct amdgpu_device *adev) r = devm_memremap_pages(adev->dev, pgmap); if (IS_ERR(r)) { pr_err("failed to register HMM device memory\n"); + if (pgmap->type == MEMORY_DEVICE_PRIVATE) + devm_release_mem_region(adev->dev, res->start, resource_size(res)); /* Disable SVM support capability */ pgmap->type = 0; - if (pgmap->type == MEMORY_DEVICE_PRIVATE) - devm_release_mem_region(adev->dev, res->start, - res->end - res->start + 1); return PTR_ERR(r); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 713f893d2530..705d9e91b5aa 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1403,10 +1403,11 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g num_cpu++; } + if (list_empty(&kdev->io_link_props)) + return -ENODATA; + gpu_link = list_first_entry(&kdev->io_link_props, - struct kfd_iolink_properties, list); - if (!gpu_link) - return -ENOMEM; + struct kfd_iolink_properties, list); for (i = 0; i < num_cpu; i++) { /* CPU <--> GPU */ @@ 
-1484,15 +1485,17 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev, peer->gpu->adev)) return ret; + if (list_empty(&kdev->io_link_props)) + return -ENODATA; + iolink1 = list_first_entry(&kdev->io_link_props, - struct kfd_iolink_properties, list); - if (!iolink1) - return -ENOMEM; + struct kfd_iolink_properties, list); + + if (list_empty(&peer->io_link_props)) + return -ENODATA; iolink2 = list_first_entry(&peer->io_link_props, - struct kfd_iolink_properties, list); - if (!iolink2) - return -ENOMEM; + struct kfd_iolink_properties, list); props = kfd_alloc_struct(props); if (!props) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6d5f3c5fb4a6..13e0b521e3db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5104,6 +5104,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, if (plane->type == DRM_PLANE_TYPE_CURSOR) return; + if (new_plane_state->rotation != DRM_MODE_ROTATE_0) + goto ffu; + num_clips = drm_plane_get_damage_clips_count(new_plane_state); clips = drm_plane_get_damage_clips(new_plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index e507d2e1410b..93e40e0a1508 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1018,13 +1018,20 @@ static enum bp_result get_ss_info_v4_5( DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage); break; case AS_SIGNAL_TYPE_DISPLAY_PORT: - ss_info->spread_spectrum_percentage = + if (bp->base.integrated_info) { + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage); + ss_info->spread_spectrum_percentage = + bp->base.integrated_info->gpuclk_ss_percentage; + ss_info->type.CENTER_MODE = + bp->base.integrated_info->gpuclk_ss_type; + } else { + ss_info->spread_spectrum_percentage = disp_cntl_tbl->dp_ss_percentage; - ss_info->spread_spectrum_range = + ss_info->spread_spectrum_range = disp_cntl_tbl->dp_ss_rate_10hz * 10; - if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) - ss_info->type.CENTER_MODE = true; - + if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE) + ss_info->type.CENTER_MODE = true; + } DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage); break; case AS_SIGNAL_TYPE_GPU_PLL: @@ -2830,6 +2837,8 @@ static enum bp_result get_integrated_info_v2_2( info->ma_channel_number = info_v2_2->umachannelnumber; info->dp_ss_control = le16_to_cpu(info_v2_2->reserved1); + info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage; + info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type; for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) { info->ext_disp_conn_info.gu_id[i] = diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 893991a0eb97..28b83133db91 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -324,7 +324,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -332,7 +332,7 @@ static struct wm_table lpddr5_wm_table = { 
{ .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -340,7 +340,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, @@ -348,7 +348,7 @@ static struct wm_table lpddr5_wm_table = { { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, - .pstate_latency_us = 11.65333, + .pstate_latency_us = 129.0, .sr_exit_time_us = 11.5, .sr_enter_plus_exit_time_us = 14.5, .valid = true, diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 848db8676adf..46c2b991aa10 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -465,6 +465,7 @@ struct dc_cursor_mi_param { struct fixed31_32 v_scale_ratio; enum dc_rotation_angle rotation; bool mirror; + struct dc_stream_state *stream; }; /* IPP related types */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index d84579da6400..009b5861a3fe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -3427,7 +3427,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz, .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert, .rotation = pipe_ctx->plane_state->rotation, - .mirror = pipe_ctx->plane_state->horizontal_mirror + .mirror = pipe_ctx->plane_state->horizontal_mirror, + .stream = pipe_ctx->stream, }; bool pipe_split_on = false; bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) || diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 4566bc7abf17..aa252dc26326 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1075,8 +1075,16 @@ void hubp2_cursor_set_position( if (src_y_offset < 0) src_y_offset = 0; /* Save necessary cursor info x, y position. w, h is saved in attribute func. 
*/ - hubp->cur_rect.x = src_x_offset + param->viewport.x; - hubp->cur_rect.y = src_y_offset + param->viewport.y; + if (param->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + param->rotation != ROTATION_ANGLE_0) { + hubp->cur_rect.x = 0; + hubp->cur_rect.y = 0; + hubp->cur_rect.w = param->stream->timing.h_addressable; + hubp->cur_rect.h = param->stream->timing.v_addressable; + } else { + hubp->cur_rect.x = src_x_offset + param->viewport.x; + hubp->cur_rect.y = src_y_offset + param->viewport.y; + } } void hubp2_clk_cntl(struct hubp *hubp, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 53262f6bc40b..72bec33e371f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -994,5 +994,8 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); + + dc_dmub_srv_p_state_delegate(dc, + context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 9d224bb2b3df..ce893fe1c69f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -438,7 +438,115 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = { .use_urgent_burst_bw = 0 }; -struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 }; +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { + .clock_limits = { + { + .state = 0, + .dcfclk_mhz = 560.0, + .fabricclk_mhz = 560.0, + .dispclk_mhz = 513.0, + .dppclk_mhz = 513.0, + .phyclk_mhz = 540.0, + .socclk_mhz = 560.0, + .dscclk_mhz = 171.0, + .dram_speed_mts = 1069.0, + }, + { + .state = 1, + .dcfclk_mhz = 694.0, + .fabricclk_mhz = 694.0, + .dispclk_mhz = 642.0, + .dppclk_mhz = 642.0, + .phyclk_mhz = 600.0, + .socclk_mhz = 694.0, + .dscclk_mhz = 214.0, + .dram_speed_mts = 1324.0, + }, + { + .state = 2, + .dcfclk_mhz = 875.0, + .fabricclk_mhz = 875.0, + .dispclk_mhz = 734.0, + .dppclk_mhz = 734.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 875.0, + .dscclk_mhz = 245.0, + .dram_speed_mts = 1670.0, + }, + { + .state = 3, + .dcfclk_mhz = 1000.0, + .fabricclk_mhz = 1000.0, + .dispclk_mhz = 1100.0, + .dppclk_mhz = 1100.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1000.0, + .dscclk_mhz = 367.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 4, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + { + .state = 5, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 1200.0, + .dispclk_mhz = 1284.0, + .dppclk_mhz = 1284.0, + .phyclk_mhz = 810.0, + .socclk_mhz = 1200.0, + .dscclk_mhz = 428.0, + .dram_speed_mts = 2000.0, + }, + }, + + .num_states = 5, + .sr_exit_time_us = 1.9, + .sr_enter_plus_exit_time_us = 4.4, + .urgent_latency_us = 3.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0, + 
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 40.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 40.0, + .max_request_size_bytes = 256, + .dram_channel_width_bytes = 16, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.5, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 131, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 16, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 45.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3850, + .xfc_bus_transport_time_us = 20, + .xfc_xbuf_latency_tolerance_us = 50, + .use_urgent_burst_bw = 0, +}; struct _vcs_dpi_ip_params_st dcn2_1_ip = { .odm_capable = 1, diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index bc96d0211360..813463ffe15c 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -417,6 +417,8 @@ struct integrated_info { /* V2.1 */ struct edp_info edp1_info; struct edp_info edp2_info; + uint32_t gpuclk_ss_percentage; + uint32_t gpuclk_ss_type; }; /* diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 9edd39322c82..67287ad07226 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -816,6 +816,8 @@ bool is_psr_su_specific_panel(struct dc_link *link) ((dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x08) || (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x07))) isPSRSUSupported = false; + else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03) + isPSRSUSupported = false; else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1) isPSRSUSupported = true; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index f5e08b60f66e..d17bfa111aa7 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -2748,10 +2748,8 @@ static int kv_parse_power_table(struct amdgpu_device *adev) non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); + if (ps == NULL) return -ENOMEM; - } adev->pm.dpm.ps[i].ps_priv = ps; k = 0; idx = (u8 *)&power_state->v2.clockInfoIndex[0]; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index d3fe149d8476..291223ea7ba7 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -272,10 +272,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, dep_table); - if (ret) { - 
amdgpu_free_extended_power_table(adev); + if (ret) return ret; - } } if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) @@ -283,10 +281,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); + if (ret) return ret; - } } if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) @@ -294,10 +290,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); + if (ret) return ret; - } } if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) @@ -305,10 +299,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); + if (ret) return ret; - } } if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = @@ -339,10 +331,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) kcalloc(psl->ucNumEntries, sizeof(struct amdgpu_phase_shedding_limits_entry), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) return -ENOMEM; - } entry = &psl->entries[0]; for (i = 0; i < psl->ucNumEntries; i++) { @@ -383,10 +373,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) ATOM_PPLIB_CAC_Leakage_Record *entry; u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) return -ENOMEM; - } entry = &cac_table->entries[0]; for (i = 0; i < cac_table->ucNumEntries; i++) { if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { @@ -438,10 +426,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) return -ENOMEM; - } adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = limits->numEntries; entry = &limits->entries[0]; @@ -493,10 +479,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); + if 
(!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) return -ENOMEM; - } adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = limits->numEntries; entry = &limits->entries[0]; @@ -525,10 +509,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) sizeof(struct amdgpu_clock_voltage_dependency_entry); adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) return -ENOMEM; - } adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = limits->numEntries; entry = &limits->entries[0]; @@ -548,10 +530,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) le16_to_cpu(ext_hdr->usPPMTableOffset)); adev->pm.dpm.dyn_state.ppm_table = kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.ppm_table) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.ppm_table) return -ENOMEM; - } adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = le16_to_cpu(ppm->usCpuCoreNumber); @@ -583,10 +563,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) sizeof(struct amdgpu_clock_voltage_dependency_entry); adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) return -ENOMEM; - } adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = limits->numEntries; entry = &limits->entries[0]; @@ -606,10 +584,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) ATOM_PowerTune_Table *pt; adev->pm.dpm.dyn_state.cac_tdp_table = kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_tdp_table) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.cac_tdp_table) return -ENOMEM; - } if (rev > 0) { ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) (mode_info->atom_context->bios + data_offset + @@ -645,10 +621,8 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) ret = amdgpu_parse_clk_voltage_dep_table( &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, dep_table); - if (ret) { - kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); + if (ret) return ret; - } } } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index c89cfef7cafa..dc0a6fba7050 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7379,10 +7379,9 @@ static int si_dpm_init(struct amdgpu_device *adev) kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { - amdgpu_free_extended_power_table(adev); + if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) return -ENOMEM; - } + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c 
b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index a31a62a1ce0b..5e9410117712 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -2987,6 +2987,8 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) result = smu7_get_evv_voltages(hwmgr); if (result) { pr_info("Get EVV Voltage Failed. Abort Driver loading!\n"); + kfree(hwmgr->backend); + hwmgr->backend = NULL; return -EINVAL; } } else { @@ -3032,8 +3034,10 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } result = smu7_update_edc_leakage_table(hwmgr); - if (result) + if (result) { + smu7_hwmgr_backend_fini(hwmgr); return result; + } return 0; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c index 946212a95598..5e3b8edcf794 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c @@ -403,7 +403,8 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp) static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type) { - int ret, tries = 3; + int ret = -EINVAL; + int tries = 3; u32 i; for (i = 0; i < tries; i++) { diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 7ef78283e3d3..926ab5c3c31a 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -2097,7 +2097,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id) } else { if (tc->hpd_pin < 0 || tc->hpd_pin > 1) { dev_err(dev, "failed to parse HPD number\n"); - return ret; + return -EINVAL; } } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 1b5c27ed2737..ff4d0564122a 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -527,6 +527,7 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, u32 request_val = AUX_CMD_REQ(msg->request); u8 *buf = msg->buffer; unsigned int len = msg->size; + unsigned int short_len; unsigned int val; int ret; u8 addr_len[SN_AUX_LENGTH_REG + 1 - SN_AUX_ADDR_19_16_REG]; @@ -600,7 +601,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux, } if (val & AUX_IRQ_STATUS_AUX_SHORT) { - ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len); + ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &short_len); + len = min(len, short_len); if (ret) goto exit; } else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) { diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index e0e015243a60..b588fea12502 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -179,7 +179,7 @@ static int tpd12s015_probe(struct platform_device *pdev) return 0; } -static int __exit tpd12s015_remove(struct platform_device *pdev) +static int tpd12s015_remove(struct platform_device *pdev) { struct tpd12s015_device *tpd = platform_get_drvdata(pdev); @@ -197,7 +197,7 @@ MODULE_DEVICE_TABLE(of, tpd12s015_of_match); static struct platform_driver tpd12s015_driver = { .probe = tpd12s015_probe, - .remove = __exit_p(tpd12s015_remove), + .remove = tpd12s015_remove, .driver = { .name = "tpd12s015", .of_match_table = tpd12s015_of_match, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index df9bf3c9206e..cb90e70d85e8 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -715,8 +715,7 @@ int drm_mode_setcrtc(struct drm_device *dev, 
void *data, struct drm_mode_set set; uint32_t __user *set_connectors_ptr; struct drm_modeset_acquire_ctx ctx; - int ret; - int i; + int ret, i, num_connectors = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EOPNOTSUPP; @@ -871,6 +870,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, connector->name); connector_set[i] = connector; + num_connectors++; } } @@ -879,7 +879,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, set.y = crtc_req->y; set.mode = mode; set.connectors = connector_set; - set.num_connectors = crtc_req->count_connectors; + set.num_connectors = num_connectors; set.fb = fb; if (drm_drv_uses_atomic_modeset(dev)) @@ -892,7 +892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, drm_framebuffer_put(fb); if (connector_set) { - for (i = 0; i < crtc_req->count_connectors; i++) { + for (i = 0; i < num_connectors; i++) { if (connector_set[i]) drm_connector_put(connector_set[i]); } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 203bf8d6c34c..d41a5eaa3e89 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -895,8 +895,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto err_minors; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_modeset_register_all(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_modeset_register_all(dev); + if (ret) + goto err_unload; + } DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver->name, driver->major, driver->minor, @@ -906,6 +909,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) goto out_unlock; +err_unload: + if (dev->driver->unload) + dev->driver->unload(dev); err_minors: remove_compat_control_link(dev); drm_minor_unregister(dev, DRM_MINOR_PRIMARY); diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c index a971590b8132..e2c7373f20c6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c @@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev, return 0; if (!priv->mapping) { - void *mapping; + void *mapping = NULL; if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) mapping = arm_iommu_create_mapping(&platform_bus_type, EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE); else if (IS_ENABLED(CONFIG_IOMMU_DMA)) mapping = iommu_get_domain_for_dev(priv->dma_dev); - else - mapping = ERR_PTR(-ENODEV); - if (IS_ERR(mapping)) - return PTR_ERR(mapping); + if (!mapping) + return -ENODEV; priv->mapping = mapping; } diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index b7c11bdce2c8..1a7194a653ae 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) return ret; crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI); + if (IS_ERR(crtc)) + return PTR_ERR(crtc); crtc->pipe_clk = &hdata->phy_clk; ret = hdmi_create_connector(encoder); diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 18f0a5ae3bac..a502af0b6dd4 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -41,6 +41,7 @@ #include "intel_global_state.h" #include "intel_hdcp.h" #include "intel_psr.h" +#include "intel_fb.h" #include "skl_universal_plane.h" /** @@ -302,198 +303,6 @@ intel_crtc_destroy_state(struct drm_crtc 
*crtc, kfree(crtc_state); } -static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, - int num_scalers_need, struct intel_crtc *intel_crtc, - const char *name, int idx, - struct intel_plane_state *plane_state, - int *scaler_id) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - int j; - u32 mode; - - if (*scaler_id < 0) { - /* find a free scaler */ - for (j = 0; j < intel_crtc->num_scalers; j++) { - if (scaler_state->scalers[j].in_use) - continue; - - *scaler_id = j; - scaler_state->scalers[*scaler_id].in_use = 1; - break; - } - } - - if (drm_WARN(&dev_priv->drm, *scaler_id < 0, - "Cannot find scaler for %s:%d\n", name, idx)) - return; - - /* set scaler mode */ - if (plane_state && plane_state->hw.fb && - plane_state->hw.fb->format->is_yuv && - plane_state->hw.fb->format->num_planes > 1) { - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - if (DISPLAY_VER(dev_priv) == 9) { - mode = SKL_PS_SCALER_MODE_NV12; - } else if (icl_is_hdr_plane(dev_priv, plane->id)) { - /* - * On gen11+'s HDR planes we only use the scaler for - * scaling. They have a dedicated chroma upsampler, so - * we don't need the scaler to upsample the UV plane. - */ - mode = PS_SCALER_MODE_NORMAL; - } else { - struct intel_plane *linked = - plane_state->planar_linked_plane; - - mode = PS_SCALER_MODE_PLANAR; - - if (linked) - mode |= PS_PLANE_Y_SEL(linked->id); - } - } else if (DISPLAY_VER(dev_priv) >= 10) { - mode = PS_SCALER_MODE_NORMAL; - } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { - /* - * when only 1 scaler is in use on a pipe with 2 scalers - * scaler 0 operates in high quality (HQ) mode. - * In this case use scaler 0 to take advantage of HQ mode - */ - scaler_state->scalers[*scaler_id].in_use = 0; - *scaler_id = 0; - scaler_state->scalers[0].in_use = 1; - mode = SKL_PS_SCALER_MODE_HQ; - } else { - mode = SKL_PS_SCALER_MODE_DYN; - } - - drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", - intel_crtc->pipe, *scaler_id, name, idx); - scaler_state->scalers[*scaler_id].mode = mode; -} - -/** - * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests - * @dev_priv: i915 device - * @intel_crtc: intel crtc - * @crtc_state: incoming crtc_state to validate and setup scalers - * - * This function sets up scalers based on staged scaling requests for - * a @crtc and its planes. It is called from crtc level check path. If request - * is a supportable request, it attaches scalers to requested planes and crtc. 
- * - * This function takes into account the current scaler(s) in use by any planes - * not being part of this atomic state - * - * Returns: - * 0 - scalers were setup succesfully - * error code - otherwise - */ -int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_plane *plane = NULL; - struct intel_plane *intel_plane; - struct intel_plane_state *plane_state = NULL; - struct intel_crtc_scaler_state *scaler_state = - &crtc_state->scaler_state; - struct drm_atomic_state *drm_state = crtc_state->uapi.state; - struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); - int num_scalers_need; - int i; - - num_scalers_need = hweight32(scaler_state->scaler_users); - - /* - * High level flow: - * - staged scaler requests are already in scaler_state->scaler_users - * - check whether staged scaling requests can be supported - * - add planes using scalers that aren't in current transaction - * - assign scalers to requested users - * - as part of plane commit, scalers will be committed - * (i.e., either attached or detached) to respective planes in hw - * - as part of crtc_commit, scaler will be either attached or detached - * to crtc in hw - */ - - /* fail if required scalers > available scalers */ - if (num_scalers_need > intel_crtc->num_scalers){ - drm_dbg_kms(&dev_priv->drm, - "Too many scaling requests %d > %d\n", - num_scalers_need, intel_crtc->num_scalers); - return -EINVAL; - } - - /* walkthrough scaler_users bits and start assigning scalers */ - for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { - int *scaler_id; - const char *name; - int idx; - - /* skip if scaler not required */ - if (!(scaler_state->scaler_users & (1 << i))) - continue; - - if (i == SKL_CRTC_INDEX) { - name = "CRTC"; - idx = intel_crtc->base.base.id; - - /* panel fitter case: assign as a crtc scaler */ - scaler_id = &scaler_state->scaler_id; - } else { - name = "PLANE"; - - /* plane scaler case: assign as a plane scaler */ - /* find the plane that set the bit as scaler_user */ - plane = drm_state->planes[i].ptr; - - /* - * to enable/disable hq mode, add planes that are using scaler - * into this transaction - */ - if (!plane) { - struct drm_plane_state *state; - - /* - * GLK+ scalers don't have a HQ mode so it - * isn't necessary to change between HQ and dyn mode - * on those platforms. 
- */ - if (DISPLAY_VER(dev_priv) >= 10) - continue; - - plane = drm_plane_from_index(&dev_priv->drm, i); - state = drm_atomic_get_plane_state(drm_state, plane); - if (IS_ERR(state)) { - drm_dbg_kms(&dev_priv->drm, - "Failed to add [PLANE:%d] to drm_state\n", - plane->base.id); - return PTR_ERR(state); - } - } - - intel_plane = to_intel_plane(plane); - idx = plane->base.id; - - /* plane on different crtc cannot be a scaler user of this crtc */ - if (drm_WARN_ON(&dev_priv->drm, - intel_plane->pipe != intel_crtc->pipe)) - continue; - - plane_state = intel_atomic_get_new_plane_state(intel_state, - intel_plane); - scaler_id = &plane_state->scaler_id; - } - - intel_atomic_setup_scaler(scaler_state, num_scalers_need, - intel_crtc, name, idx, - plane_state, scaler_id); - } - - return 0; -} - struct drm_atomic_state * intel_atomic_state_alloc(struct drm_device *dev) { diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 1dc439983dd9..e506f6a87344 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -52,8 +52,4 @@ struct intel_crtc_state * intel_atomic_get_crtc_state(struct drm_atomic_state *state, struct intel_crtc *crtc); -int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, - struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state); - #endif /* __INTEL_ATOMIC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1777a12f2f42..fb8d1d63407a 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6481,6 +6481,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state, return -EINVAL; } + /* + * FIXME: Bigjoiner+async flip is busted currently. + * Remove this check once the issues are fixed. 
+ */ + if (new_crtc_state->bigjoiner_pipes) { + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] async flip disallowed with bigjoiner\n", + crtc->base.base.id, crtc->base.name); + return -EINVAL; + } + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { if (plane->pipe != crtc->pipe) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5970f4149090..4699c2110226 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3707,7 +3707,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp->train_set, crtc_state->lane_count); drm_dp_set_phy_test_pattern(&intel_dp->aux, data, - link_status[DP_DPCD_REV]); + intel_dp->dpcd[DP_DPCD_REV]); } static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index eefa33c555ac..c69a638796c6 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -1176,7 +1176,8 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) { struct drm_i915_private *i915 = to_i915(fb->base.dev); - return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR; + return (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) && + intel_fb_uses_dpt(&fb->base); } static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) @@ -1312,9 +1313,11 @@ plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, unsigned int tile_width, unsigned int src_stride_tiles, unsigned int dst_stride_tiles) { + struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int stride_tiles; - if (IS_ALDERLAKE_P(to_i915(fb->base.dev))) + if ((IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14) && + src_stride_tiles < dst_stride_tiles) stride_tiles = src_stride_tiles; else stride_tiles = dst_stride_tiles; @@ -1441,8 +1444,20 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p size += remap_info->size; } else { - unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, - remap_info->width); + unsigned int dst_stride; + + /* + * The hardware automagically calculates the CCS AUX surface + * stride from the main surface stride so can't really remap a + * smaller subset (unless we'd remap in whole AUX page units). 
+ */ + if (intel_fb_needs_pot_stride_remap(fb) && + intel_fb_is_ccs_modifier(fb->base.modifier)) + dst_stride = remap_info->src_stride; + else + dst_stride = remap_info->width; + + dst_stride = plane_view_dst_stride_tiles(fb, color_plane, dst_stride); assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); color_plane_info->mapping_stride = dst_stride * @@ -1508,7 +1523,8 @@ static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_vi memset(view, 0, sizeof(*view)); view->gtt.type = view_type; - if (view_type == I915_GTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915)) + if (view_type == I915_GTT_VIEW_REMAPPED && + (IS_ALDERLAKE_P(i915) || DISPLAY_VER(i915) >= 14)) view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE; } diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 90f42f63128e..0b74f91e865d 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -337,6 +337,263 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, return 0; } +static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state, + int num_scalers_need, struct intel_crtc *intel_crtc, + const char *name, int idx, + struct intel_plane_state *plane_state, + int *scaler_id) +{ + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int j; + u32 mode; + + if (*scaler_id < 0) { + /* find a free scaler */ + for (j = 0; j < intel_crtc->num_scalers; j++) { + if (scaler_state->scalers[j].in_use) + continue; + + *scaler_id = j; + scaler_state->scalers[*scaler_id].in_use = 1; + break; + } + } + + if (drm_WARN(&dev_priv->drm, *scaler_id < 0, + "Cannot find scaler for %s:%d\n", name, idx)) + return -EINVAL; + + /* set scaler mode */ + if (plane_state && plane_state->hw.fb && + plane_state->hw.fb->format->is_yuv && + plane_state->hw.fb->format->num_planes > 1) { + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + + if (DISPLAY_VER(dev_priv) == 9) { + mode = SKL_PS_SCALER_MODE_NV12; + } else if (icl_is_hdr_plane(dev_priv, plane->id)) { + /* + * On gen11+'s HDR planes we only use the scaler for + * scaling. They have a dedicated chroma upsampler, so + * we don't need the scaler to upsample the UV plane. + */ + mode = PS_SCALER_MODE_NORMAL; + } else { + struct intel_plane *linked = + plane_state->planar_linked_plane; + + mode = PS_SCALER_MODE_PLANAR; + + if (linked) + mode |= PS_PLANE_Y_SEL(linked->id); + } + } else if (DISPLAY_VER(dev_priv) >= 10) { + mode = PS_SCALER_MODE_NORMAL; + } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) { + /* + * when only 1 scaler is in use on a pipe with 2 scalers + * scaler 0 operates in high quality (HQ) mode. + * In this case use scaler 0 to take advantage of HQ mode + */ + scaler_state->scalers[*scaler_id].in_use = 0; + *scaler_id = 0; + scaler_state->scalers[0].in_use = 1; + mode = SKL_PS_SCALER_MODE_HQ; + } else { + mode = SKL_PS_SCALER_MODE_DYN; + } + + /* + * FIXME: we should also check the scaler factors for pfit, so + * this shouldn't be tied directly to planes. 
+ */ + if (plane_state && plane_state->hw.fb) { + const struct drm_framebuffer *fb = plane_state->hw.fb; + const struct drm_rect *src = &plane_state->uapi.src; + const struct drm_rect *dst = &plane_state->uapi.dst; + int hscale, vscale, max_vscale, max_hscale; + + /* + * FIXME: When two scalers are needed, but only one of + * them needs to downscale, we should make sure that + * the one that needs downscaling support is assigned + * as the first scaler, so we don't reject downscaling + * unnecessarily. + */ + + if (DISPLAY_VER(dev_priv) >= 14) { + /* + * On versions 14 and up, only the first + * scaler supports a vertical scaling factor + * of more than 1.0, while a horizontal + * scaling factor of 3.0 is supported. + */ + max_hscale = 0x30000 - 1; + if (*scaler_id == 0) + max_vscale = 0x30000 - 1; + else + max_vscale = 0x10000; + + } else if (DISPLAY_VER(dev_priv) >= 10 || + !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) { + max_hscale = 0x30000 - 1; + max_vscale = 0x30000 - 1; + } else { + max_hscale = 0x20000 - 1; + max_vscale = 0x20000 - 1; + } + + /* + * FIXME: We should change the if-else block above to + * support HQ vs dynamic scaler properly. + */ + + /* Check if required scaling is within limits */ + hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale); + vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale); + + if (hscale < 0 || vscale < 0) { + drm_dbg_kms(&dev_priv->drm, + "Scaler %d doesn't support required plane scaling\n", + *scaler_id); + drm_rect_debug_print("src: ", src, true); + drm_rect_debug_print("dst: ", dst, false); + + return -EINVAL; + } + } + + drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n", + intel_crtc->pipe, *scaler_id, name, idx); + scaler_state->scalers[*scaler_id].mode = mode; + + return 0; +} + +/** + * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests + * @dev_priv: i915 device + * @intel_crtc: intel crtc + * @crtc_state: incoming crtc_state to validate and setup scalers + * + * This function sets up scalers based on staged scaling requests for + * a @crtc and its planes. It is called from crtc level check path. If request + * is a supportable request, it attaches scalers to requested planes and crtc. 
+ * + * This function takes into account the current scaler(s) in use by any planes + * not being part of this atomic state + * + * Returns: + * 0 - scalers were setup successfully + * error code - otherwise + */ +int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_plane *plane = NULL; + struct intel_plane *intel_plane; + struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + struct drm_atomic_state *drm_state = crtc_state->uapi.state; + struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state); + int num_scalers_need; + int i; + + num_scalers_need = hweight32(scaler_state->scaler_users); + + /* + * High level flow: + * - staged scaler requests are already in scaler_state->scaler_users + * - check whether staged scaling requests can be supported + * - add planes using scalers that aren't in current transaction + * - assign scalers to requested users + * - as part of plane commit, scalers will be committed + * (i.e., either attached or detached) to respective planes in hw + * - as part of crtc_commit, scaler will be either attached or detached + * to crtc in hw + */ + + /* fail if required scalers > available scalers */ + if (num_scalers_need > intel_crtc->num_scalers) { + drm_dbg_kms(&dev_priv->drm, + "Too many scaling requests %d > %d\n", + num_scalers_need, intel_crtc->num_scalers); + return -EINVAL; + } + + /* walkthrough scaler_users bits and start assigning scalers */ + for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) { + struct intel_plane_state *plane_state = NULL; + int *scaler_id; + const char *name; + int idx, ret; + + /* skip if scaler not required */ + if (!(scaler_state->scaler_users & (1 << i))) + continue; + + if (i == SKL_CRTC_INDEX) { + name = "CRTC"; + idx = intel_crtc->base.base.id; + + /* panel fitter case: assign as a crtc scaler */ + scaler_id = &scaler_state->scaler_id; + } else { + name = "PLANE"; + + /* plane scaler case: assign as a plane scaler */ + /* find the plane that set the bit as scaler_user */ + plane = drm_state->planes[i].ptr; + + /* + * to enable/disable hq mode, add planes that are using scaler + * into this transaction + */ + if (!plane) { + struct drm_plane_state *state; + + /* + * GLK+ scalers don't have a HQ mode so it + * isn't necessary to change between HQ and dyn mode + * on those platforms. 
+ */ + if (DISPLAY_VER(dev_priv) >= 10) + continue; + + plane = drm_plane_from_index(&dev_priv->drm, i); + state = drm_atomic_get_plane_state(drm_state, plane); + if (IS_ERR(state)) { + drm_dbg_kms(&dev_priv->drm, + "Failed to add [PLANE:%d] to drm_state\n", + plane->base.id); + return PTR_ERR(state); + } + } + + intel_plane = to_intel_plane(plane); + idx = plane->base.id; + + /* plane on different crtc cannot be a scaler user of this crtc */ + if (drm_WARN_ON(&dev_priv->drm, + intel_plane->pipe != intel_crtc->pipe)) + continue; + + plane_state = intel_atomic_get_new_plane_state(intel_state, + intel_plane); + scaler_id = &plane_state->scaler_id; + } + + ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need, + intel_crtc, name, idx, + plane_state, scaler_id); + if (ret < 0) + return ret; + } + + return 0; +} + static int glk_coef_tap(int i) { return i % 7; diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h index 0097d5d08e10..f040f6ac061f 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.h +++ b/drivers/gpu/drm/i915/display/skl_scaler.h @@ -8,17 +8,22 @@ #include enum drm_scaling_filter; -struct drm_i915_private; -struct intel_crtc_state; -struct intel_plane_state; -struct intel_plane; enum pipe; +struct drm_i915_private; +struct intel_crtc; +struct intel_crtc_state; +struct intel_plane; +struct intel_plane_state; int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); +int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *crtc_state); + void skl_pfit_enable(const struct intel_crtc_state *crtc_state); void skl_program_plane_scaler(struct intel_plane *plane, @@ -26,4 +31,5 @@ void skl_program_plane_scaler(struct intel_plane *plane, const struct intel_plane_state *plane_state); void skl_detach_scalers(const struct intel_crtc_state *crtc_state); void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); + #endif diff --git a/drivers/gpu/drm/mediatek/mtk_disp_merge.c b/drivers/gpu/drm/mediatek/mtk_disp_merge.c index 6428b6203ffe..211140e87568 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_merge.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_merge.c @@ -104,7 +104,7 @@ void mtk_merge_stop_cmdq(struct device *dev, struct cmdq_pkt *cmdq_pkt) mtk_ddp_write(cmdq_pkt, 0, &priv->cmdq_reg, priv->regs, DISP_REG_MERGE_CTRL); - if (priv->async_clk) + if (!cmdq_pkt && priv->async_clk) reset_control_reset(priv->reset_ctl); } diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 2c850b6d945b..519e23a2a017 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2669,3 +2669,4 @@ MODULE_AUTHOR("Markus Schneider-Pargmann "); MODULE_AUTHOR("Bo-Chen Chen "); MODULE_DESCRIPTION("MediaTek DisplayPort Driver"); MODULE_LICENSE("GPL"); +MODULE_SOFTDEP("pre: phy_mtk_dp"); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 7fb52a573436..558000db4a10 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -736,6 +736,7 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, crtc); struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state); struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + unsigned long flags; if (mtk_crtc->event && mtk_crtc_state->base.event) DRM_ERROR("new event while 
there is still a pending event\n"); @@ -743,7 +744,11 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc, if (mtk_crtc_state->base.event) { mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc); WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&crtc->dev->event_lock, flags); mtk_crtc->event = mtk_crtc_state->base.event; + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + mtk_crtc_state->base.event = NULL; } } diff --git a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c index eecfa98ff52e..b288bb6eeecc 100644 --- a/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c +++ b/drivers/gpu/drm/mediatek/mtk_mdp_rdma.c @@ -223,8 +223,7 @@ int mtk_mdp_rdma_clk_enable(struct device *dev) { struct mtk_mdp_rdma *rdma = dev_get_drvdata(dev); - clk_prepare_enable(rdma->clk); - return 0; + return clk_prepare_enable(rdma->clk); } void mtk_mdp_rdma_clk_disable(struct device *dev) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index f0c2349404b4..aebd09e2d408 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -390,6 +390,11 @@ void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane, .destroy = drm_plane_cleanup, \ DRM_GEM_SHADOW_PLANE_FUNCS +void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format); +void mgag200_crtc_set_gamma(struct mga_device *mdev, + const struct drm_format_info *format, + struct drm_color_lut *lut); + enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode); int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *new_state); diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c index bce267e0f7de..8d4538b71047 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200er.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c @@ -202,6 +202,11 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200er_reset_tagfifo(mdev); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c index ac957f42abe1..56e6f986bff3 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c @@ -203,6 +203,11 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200ev_set_hiprilvl(mdev); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c index bd6e573c9a1a..ff2b3c6622e7 100644 --- a/drivers/gpu/drm/mgag200/mgag200_g200se.c +++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c @@ -334,6 +334,11 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc, mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format); + if (crtc_state->gamma_lut) + mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data); + else + mgag200_crtc_set_gamma_linear(mdev, format); + mgag200_enable_display(mdev); if (funcs->enable_vidrst) diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c 
b/drivers/gpu/drm/mgag200/mgag200_mode.c index ae90b260312a..554adf05e073 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -28,8 +28,8 @@ * This file contains setup code for the CRTC. */ -static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, - const struct drm_format_info *format) +void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, + const struct drm_format_info *format) { int i; @@ -65,9 +65,9 @@ static void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, } } -static void mgag200_crtc_set_gamma(struct mga_device *mdev, - const struct drm_format_info *format, - struct drm_color_lut *lut) +void mgag200_crtc_set_gamma(struct mga_device *mdev, + const struct drm_format_info *format, + struct drm_color_lut *lut) { int i; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c index 6c0ffe8e4adb..5a5821e59dc1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark @@ -124,7 +124,7 @@ static void dpu_crtc_setup_lm_misr(struct dpu_crtc_state *crtc_state) continue; /* Calculate MISR over 1 frame */ - m->hw_lm->ops.setup_misr(m->hw_lm, true, 1); + m->hw_lm->ops.setup_misr(m->hw_lm); } } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index 547f9f2b9fcb..b0eb881f8af1 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2013 Red Hat * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * * Author: Rob Clark */ @@ -257,7 +257,7 @@ void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc) if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) continue; - phys->hw_intf->ops.setup_misr(phys->hw_intf, true, 1); + phys->hw_intf->ops.setup_misr(phys->hw_intf); } } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c index b9dddf576c02..384558d2f960 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
*/ @@ -322,9 +322,9 @@ static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf) return DPU_REG_READ(c, INTF_LINE_COUNT); } -static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf, bool enable, u32 frame_count) +static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf) { - dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, enable, frame_count); + dpu_hw_setup_misr(&intf->hw, INTF_MISR_CTRL, 0x1); } static int dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf, u32 *misr_value) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h index 643dd10bc030..e75339b96a1d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. */ @@ -80,7 +80,7 @@ struct dpu_hw_intf_ops { void (*bind_pingpong_blk)(struct dpu_hw_intf *intf, bool enable, const enum dpu_pingpong pp); - void (*setup_misr)(struct dpu_hw_intf *intf, bool enable, u32 frame_count); + void (*setup_misr)(struct dpu_hw_intf *intf); int (*collect_misr)(struct dpu_hw_intf *intf, u32 *misr_value); }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c index f5120ea91ede..cc04fb979fb5 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ @@ -99,9 +99,9 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx, } } -static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count) +static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx) { - dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, enable, frame_count); + dpu_hw_setup_misr(&ctx->hw, LM_MISR_CTRL, 0x0); } static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h index 652ddfdedec3..0a050eb247b9 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ @@ -57,7 +58,7 @@ struct dpu_hw_lm_ops { /** * setup_misr: Enable/disable MISR */ - void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count); + void (*setup_misr)(struct dpu_hw_mixer *ctx); /** * collect_misr: Read MISR signature diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c index 8062228eada6..1b7439ae686a 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
*/ #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__ @@ -450,9 +450,11 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, return 0; } +/* + * note: Aside from encoders, input_sel should be set to 0x0 by default + */ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, - u32 misr_ctrl_offset, - bool enable, u32 frame_count) + u32 misr_ctrl_offset, u8 input_sel) { u32 config = 0; @@ -461,15 +463,9 @@ void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, /* Clear old MISR value (in case it's read before a new value is calculated)*/ wmb(); - if (enable) { - config = (frame_count & MISR_FRAME_COUNT_MASK) | - MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK; - - DPU_REG_WRITE(c, misr_ctrl_offset, config); - } else { - DPU_REG_WRITE(c, misr_ctrl_offset, 0); - } - + config = MISR_FRAME_COUNT | MISR_CTRL_ENABLE | MISR_CTRL_FREE_RUN_MASK | + ((input_sel & 0xF) << 24); + DPU_REG_WRITE(c, misr_ctrl_offset, config); } int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h index 27f4c39e35ab..4ae2a434372c 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. */ @@ -13,7 +13,7 @@ #include "dpu_hw_catalog.h" #define REG_MASK(n) ((BIT(n)) - 1) -#define MISR_FRAME_COUNT_MASK 0xFF +#define MISR_FRAME_COUNT 0x1 #define MISR_CTRL_ENABLE BIT(8) #define MISR_CTRL_STATUS BIT(9) #define MISR_CTRL_STATUS_CLEAR BIT(10) @@ -350,9 +350,7 @@ u64 _dpu_hw_get_qos_lut(const struct dpu_qos_lut_tbl *tbl, u32 total_fl); void dpu_hw_setup_misr(struct dpu_hw_blk_reg_map *c, - u32 misr_ctrl_offset, - bool enable, - u32 frame_count); + u32 misr_ctrl_offset, u8 input_sel); int dpu_hw_collect_misr(struct dpu_hw_blk_reg_map *c, u32 misr_ctrl_offset, diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c index 169f9de4a12a..3100957225a7 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c @@ -269,6 +269,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, { struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); struct mdp4_kms *mdp4_kms = get_kms(crtc); + unsigned long flags; DBG("%s", mdp4_crtc->name); @@ -281,6 +282,14 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc, mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err); mdp4_disable(mdp4_kms); + if (crtc->state->event && !crtc->state->active) { + WARN_ON(mdp4_crtc->event); + spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags); + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags); + } + mdp4_crtc->enabled = false; } diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c index 5b71a5a5cd85..cdbc75e3d1f6 100644 --- a/drivers/gpu/drm/nouveau/nv04_fence.c +++ b/drivers/gpu/drm/nouveau/nv04_fence.c @@ -39,7 +39,7 @@ struct nv04_fence_priv { static int nv04_fence_emit(struct nouveau_fence *fence) { - struct nvif_push *push = fence->channel->chan.push; + struct nvif_push *push = unrcu_pointer(fence->channel)->chan.push; int ret = PUSH_WAIT(push, 2); if (ret == 0) { PUSH_NVSQ(push, NV_SW, 0x0150, 
fence->base.seqno); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c index 6cb5eefa45e9..5a08458fe1b7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c @@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth) type |= 0x00000001; /* PAGE_ALL */ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) - type |= 0x00000004; /* HUB_ONLY */ + type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */ mutex_lock(&vmm->mmu->mutex); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index eaf67b9e5f12..5b6d1668f405 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -68,7 +68,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; struct omap_drm_private *priv = dev->dev_private; - bool fence_cookie = dma_fence_begin_signalling(); dispc_runtime_get(priv->dispc); @@ -91,6 +90,8 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) omap_atomic_wait_for_completion(dev, old_state); drm_atomic_helper_commit_planes(dev, old_state, 0); + + drm_atomic_helper_commit_hw_done(old_state); } else { /* * OMAP3 DSS seems to have issues with the work-around above, @@ -100,12 +101,10 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_planes(dev, old_state, 0); drm_atomic_helper_commit_modeset_enables(dev, old_state); + + drm_atomic_helper_commit_hw_done(old_state); } - drm_atomic_helper_commit_hw_done(old_state); - - dma_fence_end_signalling(fence_cookie); - /* * Wait for completion of the page flips to ensure that old buffers * can't be touched by the hardware anymore before cleaning up planes. 
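A recurring pattern in this series is replacing NULL checks on list_first_entry() with explicit list_empty() guards (see the kfd_add_peer_prop() hunk near the top, which now returns -ENODATA when the io_link_props list is empty). The sketch below is a minimal, userspace-only illustration of why the old "if (!iolink1)" check could never fire; the list_head, container_of() and iolink definitions here are simplified stand-ins written for this illustration, not the real kernel or KFD code.

/*
 * Userspace sketch only: demonstrates that list_first_entry() never
 * yields NULL, even on an empty list, so a NULL check after it is dead
 * code and the caller must test list_empty() first.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the kernel macro: it just does pointer arithmetic. */
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Simplified stand-in for struct kfd_iolink_properties. */
struct iolink {
	int node_to;
	struct list_head list;
};

int main(void)
{
	struct list_head io_link_props = LIST_HEAD_INIT(io_link_props);
	struct iolink *first;

	/*
	 * On an empty list, ->next points back at the head itself, so
	 * list_first_entry() returns a non-NULL pointer computed from
	 * the head -- dereferencing first->node_to would read garbage.
	 */
	first = list_first_entry(&io_link_props, struct iolink, list);
	printf("list_first_entry() on an empty list: %p (never NULL)\n",
	       (void *)first);

	/* The guard the patch adds instead: */
	if (list_empty(&io_link_props)) {
		printf("list_empty() catches it; the kernel hunk returns -ENODATA here\n");
		return 1;
	}

	printf("node_to = %d\n", first->node_to);
	return 0;
}

Compiling and running this sketch (for example "cc -o list_demo list_demo.c && ./list_demo", with the file name chosen arbitrarily here) prints a non-NULL pointer for the empty list and then takes the list_empty() branch, which is exactly the behavior the kfd_add_peer_prop() change relies on.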
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c index eee714cf3f49..3a7fc3ca6a6f 100644 --- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c +++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c @@ -112,6 +112,8 @@ static int kd35t133_unprepare(struct drm_panel *panel) return ret; } + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_disable(ctx->iovcc); regulator_disable(ctx->vdd); diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 225b9884f61a..54b28992db5d 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -288,7 +288,7 @@ static void st7701_init_sequence(struct st7701 *st7701) FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVDD_MASK, DIV_ROUND_CLOSEST(desc->avdd_mv - 6200, 200)) | FIELD_PREP(DSI_CMD2_BK1_PWRCTRL2_AVCL_MASK, - DIV_ROUND_CLOSEST(-4400 + desc->avcl_mv, 200))); + DIV_ROUND_CLOSEST(-4400 - desc->avcl_mv, 200))); /* T2D = 0.2us * T2D[3:0] */ ST7701_DSI(st7701, DSI_CMD2_BK1_SPD1, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 6452e4e900dd..55d243048516 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -71,7 +71,12 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev) } gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL); - gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL); + + /* Only enable the interrupts we care about */ + gpu_write(pfdev, GPU_INT_MASK, + GPU_IRQ_MASK_ERROR | + GPU_IRQ_PERFCNT_SAMPLE_COMPLETED | + GPU_IRQ_CLEAN_CACHES_COMPLETED); return 0; } @@ -313,28 +318,38 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.shader_present, pfdev->features.l2_present); } +static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) +{ + u64 core_mask; + + if (pfdev->features.l2_present == 1) + return U64_MAX; + + /* + * Only support one core group now. + * ~(l2_present - 1) unsets all bits in l2_present except + * the bottom bit. (l2_present - 2) has all the bits in + * the first core group set. AND them together to generate + * a mask of cores in the first core group. + */ + core_mask = ~(pfdev->features.l2_present - 1) & + (pfdev->features.l2_present - 2); + dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", + hweight64(core_mask), + hweight64(pfdev->features.shader_present)); + + return core_mask; +} + void panfrost_gpu_power_on(struct panfrost_device *pfdev) { int ret; u32 val; - u64 core_mask = U64_MAX; + u64 core_mask; panfrost_gpu_init_quirks(pfdev); + core_mask = panfrost_get_core_mask(pfdev); - if (pfdev->features.l2_present != 1) { - /* - * Only support one core group now. - * ~(l2_present - 1) unsets all bits in l2_present except - * the bottom bit. (l2_present - 2) has all the bits in - * the first core group set. AND them together to generate - * a mask of cores in the first core group. 
- */ - core_mask = ~(pfdev->features.l2_present - 1) & - (pfdev->features.l2_present - 2); - dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n", - hweight64(core_mask), - hweight64(pfdev->features.shader_present)); - } gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask); ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO, val, val == (pfdev->features.l2_present & core_mask), @@ -359,9 +374,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev) void panfrost_gpu_power_off(struct panfrost_device *pfdev) { - gpu_write(pfdev, TILER_PWROFF_LO, 0); - gpu_write(pfdev, SHADER_PWROFF_LO, 0); - gpu_write(pfdev, L2_PWROFF_LO, 0); + int ret; + u32 val; + + gpu_write(pfdev, SHADER_PWROFF_LO, pfdev->features.shader_present); + ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO, + val, !val, 1, 1000); + if (ret) + dev_err(pfdev->dev, "shader power transition timeout"); + + gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present); + ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO, + val, !val, 1, 1000); + if (ret) + dev_err(pfdev->dev, "tiler power transition timeout"); + + gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present); + ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO, + val, !val, 0, 1000); + if (ret) + dev_err(pfdev->dev, "l2 power transition timeout"); } int panfrost_gpu_init(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index d4f09ecc3d22..f336b5b3b11f 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2321,7 +2321,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) switch (prim_walk) { case 1: for (i = 0; i < track->num_arrays; i++) { - size = track->arrays[i].esize * track->max_indx * 4; + size = track->arrays[i].esize * track->max_indx * 4UL; if (track->arrays[i].robj == NULL) { DRM_ERROR("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk, i); @@ -2340,7 +2340,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) break; case 2: for (i = 0; i < track->num_arrays; i++) { - size = track->arrays[i].esize * (nverts - 1) * 4; + size = track->arrays[i].esize * (nverts - 1) * 4UL; if (track->arrays[i].robj == NULL) { DRM_ERROR("(PW %u) Vertex array %u no buffer " "bound\n", prim_walk, i); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 638f861af80f..6cf54a747749 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -1275,7 +1275,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 4; - track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8; + track->cb_color_bo_offset[tmp] = (u64)radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_bo[tmp] = reloc->robj; @@ -1302,7 +1302,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) "0x%04X\n", reg); return -EINVAL; } - track->htile_offset = radeon_get_ib_value(p, idx) << 8; + track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); track->htile_bo = reloc->robj; track->db_dirty = true; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ca5598ae8bfc..1814bb8e14f1 100644 --- 
a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -687,11 +687,16 @@ static void radeon_crtc_init(struct drm_device *dev, int index) if (radeon_crtc == NULL) return; + radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); + if (!radeon_crtc->flip_queue) { + kfree(radeon_crtc); + return; + } + drm_crtc_init(dev, &radeon_crtc->base, &radeon_crtc_funcs); drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; - radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); rdev->mode_info.crtcs[index] = radeon_crtc; if (rdev->family >= CHIP_BONAIRE) { diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 987cabbf1318..c38b4d5d6a14 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -1204,13 +1204,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_create(rdev, pd_size, align, true, RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &vm->page_directory); - if (r) + if (r) { + kfree(vm->page_tables); + vm->page_tables = NULL; return r; - + } r = radeon_vm_clear_bo(rdev, vm->page_directory); if (r) { radeon_bo_unref(&vm->page_directory); vm->page_directory = NULL; + kfree(vm->page_tables); + vm->page_tables = NULL; return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a91012447b56..85e9cba49cec 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3611,6 +3611,10 @@ static int si_cp_start(struct radeon_device *rdev) for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { ring = &rdev->ring[i]; r = radeon_ring_lock(rdev, ring, 2); + if (r) { + DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); + return r; + } /* clear the compute context state */ radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c index f74f381af05f..d49c145db437 100644 --- a/drivers/gpu/drm/radeon/sumo_dpm.c +++ b/drivers/gpu/drm/radeon/sumo_dpm.c @@ -1493,8 +1493,10 @@ static int sumo_parse_power_table(struct radeon_device *rdev) non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; - if (!rdev->pm.power_state[i].clock_info) + if (!rdev->pm.power_state[i].clock_info) { + kfree(rdev->pm.dpm.ps); return -EINVAL; + } ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); if (ps == NULL) { kfree(rdev->pm.dpm.ps); diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 08ea1c864cb2..ef1cc7bad20a 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1726,8 +1726,10 @@ static int trinity_parse_power_table(struct radeon_device *rdev) non_clock_array_index = power_state->v2.nonClockInfoIndex; non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) &non_clock_info_array->nonClockInfo[non_clock_array_index]; - if (!rdev->pm.power_state[i].clock_info) + if (!rdev->pm.power_state[i].clock_info) { + kfree(rdev->pm.dpm.ps); return -EINVAL; + } ps = kzalloc(sizeof(struct sumo_ps), GFP_KERNEL); if (ps == NULL) { kfree(rdev->pm.dpm.ps); diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 16301bdfead1..95b75236fe5e 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -2653,18 +2653,69 @@ static void 
dispc_init_errata(struct dispc_device *dispc) } } -static void dispc_softreset(struct dispc_device *dispc) +static int dispc_softreset(struct dispc_device *dispc) { u32 val; int ret = 0; + /* K2G display controller does not support soft reset */ + if (dispc->feat->subrev == DISPC_K2G) + return 0; + /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); /* Wait for reset to complete */ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, val, val & 1, 100, 5000); + if (ret) { + dev_err(dispc->dev, "failed to reset dispc\n"); + return ret; + } + + return 0; +} + +static int dispc_init_hw(struct dispc_device *dispc) +{ + struct device *dev = dispc->dev; + int ret; + + ret = pm_runtime_set_active(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to active\n"); + return ret; + } + + ret = clk_prepare_enable(dispc->fclk); + if (ret) { + dev_err(dev, "Failed to enable DSS fclk\n"); + goto err_runtime_suspend; + } + + ret = dispc_softreset(dispc); if (ret) - dev_warn(dispc->dev, "failed to reset dispc\n"); + goto err_clk_disable; + + clk_disable_unprepare(dispc->fclk); + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return 0; + +err_clk_disable: + clk_disable_unprepare(dispc->fclk); + +err_runtime_suspend: + ret = pm_runtime_set_suspended(dev); + if (ret) { + dev_err(dev, "Failed to set DSS PM to suspended\n"); + return ret; + } + + return ret; } int dispc_init(struct tidss_device *tidss) @@ -2726,10 +2777,6 @@ int dispc_init(struct tidss_device *tidss) return r; } - /* K2G display controller does not support soft reset */ - if (feat->subrev != DISPC_K2G) - dispc_softreset(dispc); - for (i = 0; i < dispc->feat->num_vps; i++) { u32 gamma_size = dispc->feat->vp_feat.color.gamma_size; u32 *gamma_table; @@ -2778,6 +2825,10 @@ int dispc_init(struct tidss_device *tidss) of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth", &dispc->memory_bandwidth_limit); + r = dispc_init_hw(dispc); + if (r) + return r; + tidss->dispc = dispc; return 0; diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index afb2879980c6..995bac488392 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -4,8 +4,6 @@ * Author: Tomi Valkeinen */ -#include - #include #include #include @@ -27,7 +25,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *ddev = old_state->dev; struct tidss_device *tidss = to_tidss(ddev); - bool fence_cookie = dma_fence_begin_signalling(); dev_dbg(ddev->dev, "%s\n", __func__); @@ -38,7 +35,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_enables(ddev, old_state); drm_atomic_helper_commit_hw_done(old_state); - dma_fence_end_signalling(fence_cookie); drm_atomic_helper_wait_for_flip_done(ddev, old_state); drm_atomic_helper_cleanup_planes(ddev, old_state); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index f72755b8ea14..86d34b77b37d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -138,7 +138,7 @@ static int tilcdc_irq_install(struct drm_device *dev, unsigned int irq) if (ret) return ret; - priv->irq_enabled = false; + priv->irq_enabled = true; return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index ae01d22b8f84..c46f380d9149 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -595,10 +595,9 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp, if (!(flags & drm_vmw_synccpu_allow_cs)) { atomic_dec(&vmw_bo->cpu_writers); } - ttm_bo_put(&vmw_bo->base); + vmw_user_bo_unref(&vmw_bo); } - drm_gem_object_put(&vmw_bo->base.base); return ret; } @@ -638,8 +637,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data, return ret; ret = vmw_user_bo_synccpu_grab(vbo, arg->flags); - vmw_bo_unreference(&vbo); - drm_gem_object_put(&vbo->base.base); + vmw_user_bo_unref(&vbo); if (unlikely(ret != 0)) { if (ret == -ERESTARTSYS || ret == -EBUSY) return -EBUSY; @@ -713,7 +711,6 @@ int vmw_user_bo_lookup(struct drm_file *filp, } *out = gem_to_vmw_bo(gobj); - ttm_bo_get(&(*out)->base); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 79b30dc9d825..97e56a94eaf8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -407,8 +407,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. */ - ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement, - true, true, vmw_bo_bo_free, &buf); + ret = vmw_gem_object_create(dev_priv, new_size, &vmw_mob_placement, + true, true, vmw_bo_bo_free, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); return ret; @@ -475,7 +475,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) vmw_resource_mob_attach(res); /* Let go of the old mob. */ - vmw_bo_unreference(&old_buf); + vmw_user_bo_unref(&old_buf); res->id = vcotbl->type; ret = dma_resv_reserve_fences(bo->base.resv, 1); @@ -492,7 +492,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) out_wait: ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - vmw_bo_unreference(&buf); + vmw_user_bo_unref(&buf); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 8459fab9d979..136f1cdcf8cd 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -969,6 +969,11 @@ static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio) /** * GEM related functionality - vmwgfx_gem.c */ +extern int vmw_gem_object_create(struct vmw_private *dev_priv, + size_t size, struct ttm_placement *placement, + bool interruptible, bool pin, + void (*bo_free)(struct ttm_buffer_object *bo), + struct vmw_buffer_object **p_bo); extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -1600,6 +1605,21 @@ vmw_bo_reference(struct vmw_buffer_object *buf) return buf; } +static inline struct vmw_buffer_object *vmw_user_bo_ref(struct vmw_buffer_object *vbo) +{ + drm_gem_object_get(&vbo->base.base); + return vbo; +} + +static inline void vmw_user_bo_unref(struct vmw_buffer_object **buf) +{ + struct vmw_buffer_object *tmp_buf = *buf; + + *buf = NULL; + if (tmp_buf) + drm_gem_object_put(&tmp_buf->base.base); +} + static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv) { atomic_inc(&dev_priv->num_fifo_resources); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7e59469e1cb9..bc7f02e4eceb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1147,7 +1147,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, 
SVGAMobId *id, struct vmw_buffer_object **vmw_bo_p) { - struct vmw_buffer_object *vmw_bo; + struct vmw_buffer_object *vmw_bo, *tmp_bo; uint32_t handle = *id; struct vmw_relocation *reloc; int ret; @@ -1159,8 +1159,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, return PTR_ERR(vmw_bo); } ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false); - ttm_bo_put(&vmw_bo->base); - drm_gem_object_put(&vmw_bo->base.base); + tmp_bo = vmw_bo; + vmw_user_bo_unref(&tmp_bo); if (unlikely(ret != 0)) return ret; @@ -1202,7 +1202,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, SVGAGuestPtr *ptr, struct vmw_buffer_object **vmw_bo_p) { - struct vmw_buffer_object *vmw_bo; + struct vmw_buffer_object *vmw_bo, *tmp_bo; uint32_t handle = ptr->gmrId; struct vmw_relocation *reloc; int ret; @@ -1214,8 +1214,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, return PTR_ERR(vmw_bo); } ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false); - ttm_bo_put(&vmw_bo->base); - drm_gem_object_put(&vmw_bo->base.base); + tmp_bo = vmw_bo; + vmw_user_bo_unref(&tmp_bo); if (unlikely(ret != 0)) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index 4d2c28e39f4e..e7a533e39155 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -133,6 +133,22 @@ void vmw_gem_destroy(struct ttm_buffer_object *bo) kfree(vbo); } +int vmw_gem_object_create(struct vmw_private *vmw, + size_t size, struct ttm_placement *placement, + bool interruptible, bool pin, + void (*bo_free)(struct ttm_buffer_object *bo), + struct vmw_buffer_object **p_bo) +{ + int ret = vmw_bo_create(vmw, size, placement, interruptible, pin, bo_free, p_bo); + + if (ret != 0) + goto out_no_bo; + + (*p_bo)->base.base.funcs = &vmw_gem_object_funcs; +out_no_bo: + return ret; +} + int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -141,16 +157,14 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, { int ret; - ret = vmw_bo_create(dev_priv, size, - (dev_priv->has_mob) ? + ret = vmw_gem_object_create(dev_priv, size, + (dev_priv->has_mob) ? &vmw_sys_placement : &vmw_vram_sys_placement, - true, false, &vmw_gem_destroy, p_vbo); + true, false, &vmw_gem_destroy, p_vbo); if (ret != 0) goto out_no_bo; - (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs; - ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle); out_no_bo: return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index aab6389cb4aa..aa571b75cd07 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1402,8 +1402,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev, /* Reserve and switch the backing mob. 
*/ mutex_lock(&res->dev_priv->cmdbuf_mutex); (void) vmw_resource_reserve(res, false, true); - vmw_bo_unreference(&res->backup); - res->backup = vmw_bo_reference(bo_mob); + vmw_user_bo_unref(&res->backup); + res->backup = vmw_user_bo_ref(bo_mob); res->backup_offset = 0; vmw_resource_unreserve(res, false, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); @@ -1599,10 +1599,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, err_out: /* vmw_user_lookup_handle takes one ref so does new_fb */ - if (bo) { - vmw_bo_unreference(&bo); - drm_gem_object_put(&bo->base.base); - } + if (bo) + vmw_user_bo_unref(&bo); if (surface) vmw_surface_unreference(&surface); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index b5b311f2a91a..abc354ead4e8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -457,8 +457,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data, ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); - vmw_bo_unreference(&buf); - drm_gem_object_put(&buf->base.base); + vmw_user_bo_unref(&buf); out_unlock: mutex_unlock(&overlay->mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c7d645e5ec7b..30d1c1918bb4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -140,7 +140,7 @@ static void vmw_resource_release(struct kref *kref) if (res->coherent) vmw_bo_dirty_release(res->backup); ttm_bo_unreserve(bo); - vmw_bo_unreference(&res->backup); + vmw_user_bo_unref(&res->backup); } if (likely(res->hw_destroy != NULL)) { @@ -330,10 +330,10 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, return 0; } - ret = vmw_bo_create(res->dev_priv, res->backup_size, - res->func->backup_placement, - interruptible, false, - &vmw_bo_bo_free, &backup); + ret = vmw_gem_object_create(res->dev_priv, res->backup_size, + res->func->backup_placement, + interruptible, false, + &vmw_bo_bo_free, &backup); if (unlikely(ret != 0)) goto out_no_bo; @@ -452,11 +452,11 @@ void vmw_resource_unreserve(struct vmw_resource *res, vmw_resource_mob_detach(res); if (res->coherent) vmw_bo_dirty_release(res->backup); - vmw_bo_unreference(&res->backup); + vmw_user_bo_unref(&res->backup); } if (new_backup) { - res->backup = vmw_bo_reference(new_backup); + res->backup = vmw_user_bo_ref(new_backup); /* * The validation code should already have added a @@ -544,7 +544,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, ttm_bo_put(val_buf->bo); val_buf->bo = NULL; if (backup_dirty) - vmw_bo_unreference(&res->backup); + vmw_user_bo_unref(&res->backup); return ret; } @@ -719,7 +719,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr, goto out_no_validate; else if (!res->func->needs_backup && res->backup) { WARN_ON_ONCE(vmw_resource_mob_attached(res)); - vmw_bo_unreference(&res->backup); + vmw_user_bo_unref(&res->backup); } return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index 51e83dfa1cac..303f7a82f350 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -177,7 +177,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv, res->backup_size = size; if (byte_code) { - res->backup = vmw_bo_reference(byte_code); + res->backup = vmw_user_bo_ref(byte_code); res->backup_offset = offset; } shader->size = size; @@ -806,8 +806,7 @@ static int 
vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, shader_type, num_input_sig, num_output_sig, tfile, shader_handle); out_bad_arg: - vmw_bo_unreference(&buffer); - drm_gem_object_put(&buffer->base.base); + vmw_user_bo_unref(&buffer); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 1a1a286bc749..50769528c3f3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -683,9 +683,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) container_of(base, struct vmw_user_surface, prime.base); struct vmw_resource *res = &user_srf->srf.res; - if (res && res->backup) - drm_gem_object_put(&res->backup->base.base); - *p_base = NULL; vmw_resource_unreference(&res); } @@ -848,23 +845,17 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, * expect a backup buffer to be present. */ if (dev_priv->has_mob && req->shareable) { - uint32_t backup_handle; - - ret = vmw_gem_object_create_with_handle(dev_priv, - file_priv, - res->backup_size, - &backup_handle, - &res->backup); + ret = vmw_gem_object_create(dev_priv, + res->backup_size, + &vmw_sys_placement, + true, + false, + &vmw_gem_destroy, + &res->backup); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_reference(res->backup); - /* - * We don't expose the handle to the userspace and surface - * already holds a gem reference - */ - drm_gem_handle_delete(file_priv, backup_handle); } tmp = vmw_resource_reference(&srf->res); @@ -1505,7 +1496,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, if (ret == 0) { if (res->backup->base.base.size < res->backup_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); - vmw_bo_unreference(&res->backup); + vmw_user_bo_unref(&res->backup); ret = -EINVAL; goto out_unlock; } else { @@ -1519,8 +1510,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, res->backup_size, &backup_handle, &res->backup); - if (ret == 0) - vmw_bo_reference(res->backup); } if (unlikely(ret != 0)) { diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index d1094bb1aa42..220d6b2af4d3 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -380,7 +380,7 @@ static int asus_raw_event(struct hid_device *hdev, return 0; } -static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size) +static int asus_kbd_set_report(struct hid_device *hdev, const u8 *buf, size_t buf_size) { unsigned char *dmabuf; int ret; @@ -403,7 +403,7 @@ static int asus_kbd_set_report(struct hid_device *hdev, u8 *buf, size_t buf_size static int asus_kbd_init(struct hid_device *hdev) { - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; int ret; @@ -417,7 +417,7 @@ static int asus_kbd_init(struct hid_device *hdev) static int asus_kbd_get_functions(struct hid_device *hdev, unsigned char *kbd_func) { - u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0x05, 0x20, 0x31, 0x00, 0x08 }; u8 *readbuf; int ret; @@ -448,7 +448,7 @@ static int asus_kbd_get_functions(struct hid_device *hdev, static int rog_nkey_led_init(struct hid_device *hdev) { - u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 }; + const u8 buf_init_start[] = { FEATURE_KBD_LED_REPORT_ID1, 0xB9 }; u8 buf_init2[] = { FEATURE_KBD_LED_REPORT_ID1, 
0x41, 0x53, 0x55, 0x53, 0x20, 0x54, 0x65, 0x63, 0x68, 0x2e, 0x49, 0x6e, 0x63, 0x2e, 0x00 }; u8 buf_init3[] = { FEATURE_KBD_LED_REPORT_ID1, @@ -1012,6 +1012,24 @@ static int asus_start_multitouch(struct hid_device *hdev) return 0; } +static int __maybe_unused asus_resume(struct hid_device *hdev) { + struct asus_drvdata *drvdata = hid_get_drvdata(hdev); + int ret = 0; + + if (drvdata->kbd_backlight) { + const u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, + drvdata->kbd_backlight->cdev.brightness }; + ret = asus_kbd_set_report(hdev, buf, sizeof(buf)); + if (ret < 0) { + hid_err(hdev, "Asus failed to set keyboard backlight: %d\n", ret); + goto asus_resume_err; + } + } + +asus_resume_err: + return ret; +} + static int __maybe_unused asus_reset_resume(struct hid_device *hdev) { struct asus_drvdata *drvdata = hid_get_drvdata(hdev); @@ -1303,6 +1321,7 @@ static struct hid_driver asus_driver = { .input_configured = asus_input_configured, #ifdef CONFIG_PM .reset_resume = asus_reset_resume, + .resume = asus_resume, #endif .event = asus_event, .raw_event = asus_raw_event diff --git a/drivers/hid/hid-glorious.c b/drivers/hid/hid-glorious.c index 558eb08c19ef..281b3a7187ce 100644 --- a/drivers/hid/hid-glorious.c +++ b/drivers/hid/hid-glorious.c @@ -21,6 +21,10 @@ MODULE_DESCRIPTION("HID driver for Glorious PC Gaming Race mice"); * Glorious Model O and O- specify the const flag in the consumer input * report descriptor, which leads to inputs being ignored. Fix this * by patching the descriptor. + * + * Glorious Model I incorrectly specifies the Usage Minimum for its + * keyboard HID report, causing keycodes to be misinterpreted. + * Fix this by setting Usage Minimum to 0 in that report. */ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) @@ -32,6 +36,10 @@ static __u8 *glorious_report_fixup(struct hid_device *hdev, __u8 *rdesc, rdesc[85] = rdesc[113] = rdesc[141] = \ HID_MAIN_ITEM_VARIABLE | HID_MAIN_ITEM_RELATIVE; } + if (*rsize == 156 && rdesc[41] == 1) { + hid_info(hdev, "patching Glorious Model I keyboard report descriptor\n"); + rdesc[41] = 0; + } return rdesc; } @@ -44,6 +52,8 @@ static void glorious_update_name(struct hid_device *hdev) model = "Model O"; break; case USB_DEVICE_ID_GLORIOUS_MODEL_D: model = "Model D"; break; + case USB_DEVICE_ID_GLORIOUS_MODEL_I: + model = "Model I"; break; } snprintf(hdev->name, sizeof(hdev->name), "%s %s", "Glorious", model); @@ -66,10 +76,12 @@ static int glorious_probe(struct hid_device *hdev, } static const struct hid_device_id glorious_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, + { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH, USB_DEVICE_ID_GLORIOUS_MODEL_O) }, - { HID_USB_DEVICE(USB_VENDOR_ID_GLORIOUS, + { HID_USB_DEVICE(USB_VENDOR_ID_SINOWEALTH, USB_DEVICE_ID_GLORIOUS_MODEL_D) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LAVIEW, + USB_DEVICE_ID_GLORIOUS_MODEL_I) }, { } }; MODULE_DEVICE_TABLE(hid, glorious_devices); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 130fc5f34142..1be454bafcb9 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -503,10 +503,6 @@ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 -#define USB_VENDOR_ID_GLORIOUS 0x258a -#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033 -#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036 - #define I2C_VENDOR_ID_GOODIX 0x27c6 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 @@ -729,6 +725,9 @@ #define USB_VENDOR_ID_LABTEC 0x1020 #define
USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 +#define USB_VENDOR_ID_LAVIEW 0x22D4 +#define USB_DEVICE_ID_GLORIOUS_MODEL_I 0x1503 + #define USB_VENDOR_ID_LCPOWER 0x1241 #define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 @@ -1131,6 +1130,10 @@ #define USB_VENDOR_ID_SIGMATEL 0x066F #define USB_DEVICE_ID_SIGMATEL_STMP3780 0x3780 +#define USB_VENDOR_ID_SINOWEALTH 0x258a +#define USB_DEVICE_ID_GLORIOUS_MODEL_D 0x0033 +#define USB_DEVICE_ID_GLORIOUS_MODEL_O 0x0036 + #define USB_VENDOR_ID_SIS_TOUCH 0x0457 #define USB_DEVICE_ID_SIS9200_TOUCH 0x9200 #define USB_DEVICE_ID_SIS817_TOUCH 0x0817 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 7c1b33be9d13..149a3c74346b 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -692,7 +692,8 @@ static int lenovo_event_cptkbd(struct hid_device *hdev, * so set middlebutton_state to 3 * to never apply workaround anymore */ - if (cptkbd_data->middlebutton_state == 1 && + if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD && + cptkbd_data->middlebutton_state == 1 && usage->type == EV_REL && (usage->code == REL_X || usage->code == REL_Y)) { cptkbd_data->middlebutton_state = 3; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 8db4ae05febc..5ec1f174127a 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2048,6 +2048,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) }, + /* HONOR GLO-GXXX panel */ + { .driver_data = MT_CLS_VTL, + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, + 0x347d, 0x7853) }, + /* Ilitek dual touch panel */ { .driver_data = MT_CLS_NSMU, MT_USB_DEVICE(USB_VENDOR_ID_ILITEK, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 8a8a3dd8af0c..df07e3ae0ffb 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -325,28 +325,28 @@ struct joycon_imu_cal { * All the controller's button values are stored in a u32. * They can be accessed with bitwise ANDs. 
*/ -static const u32 JC_BTN_Y = BIT(0); -static const u32 JC_BTN_X = BIT(1); -static const u32 JC_BTN_B = BIT(2); -static const u32 JC_BTN_A = BIT(3); -static const u32 JC_BTN_SR_R = BIT(4); -static const u32 JC_BTN_SL_R = BIT(5); -static const u32 JC_BTN_R = BIT(6); -static const u32 JC_BTN_ZR = BIT(7); -static const u32 JC_BTN_MINUS = BIT(8); -static const u32 JC_BTN_PLUS = BIT(9); -static const u32 JC_BTN_RSTICK = BIT(10); -static const u32 JC_BTN_LSTICK = BIT(11); -static const u32 JC_BTN_HOME = BIT(12); -static const u32 JC_BTN_CAP = BIT(13); /* capture button */ -static const u32 JC_BTN_DOWN = BIT(16); -static const u32 JC_BTN_UP = BIT(17); -static const u32 JC_BTN_RIGHT = BIT(18); -static const u32 JC_BTN_LEFT = BIT(19); -static const u32 JC_BTN_SR_L = BIT(20); -static const u32 JC_BTN_SL_L = BIT(21); -static const u32 JC_BTN_L = BIT(22); -static const u32 JC_BTN_ZL = BIT(23); +#define JC_BTN_Y BIT(0) +#define JC_BTN_X BIT(1) +#define JC_BTN_B BIT(2) +#define JC_BTN_A BIT(3) +#define JC_BTN_SR_R BIT(4) +#define JC_BTN_SL_R BIT(5) +#define JC_BTN_R BIT(6) +#define JC_BTN_ZR BIT(7) +#define JC_BTN_MINUS BIT(8) +#define JC_BTN_PLUS BIT(9) +#define JC_BTN_RSTICK BIT(10) +#define JC_BTN_LSTICK BIT(11) +#define JC_BTN_HOME BIT(12) +#define JC_BTN_CAP BIT(13) /* capture button */ +#define JC_BTN_DOWN BIT(16) +#define JC_BTN_UP BIT(17) +#define JC_BTN_RIGHT BIT(18) +#define JC_BTN_LEFT BIT(19) +#define JC_BTN_SR_L BIT(20) +#define JC_BTN_SL_L BIT(21) +#define JC_BTN_L BIT(22) +#define JC_BTN_ZL BIT(23) enum joycon_msg_type { JOYCON_MSG_TYPE_NONE, @@ -859,14 +859,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr) */ static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr) { - int i; + int i, divz = 0; for (i = 0; i < 3; i++) { ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] - ctlr->accel_cal.offset[i]; ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] - ctlr->gyro_cal.offset[i]; + + if (ctlr->imu_cal_accel_divisor[i] == 0) { + ctlr->imu_cal_accel_divisor[i] = 1; + divz++; + } + + if (ctlr->imu_cal_gyro_divisor[i] == 0) { + ctlr->imu_cal_gyro_divisor[i] = 1; + divz++; + } } + + if (divz) + hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz); } static const s16 DFLT_ACCEL_OFFSET /*= 0*/; @@ -1095,16 +1108,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr, JC_IMU_SAMPLES_PER_DELTA_AVG) { ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum / ctlr->imu_delta_samples_count; - /* don't ever want divide by zero shenanigans */ - if (ctlr->imu_avg_delta_ms == 0) { - ctlr->imu_avg_delta_ms = 1; - hid_warn(ctlr->hdev, - "calculated avg imu delta of 0\n"); - } ctlr->imu_delta_samples_count = 0; ctlr->imu_delta_samples_sum = 0; } + /* don't ever want divide by zero shenanigans */ + if (ctlr->imu_avg_delta_ms == 0) { + ctlr->imu_avg_delta_ms = 1; + hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n"); + } + /* useful for debugging IMU sample rate */ hid_dbg(ctlr->hdev, "imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n", diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 056bb3209128..60884066362a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -33,6 +33,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE), HID_QUIRK_ALWAYS_POLL 
}, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_REVB_ANSI), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM), HID_QUIRK_NOGET }, diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c index b96ae15e0ad9..6d35bb397481 100644 --- a/drivers/hid/i2c-hid/i2c-hid-acpi.c +++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c @@ -39,8 +39,13 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = { * The CHPN0001 ACPI device, which is used to describe the Chipone * ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible. */ - {"CHPN0001", 0 }, - { }, + { "CHPN0001" }, + /* + * The IDEA5002 ACPI device causes high interrupt usage and spurious + * wakeups from suspend. + */ + { "IDEA5002" }, + { } }; /* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */ @@ -115,9 +120,9 @@ static int i2c_hid_acpi_probe(struct i2c_client *client) } static const struct acpi_device_id i2c_hid_acpi_match[] = { - {"ACPI0C50", 0 }, - {"PNP0C50", 0 }, - { }, + { "ACPI0C50" }, + { "PNP0C50" }, + { } }; MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match); diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index c1270db12178..165ed872fa4e 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2646,8 +2646,8 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, { struct hid_data *hid_data = &wacom_wac->hid_data; bool mt = wacom_wac->features.touch_max > 1; - bool prox = hid_data->tipswitch && - report_touch_events(wacom_wac); + bool touch_down = hid_data->tipswitch && hid_data->confidence; + bool prox = touch_down && report_touch_events(wacom_wac); if (touch_is_muted(wacom_wac)) { if (!wacom_wac->shared->touch_down) @@ -2697,24 +2697,6 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac, } } -static bool wacom_wac_slot_is_active(struct input_dev *dev, int key) -{ - struct input_mt *mt = dev->mt; - struct input_mt_slot *s; - - if (!mt) - return false; - - for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { - if (s->key == key && - input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) { - return true; - } - } - - return false; -} - static void wacom_wac_finger_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { @@ -2765,14 +2747,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev, } if (usage->usage_index + 1 == field->report_count) { - if (equivalent_usage == wacom_wac->hid_data.last_slot_field) { - bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input, - wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch; - - if (wacom_wac->hid_data.confidence || touch_removed) { - wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); - } - } + if (equivalent_usage == wacom_wac->hid_data.last_slot_field) + wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input); } } diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 2210aa62e3d0..ec7f27a6ce01 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -837,7 +837,23 @@ static struct hid_driver corsairpsu_driver = { .reset_resume = corsairpsu_resume, #endif }; -module_hid_driver(corsairpsu_driver); + +static int __init corsair_init(void) +{ + return hid_register_driver(&corsairpsu_driver); +} + +static void __exit corsair_exit(void) +{ + 
hid_unregister_driver(&corsairpsu_driver); +} + +/* + * With module_init() the driver would load before the HID bus when + * built-in, so use late_initcall() instead. + */ +late_initcall(corsair_init); +module_exit(corsair_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Wilken Gottwalt "); diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 0174fbf1a963..d8e4d902b01a 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -1032,7 +1032,7 @@ struct etmv4_drvdata { u8 ctxid_size; u8 vmid_size; u8 ccsize; - u8 ccitmin; + u16 ccitmin; u8 s_ex_level; u8 ns_ex_level; u8 q_support; diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c index 016220ba0add..8d8fa8e8afe0 100644 --- a/drivers/hwtracing/ptt/hisi_ptt.c +++ b/drivers/hwtracing/ptt/hisi_ptt.c @@ -342,9 +342,9 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt) return ret; hisi_ptt->trace_irq = pci_irq_vector(pdev, HISI_PTT_TRACE_DMA_IRQ); - ret = devm_request_threaded_irq(&pdev->dev, hisi_ptt->trace_irq, - NULL, hisi_ptt_isr, 0, - DRV_NAME, hisi_ptt); + ret = devm_request_irq(&pdev->dev, hisi_ptt->trace_irq, hisi_ptt_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, DRV_NAME, + hisi_ptt); if (ret) { pci_err(pdev, "failed to request irq %d, ret = %d\n", hisi_ptt->trace_irq, ret); @@ -659,6 +659,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event) return -EOPNOTSUPP; } + if (event->attach_state & PERF_ATTACH_TASK) + return -EOPNOTSUPP; + if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type) return -ENOENT; diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index 6adf3b141316..86daf791aa27 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status) if (!slave) return 0; - command = readl(bus->base + ASPEED_I2C_CMD_REG); + /* + * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive + * transfers with low enough latency between the nak/stop phase of the current + * command and the start/address phase of the following command that the + * interrupts are coalesced by the time we process them. + */ + if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { + irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; + bus->slave_state = ASPEED_I2C_SLAVE_STOP; + } - /* Slave was requested, restart state machine. */ + if (irq_status & ASPEED_I2CD_INTR_TX_NAK && + bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { + irq_handled |= ASPEED_I2CD_INTR_TX_NAK; + bus->slave_state = ASPEED_I2C_SLAVE_STOP; + } + + /* Propagate any stop conditions to the slave implementation. */ + if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) { + i2c_slave_event(slave, I2C_SLAVE_STOP, &value); + bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; + } + + /* + * Now that we've dealt with any potentially coalesced stop conditions, + * address any start conditions. + */ if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) { irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH; bus->slave_state = ASPEED_I2C_SLAVE_START; } - /* Slave is not currently active, irq was for someone else. */ + /* + * If the slave has been stopped and not started then slave interrupt + * handling is complete. 
+ */ if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE) return irq_handled; + command = readl(bus->base + ASPEED_I2C_CMD_REG); dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n", irq_status, command); @@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status) irq_handled |= ASPEED_I2CD_INTR_RX_DONE; } - /* Slave was asked to stop. */ - if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { - irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP; - bus->slave_state = ASPEED_I2C_SLAVE_STOP; - } - if (irq_status & ASPEED_I2CD_INTR_TX_NAK && - bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) { - irq_handled |= ASPEED_I2CD_INTR_TX_NAK; - bus->slave_state = ASPEED_I2C_SLAVE_STOP; - } - switch (bus->slave_state) { case ASPEED_I2C_SLAVE_READ_REQUESTED: if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK)) @@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status) i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); break; case ASPEED_I2C_SLAVE_STOP: - i2c_slave_event(slave, I2C_SLAVE_STOP, &value); - bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE; + /* Stop event handling is done early. Unreachable. */ break; case ASPEED_I2C_SLAVE_START: /* Slave was just started. Waiting for the next event. */; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index b31cf4f18f85..6aa4f1f06240 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data { * @clk: function clk for rk3399 or function & Bus clks for others * @pclk: Bus clk for rk3399 * @clk_rate_nb: i2c clk rate change notify + * @irq: irq number * @t: I2C known timing information * @lock: spinlock for the i2c bus * @wait: the waitqueue to wait for i2c transfer @@ -200,6 +201,7 @@ struct rk3x_i2c { struct clk *clk; struct clk *pclk; struct notifier_block clk_rate_nb; + int irq; /* Settings */ struct i2c_timings t; @@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap, spin_unlock_irqrestore(&i2c->lock, flags); - rk3x_i2c_start(i2c); - if (!polling) { + rk3x_i2c_start(i2c); + timeout = wait_event_timeout(i2c->wait, !i2c->busy, msecs_to_jiffies(WAIT_TIMEOUT)); } else { + disable_irq(i2c->irq); + rk3x_i2c_start(i2c); + timeout = rk3x_i2c_wait_xfer_poll(i2c); + + enable_irq(i2c->irq); } spin_lock_irqsave(&i2c->lock, flags); @@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev) return ret; } + i2c->irq = irq; + platform_set_drvdata(pdev, i2c); if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) { diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 36dab9cd208c..8e3838c42a8c 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -220,8 +220,17 @@ static bool is_ack(struct s3c24xx_i2c *i2c) int tries; for (tries = 50; tries; --tries) { - if (readl(i2c->regs + S3C2410_IICCON) - & S3C2410_IICCON_IRQPEND) { + unsigned long tmp = readl(i2c->regs + S3C2410_IICCON); + + if (!(tmp & S3C2410_IICCON_ACKEN)) { + /* + * Wait a bit for the bus to stabilize, + * delay estimated experimentally. 
+ */ + usleep_range(100, 200); + return true; + } + if (tmp & S3C2410_IICCON_IRQPEND) { if (!(readl(i2c->regs + S3C2410_IICSTAT) & S3C2410_IICSTAT_LASTBIT)) return true; @@ -274,16 +283,6 @@ static void s3c24xx_i2c_message_start(struct s3c24xx_i2c *i2c, stat |= S3C2410_IICSTAT_START; writel(stat, i2c->regs + S3C2410_IICSTAT); - - if (i2c->quirks & QUIRK_POLL) { - while ((i2c->msg_num != 0) && is_ack(i2c)) { - i2c_s3c_irq_nextbyte(i2c, stat); - stat = readl(i2c->regs + S3C2410_IICSTAT); - - if (stat & S3C2410_IICSTAT_ARBITR) - dev_err(i2c->dev, "deal with arbitration loss\n"); - } - } } static inline void s3c24xx_i2c_stop(struct s3c24xx_i2c *i2c, int ret) @@ -690,7 +689,7 @@ static void s3c24xx_i2c_wait_idle(struct s3c24xx_i2c *i2c) static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, struct i2c_msg *msgs, int num) { - unsigned long timeout; + unsigned long timeout = 0; int ret; ret = s3c24xx_i2c_set_master(i2c); @@ -710,16 +709,19 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c, s3c24xx_i2c_message_start(i2c, msgs); if (i2c->quirks & QUIRK_POLL) { - ret = i2c->msg_idx; + while ((i2c->msg_num != 0) && is_ack(i2c)) { + unsigned long stat = readl(i2c->regs + S3C2410_IICSTAT); - if (ret != num) - dev_dbg(i2c->dev, "incomplete xfer (%d)\n", ret); + i2c_s3c_irq_nextbyte(i2c, stat); - goto out; + stat = readl(i2c->regs + S3C2410_IICSTAT); + if (stat & S3C2410_IICSTAT_ARBITR) + dev_err(i2c->dev, "deal with arbitration loss\n"); + } + } else { + timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); } - timeout = wait_event_timeout(i2c->wait, i2c->msg_num == 0, HZ * 5); - ret = i2c->msg_idx; /* diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 05b8b8dfa9bd..36587f38dff3 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -3,6 +3,7 @@ * i2c-core.h - interfaces internal to the I2C framework */ +#include #include struct i2c_devinfo { @@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources, */ static inline bool i2c_in_atomic_xfer_mode(void) { - return system_state > SYSTEM_RUNNING && !preemptible(); + return system_state > SYSTEM_RUNNING && + (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? 
!preemptible() : irqs_disabled()); } static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap) diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c index 8e252cde735b..0e5d3d2e9c98 100644 --- a/drivers/iio/adc/ad7091r-base.c +++ b/drivers/iio/adc/ad7091r-base.c @@ -174,8 +174,8 @@ static const struct iio_info ad7091r_info = { static irqreturn_t ad7091r_event_handler(int irq, void *private) { - struct ad7091r_state *st = (struct ad7091r_state *) private; - struct iio_dev *iio_dev = dev_get_drvdata(st->dev); + struct iio_dev *iio_dev = private; + struct ad7091r_state *st = iio_priv(iio_dev); unsigned int i, read_val; int ret; s64 timestamp = iio_get_time_ns(iio_dev); @@ -234,7 +234,7 @@ int ad7091r_probe(struct device *dev, const char *name, if (irq) { ret = devm_request_threaded_irq(dev, irq, NULL, ad7091r_event_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st); + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, iio_dev); if (ret) return ret; } diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 7534572f7475..811525857d29 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -119,9 +119,9 @@ struct ad9467_state { struct spi_device *spi; struct clk *clk; unsigned int output_mode; + unsigned int (*scales)[2]; struct gpio_desc *pwrdown_gpio; - struct gpio_desc *reset_gpio; }; static int ad9467_spi_read(struct spi_device *spi, unsigned int reg) @@ -163,9 +163,10 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg, if (readval == NULL) { ret = ad9467_spi_write(spi, reg, writeval); - ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); - return ret; + if (ret) + return ret; + return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, + AN877_ADC_TRANSFER_SYNC); } ret = ad9467_spi_read(spi, reg); @@ -212,6 +213,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index, .channel = _chan, \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = _si, \ .scan_type = { \ .sign = _sign, \ @@ -273,10 +275,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2) const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info); struct ad9467_state *st = adi_axi_adc_conv_priv(conv); unsigned int i, vref_val; + int ret; - vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF); + ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF); + if (ret < 0) + return ret; - vref_val &= info1->vref_mask; + vref_val = ret & info1->vref_mask; for (i = 0; i < info->num_scales; i++) { if (vref_val == info->scale_table[i][1]) @@ -297,6 +302,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2) struct ad9467_state *st = adi_axi_adc_conv_priv(conv); unsigned int scale_val[2]; unsigned int i; + int ret; if (val != 0) return -EINVAL; @@ -306,11 +312,13 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2) if (scale_val[0] != val || scale_val[1] != val2) continue; - ad9467_spi_write(st->spi, AN877_ADC_REG_VREF, - info->scale_table[i][1]); - ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); - return 0; + ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF, + info->scale_table[i][1]); + if (ret < 0) + return ret; + + return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + AN877_ADC_TRANSFER_SYNC); } return -EINVAL; @@ -359,6 +367,26 @@ static int ad9467_write_raw(struct 
adi_axi_adc_conv *conv, } } +static int ad9467_read_avail(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + const struct adi_axi_adc_chip_info *info = conv->chip_info; + struct ad9467_state *st = adi_axi_adc_conv_priv(conv); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *vals = (const int *)st->scales; + *type = IIO_VAL_INT_PLUS_MICRO; + /* Values are stored in a 2D matrix */ + *length = info->num_scales * 2; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode) { int ret; @@ -371,6 +399,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode) AN877_ADC_TRANSFER_SYNC); } +static int ad9467_scale_fill(struct adi_axi_adc_conv *conv) +{ + const struct adi_axi_adc_chip_info *info = conv->chip_info; + struct ad9467_state *st = adi_axi_adc_conv_priv(conv); + unsigned int i, val1, val2; + + st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales, + sizeof(*st->scales), GFP_KERNEL); + if (!st->scales) + return -ENOMEM; + + for (i = 0; i < info->num_scales; i++) { + __ad9467_get_scale(conv, i, &val1, &val2); + st->scales[i][0] = val1; + st->scales[i][1] = val2; + } + + return 0; +} + static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv) { struct ad9467_state *st = adi_axi_adc_conv_priv(conv); @@ -378,6 +426,21 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv) return ad9467_outputmode_set(st->spi, st->output_mode); } +static int ad9467_reset(struct device *dev) +{ + struct gpio_desc *gpio; + + gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR_OR_NULL(gpio)) + return PTR_ERR_OR_ZERO(gpio); + + fsleep(1); + gpiod_set_value_cansleep(gpio, 0); + fsleep(10 * USEC_PER_MSEC); + + return 0; +} + static int ad9467_probe(struct spi_device *spi) { const struct ad9467_chip_info *info; @@ -406,21 +469,16 @@ static int ad9467_probe(struct spi_device *spi) if (IS_ERR(st->pwrdown_gpio)) return PTR_ERR(st->pwrdown_gpio); - st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", - GPIOD_OUT_LOW); - if (IS_ERR(st->reset_gpio)) - return PTR_ERR(st->reset_gpio); - - if (st->reset_gpio) { - udelay(1); - ret = gpiod_direction_output(st->reset_gpio, 1); - if (ret) - return ret; - mdelay(10); - } + ret = ad9467_reset(&spi->dev); + if (ret) + return ret; conv->chip_info = &info->axi_adc_info; + ret = ad9467_scale_fill(conv); + if (ret) + return ret; + id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID); if (id != conv->chip_info->id) { dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n", @@ -431,6 +489,7 @@ static int ad9467_probe(struct spi_device *spi) conv->reg_access = ad9467_reg_access; conv->write_raw = ad9467_write_raw; conv->read_raw = ad9467_read_raw; + conv->read_avail = ad9467_read_avail; conv->preenable_setup = ad9467_preenable_setup; st->output_mode = info->default_output_mode | diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index e8a8ea4140f1..ad386ac7f03c 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -143,6 +143,20 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev, return conv->write_raw(conv, chan, val, val2, mask); } +static int adi_axi_adc_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + struct adi_axi_adc_state *st = iio_priv(indio_dev); + struct adi_axi_adc_conv *conv = 
&st->client->conv; + + if (!conv->read_avail) + return -EOPNOTSUPP; + + return conv->read_avail(conv, chan, vals, type, length, mask); +} + static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { @@ -227,69 +241,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, } EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI); -static ssize_t in_voltage_scale_available_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct adi_axi_adc_state *st = iio_priv(indio_dev); - struct adi_axi_adc_conv *conv = &st->client->conv; - size_t len = 0; - int i; - - for (i = 0; i < conv->chip_info->num_scales; i++) { - const unsigned int *s = conv->chip_info->scale_table[i]; - - len += scnprintf(buf + len, PAGE_SIZE - len, - "%u.%06u ", s[0], s[1]); - } - buf[len - 1] = '\n'; - - return len; -} - -static IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0); - -enum { - ADI_AXI_ATTR_SCALE_AVAIL, -}; - -#define ADI_AXI_ATTR(_en_, _file_) \ - [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr - -static struct attribute *adi_axi_adc_attributes[] = { - ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available), - NULL -}; - -static umode_t axi_adc_attr_is_visible(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev = kobj_to_dev(kobj); - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct adi_axi_adc_state *st = iio_priv(indio_dev); - struct adi_axi_adc_conv *conv = &st->client->conv; - - switch (n) { - case ADI_AXI_ATTR_SCALE_AVAIL: - if (!conv->chip_info->num_scales) - return 0; - return attr->mode; - default: - return attr->mode; - } -} - -static const struct attribute_group adi_axi_adc_attribute_group = { - .attrs = adi_axi_adc_attributes, - .is_visible = axi_adc_attr_is_visible, -}; - static const struct iio_info adi_axi_adc_info = { .read_raw = &adi_axi_adc_read_raw, .write_raw = &adi_axi_adc_write_raw, - .attrs = &adi_axi_adc_attribute_group, .update_scan_mode = &adi_axi_adc_update_scan_mode, + .read_avail = &adi_axi_adc_read_avail, }; static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = { diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 642c5c4895e3..3ac253a27dd9 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -671,8 +671,10 @@ static int tiadc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, indio_dev); err = tiadc_request_dma(pdev, adc_dev); - if (err && err == -EPROBE_DEFER) + if (err && err != -ENODEV) { + dev_err_probe(&pdev->dev, err, "DMA request failed\n"); goto err_dma; + } return 0; diff --git a/drivers/iio/buffer/industrialio-triggered-buffer.c b/drivers/iio/buffer/industrialio-triggered-buffer.c index 8d4fc97d1005..2b7873e8a959 100644 --- a/drivers/iio/buffer/industrialio-triggered-buffer.c +++ b/drivers/iio/buffer/industrialio-triggered-buffer.c @@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, struct iio_buffer *buffer; int ret; + /* + * iio_triggered_buffer_cleanup() assumes that the buffer allocated here + * is assigned to indio_dev->buffer but this is only the case if this + * function is the first caller to iio_device_attach_buffer(). If + * indio_dev->buffer is already set then we can't proceed otherwise the + * cleanup function will try to free a buffer that was not allocated here. 
+ */ + if (indio_dev->buffer) + return -EADDRINUSE; + buffer = iio_kfifo_allocate(); if (!buffer) { ret = -ENOMEM; diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c index 6633b35a94e6..9c9bc77003c7 100644 --- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c +++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c @@ -15,8 +15,8 @@ /* Conversion times in us */ static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000, 13000, 7000 }; -static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000, - 5000, 8000 }; +static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000, + 3000, 8000 }; static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100, 4100, 8220, 16440 }; diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c index aec55f7e1f26..2d939773445d 100644 --- a/drivers/iio/imu/adis16475.c +++ b/drivers/iio/imu/adis16475.c @@ -1243,6 +1243,59 @@ static int adis16475_config_irq_pin(struct adis16475 *st) return 0; } + +static int adis16475_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct adis16475 *st; + int ret; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); + if (!indio_dev) + return -ENOMEM; + + st = iio_priv(indio_dev); + + st->info = spi_get_device_match_data(spi); + if (!st->info) + return -EINVAL; + + ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data); + if (ret) + return ret; + + indio_dev->name = st->info->name; + indio_dev->channels = st->info->channels; + indio_dev->num_channels = st->info->num_channels; + indio_dev->info = &adis16475_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = __adis_initial_startup(&st->adis); + if (ret) + return ret; + + ret = adis16475_config_irq_pin(st); + if (ret) + return ret; + + ret = adis16475_config_sync_mode(st); + if (ret) + return ret; + + ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, + adis16475_trigger_handler); + if (ret) + return ret; + + ret = devm_iio_device_register(&spi->dev, indio_dev); + if (ret) + return ret; + + adis16475_debugfs_init(indio_dev); + + return 0; +} + static const struct of_device_id adis16475_of_match[] = { { .compatible = "adi,adis16470", .data = &adis16475_chip_info[ADIS16470] }, @@ -1288,57 +1341,30 @@ static const struct of_device_id adis16475_of_match[] = { }; MODULE_DEVICE_TABLE(of, adis16475_of_match); -static int adis16475_probe(struct spi_device *spi) -{ - struct iio_dev *indio_dev; - struct adis16475 *st; - int ret; - - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); - if (!indio_dev) - return -ENOMEM; - - st = iio_priv(indio_dev); - - st->info = device_get_match_data(&spi->dev); - if (!st->info) - return -EINVAL; - - ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data); - if (ret) - return ret; - - indio_dev->name = st->info->name; - indio_dev->channels = st->info->channels; - indio_dev->num_channels = st->info->num_channels; - indio_dev->info = &adis16475_info; - indio_dev->modes = INDIO_DIRECT_MODE; - - ret = __adis_initial_startup(&st->adis); - if (ret) - return ret; - - ret = adis16475_config_irq_pin(st); - if (ret) - return ret; - - ret = adis16475_config_sync_mode(st); - if (ret) - return ret; - - ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev, - adis16475_trigger_handler); - if (ret) - return ret; - - ret = devm_iio_device_register(&spi->dev, indio_dev); - if (ret) - return ret; - - adis16475_debugfs_init(indio_dev); - - return 0; -} +static const struct spi_device_id adis16475_ids[] = 
{ + { "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] }, + { "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] }, + { "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] }, + { "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] }, + { "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] }, + { "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] }, + { "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] }, + { "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] }, + { "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] }, + { "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] }, + { "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] }, + { "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] }, + { "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] }, + { "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] }, + { "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] }, + { "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] }, + { "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] }, + { "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] }, + { "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] }, + { "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] }, + { } +}; +MODULE_DEVICE_TABLE(spi, adis16475_ids); static struct spi_driver adis16475_driver = { .driver = { @@ -1346,6 +1372,7 @@ static struct spi_driver adis16475_driver = { .of_match_table = adis16475_of_match, }, .probe = adis16475_probe, + .id_table = adis16475_ids, }; module_spi_driver(adis16475_driver); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 86fbbe904050..19a1ef5351d2 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -736,13 +736,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev, ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset, chan->channel2, val); mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; case IIO_ACCEL: mutex_lock(&st->lock); ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset, chan->channel2, val); mutex_unlock(&st->lock); - return IIO_VAL_INT; + return ret; default: return -EINVAL; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index c2ee80546d12..58fbb1d3b7f4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -2819,6 +2819,10 @@ static int free_mr_alloc_res(struct hns_roce_dev *hr_dev) return 0; create_failed_qp: + for (i--; i >= 0; i--) { + hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL); + kfree(free_mr->rsv_qp[i]); + } hns_roce_destroy_cq(cq, NULL); kfree(cq); @@ -5791,7 +5795,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, /* Resizing SRQs is not supported yet */ if (srq_attr_mask & IB_SRQ_MAX_WR) - return -EINVAL; + return -EOPNOTSUPP; if (srq_attr_mask & IB_SRQ_LIMIT) { if (srq_attr->srq_limit > srq->wqe_cnt) diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c index 783e71852c50..bd1fe89ca205 100644 --- a/drivers/infiniband/hw/hns/hns_roce_pd.c +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c @@ -150,7 +150,7 @@ int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata) int ret; if (!(hr_dev->caps.flags & 
HNS_ROCE_CAP_FLAG_XRC)) - return -EINVAL; + return -EOPNOTSUPP; ret = hns_roce_xrcd_alloc(hr_dev, &xrcd->xrcdn); if (ret) diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index f330ce895d88..8fe0cef7e2be 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c @@ -635,7 +635,7 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) int mthca_SYS_EN(struct mthca_dev *dev) { - u64 out; + u64 out = 0; int ret; ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D); @@ -1955,7 +1955,7 @@ int mthca_WRITE_MGM(struct mthca_dev *dev, int index, int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, u16 *hash) { - u64 imm; + u64 imm = 0; int err; err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index b54bc8865dae..1ab268b77096 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -382,7 +382,7 @@ static int mthca_init_icm(struct mthca_dev *mdev, struct mthca_init_hca_param *init_hca, u64 icm_size) { - u64 aux_pages; + u64 aux_pages = 0; int err; err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index dee8c97ff056..d967d5532459 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -317,12 +317,10 @@ struct iser_device { * * @mr: memory region * @sig_mr: signature memory region - * @mr_valid: is mr valid indicator */ struct iser_reg_resources { struct ib_mr *mr; struct ib_mr *sig_mr; - u8 mr_valid:1; }; /** diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 7b83f48f60c5..8ec470c519e8 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -580,7 +580,10 @@ static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) return -EINVAL; } - desc->rsc.mr_valid = 0; + if (desc->sig_protected) + desc->rsc.sig_mr->need_inval = false; + else + desc->rsc.mr->need_inval = false; return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 29ae2c6a250a..6efcb79c8efe 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -264,7 +264,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); - if (rsc->mr_valid) + if (rsc->sig_mr->need_inval) iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); @@ -288,7 +288,7 @@ static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, wr->access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE; - rsc->mr_valid = 1; + rsc->sig_mr->need_inval = true; sig_reg->sge.lkey = mr->lkey; sig_reg->rkey = mr->rkey; @@ -313,7 +313,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, struct ib_reg_wr *wr = &tx_desc->reg_wr; int n; - if (rsc->mr_valid) + if (rsc->mr->need_inval) iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); @@ -336,7 +336,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ; - rsc->mr_valid = 1; + rsc->mr->need_inval = 
true; reg->sge.lkey = mr->lkey; reg->rkey = mr->rkey; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index a00ca117303a..057e69164e6d 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -135,7 +135,6 @@ iser_create_fastreg_desc(struct iser_device *device, goto err_alloc_mr_integrity; } } - desc->rsc.mr_valid = 0; return desc; diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 8404286302b0..e8011d70d079 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -286,6 +286,7 @@ static const struct xpad_device { { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, + { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 246958795f60..c4d8caadec59 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -746,6 +746,44 @@ static void atkbd_deactivate(struct atkbd *atkbd) ps2dev->serio->phys); } +#ifdef CONFIG_X86 +static bool atkbd_is_portable_device(void) +{ + static const char * const chassis_types[] = { + "8", /* Portable */ + "9", /* Laptop */ + "10", /* Notebook */ + "14", /* Sub-Notebook */ + "31", /* Convertible */ + "32", /* Detachable */ + }; + int i; + + for (i = 0; i < ARRAY_SIZE(chassis_types); i++) + if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i])) + return true; + + return false; +} + +/* + * On many modern laptops ATKBD_CMD_GETID may cause problems, on these laptops + * the controller is always in translated mode. In this mode mice/touchpads will + * not work. So in this case simply assume a keyboard is connected to avoid + * confusing some laptop keyboards. + * + * Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using the standard + * 0xab83 id is ok in translated mode, only atkbd_select_set() checks atkbd->id + * and in translated mode that is a no-op. + */ +static bool atkbd_skip_getid(struct atkbd *atkbd) +{ + return atkbd->translated && atkbd_is_portable_device(); +} +#else +static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; } +#endif + /* * atkbd_probe() probes for an AT keyboard on a serio port. */ @@ -754,6 +792,7 @@ static int atkbd_probe(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; unsigned char param[2]; + bool skip_getid; /* * Some systems, where the bit-twiddling when testing the io-lines of the @@ -775,17 +814,18 @@ static int atkbd_probe(struct atkbd *atkbd) */ param[0] = param[1] = 0xa5; /* initialize with invalid values */ - if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { + skip_getid = atkbd_skip_getid(atkbd); + if (skip_getid || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) { /* - * If the get ID command failed, we check if we can at least set the LEDs on - * the keyboard. This should work on every keyboard out there. It also turns - * the LEDs off, which we want anyway. + * If the get ID command was skipped or failed, we check if we can at least set + * the LEDs on the keyboard. This should work on every keyboard out there. 
+ * It also turns the LEDs off, which we want anyway. */ param[0] = 0; if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS)) return -1; - atkbd->id = 0xabba; + atkbd->id = skip_getid ? 0xab83 : 0xabba; return 0; } diff --git a/drivers/input/keyboard/ipaq-micro-keys.c b/drivers/input/keyboard/ipaq-micro-keys.c index 13a66a8e3411..e0c51189e329 100644 --- a/drivers/input/keyboard/ipaq-micro-keys.c +++ b/drivers/input/keyboard/ipaq-micro-keys.c @@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev) keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes, keys->input->keycodesize * keys->input->keycodemax, GFP_KERNEL); + if (!keys->codes) + return -ENOMEM; + keys->input->keycode = keys->codes; __set_bit(EV_KEY, keys->input->evbit); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index e79f5497948b..9116f4248fd0 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev, info->name = "power"; info->event_code = KEY_POWER; info->wakeup = true; + } else if (upage == 0x01 && usage == 0xc6) { + info->name = "airplane mode switch"; + info->event_type = EV_SW; + info->event_code = SW_RFKILL_ALL; + info->active_low = false; } else if (upage == 0x01 && usage == 0xca) { info->name = "rotation lock switch"; info->event_type = EV_SW; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index e43e93ac2798..b6749af46262 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -183,6 +183,7 @@ static const char * const smbus_pnp_ids[] = { "LEN009b", /* T580 */ "LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */ "LEN040f", /* P1 Gen 3 */ + "LEN0411", /* L14 Gen 1 */ "LEN200f", /* T450s */ "LEN2044", /* L470 */ "LEN2054", /* E480 */ diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 9c39553d30fa..b585b1dab870 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -360,6 +360,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, .driver_data = (void *)(SERIO_QUIRK_DRITEK) }, + { + /* Acer TravelMate P459-G2-M */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"), + }, + .driver_data = (void *)(SERIO_QUIRK_NOMUX) + }, { /* Amoi M636/A737 */ .matches = { diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c index 137c9ea60a6b..4526ff2e1bd5 100644 --- a/drivers/interconnect/core.c +++ b/drivers/interconnect/core.c @@ -380,6 +380,9 @@ struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec) } mutex_unlock(&icc_lock); + if (!node) + return ERR_PTR(-EINVAL); + if (IS_ERR(node)) return ERR_CAST(node); diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index b2708de25ea3..d80065c8105a 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -243,6 +243,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,adreno" }, + { .compatible = "qcom,adreno-gmu" }, { .compatible = "qcom,mdp4" }, { .compatible = "qcom,mdss" }, { .compatible = "qcom,sc7180-mdss" }, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index cb0dc831834c..3fd1886004c2 100644 --- 
a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -29,6 +29,9 @@ #include #include #include +#ifndef __GENKSYMS__ +#include +#endif #include #include "dma-iommu.h" @@ -1015,6 +1018,8 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, return DMA_MAPPING_ERROR; } + trace_swiotlb_bounced(dev, phys, size); + aligned_size = iova_align(iovad, size); phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size, iova_mask(iovad), dir, attrs); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 2378cfb7443e..509d03eb3e8d 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -97,6 +97,7 @@ config LEDS_ARIEL config LEDS_AW2013 tristate "LED support for Awinic AW2013" depends on LEDS_CLASS && I2C && OF + select REGMAP_I2C help This option enables support for the AW2013 3-channel LED driver. diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c index 8ae0d2d284af..3e69a7bde928 100644 --- a/drivers/leds/trigger/ledtrig-tty.c +++ b/drivers/leds/trigger/ledtrig-tty.c @@ -168,6 +168,10 @@ static void ledtrig_tty_deactivate(struct led_classdev *led_cdev) cancel_delayed_work_sync(&trigger_data->dwork); + kfree(trigger_data->ttyname); + tty_kref_put(trigger_data->tty); + trigger_data->tty = NULL; + kfree(trigger_data); } diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index ef70f60a73ed..653fd80f8317 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -671,6 +671,7 @@ config DM_ZONED config DM_AUDIT bool "DM audit events" + depends on BLK_DEV_DM depends on AUDIT help Generate audit events for device-mapper. diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index aebb7ef10e63..e86fa736dc4e 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -265,6 +265,7 @@ struct bcache_device { #define BCACHE_DEV_WB_RUNNING 3 #define BCACHE_DEV_RATE_DW_RUNNING 4 int nr_stripes; +#define BCH_MIN_STRIPE_SZ ((4 << 20) >> SECTOR_SHIFT) unsigned int stripe_size; atomic_t *stripe_sectors_dirty; unsigned long *full_dirty_stripes; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 05e3157fc7b4..6a2f57ae0f3c 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -974,6 +974,9 @@ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, * * The btree node will have either a read or a write lock held, depending on * level and op->lock. + * + * Note: Only error code or btree pointer will be returned, it is unnecessary + * for callers to check NULL pointer. */ struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, struct bkey *k, int level, bool write, @@ -1085,6 +1088,10 @@ static void btree_node_free(struct btree *b) mutex_unlock(&b->c->bucket_lock); } +/* + * Only error code or btree pointer will be returned, it is unnecessary for + * callers to check NULL pointer. 
+ */ struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, int level, bool wait, struct btree *parent) diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7660962e7b8b..70e5bd8961d2 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -905,6 +905,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, if (!d->stripe_size) d->stripe_size = 1 << 31; + else if (d->stripe_size < BCH_MIN_STRIPE_SZ) + d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size); n = DIV_ROUND_UP_ULL(sectors, d->stripe_size); if (!n || n > max_stripes) { @@ -2017,7 +2019,7 @@ static int run_cache_set(struct cache_set *c) c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL); - if (IS_ERR_OR_NULL(c->root)) + if (IS_ERR(c->root)) goto err; list_del_init(&c->root->list); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 01c7c6ca4789..18c6e0d2877b 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -913,7 +913,7 @@ static int bch_dirty_init_thread(void *arg) int cur_idx, prev_idx, skip_nr; k = p = NULL; - cur_idx = prev_idx = 0; + prev_idx = 0; bch_btree_iter_init(&c->root->keys, &iter, NULL); k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 45b09be3d86b..8f077fc34f9a 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1923,6 +1923,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c) } EXPORT_SYMBOL_GPL(dm_bufio_client_destroy); +void dm_bufio_client_reset(struct dm_bufio_client *c) +{ + drop_buffers(c); + flush_work(&c->shrink_work); +} +EXPORT_SYMBOL_GPL(dm_bufio_client_reset); + void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start) { c->start = start; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index fe7dad3ffa75..77fcff82c82a 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -1763,11 +1763,12 @@ static void integrity_metadata(struct work_struct *w) sectors_to_process = dio->range.n_sectors; __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) { + struct bio_vec bv_copy = bv; unsigned int pos; char *mem, *checksums_ptr; again: - mem = bvec_kmap_local(&bv); + mem = bvec_kmap_local(&bv_copy); pos = 0; checksums_ptr = checksums; do { @@ -1776,7 +1777,7 @@ static void integrity_metadata(struct work_struct *w) sectors_to_process -= ic->sectors_per_block; pos += ic->sectors_per_block << SECTOR_SHIFT; sector += ic->sectors_per_block; - } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack); + } while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack); kunmap_local(mem); r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset, @@ -1801,9 +1802,9 @@ static void integrity_metadata(struct work_struct *w) if (!sectors_to_process) break; - if (unlikely(pos < bv.bv_len)) { - bv.bv_offset += pos; - bv.bv_len -= pos; + if (unlikely(pos < bv_copy.bv_len)) { + bv_copy.bv_offset += pos; + bv_copy.bv_len -= pos; goto again; } } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 4a0e15109997..bb0e0a270f62 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -597,6 +597,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd) r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION, &pmd->tm, &pmd->metadata_sm); if (r < 0) { + pmd->tm = 
NULL; + pmd->metadata_sm = NULL; DMERR("tm_create_with_sm failed"); return r; } @@ -605,6 +607,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd) if (IS_ERR(pmd->data_sm)) { DMERR("sm_disk_create failed"); r = PTR_ERR(pmd->data_sm); + pmd->data_sm = NULL; goto bad_cleanup_tm; } @@ -635,11 +638,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd) bad_cleanup_nb_tm: dm_tm_destroy(pmd->nb_tm); + pmd->nb_tm = NULL; bad_cleanup_data_sm: dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; bad_cleanup_tm: dm_tm_destroy(pmd->tm); + pmd->tm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; return r; } @@ -705,6 +712,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd) sizeof(disk_super->metadata_space_map_root), &pmd->tm, &pmd->metadata_sm); if (r < 0) { + pmd->tm = NULL; + pmd->metadata_sm = NULL; DMERR("tm_open_with_sm failed"); goto bad_unlock_sblock; } @@ -714,6 +723,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd) if (IS_ERR(pmd->data_sm)) { DMERR("sm_disk_open failed"); r = PTR_ERR(pmd->data_sm); + pmd->data_sm = NULL; goto bad_cleanup_tm; } @@ -740,9 +750,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd) bad_cleanup_data_sm: dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; bad_cleanup_tm: dm_tm_destroy(pmd->tm); + pmd->tm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; bad_unlock_sblock: dm_bm_unlock(sblock); @@ -789,9 +802,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd, bool destroy_bm) { dm_sm_destroy(pmd->data_sm); + pmd->data_sm = NULL; dm_sm_destroy(pmd->metadata_sm); + pmd->metadata_sm = NULL; dm_tm_destroy(pmd->nb_tm); + pmd->nb_tm = NULL; dm_tm_destroy(pmd->tm); + pmd->tm = NULL; if (destroy_bm) dm_block_manager_destroy(pmd->bm); } @@ -999,8 +1016,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd) __func__, r); } pmd_write_unlock(pmd); - if (!pmd->fail_io) - __destroy_persistent_data_objects(pmd, true); + __destroy_persistent_data_objects(pmd, true); kfree(pmd); return 0; @@ -1875,53 +1891,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd) int dm_pool_abort_metadata(struct dm_pool_metadata *pmd) { int r = -EINVAL; - struct dm_block_manager *old_bm = NULL, *new_bm = NULL; /* fail_io is double-checked with pmd->root_lock held below */ if (unlikely(pmd->fail_io)) return r; - /* - * Replacement block manager (new_bm) is created and old_bm destroyed outside of - * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of - * shrinker associated with the block manager's bufio client vs pmd root_lock). 
- * - must take shrinker_rwsem without holding pmd->root_lock */ - new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT, - THIN_MAX_CONCURRENT_LOCKS); - pmd_write_lock(pmd); if (pmd->fail_io) { pmd_write_unlock(pmd); - goto out; + return r; } - __set_abort_with_changes_flags(pmd); - __destroy_persistent_data_objects(pmd, false); - old_bm = pmd->bm; - if (IS_ERR(new_bm)) { - DMERR("could not create block manager during abort"); - pmd->bm = NULL; - r = PTR_ERR(new_bm); - goto out_unlock; - } - pmd->bm = new_bm; + /* destroy data_sm/metadata_sm/nb_tm/tm */ + __destroy_persistent_data_objects(pmd, false); + + /* reset bm */ + dm_block_manager_reset(pmd->bm); + + /* rebuild data_sm/metadata_sm/nb_tm/tm */ r = __open_or_format_metadata(pmd, false); - if (r) { - pmd->bm = NULL; - goto out_unlock; - } - new_bm = NULL; -out_unlock: if (r) pmd->fail_io = true; pmd_write_unlock(pmd); - dm_block_manager_destroy(old_bm); -out: - if (new_bm && !IS_ERR(new_bm)) - dm_block_manager_destroy(new_bm); - return r; } diff --git a/drivers/md/md.c b/drivers/md/md.c index 0c2801d77090..6120f26a7969 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -528,6 +528,9 @@ static void md_end_flush(struct bio *bio) rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&mddev->flush_pending)) { + /* The pair is percpu_ref_get() from md_flush_request() */ + percpu_ref_put(&mddev->active_io); + /* The pre-request flush has finished */ queue_work(md_wq, &mddev->flush_work); } @@ -547,12 +550,8 @@ static void submit_flushes(struct work_struct *ws) rdev_for_each_rcu(rdev, mddev) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { - /* Take two references, one is dropped - * when request finishes, one after - * we reclaim rcu_read_lock - */ struct bio *bi; - atomic_inc(&rdev->nr_pending); + atomic_inc(&rdev->nr_pending); rcu_read_unlock(); bi = bio_alloc_bioset(rdev->bdev, 0, @@ -563,7 +562,6 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&mddev->flush_pending); submit_bio(bi); rcu_read_lock(); - rdev_dec_pending(rdev, mddev); } rcu_read_unlock(); if (atomic_dec_and_test(&mddev->flush_pending)) @@ -616,6 +614,18 @@ bool md_flush_request(struct mddev *mddev, struct bio *bio) /* new request after previous flush is completed */ if (ktime_after(req_start, mddev->prev_flush_start)) { WARN_ON(mddev->flush_bio); + /* + * Grab a reference to make sure mddev_suspend() will wait for + * this flush to be done. + * + * md_flush_request() is called under md_handle_request() and + * 'active_io' is already grabbed, hence percpu_ref_is_zero() + * won't pass; percpu_ref_tryget_live() can't be used because + * percpu_ref_kill() can be called by mddev_suspend() + * concurrently. 
+ */ + WARN_ON(percpu_ref_is_zero(&mddev->active_io)); + percpu_ref_get(&mddev->active_io); mddev->flush_bio = bio; bio = NULL; } diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c index 1f40100908d7..2bbfbb704c75 100644 --- a/drivers/md/persistent-data/dm-block-manager.c +++ b/drivers/md/persistent-data/dm-block-manager.c @@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm) } EXPORT_SYMBOL_GPL(dm_block_manager_destroy); +void dm_block_manager_reset(struct dm_block_manager *bm) +{ + dm_bufio_client_reset(bm->bufio); +} +EXPORT_SYMBOL_GPL(dm_block_manager_reset); + unsigned int dm_bm_block_size(struct dm_block_manager *bm) { return dm_bufio_get_block_size(bm->bufio); diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h index 58a23b8ec190..4371d85d3c25 100644 --- a/drivers/md/persistent-data/dm-block-manager.h +++ b/drivers/md/persistent-data/dm-block-manager.h @@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create( struct block_device *bdev, unsigned int block_size, unsigned int max_held_per_thread); void dm_block_manager_destroy(struct dm_block_manager *bm); +void dm_block_manager_reset(struct dm_block_manager *bm); unsigned int dm_bm_block_size(struct dm_block_manager *bm); dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm); diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h index a015cd11f6e9..85aa0a3974fe 100644 --- a/drivers/md/persistent-data/dm-space-map.h +++ b/drivers/md/persistent-data/dm-space-map.h @@ -76,7 +76,8 @@ struct dm_space_map { static inline void dm_sm_destroy(struct dm_space_map *sm) { - sm->destroy(sm); + if (sm) + sm->destroy(sm); } static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks) diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 39885f835584..557a3ecfe75a 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone); void dm_tm_destroy(struct dm_transaction_manager *tm) { + if (!tm) + return; + if (!tm->is_clone) wipe_shadow_table(tm); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 30f906a67def..76f7ca53d812 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1972,12 +1972,12 @@ static void end_sync_write(struct bio *bio) } static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector, - int sectors, struct page *page, int rw) + int sectors, struct page *page, blk_opf_t rw) { if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) /* success */ return 1; - if (rw == WRITE) { + if (rw == REQ_OP_WRITE) { set_bit(WriteErrorSeen, &rdev->flags); if (!test_and_set_bit(WantReplacement, &rdev->flags)) @@ -2094,7 +2094,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) rdev = conf->mirrors[d].rdev; if (r1_sync_page_io(rdev, sect, s, pages[idx], - WRITE) == 0) { + REQ_OP_WRITE) == 0) { r1_bio->bios[d]->bi_end_io = NULL; rdev_dec_pending(rdev, mddev); } @@ -2109,7 +2109,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio) rdev = conf->mirrors[d].rdev; if (r1_sync_page_io(rdev, sect, s, pages[idx], - READ) != 0) + REQ_OP_READ) != 0) atomic_add(s, &rdev->corrected_errors); } sectors -= s; @@ -2321,7 +2321,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, atomic_inc(&rdev->nr_pending); 
rcu_read_unlock(); r1_sync_page_io(rdev, sect, s, - conf->tmppage, WRITE); + conf->tmppage, REQ_OP_WRITE); rdev_dec_pending(rdev, mddev); } else rcu_read_unlock(); @@ -2338,7 +2338,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk, atomic_inc(&rdev->nr_pending); rcu_read_unlock(); if (r1_sync_page_io(rdev, sect, s, - conf->tmppage, READ)) { + conf->tmppage, REQ_OP_READ)) { atomic_add(s, &rdev->corrected_errors); pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n", mdname(mddev), s, diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index 9f9a97652708..d352e028491a 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -104,6 +104,8 @@ static int dvb_device_open(struct inode *inode, struct file *file) err = file->f_op->open(inode, file); up_read(&minor_rwsem); mutex_unlock(&dvbdev_mutex); + if (err) + dvb_device_put(dvbdev); return err; } fail: diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index 9a0d43c7ba9e..ce99f7dfb5a5 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1894,7 +1894,7 @@ static int m88ds3103_probe(struct i2c_client *client, /* get frontend address */ ret = regmap_read(dev->regmap, 0x29, &utmp); if (ret) - goto err_kfree; + goto err_del_adapters; dev->dt_addr = ((utmp & 0x80) == 0) ? 0x42 >> 1 : 0x40 >> 1; dev_dbg(&client->dev, "dt addr is 0x%02x\n", dev->dt_addr); @@ -1902,11 +1902,14 @@ static int m88ds3103_probe(struct i2c_client *client, dev->dt_addr); if (IS_ERR(dev->dt_client)) { ret = PTR_ERR(dev->dt_client); - goto err_kfree; + goto err_del_adapters; } } return 0; + +err_del_adapters: + i2c_mux_del_adapters(dev->muxc); err_kfree: kfree(dev); err: diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c index e9a4f8abd21c..3071b61946c3 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c @@ -1412,7 +1412,6 @@ static int mtk_jpeg_remove(struct platform_device *pdev) { struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev); - cancel_delayed_work_sync(&jpeg->job_timeout_work); pm_runtime_disable(&pdev->dev); video_unregister_device(jpeg->vdev); v4l2_m2m_release(jpeg->m2m_dev); diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c index afbbfd5d02bc..6d200e23754e 100644 --- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c +++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c @@ -188,6 +188,7 @@ int mtk_jpeg_dec_fill_param(struct mtk_jpeg_dec_param *param) return 0; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_fill_param); u32 mtk_jpeg_dec_get_int_status(void __iomem *base) { @@ -199,6 +200,7 @@ u32 mtk_jpeg_dec_get_int_status(void __iomem *base) return ret; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_get_int_status); u32 mtk_jpeg_dec_enum_result(u32 irq_result) { @@ -215,11 +217,13 @@ u32 mtk_jpeg_dec_enum_result(u32 irq_result) return MTK_JPEG_DEC_RESULT_ERROR_UNKNOWN; } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_enum_result); void mtk_jpeg_dec_start(void __iomem *base) { writel(0, base + JPGDEC_REG_TRIG); } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_start); static void mtk_jpeg_dec_soft_reset(void __iomem *base) { @@ -239,6 +243,7 @@ void mtk_jpeg_dec_reset(void __iomem *base) mtk_jpeg_dec_soft_reset(base); mtk_jpeg_dec_hard_reset(base); } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_reset); static void 
mtk_jpeg_dec_set_brz_factor(void __iomem *base, u8 yscale_w, u8 yscale_h, u8 uvscale_w, u8 uvscale_h) @@ -407,3 +412,4 @@ void mtk_jpeg_dec_set_config(void __iomem *base, config->dma_last_mcu); mtk_jpeg_dec_set_pause_mcu_idx(base, config->total_mcu); } +EXPORT_SYMBOL_GPL(mtk_jpeg_dec_set_config); diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c index 905072871ed2..196f2bba419f 100644 --- a/drivers/media/platform/nxp/imx-mipi-csis.c +++ b/drivers/media/platform/nxp/imx-mipi-csis.c @@ -1553,8 +1553,10 @@ static int mipi_csis_remove(struct platform_device *pdev) v4l2_async_nf_cleanup(&csis->notifier); v4l2_async_unregister_subdev(&csis->sd); + if (!pm_runtime_enabled(&pdev->dev)) + mipi_csis_runtime_suspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); - mipi_csis_runtime_suspend(&pdev->dev); mipi_csis_clk_disable(csis); media_entity_cleanup(&csis->sd.entity); fwnode_handle_put(csis->sd.fwnode); diff --git a/drivers/media/platform/qcom/camss/camss-csid-gen2.c b/drivers/media/platform/qcom/camss/camss-csid-gen2.c index 904208f6f954..0147cc062e1a 100644 --- a/drivers/media/platform/qcom/camss/camss-csid-gen2.c +++ b/drivers/media/platform/qcom/camss/camss-csid-gen2.c @@ -334,13 +334,14 @@ static const struct csid_format csid_formats[] = { }, }; -static void csid_configure_stream(struct csid_device *csid, u8 enable) +static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc) { struct csid_testgen_config *tg = &csid->testgen; u32 val; u32 phy_sel = 0; u8 lane_cnt = csid->phy.lane_cnt; - struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_SRC]; + /* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */ + struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc]; const struct csid_format *format = csid_get_fmt_entry(csid->formats, csid->nformats, input_format->code); @@ -351,8 +352,19 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) phy_sel = csid->phy.csiphy_id; if (enable) { - u8 vc = 0; /* Virtual Channel 0 */ - u8 dt_id = vc * 4; + /* + * DT_ID is a two bit bitfield that is concatenated with + * the four least significant bits of the five bit VC + * bitfield to generate an internal CID value. 
+ * + * CSID_RDI_CFG0(vc) + * DT_ID : 28:27 + * VC : 26:22 + * DT : 21:16 + * + * CID : VC 3:0 << 2 | DT_ID 1:0 + */ + u8 dt_id = vc & 0x03; if (tg->enabled) { /* configure one DT, infinite frames */ @@ -392,42 +404,42 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) val |= format->data_type << RDI_CFG0_DATA_TYPE; val |= vc << RDI_CFG0_VIRTUAL_CHANNEL; val |= dt_id << RDI_CFG0_DT_ID; - writel_relaxed(val, csid->base + CSID_RDI_CFG0(0)); + writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc)); /* CSID_TIMESTAMP_STB_POST_IRQ */ val = 2 << RDI_CFG1_TIMESTAMP_STB_SEL; - writel_relaxed(val, csid->base + CSID_RDI_CFG1(0)); + writel_relaxed(val, csid->base + CSID_RDI_CFG1(vc)); val = 1; - writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(0)); + writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc)); val = 0; - writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(0)); + writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(vc)); val = 1; - writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(0)); + writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc)); val = 0; - writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(0)); + writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc)); val = 1; - writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(0)); + writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(vc)); val = 0; - writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(0)); + writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(vc)); val = 1; - writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(0)); + writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(vc)); val = 0; - writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(0)); + writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(vc)); val = 0; - writel_relaxed(val, csid->base + CSID_RDI_CTRL(0)); + writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc)); - val = readl_relaxed(csid->base + CSID_RDI_CFG0(0)); + val = readl_relaxed(csid->base + CSID_RDI_CFG0(vc)); val |= 1 << RDI_CFG0_ENABLE; - writel_relaxed(val, csid->base + CSID_RDI_CFG0(0)); + writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc)); } if (tg->enabled) { @@ -446,6 +458,8 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0); val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN; + if (vc > 3) + val |= 1 << CSI2_RX_CFG1_VC_MODE; val |= 1 << CSI2_RX_CFG1_MISR_EN; writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1); @@ -453,7 +467,16 @@ static void csid_configure_stream(struct csid_device *csid, u8 enable) val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; else val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD; - writel_relaxed(val, csid->base + CSID_RDI_CTRL(0)); + writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc)); +} + +static void csid_configure_stream(struct csid_device *csid, u8 enable) +{ + u8 i; + /* Loop through all enabled VCs and configure stream for each */ + for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) + if (csid->phy.en_vc & BIT(i)) + __csid_configure_stream(csid, enable, i); } static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val) @@ -499,6 +522,7 @@ static irqreturn_t csid_isr(int irq, void *dev) struct csid_device *csid = dev; u32 val; u8 reset_done; + int i; val = readl_relaxed(csid->base + CSID_TOP_IRQ_STATUS); writel_relaxed(val, csid->base + CSID_TOP_IRQ_CLEAR); @@ -507,8 +531,12 @@ 
static irqreturn_t csid_isr(int irq, void *dev) val = readl_relaxed(csid->base + CSID_CSI2_RX_IRQ_STATUS); writel_relaxed(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR); - val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(0)); - writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(0)); + /* Read and clear IRQ status for each enabled RDI channel */ + for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++) + if (csid->phy.en_vc & BIT(i)) { + val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i)); + writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i)); + } val = 1 << IRQ_CMD_CLEAR; writel_relaxed(val, csid->base + CSID_IRQ_CMD); diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 88f188e0f750..6360314f04a6 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c @@ -196,6 +196,8 @@ static int csid_set_power(struct v4l2_subdev *sd, int on) return ret; } + csid->phy.need_vc_update = true; + enable_irq(csid->irq); ret = csid->ops->reset(csid); @@ -249,7 +251,10 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) return -ENOLINK; } - csid->ops->configure_stream(csid, enable); + if (csid->phy.need_vc_update) { + csid->ops->configure_stream(csid, enable); + csid->phy.need_vc_update = false; + } return 0; } @@ -460,6 +465,7 @@ static int csid_set_format(struct v4l2_subdev *sd, { struct csid_device *csid = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *format; + int i; format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which); if (format == NULL) @@ -468,14 +474,14 @@ static int csid_set_format(struct v4l2_subdev *sd, csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which); *format = fmt->format; - /* Propagate the format from sink to source */ + /* Propagate the format from sink to source pads */ if (fmt->pad == MSM_CSID_PAD_SINK) { - format = __csid_get_format(csid, sd_state, MSM_CSID_PAD_SRC, - fmt->which); + for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) { + format = __csid_get_format(csid, sd_state, i, fmt->which); - *format = fmt->format; - csid_try_format(csid, sd_state, MSM_CSID_PAD_SRC, format, - fmt->which); + *format = fmt->format; + csid_try_format(csid, sd_state, i, format, fmt->which); + } } return 0; @@ -738,7 +744,6 @@ static int csid_link_setup(struct media_entity *entity, struct csid_device *csid; struct csiphy_device *csiphy; struct csiphy_lanes_cfg *lane_cfg; - struct v4l2_subdev_format format = { 0 }; sd = media_entity_to_v4l2_subdev(entity); csid = v4l2_get_subdevdata(sd); @@ -761,11 +766,22 @@ static int csid_link_setup(struct media_entity *entity, lane_cfg = &csiphy->cfg.csi2->lane_cfg; csid->phy.lane_cnt = lane_cfg->num_data; csid->phy.lane_assign = csid_get_lane_assign(lane_cfg); + } + /* Decide which virtual channels to enable based on which source pads are enabled */ + if (local->flags & MEDIA_PAD_FL_SOURCE) { + struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); + struct csid_device *csid = v4l2_get_subdevdata(sd); + struct device *dev = csid->camss->dev; - /* Reset format on source pad to sink pad format */ - format.pad = MSM_CSID_PAD_SRC; - format.which = V4L2_SUBDEV_FORMAT_ACTIVE; - csid_set_format(&csid->subdev, NULL, &format); + if (flags & MEDIA_LNK_FL_ENABLED) + csid->phy.en_vc |= BIT(local->index - 1); + else + csid->phy.en_vc &= ~BIT(local->index - 1); + + csid->phy.need_vc_update = true; + + dev_dbg(dev, "%s: Enabled CSID virtual channels mask 0x%x\n", + __func__, 
csid->phy.en_vc); } return 0; @@ -816,6 +832,7 @@ int msm_csid_register_entity(struct csid_device *csid, struct v4l2_subdev *sd = &csid->subdev; struct media_pad *pads = csid->pads; struct device *dev = csid->camss->dev; + int i; int ret; v4l2_subdev_init(sd, &csid_v4l2_ops); @@ -852,7 +869,8 @@ int msm_csid_register_entity(struct csid_device *csid, } pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK; - pads[MSM_CSID_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE; + for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) + pads[i].flags = MEDIA_PAD_FL_SOURCE; sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; sd->entity.ops = &csid_media_ops; diff --git a/drivers/media/platform/qcom/camss/camss-csid.h b/drivers/media/platform/qcom/camss/camss-csid.h index f06040e44c51..d4b48432a097 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.h +++ b/drivers/media/platform/qcom/camss/camss-csid.h @@ -19,8 +19,13 @@ #include #define MSM_CSID_PAD_SINK 0 -#define MSM_CSID_PAD_SRC 1 -#define MSM_CSID_PADS_NUM 2 +#define MSM_CSID_PAD_FIRST_SRC 1 +#define MSM_CSID_PADS_NUM 5 + +#define MSM_CSID_PAD_SRC (MSM_CSID_PAD_FIRST_SRC) + +/* CSID hardware can demultiplex up to 4 outputs */ +#define MSM_CSID_MAX_SRC_STREAMS 4 #define DATA_TYPE_EMBEDDED_DATA_8BIT 0x12 #define DATA_TYPE_YUV420_8BIT 0x18 @@ -81,6 +86,8 @@ struct csid_phy_config { u8 csiphy_id; u8 lane_cnt; u32 lane_assign; + u32 en_vc; + u8 need_vc_update; }; struct csid_device; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index f2475c6235ea..2b76339f9381 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -582,7 +582,7 @@ static int rkisp1_probe(struct platform_device *pdev) ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev); if (ret) - goto err_pm_runtime_disable; + goto err_media_dev_cleanup; ret = media_device_register(&rkisp1->media_dev); if (ret) { @@ -617,6 +617,8 @@ static int rkisp1_probe(struct platform_device *pdev) media_device_unregister(&rkisp1->media_dev); err_unreg_v4l2_dev: v4l2_device_unregister(&rkisp1->v4l2_dev); +err_media_dev_cleanup: + media_device_cleanup(&rkisp1->media_dev); err_pm_runtime_disable: pm_runtime_disable(&pdev->dev); return ret; @@ -637,6 +639,8 @@ static int rkisp1_remove(struct platform_device *pdev) media_device_unregister(&rkisp1->media_dev); v4l2_device_unregister(&rkisp1->v4l2_dev); + media_device_cleanup(&rkisp1->media_dev); + pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 08840ba313e7..69a2442f3122 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -813,6 +813,8 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid) if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) { vpu->encoder = func; + v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD); + v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD); } else { vpu->decoder = func; v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD); diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c index 30e650edaea8..b2da48936e3f 100644 --- a/drivers/media/platform/verisilicon/hantro_v4l2.c +++ b/drivers/media/platform/verisilicon/hantro_v4l2.c @@ -759,6 +759,9 @@ const struct v4l2_ioctl_ops hantro_ioctl_ops = { .vidioc_g_selection = vidioc_g_selection, .vidioc_s_selection = 
vidioc_s_selection, + .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd, + .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, .vidioc_encoder_cmd = vidioc_encoder_cmd, }; diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c index 727e6268567f..f1feccc28bf0 100644 --- a/drivers/media/usb/cx231xx/cx231xx-core.c +++ b/drivers/media/usb/cx231xx/cx231xx-core.c @@ -1024,6 +1024,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, if (!dev->video_mode.isoc_ctl.urb) { dev_err(dev->dev, "cannot alloc memory for usb buffers\n"); + kfree(dma_q->p_left_data); return -ENOMEM; } @@ -1033,6 +1034,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets, dev_err(dev->dev, "cannot allocate memory for usbtransfer\n"); kfree(dev->video_mode.isoc_ctl.urb); + kfree(dma_q->p_left_data); return -ENOMEM; } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c index 14170a5d72b3..1764674de98b 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-context.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c @@ -268,7 +268,8 @@ void pvr2_context_disconnect(struct pvr2_context *mp) { pvr2_hdw_disconnect(mp->hdw); mp->disconnect_flag = !0; - pvr2_context_notify(mp); + if (!pvr2_context_shutok()) + pvr2_context_notify(mp); } diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index cfbee2cfba6b..c50387600b81 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -301,8 +301,8 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss, snprintf(name, sizeof(name), "%s-div", devname); tmp = clk_register_fractional_divider(NULL, name, __clk_get_name(tmp), + 0, lpss->priv, 1, 15, 16, 15, CLK_FRAC_DIVIDER_POWER_OF_TWO_PS, - lpss->priv, 1, 15, 16, 15, 0, NULL); if (IS_ERR(tmp)) return PTR_ERR(tmp); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 0871c97ae51a..e599ad55ba1e 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -103,6 +103,10 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) } syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%pa", np, &res.start); + if (!syscon_config.name) { + ret = -ENOMEM; + goto err_regmap; + } syscon_config.reg_stride = reg_io_width; syscon_config.val_bits = reg_io_width * 8; syscon_config.max_register = resource_size(&res) - reg_io_width; diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 43afe40966e5..1ea1ae34b7a7 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -677,7 +677,7 @@ static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args) if (!sec_attest_info) return -ENOMEM; - info = kmalloc(sizeof(*info), GFP_KERNEL); + info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { rc = -ENOMEM; goto free_sec_attest_info; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 4e88bd49ebb9..317e20d72f77 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -869,9 +869,10 @@ static const struct block_device_operations mmc_bdops = { static int mmc_blk_part_switch_pre(struct mmc_card *card, unsigned int part_type) { + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { + if ((part_type & mask) == mask) { if (card->ext_csd.cmdq_en) { ret = 
mmc_cmdq_disable(card); if (ret) @@ -886,9 +887,10 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card, static int mmc_blk_part_switch_post(struct mmc_card *card, unsigned int part_type) { + const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB; int ret = 0; - if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) { + if ((part_type & mask) == mask) { mmc_retune_unpause(card->host); if (card->reenable_cmdq && !card->ext_csd.cmdq_en) ret = mmc_cmdq_enable(card); @@ -3183,4 +3185,3 @@ module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver"); - diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 095724aa0e6c..2a47f08f41aa 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -671,6 +671,7 @@ EXPORT_SYMBOL(mmc_remove_host); */ void mmc_free_host(struct mmc_host *host) { + cancel_delayed_work_sync(&host->detect); mmc_pwrseq_free(host); put_device(&host->class_dev); } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 9b5a2cb110b3..d84bdb69f56b 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1061,14 +1061,15 @@ config MMC_SDHCI_XENON config MMC_SDHCI_OMAP tristate "TI SDHCI Controller Support" + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) - support present in TI's DRA7 SOCs. The controller supports - SD/MMC/SDIO devices. + support present in TI's Keystone/OMAP2+/DRA7 SOCs. The controller + supports SD/MMC/SDIO devices. If you have a controller with this interface, say Y or M here. @@ -1076,14 +1077,15 @@ config MMC_SDHCI_OMAP config MMC_SDHCI_AM654 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" + depends on ARCH_K3 || COMPILE_TEST depends on MMC_SDHCI_PLTFM && OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI select REGMAP_MMIO help This selects the Secure Digital Host Controller Interface (SDHCI) - support present in TI's AM654 SOCs. The controller supports - SD/MMC/SDIO devices. + support present in TI's AM65x/AM64x/AM62x/J721E SOCs. The controller + supports SD/MMC/SDIO devices. If you have a controller with this interface, say Y or M here. diff --git a/drivers/mmc/host/meson-mx-sdhc-mmc.c b/drivers/mmc/host/meson-mx-sdhc-mmc.c index da85c2f2acb8..c0e3b1634a88 100644 --- a/drivers/mmc/host/meson-mx-sdhc-mmc.c +++ b/drivers/mmc/host/meson-mx-sdhc-mmc.c @@ -269,7 +269,7 @@ static int meson_mx_sdhc_enable_clks(struct mmc_host *mmc) static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios) { struct meson_mx_sdhc_host *host = mmc_priv(mmc); - u32 rx_clk_phase; + u32 val, rx_clk_phase; int ret; meson_mx_sdhc_disable_clks(mmc); @@ -290,27 +290,11 @@ static int meson_mx_sdhc_set_clk(struct mmc_host *mmc, struct mmc_ios *ios) mmc->actual_clock = clk_get_rate(host->sd_clk); /* - * according to Amlogic the following latching points are - * selected with empirical values, there is no (known) formula - * to calculate these. + * Phase 90 should work in most cases. 
For data transmission, + meson_mx_sdhc_execute_tuning() will find an accurate value */ - if (mmc->actual_clock > 100000000) { - rx_clk_phase = 1; - } else if (mmc->actual_clock > 45000000) { - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) - rx_clk_phase = 15; - else - rx_clk_phase = 11; - } else if (mmc->actual_clock >= 25000000) { - rx_clk_phase = 15; - } else if (mmc->actual_clock > 5000000) { - rx_clk_phase = 23; - } else if (mmc->actual_clock > 1000000) { - rx_clk_phase = 55; - } else { - rx_clk_phase = 1061; - } - + regmap_read(host->regmap, MESON_SDHC_CLKC, &val); + rx_clk_phase = FIELD_GET(MESON_SDHC_CLKC_CLK_DIV, val) / 4; regmap_update_bits(host->regmap, MESON_SDHC_CLK2, MESON_SDHC_CLK2_RX_CLK_PHASE, FIELD_PREP(MESON_SDHC_CLK2_RX_CLK_PHASE, diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c index 525f979e2a97..d5e1d735beb2 100644 --- a/drivers/mmc/host/sdhci-sprd.c +++ b/drivers/mmc/host/sdhci-sprd.c @@ -228,15 +228,19 @@ static inline void _sdhci_sprd_set_clock(struct sdhci_host *host, div = ((div & 0x300) >> 2) | ((div & 0xFF) << 8); sdhci_enable_clk(host, div); + val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); + mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; /* Enable CLK_AUTO when the clock is greater than 400K. */ if (clk > 400000) { - val = sdhci_readl(host, SDHCI_SPRD_REG_32_BUSY_POSI); - mask = SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN | - SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN; if (mask != (val & mask)) { val |= mask; sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); } + } else { + if (val & mask) { + val &= ~mask; + sdhci_writel(host, val, SDHCI_SPRD_REG_32_BUSY_POSI); + } } } diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 60b222799871..8ee60605a6dc 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -463,7 +463,7 @@ static void blktrans_notify_add(struct mtd_info *mtd) { struct mtd_blktrans_ops *tr; - if (mtd->type == MTD_ABSENT) + if (mtd->type == MTD_ABSENT || mtd->type == MTD_UBIVOLUME) return; list_for_each_entry(tr, &blktrans_majors, list) @@ -503,7 +503,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) mutex_lock(&mtd_table_mutex); list_add(&tr->list, &blktrans_majors); mtd_for_each_device(mtd) - if (mtd->type != MTD_ABSENT) + if (mtd->type != MTD_ABSENT && mtd->type != MTD_UBIVOLUME) tr->add_mtd(tr, mtd); mutex_unlock(&mtd_table_mutex); return 0; diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 02d500176838..bea1a7d3edd7 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -20,7 +20,7 @@ #define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ -#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait +#define IFC_TIMEOUT_MSECS 1000 /* Maximum timeout to wait for IFC NAND Machine */ struct fsl_ifc_ctrl; diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 2d20be6ffb7e..ddd087c2c3ed 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -80,11 +80,11 @@ static struct mld2_grec mldv2_zero_grec; static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb) { - BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) > + BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct tc_skb_cb) > sizeof_field(struct sk_buff, cb)); return (struct amt_skb_cb *)((void *)skb->cb + - sizeof(struct qdisc_skb_cb)); + sizeof(struct tc_skb_cb)); } static void 
__amt_source_gc_work(void) diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index ef1a4a7c47b2..3efd55669056 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -1119,6 +1119,8 @@ static int vsc73xx_gpio_probe(struct vsc73xx *vsc) vsc->gc.label = devm_kasprintf(vsc->dev, GFP_KERNEL, "VSC%04x", vsc->chipid); + if (!vsc->gc.label) + return -ENOMEM; vsc->gc.ngpio = 4; vsc->gc.owner = THIS_MODULE; vsc->gc.parent = vsc->dev; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index 3d6f0a466a9e..f9f886289b97 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, * compare it to the stored version, just create the meta */ if (io_sq->disable_meta_caching) { - if (unlikely(!ena_tx_ctx->meta_valid)) - return -EINVAL; - *have_meta = true; return ena_com_create_meta(io_sq, ena_meta); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 42a66b74c1e5..044b8afde69a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring, struct ena_tx_buffer *tx_info); static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter, int first_index, int count); +static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter, + int first_index, int count); /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */ static void ena_increase_stat(u64 *statp, u64 cnt, @@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter) static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter) { + u32 xdp_first_ring = adapter->xdp_first_ring; + u32 xdp_num_queues = adapter->xdp_num_queues; int rc = 0; - rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto setup_err; - rc = ena_create_io_tx_queues_in_range(adapter, - adapter->xdp_first_ring, - adapter->xdp_num_queues); + rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues); if (rc) goto create_err; return 0; create_err: - ena_free_all_io_tx_resources(adapter); + ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues); setup_err: return rc; } @@ -1617,20 +1618,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring, } } -static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp) +static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs) { struct ena_rx_buffer *rx_info; int ret; + /* XDP multi-buffer packets not supported */ + if (unlikely(num_descs > 1)) { + netdev_err_once(rx_ring->adapter->netdev, + "xdp: dropped unsupported multi-buffer packets\n"); + ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); + return ENA_XDP_DROP; + } + rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; xdp_prepare_buff(xdp, page_address(rx_info->page), rx_info->page_offset, rx_ring->ena_bufs[0].len, false); - /* If for some reason we received a bigger packet than - * we expect, then we simply drop it - */ - if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU)) - return 
ENA_XDP_DROP; ret = ena_xdp_execute(rx_ring, xdp); @@ -1699,7 +1703,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ena_rx_ctx.l4_proto, ena_rx_ctx.hash); if (ena_xdp_present_ring(rx_ring)) - xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp); + xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs); /* allocate skb and fill it */ if (xdp_verdict == ENA_XDP_PASS) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5a49166fa97..4d9d7d1edb9b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -938,11 +938,14 @@ void aq_ring_free(struct aq_ring_s *self) return; kfree(self->buff_ring); + self->buff_ring = NULL; - if (self->dx_ring) + if (self->dx_ring) { dma_free_coherent(aq_nic_get_dev(self->aq_nic), self->size * self->dx_size, self->dx_ring, self->dx_ring_pa); + self->dx_ring = NULL; + } } unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 5935be190b9e..5f2a6fcba967 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -866,10 +866,13 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", offset, adapter->ring_size); err = -1; - goto failed; + goto free_buffer; } return 0; +free_buffer: + kfree(tx_ring->tx_buffer); + tx_ring->tx_buffer = NULL; failed: if (adapter->ring_vir_addr != NULL) { dma_free_coherent(&pdev->dev, adapter->ring_size, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 16c490692f42..4950fde82d17 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1923,8 +1923,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, /* Skip VLAN tag if present */ if (ether_type == ETH_P_8021Q) { - struct vlan_ethhdr *vhdr = - (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e81cb825dff4..df4d88d35701 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1796,6 +1796,21 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, napi_gro_receive(&bnapi->napi, skb); } +static bool bnxt_rx_ts_valid(struct bnxt *bp, u32 flags, + struct rx_cmp_ext *rxcmp1, u32 *cmpl_ts) +{ + u32 ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); + + if (BNXT_PTP_RX_TS_VALID(flags)) + goto ts_valid; + if (!bp->ptp_all_rx_tstamp || !ts || !BNXT_ALL_RX_TS_VALID(flags)) + return false; + +ts_valid: + *cmpl_ts = ts; + return true; +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1821,6 +1836,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct sk_buff *skb; struct xdp_buff xdp; u32 flags, misc; + u32 cmpl_ts; void *data; int rc = 0; @@ -2043,10 +2059,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } } - if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) == - RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) { + if 
(bnxt_rx_ts_valid(bp, flags, rxcmp1, &cmpl_ts)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { - u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp); u64 ns, ts; if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) { @@ -10708,8 +10722,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_skbs(bp); /* Save ring stats before shutdown */ - if (bp->bnapi && irq_re_init) + if (bp->bnapi && irq_re_init) { bnxt_get_ring_stats(bp, &bp->net_stats_prev); + bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); + } if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -10717,10 +10733,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_mem(bp, irq_re_init); } -int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) +void bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { - int rc = 0; - if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { /* If we get here, it means firmware reset is in progress * while we are trying to close. We can safely proceed with @@ -10735,15 +10749,18 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) #ifdef CONFIG_BNXT_SRIOV if (bp->sriov_cfg) { + int rc; + rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait, !bp->sriov_cfg, BNXT_SRIOV_CFG_WAIT_TMO); - if (rc) - netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); + if (!rc) + netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete, proceeding to close!\n"); + else if (rc < 0) + netdev_warn(bp->dev, "SRIOV config operation interrupted, proceeding to close!\n"); } #endif __bnxt_close_nic(bp, irq_re_init, link_re_init); - return rc; } static int bnxt_close(struct net_device *dev) @@ -10958,6 +10975,34 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) clear_bit(BNXT_STATE_READ_STATS, &bp->state); } +static void bnxt_get_one_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; + u64 *hw_stats = cpr->stats.sw_stats; + + stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; + stats->rx_total_resets += sw_stats->rx.rx_resets; + stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; + stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; + stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; + stats->rx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); + stats->tx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); + stats->total_missed_irqs += sw_stats->cmn.missed_irqs; +} + +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) + bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); +} + static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { struct net_device *dev = bp->dev; @@ -12036,6 +12081,8 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!\n"); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp, 0); bnxt_hwrm_port_qstats_ext(bp, 0); @@ -13014,8 +13061,6 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } } - if 
(test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) - netdev_info(bp->dev, "Receive PF driver unload event!\n"); } #else @@ -13882,6 +13927,8 @@ static int bnxt_resume(struct device *device) if (rc) goto resume_exit; + bnxt_clear_reservations(bp, true); + if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) { rc = -ENODEV; goto resume_exit; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1d2588c92977..111098b4b606 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -160,7 +160,7 @@ struct rx_cmp { #define RX_CMP_FLAGS_ERROR (1 << 6) #define RX_CMP_FLAGS_PLACEMENT (7 << 7) #define RX_CMP_FLAGS_RSS_VALID (1 << 10) - #define RX_CMP_FLAGS_UNUSED (1 << 11) + #define RX_CMP_FLAGS_PKT_METADATA_PRESENT (1 << 11) #define RX_CMP_FLAGS_ITYPES_SHIFT 12 #define RX_CMP_FLAGS_ITYPES_MASK 0xf000 #define RX_CMP_FLAGS_ITYPE_UNKNOWN (0 << 12) @@ -187,6 +187,12 @@ struct rx_cmp { __le32 rx_cmp_rss_hash; }; +#define BNXT_PTP_RX_TS_VALID(flags) \ + (((flags) & RX_CMP_FLAGS_ITYPES_MASK) == RX_CMP_FLAGS_ITYPE_PTP_W_TS) + +#define BNXT_ALL_RX_TS_VALID(flags) \ + !((flags) & RX_CMP_FLAGS_PKT_METADATA_PRESENT) + #define RX_CMP_HASH_VALID(rxcmp) \ ((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID)) @@ -950,6 +956,17 @@ struct bnxt_sw_stats { struct bnxt_cmn_sw_stats cmn; }; +struct bnxt_total_ring_err_stats { + u64 rx_total_l4_csum_errors; + u64 rx_total_resets; + u64 rx_total_buf_errors; + u64 rx_total_oom_discards; + u64 rx_total_netpoll_discards; + u64 rx_total_ring_discards; + u64 tx_total_ring_discards; + u64 total_missed_irqs; +}; + struct bnxt_stats_mem { u64 *sw_stats; u64 *hw_masks; @@ -2007,6 +2024,8 @@ struct bnxt { u8 pri2cos_idx[8]; u8 pri2cos_valid; + struct bnxt_total_ring_err_stats ring_err_stats_prev; + u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; unsigned int hwrm_cmd_timeout; @@ -2330,7 +2349,9 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); void bnxt_reenable_sriov(struct bnxt *bp); -int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 8a6f788f6294..2bdebd9c069d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -478,15 +478,8 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change, return -ENODEV; } bnxt_ulp_stop(bp); - if (netif_running(bp->dev)) { - rc = bnxt_close_nic(bp, true, true); - if (rc) { - NL_SET_ERR_MSG_MOD(extack, "Failed to close"); - dev_close(bp->dev); - rtnl_unlock(); - break; - } - } + if (netif_running(bp->dev)) + bnxt_close_nic(bp, true, true); bnxt_vf_reps_free(bp); rc = bnxt_hwrm_func_drv_unrgtr(bp); if (rc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 89f046ce1373..7260b4671ecc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -164,9 +164,8 @@ static int bnxt_set_coalesce(struct net_device *dev, reset_coalesce: if (test_bit(BNXT_STATE_OPEN, 
&bp->state)) { if (update_stats) { - rc = bnxt_close_nic(bp, true, false); - if (!rc) - rc = bnxt_open_nic(bp, true, false); + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); } else { rc = bnxt_hwrm_set_coal(bp); } @@ -956,12 +955,7 @@ static int bnxt_set_channels(struct net_device *dev, * before PF unload */ } - rc = bnxt_close_nic(bp, true, false); - if (rc) { - netdev_err(bp->dev, "Set channel failure rc :%x\n", - rc); - return rc; - } + bnxt_close_nic(bp, true, false); } if (sh) { @@ -3634,12 +3628,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, bnxt_run_fw_tests(bp, test_mask, &test_results); } else { bnxt_ulp_stop(bp); - rc = bnxt_close_nic(bp, true, false); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - bnxt_ulp_start(bp, rc); - return; - } + bnxt_close_nic(bp, true, false); bnxt_run_fw_tests(bp, test_mask, &test_results); buf[BNXT_MACLPBK_TEST_IDX] = 1; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 4faaa9a50f4b..ae734314f8de 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -506,9 +506,8 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) if (netif_running(bp->dev)) { if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) { - rc = bnxt_close_nic(bp, false, false); - if (!rc) - rc = bnxt_open_nic(bp, false, false); + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); } else { bnxt_ptp_cfg_tstamp_filters(bp); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 1ae082eb9e90..c2a991308215 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2131,8 +2131,10 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) /* Note: if we ever change from DMA_TX_APPEND_CRC below we * will need to restore software padding of "runt" packets */ + len_stat |= DMA_TX_APPEND_CRC; + if (!i) { - len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + len_stat |= DMA_SOP; if (skb->ip_summed == CHECKSUM_PARTIAL) len_stat |= DMA_TX_DO_CSUM; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index f60a16de565e..0c694ab3c110 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6447,6 +6447,14 @@ static void tg3_dump_state(struct tg3 *tp) int i; u32 *regs; + /* If it is a PCI error, all registers will be 0xffff, + * we don't dump them out, just report the error and return + */ + if (tp->pdev->error_state != pci_channel_io_normal) { + netdev_err(tp->dev, "PCI channel ERROR!\n"); + return; + } + regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC); if (!regs) return; @@ -11184,7 +11192,8 @@ static void tg3_reset_task(struct work_struct *work) rtnl_lock(); tg3_full_lock(tp, 0); - if (tp->pcierr_recovery || !netif_running(tp->dev)) { + if (tp->pcierr_recovery || !netif_running(tp->dev) || + tp->pdev->error_state != pci_channel_io_normal) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); rtnl_unlock(); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b12152e2fca0..a9e4e6464a04 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1125,7 +1125,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, struct be_wrb_params *wrb_params) { - struct vlan_ethhdr 
*veh = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb); unsigned int eth_hdr_len; struct iphdr *ip; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index b58162ce81d8..de62eee58a00 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -509,8 +509,6 @@ static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch, memcpy(skb->data, fd_vaddr + fd_offset, fd_length); - dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); - return skb; } @@ -528,6 +526,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct dpaa2_eth_drv_stats *percpu_extras; struct device *dev = priv->net_dev->dev.parent; struct dpaa2_fas *fas; + bool recycle_rx_buf = false; void *buf_data; u32 status = 0; u32 xdp_act; @@ -560,6 +559,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, dma_unmap_page(dev, addr, priv->rx_buf_size, DMA_BIDIRECTIONAL); skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr); + } else { + recycle_rx_buf = true; } } else if (fd_format == dpaa2_fd_sg) { WARN_ON(priv->xdp_prog); @@ -607,6 +608,8 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, list_add_tail(&skb->list, ch->rx_list); + if (recycle_rx_buf) + dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd)); return; err_build_skb: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index eea7d7a07c00..59888826469b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -227,17 +227,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, struct ethtool_stats *stats, u64 *data) { - int i = 0; - int j, k, err; - int num_cnt; - union dpni_statistics dpni_stats; - u32 fcnt, bcnt; - u32 fcnt_rx_total = 0, fcnt_tx_total = 0; - u32 bcnt_rx_total = 0, bcnt_tx_total = 0; - u32 buf_cnt; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct dpaa2_eth_drv_stats *extras; - struct dpaa2_eth_ch_stats *ch_stats; + union dpni_statistics dpni_stats; int dpni_stats_page_size[DPNI_STATISTICS_CNT] = { sizeof(dpni_stats.page_0), sizeof(dpni_stats.page_1), @@ -247,6 +238,13 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, sizeof(dpni_stats.page_5), sizeof(dpni_stats.page_6), }; + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; + struct dpaa2_eth_ch_stats *ch_stats; + struct dpaa2_eth_drv_stats *extras; + int j, k, err, num_cnt, i = 0; + u32 fcnt, bcnt; + u32 buf_cnt; memset(data, 0, sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index c39b866e2582..16d3c3610720 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -139,7 +139,8 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block, err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, filter_block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), + dma_unmap_single(dev, acl_entry_cfg->key_iova, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err); @@ -181,8 +182,8 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block 
*block, err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, block->acl_id, acl_entry_cfg); - dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), - DMA_TO_DEVICE); + dma_unmap_single(dev, acl_entry_cfg->key_iova, + DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE, DMA_TO_DEVICE); if (err) { dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err); kfree(cmd_buff); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 2b5909fa93cf..b98ef4ba172f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1978,9 +1978,6 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, return notifier_from_errno(err); } -static struct notifier_block dpaa2_switch_port_switchdev_nb; -static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; - static int dpaa2_switch_port_bridge_join(struct net_device *netdev, struct net_device *upper_dev, struct netlink_ext_ack *extack) @@ -2023,9 +2020,7 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev, goto err_egress_flood; err = switchdev_bridge_port_offload(netdev, netdev, NULL, - &dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb, - false, extack); + NULL, NULL, false, extack); if (err) goto err_switchdev_offload; @@ -2059,9 +2054,7 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) { - switchdev_bridge_port_unoffload(netdev, NULL, - &dpaa2_switch_port_switchdev_nb, - &dpaa2_switch_port_switchdev_blocking_nb); + switchdev_bridge_port_unoffload(netdev, NULL, NULL, NULL); } static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 33226a22d8a4..6d1b76002282 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3541,31 +3541,26 @@ static int fec_set_features(struct net_device *netdev, return 0; } -static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) -{ - struct vlan_ethhdr *vhdr; - unsigned short vlan_TCI = 0; - - if (skb->protocol == htons(ETH_P_ALL)) { - vhdr = (struct vlan_ethhdr *)(skb->data); - vlan_TCI = ntohs(vhdr->h_vlan_TCI); - } - - return vlan_TCI; -} - static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, struct net_device *sb_dev) { struct fec_enet_private *fep = netdev_priv(ndev); - u16 vlan_tag; + u16 vlan_tag = 0; if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) return netdev_pick_tx(ndev, skb, NULL); - vlan_tag = fec_enet_get_raw_vlan_tci(skb); - if (!vlan_tag) + /* VLAN is present in the payload.*/ + if (eth_type_vlan(skb->protocol)) { + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); + + vlan_tag = ntohs(vhdr->h_vlan_TCI); + /* VLAN is present in the skb but not yet pushed in the payload.*/ + } else if (skb_vlan_tag_present(skb)) { + vlan_tag = skb->vlan_tci; + } else { return vlan_tag; + } return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 5ad22b815b2f..78d6752fe051 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1532,7 +1532,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, if (unlikely(rc < 0)) return rc; - vhdr = (struct 
vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b4157ff370a3..63d43ef86f9b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -104,12 +104,18 @@ static struct workqueue_struct *i40e_wq; static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f, struct net_device *netdev, int delta) { + struct netdev_hw_addr_list *ha_list; struct netdev_hw_addr *ha; if (!f || !netdev) return; - netdev_for_each_mc_addr(ha, netdev) { + if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr)) + ha_list = &netdev->uc; + else + ha_list = &netdev->mc; + + netdev_hw_addr_list_for_each(ha, ha_list) { if (ether_addr_equal(ha->addr, f->macaddr)) { ha->refcount += delta; if (ha->refcount <= 0) @@ -16444,6 +16450,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) return; i40e_reset_and_rebuild(pf, false, false); +#ifdef CONFIG_PCI_IOV + i40e_restore_all_vfs_msi_state(pdev); +#endif /* CONFIG_PCI_IOV */ } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6d26ee8eefae..94cf82668efa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2986,7 +2986,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, rc = skb_cow_head(skb, 0); if (rc < 0) return rc; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT); } else { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index cb925baf72ce..c7d761426d6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -152,6 +152,32 @@ void i40e_vc_notify_reset(struct i40e_pf *pf) (u8 *)&pfe, sizeof(struct virtchnl_pf_event)); } +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + u16 vf_id; + u16 pos; + + /* Continue only if this is a PF */ + if (!pdev->is_physfn) + return; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + struct pci_dev *vf_dev = NULL; + + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); + while ((vf_dev = pci_get_device(pdev->vendor, vf_id, vf_dev))) { + if (vf_dev->is_virtfn && vf_dev->physfn == pdev) + pci_restore_msi_state(vf_dev); + } + } +} +#endif /* CONFIG_PCI_IOV */ + /** * i40e_vc_notify_vf_reset * @vf: pointer to the VF structure @@ -3451,16 +3477,16 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf, bool found = false; int bkt; - if (!tc_filter->action) { + if (tc_filter->action != VIRTCHNL_ACTION_TC_REDIRECT) { dev_info(&pf->pdev->dev, - "VF %d: Currently ADq doesn't support Drop Action\n", - vf->vf_id); + "VF %d: ADQ doesn't support this action (%d)\n", + vf->vf_id, tc_filter->action); goto err; } /* action_meta is TC number here to which the filter is applied */ if (!tc_filter->action_meta || - tc_filter->action_meta > I40E_MAX_VF_VSI) { + tc_filter->action_meta > vf->num_tc) { dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n", vf->vf_id, tc_filter->action_meta); goto err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h 
index 358bbdb58795..bd497cc5303a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -135,6 +135,9 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable); void i40e_vc_notify_link_state(struct i40e_pf *pf); void i40e_vc_notify_reset(struct i40e_pf *pf); +#ifdef CONFIG_PCI_IOV +void i40e_restore_all_vfs_msi_state(struct pci_dev *pdev); +#endif /* CONFIG_PCI_IOV */ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, struct ifla_vf_stats *vf_stats); diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 7389855fa307..ee0871d92930 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -303,6 +303,7 @@ struct iavf_adapter { #define IAVF_FLAG_QUEUES_DISABLED BIT(17) #define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18) #define IAVF_FLAG_REINIT_MSIX_NEEDED BIT(20) +#define IAVF_FLAG_FDIR_ENABLED BIT(21) /* duplicates for common code */ #define IAVF_FLAG_DCB_ENABLED 0 /* flags for admin queue service task */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 31e02624aca4..f4ac2b164b3e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1063,7 +1063,7 @@ iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter, struct iavf_fdir_fltr *rule = NULL; int ret = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1205,7 +1205,7 @@ iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd, unsigned int cnt = 0; int val = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; cmd->data = IAVF_MAX_FDIR_FILTERS; @@ -1397,7 +1397,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx int count = 50; int err; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; if (fsp->flow_type & FLOW_MAC_EXT) @@ -1438,12 +1438,16 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_lock_bh(&adapter->fdir_fltr_lock); iavf_fdir_list_add_fltr(adapter, fltr); adapter->fdir_active_fltr++; - fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; - adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + if (adapter->link_up) { + fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } else { + fltr->state = IAVF_FDIR_FLTR_INACTIVE; + } spin_unlock_bh(&adapter->fdir_fltr_lock); - mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); - + if (adapter->link_up) + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) kfree(fltr); @@ -1465,7 +1469,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx struct iavf_fdir_fltr *fltr = NULL; int err = 0; - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) return -EOPNOTSUPP; spin_lock_bh(&adapter->fdir_fltr_lock); @@ -1474,6 +1478,11 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) { fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST; adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + } else if (fltr->state == IAVF_FDIR_FLTR_INACTIVE) { + list_del(&fltr->list); + kfree(fltr); + 
adapter->fdir_active_fltr--; + fltr = NULL; } else { err = -EBUSY; } @@ -1782,7 +1791,7 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - if (!FDIR_FLTR_SUPPORT(adapter)) + if (!(adapter->flags & IAVF_FLAG_FDIR_ENABLED)) break; spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 9eb9f73f6adf..d31bd923ba8c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -6,12 +6,25 @@ struct iavf_adapter; -/* State of Flow Director filter */ +/* State of Flow Director filter + * + * *_REQUEST states are used to mark filter to be sent to PF driver to perform + * an action (either add or delete filter). *_PENDING states are an indication + * that request was sent to PF and the driver is waiting for response. + * + * Both DELETE and DISABLE states are being used to delete a filter in PF. + * The difference is that after a successful response filter in DEL_PENDING + * state is being deleted from VF driver as well and filter in DIS_PENDING state + * is being changed to INACTIVE state. + */ enum iavf_fdir_fltr_state_t { IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */ IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */ IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */ IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */ + IAVF_FDIR_FLTR_DIS_REQUEST, /* Filter scheduled to be disabled */ + IAVF_FDIR_FLTR_DIS_PENDING, /* Filter pending disable by the PF */ + IAVF_FDIR_FLTR_INACTIVE, /* Filter inactive on link down */ IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */ }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 4836bac2bd09..b9c4b311cd62 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1368,18 +1368,20 @@ static void iavf_clear_cloud_filters(struct iavf_adapter *adapter) **/ static void iavf_clear_fdir_filters(struct iavf_adapter *adapter) { - struct iavf_fdir_fltr *fdir, *fdirtmp; + struct iavf_fdir_fltr *fdir; /* remove all Flow Director filters */ spin_lock_bh(&adapter->fdir_fltr_lock); - list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, - list) { + list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) { - list_del(&fdir->list); - kfree(fdir); - adapter->fdir_active_fltr--; - } else { - fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + /* Cancel a request, keep filter as inactive */ + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Disable filters which are active or have a pending + * request to PF to be added + */ + fdir->state = IAVF_FDIR_FLTR_DIS_REQUEST; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -4210,6 +4212,33 @@ static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type, } } +/** + * iavf_restore_fdir_filters + * @adapter: board private structure + * + * Restore existing FDIR filters when VF netdev comes back up. 
+ **/ +static void iavf_restore_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry(f, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + /* Cancel a request, keep filter as active */ + f->state = IAVF_FDIR_FLTR_ACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DIS_PENDING || + f->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Add filters which are inactive or have a pending + * request to PF to be deleted + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); +} + /** * iavf_open - Called when a network interface is made active * @netdev: network interface device structure @@ -4277,8 +4306,9 @@ static int iavf_open(struct net_device *netdev) spin_unlock_bh(&adapter->mac_vlan_list_lock); - /* Restore VLAN filters that were removed with IFF_DOWN */ + /* Restore filters that were removed with IFF_DOWN */ iavf_restore_filters(adapter); + iavf_restore_fdir_filters(adapter); iavf_configure(adapter); @@ -4415,6 +4445,49 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) return ret; } +/** + * iavf_disable_fdir - disable Flow Director and clear existing filters + * @adapter: board private structure + **/ +static void iavf_disable_fdir(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *fdir, *fdirtmp; + bool del_filters = false; + + adapter->flags &= ~IAVF_FLAG_FDIR_ENABLED; + + /* remove all Flow Director filters */ + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, + list) { + if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST || + fdir->state == IAVF_FDIR_FLTR_INACTIVE) { + /* Delete filters not registered in PF */ + list_del(&fdir->list); + kfree(fdir); + adapter->fdir_active_fltr--; + } else if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST || + fdir->state == IAVF_FDIR_FLTR_ACTIVE) { + /* Filters registered in PF, schedule their deletion */ + fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST; + del_filters = true; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Request to delete filter already sent to PF, change + * state to DEL_PENDING to delete filter after PF's + * response, not set as INACTIVE + */ + fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (del_filters) { + adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER; + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); + } +} + #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_HW_VLAN_STAG_RX | \ @@ -4437,6 +4510,13 @@ static int iavf_set_features(struct net_device *netdev, iavf_set_vlan_offload_features(adapter, netdev->features, features); + if ((netdev->features & NETIF_F_NTUPLE) ^ (features & NETIF_F_NTUPLE)) { + if (features & NETIF_F_NTUPLE) + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + else + iavf_disable_fdir(adapter); + } + return 0; } @@ -4732,6 +4812,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev, { struct iavf_adapter *adapter = netdev_priv(netdev); + if (!FDIR_FLTR_SUPPORT(adapter)) + features &= ~NETIF_F_NTUPLE; + return iavf_fix_netdev_vlan_features(adapter, features); } @@ -4849,6 +4932,12 @@ int iavf_process_config(struct iavf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if 
(FDIR_FLTR_SUPPORT(adapter)) { + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->features |= NETIF_F_NTUPLE; + adapter->flags |= IAVF_FLAG_FDIR_ENABLED; + } + netdev->priv_flags |= IFF_UNICAST_FLT; /* Do not turn on offloads when they are requested to be turned off. diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 5a66b05c0322..951ef350323a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1752,8 +1752,8 @@ void iavf_add_fdir_filter(struct iavf_adapter *adapter) **/ void iavf_del_fdir_filter(struct iavf_adapter *adapter) { + struct virtchnl_fdir_del f = {}; struct iavf_fdir_fltr *fdir; - struct virtchnl_fdir_del f; bool process_fltr = false; int len; @@ -1770,11 +1770,16 @@ void iavf_del_fdir_filter(struct iavf_adapter *adapter) list_for_each_entry(fdir, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) { process_fltr = true; - memset(&f, 0, len); f.vsi_id = fdir->vc_add_msg.vsi_id; f.flow_id = fdir->flow_id; fdir->state = IAVF_FDIR_FLTR_DEL_PENDING; break; + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_REQUEST) { + process_fltr = true; + f.vsi_id = fdir->vc_add_msg.vsi_id; + f.flow_id = fdir->flow_id; + fdir->state = IAVF_FDIR_FLTR_DIS_PENDING; + break; } } spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1918,6 +1923,48 @@ static void iavf_netdev_features_vlan_strip_set(struct net_device *netdev, netdev->features &= ~NETIF_F_HW_VLAN_CTAG_RX; } +/** + * iavf_activate_fdir_filters - Reactivate all FDIR filters after a reset + * @adapter: private adapter structure + * + * Called after a reset to re-add all FDIR filters and delete some of them + * if they were pending to be deleted. + */ +static void iavf_activate_fdir_filters(struct iavf_adapter *adapter) +{ + struct iavf_fdir_fltr *f, *ftmp; + bool add_filters = false; + + spin_lock_bh(&adapter->fdir_fltr_lock); + list_for_each_entry_safe(f, ftmp, &adapter->fdir_list_head, list) { + if (f->state == IAVF_FDIR_FLTR_ADD_REQUEST || + f->state == IAVF_FDIR_FLTR_ADD_PENDING || + f->state == IAVF_FDIR_FLTR_ACTIVE) { + /* All filters and requests have been removed in PF, + * restore them + */ + f->state = IAVF_FDIR_FLTR_ADD_REQUEST; + add_filters = true; + } else if (f->state == IAVF_FDIR_FLTR_DIS_REQUEST || + f->state == IAVF_FDIR_FLTR_DIS_PENDING) { + /* Link down state, leave filters as inactive */ + f->state = IAVF_FDIR_FLTR_INACTIVE; + } else if (f->state == IAVF_FDIR_FLTR_DEL_REQUEST || + f->state == IAVF_FDIR_FLTR_DEL_PENDING) { + /* Delete filters that were pending to be deleted, the + * list on PF is already cleared after a reset + */ + list_del(&f->list); + kfree(f); + adapter->fdir_active_fltr--; + } + } + spin_unlock_bh(&adapter->fdir_fltr_lock); + + if (add_filters) + adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; +} + /** * iavf_virtchnl_completion * @adapter: adapter structure @@ -2095,7 +2142,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(fdir, &adapter->fdir_list_head, list) { - if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { + if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING || + fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { fdir->state = IAVF_FDIR_FLTR_ACTIVE; dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n", iavf_stat_str(&adapter->hw, @@ -2232,6 +2280,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, 
spin_unlock_bh(&adapter->mac_vlan_list_lock); + iavf_activate_fdir_filters(adapter); + iavf_parse_vf_resource_msg(adapter); /* negotiated VIRTCHNL_VF_OFFLOAD_VLAN_V2, so wait for the @@ -2421,7 +2471,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head, list) { if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) { - if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n", fdir->loc); list_del(&fdir->list); @@ -2433,6 +2485,17 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, del_fltr->status); iavf_print_fdir_fltr(adapter, fdir); } + } else if (fdir->state == IAVF_FDIR_FLTR_DIS_PENDING) { + if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS || + del_fltr->status == + VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) { + fdir->state = IAVF_FDIR_FLTR_INACTIVE; + } else { + fdir->state = IAVF_FDIR_FLTR_ACTIVE; + dev_info(&adapter->pdev->dev, "Failed to disable Flow Director filter with status: %d\n", + del_fltr->status); + iavf_print_fdir_fltr(adapter, fdir); + } } } spin_unlock_bh(&adapter->fdir_fltr_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f0f39364819a..ab46cfca4028 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2138,7 +2138,7 @@ static int ice_configure_phy(struct ice_vsi *vsi) /* Ensure we have media as we cannot configure a medialess port */ if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) - return -EPERM; + return -ENOMEDIUM; ice_print_topo_conflict(vsi); @@ -9065,8 +9065,14 @@ int ice_stop(struct net_device *netdev) int link_err = ice_force_phys_link_state(vsi, false); if (link_err) { - netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", - vsi->vsi_num, link_err); + if (link_err == -ENOMEDIUM) + netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", + vsi->vsi_num); + else + netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", + vsi->vsi_num, link_err); + + ice_vsi_close(vsi); return -EIO; } } diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 43c05b41627f..2a894ca49d93 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -538,6 +538,7 @@ struct igc_nfc_filter { u16 etype; __be16 vlan_etype; u16 vlan_tci; + u16 vlan_tci_mask; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; u8 user_data[8]; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 81897f7a90a9..2bee9cace598 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -957,6 +957,7 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev, } #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +#define VLAN_TCI_FULL_MASK ((__force __be16)~0) static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -979,10 +980,16 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; } + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_etype = rule->filter.vlan_etype; + fsp->m_ext.vlan_etype = ETHER_TYPE_FULL_MASK; + } 
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { fsp->flow_type |= FLOW_EXT; fsp->h_ext.vlan_tci = htons(rule->filter.vlan_tci); - fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + fsp->m_ext.vlan_tci = htons(rule->filter.vlan_tci_mask); } if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { @@ -1217,6 +1224,7 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { rule->filter.vlan_tci = ntohs(fsp->h_ext.vlan_tci); + rule->filter.vlan_tci_mask = ntohs(fsp->m_ext.vlan_tci); rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI; } @@ -1254,11 +1262,19 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); } - /* When multiple filter options or user data or vlan etype is set, use a - * flex filter. + /* The i225/i226 has various different filters. Flex filters provide a + * way to match up to the first 128 bytes of a packet. Use them for: + * a) For specific user data + * b) For VLAN EtherType + * c) For full TCI match + * d) Or in case multiple filter criteria are set + * + * Otherwise, use the simple MAC, VLAN PRIO or EtherType filters. */ if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + ((rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) && + rule->filter.vlan_tci_mask == ntohs(VLAN_TCI_FULL_MASK)) || (rule->filter.match_flags & (rule->filter.match_flags - 1))) rule->flex = true; else @@ -1328,6 +1344,26 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, return -EINVAL; } + /* There are two ways to match the VLAN TCI: + * 1. Match on PCP field and use vlan prio filter for it + * 2. Match on complete TCI field and use flex filter for it + */ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_tci && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK) && + fsp->m_ext.vlan_tci != VLAN_TCI_FULL_MASK) { + netdev_dbg(netdev, "VLAN mask not supported\n"); + return -EOPNOTSUPP; + } + + /* VLAN EtherType can only be matched by full mask. 
*/ + if ((fsp->flow_type & FLOW_EXT) && + fsp->m_ext.vlan_etype && + fsp->m_ext.vlan_etype != ETHER_TYPE_FULL_MASK) { + netdev_dbg(netdev, "VLAN EtherType mask not supported\n"); + return -EOPNOTSUPP; + } + if (fsp->location >= IGC_MAX_RXNFC_RULES) { netdev_dbg(netdev, "Invalid location\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 725db36e399d..31ea0781b65e 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -178,7 +178,7 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCC(i), tqavcc); wr32(IGC_TQAVHC(i), - 0x80000000 + ring->hicredit * 0x7735); + 0x80000000 + ring->hicredit * 0x7736); } else { /* Disable any CBS for the queue */ txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6105419ae2d5..9e0e13638c46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8822,7 +8822,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (skb_cow_head(skb, 0)) goto out_drop; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); } else { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 65c0373d34d1..90be87dc105d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -78,7 +78,7 @@ static bool is_dev_rpm(void *cgxd) bool is_lmac_valid(struct cgx *cgx, int lmac_id) { - if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX) + if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac) return false; return test_bit(lmac_id, &cgx->lmac_bmap); } @@ -90,7 +90,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) { int tmp, id = 0; - for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { if (tmp == lmac_id) break; id++; @@ -121,7 +121,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset) struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) { - if (!cgx || lmac_id >= MAX_LMAC_PER_CGX) + if (!cgx || lmac_id >= cgx->max_lmac_per_mac) return NULL; return cgx->lmac_idmap[lmac_id]; @@ -1410,7 +1410,7 @@ int cgx_get_fwdata_base(u64 *base) if (!cgx) return -ENXIO; - first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); + first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req); err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac); if (!err) @@ -1499,7 +1499,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) { - int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX); + int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac); u64 req = 0; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req); @@ -1537,7 +1537,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work) int i, err; /* Do Link up for all the enabled lmacs */ - for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { err = cgx_fwi_link_change(cgx, i, true); if (err) dev_info(dev, "cgx port %d:%d Link up command failed\n", @@ -1557,14 +1557,6 @@ int 
cgx_lmac_linkup_start(void *cgxd) return 0; } -static void cgx_lmac_get_fifolen(struct cgx *cgx) -{ - u64 cfg; - - cfg = cgx_read(cgx, 0, CGX_CONST); - cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); -} - static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac, int cnt, bool req_free) { @@ -1619,17 +1611,14 @@ static int cgx_lmac_init(struct cgx *cgx) u64 lmac_list; int i, err; - cgx_lmac_get_fifolen(cgx); - - cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx); /* lmac_list specifies which lmacs are enabled * when bit n is set to 1, LMAC[n] is enabled */ if (cgx->mac_ops->non_contiguous_serdes_lane) lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL; - if (cgx->lmac_count > MAX_LMAC_PER_CGX) - cgx->lmac_count = MAX_LMAC_PER_CGX; + if (cgx->lmac_count > cgx->max_lmac_per_mac) + cgx->lmac_count = cgx->max_lmac_per_mac; for (i = 0; i < cgx->lmac_count; i++) { lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL); @@ -1707,7 +1696,7 @@ static int cgx_lmac_exit(struct cgx *cgx) } /* Free all lmac related resources */ - for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) { lmac = cgx->lmac_idmap[i]; if (!lmac) continue; @@ -1723,6 +1712,12 @@ static int cgx_lmac_exit(struct cgx *cgx) static void cgx_populate_features(struct cgx *cgx) { + u64 cfg; + + cfg = cgx_read(cgx, 0, CGX_CONST); + cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg); + cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg); + if (is_dev_rpm(cgx)) cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM | RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 04338db38671..09ddb00f63cc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -18,11 +18,8 @@ /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 0 -#define CGX_ID_MASK 0x7 -#define MAX_LMAC_PER_CGX 4 +#define CGX_ID_MASK 0xF #define MAX_DMAC_ENTRIES_PER_CGX 32 -#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ -#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) /* Registers */ #define CGXX_CMRX_CFG 0x00 @@ -56,6 +53,7 @@ #define CGXX_SCRATCH1_REG 0x1058 #define CGX_CONST 0x2000 #define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0) +#define CGX_CONST_MAX_LMACS GENMASK_ULL(31, 24) #define CGXX_SPUX_CONTROL1 0x10000 #define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700 #define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800 diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 52b6016789fa..697cfec74aa1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -128,7 +128,10 @@ struct cgx { struct pci_dev *pdev; u8 cgx_id; u8 lmac_count; - struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + /* number of LMACs per MAC could be 4 or 8 */ + u8 max_lmac_per_mac; +#define MAX_LMAC_COUNT 8 + struct lmac *lmac_idmap[MAX_LMAC_COUNT]; struct work_struct cgx_cmd_work; struct workqueue_struct *cgx_cmd_workq; struct list_head cgx_list; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index d027c23b8ef8..aaff91bc7415 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -514,7 +514,7 @@ struct npc_lt_def { u8 ltype_mask; u8 ltype_match; u8 lid; -}; +} __packed; struct 
npc_lt_def_ipsec { u8 ltype_mask; @@ -522,7 +522,7 @@ struct npc_lt_def_ipsec { u8 lid; u8 spi_offset; u8 spi_nz; -}; +} __packed; struct npc_lt_def_apad { u8 ltype_mask; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index a70e1153fa04..6b4792a942d8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -283,6 +283,11 @@ void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable) cfg = FIELD_SET(RPM_PFC_CLASS_MASK, 0, cfg); rpm_write(rpm, lmac_id, RPMX_CMRX_PRT_CBFC_CTL, cfg); + /* Disable forward pause to driver */ + cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG); + cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD; + rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg); + /* Enable channel mask for all LMACS */ rpm_write(rpm, 0, RPMX_CMR_CHAN_MSK_OR, ~0ULL); } @@ -451,12 +456,10 @@ int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 p if (rx_pause) { cfg &= ~(RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } else { cfg |= (RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE | - RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_FWD); + RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE); } if (tx_pause) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 95a7bc396e8e..0b76dfa979d4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -480,7 +480,7 @@ struct rvu { u8 cgx_mapped_pfs; u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ - u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for + u64 *cgxlmac2pf_map; /* bitmap of mapped pfs for * every cgx lmac port */ unsigned long pf_notify_bmap; /* Flags for PF notification */ @@ -850,6 +850,7 @@ u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable); +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable); int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause, u16 pfc_en); int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index c60b9580ca96..bcb4385d0621 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature) return (cgx_features_get(cgxd) & feature); } +#define CGX_OFFSET(x) ((x) * rvu->hw->lmac_per_cgx) /* Returns bitmap of mapped PFs */ -static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) +static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) { return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } @@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) if (!pfmap) return -ENODEV; else - return find_first_bit(&pfmap, 16); + return find_first_bit(&pfmap, + rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); } static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) @@ -129,14 +131,14 @@ static int 
rvu_map_cgx_lmac_pf(struct rvu *rvu) if (!cgx_cnt_max) return 0; - if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF) + if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF) return -EINVAL; /* Alloc map table * An additional entry is required since PF id starts from 1 and * hence entry at offset 0 is invalid. */ - size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8); rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); if (!rvu->pf2cgxlmac_map) return -ENOMEM; @@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) memset(rvu->pf2cgxlmac_map, 0xFF, size); /* Reverse map table */ - rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, - cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16), - GFP_KERNEL); + rvu->cgxlmac2pf_map = + devm_kzalloc(rvu->dev, + cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64), + GFP_KERNEL); if (!rvu->cgxlmac2pf_map) return -ENOMEM; @@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) if (!rvu_cgx_pdata(cgx, rvu)) continue; lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); - for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu), iter); rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); @@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id); do { - pfid = find_first_bit(&pfmap, 16); + pfid = find_first_bit(&pfmap, + rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx); clear_bit(pfid, &pfmap); /* check if notification is enabled */ @@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu) if (!cgxd) continue; lmac_bmap = cgx_get_lmac_bmap(cgxd); - for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) { err = cgx_lmac_evh_register(&cb, cgxd, lmac); if (err) dev_err(rvu->dev, @@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu) if (!cgxd) continue; lmac_bmap = cgx_get_lmac_bmap(cgxd); - for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) + for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) cgx_lmac_evh_unregister(cgxd, lmac); } @@ -456,6 +460,23 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start); } +int rvu_cgx_tx_enable(struct rvu *rvu, u16 pcifunc, bool enable) +{ + int pf = rvu_get_pf(pcifunc); + struct mac_ops *mac_ops; + u8 cgx_id, lmac_id; + void *cgxd; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return LMAC_AF_ERR_PERM_DENIED; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgxd = rvu_cgx_pdata(cgx_id, rvu); + mac_ops = get_mac_ops(cgxd); + + return mac_ops->mac_tx_enable(cgxd, lmac_id, enable); +} + int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable) { struct mac_ops *mac_ops; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 5c9dc3f9262f..cc5d342e026c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2618,7 +2618,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) rvu->rvu_dbg.cgx = debugfs_create_dir(dname, rvu->rvu_dbg.cgx_root); - for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) { /* lmac debugfs dir */ sprintf(dname, "lmac%d", 
lmac_id); rvu->rvu_dbg.lmac = diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index d60951299899..b9a4efb95533 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -642,7 +642,7 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq"); if (!rvu_dl->devlink_wq) - goto err; + return -ENOMEM; INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work); INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work); @@ -650,9 +650,6 @@ static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl) INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work); return 0; -err: - rvu_nix_health_reporters_destroy(rvu_dl); - return -ENOMEM; } static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 959f36efdc4a..bb99302eab67 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -3923,90 +3923,18 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } -static int -nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, - u16 pcifunc, u64 tx_credits) -{ - struct rvu_hwinfo *hw = rvu->hw; - int pf = rvu_get_pf(pcifunc); - u8 cgx_id = 0, lmac_id = 0; - unsigned long poll_tmo; - bool restore_tx_en = 0; - struct nix_hw *nix_hw; - u64 cfg, sw_xoff = 0; - u32 schq = 0; - u32 credits; - int rc; - - nix_hw = get_nix_hw(rvu->hw, blkaddr); - if (!nix_hw) - return NIX_AF_ERR_INVALID_NIXBLK; - - if (tx_credits == nix_hw->tx_credits[link]) - return 0; - - /* Enable cgx tx if disabled for credits to be back */ - if (is_pf_cgxmapped(rvu, pf)) { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), - lmac_id, true); - } - - mutex_lock(&rvu->rsrc_lock); - /* Disable new traffic to link */ - if (hw->cap.nix_shaping) { - schq = nix_get_tx_link(rvu, pcifunc); - sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); - rvu_write64(rvu, blkaddr, - NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); - } - - rc = NIX_AF_ERR_LINK_CREDITS; - poll_tmo = jiffies + usecs_to_jiffies(200000); - /* Wait for credits to return */ - do { - if (time_after(jiffies, poll_tmo)) - goto exit; - usleep_range(100, 200); - - cfg = rvu_read64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link)); - credits = (cfg >> 12) & 0xFFFFFULL; - } while (credits != nix_hw->tx_credits[link]); - - cfg &= ~(0xFFFFFULL << 12); - cfg |= (tx_credits << 12); - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - rc = 0; - - nix_hw->tx_credits[link] = tx_credits; - -exit: - /* Enable traffic back */ - if (hw->cap.nix_shaping && !sw_xoff) - rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); - - /* Restore state of cgx tx */ - if (restore_tx_en) - rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); - - mutex_unlock(&rvu->rsrc_lock); - return rc; -} - int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; int pf = rvu_get_pf(pcifunc); - int blkaddr, schq, link = -1; - struct nix_txsch *txsch; - u64 cfg, lmac_fifo_len; + int blkaddr, link = -1; struct nix_hw *nix_hw; struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 
max_mtu; + u64 cfg; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -4027,25 +3955,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) return NIX_AF_ERR_FRS_INVALID; - /* Check if requester wants to update SMQ's */ - if (!req->update_smq) - goto rx_frscfg; - - /* Update min/maxlen in each of the SMQ attached to this PF/VF */ - txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; - mutex_lock(&rvu->rsrc_lock); - for (schq = 0; schq < txsch->schq.max; schq++) { - if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) - continue; - cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); - cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); - if (req->update_minlen) - cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); - rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); - } - mutex_unlock(&rvu->rsrc_lock); - -rx_frscfg: /* Check if config is for SDP link */ if (req->sdp_link) { if (!hw->sdp_links) @@ -4068,7 +3977,6 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - linkcfg: nix_find_link_frs(rvu, req, pcifunc); @@ -4078,19 +3986,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, cfg = (cfg & ~0xFFFFULL) | req->minlen; rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); - if (req->sdp_link || pf == 0) - return 0; - - /* Update transmit credits for CGX links */ - lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, lmac); - if (!lmac_fifo_len) { - dev_err(rvu->dev, - "%s: Failed to get CGX/RPM%d:LMAC%d FIFO size\n", - __func__, cgx, lmac); - return 0; - } - return nix_config_link_credits(rvu, blkaddr, link, pcifunc, - (lmac_fifo_len - req->maxlen) / 16); + return 0; } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -4183,7 +4079,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr, /* Get LMAC id's from bitmap */ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu)); - for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) { + for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) { lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter); if (!lmac_fifo_len) { dev_err(rvu->dev, @@ -4610,7 +4506,13 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); clear_bit(NIXLF_INITIALIZED, &pfvf->flags); - return rvu_cgx_start_stop_io(rvu, pcifunc, false); + err = rvu_cgx_start_stop_io(rvu, pcifunc, false); + if (err) + return err; + + rvu_cgx_tx_enable(rvu, pcifunc, true); + + return 0; } #define RX_SA_BASE GENMASK_ULL(52, 7) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index f65805860c8d..0bcf3e559280 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -671,6 +671,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int blkaddr, ucast_idx, index; struct nix_rx_action action = { 0 }; u64 relaxed_mask; + u8 flow_key_alg; if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc)) return; @@ -701,6 +702,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.op = NIX_RX_ACTIONOP_UCAST; } + flow_key_alg = action.flow_key_alg; + /* RX_ACTION set to MCAST for CGX PF's */ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list && is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { @@ -740,7 +743,7 @@ void 
rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, req.vf = pcifunc; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -854,6 +857,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u8 mac_addr[ETH_ALEN] = { 0 }; struct nix_rx_action action = { 0 }; struct rvu_pfvf *pfvf; + u8 flow_key_alg; u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ @@ -888,6 +892,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, *(u64 *)&action = npc_get_mcam_action(rvu, mcam, blkaddr, ucast_idx); + flow_key_alg = action.flow_key_alg; if (action.op != NIX_RX_ACTIONOP_RSS) { *(u64 *)&action = 0; action.op = NIX_RX_ACTIONOP_UCAST; @@ -924,7 +929,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, req.vf = pcifunc | vf_func; req.index = action.index; req.match_id = action.match_id; - req.flow_key_alg = action.flow_key_alg; + req.flow_key_alg = flow_key_alg; rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); } @@ -990,11 +995,38 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, mutex_unlock(&mcam->lock); } +static void npc_update_rx_action_with_alg_idx(struct rvu *rvu, struct nix_rx_action action, + struct rvu_pfvf *pfvf, int mcam_index, int blkaddr, + int alg_idx) + +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + struct rvu_hwinfo *hw = rvu->hw; + int bank, op_rss; + + if (!is_mcam_entry_enabled(rvu, mcam, blkaddr, mcam_index)) + return; + + op_rss = (!hw->cap.nix_rx_multicast || !pfvf->use_mce_list); + + bank = npc_get_bank(mcam, mcam_index); + mcam_index &= (mcam->banksize - 1); + + /* If Rx action is MCAST update only RSS algorithm index */ + if (!op_rss) { + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank)); + + action.flow_key_alg = alg_idx; + } + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(mcam_index, bank), *(u64 *)&action); +} + void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index) { struct npc_mcam *mcam = &rvu->hw->mcam; - struct rvu_hwinfo *hw = rvu->hw; struct nix_rx_action action; int blkaddr, index, bank; struct rvu_pfvf *pfvf; @@ -1050,15 +1082,16 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, /* If PF's promiscuous entry is enabled, * Set RSS action for that entry as well */ - if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) && - is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { - bank = npc_get_bank(mcam, index); - index &= (mcam->banksize - 1); + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); - rvu_write64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank), - *(u64 *)&action); - } + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_ALLMULTI_ENTRY); + /* If PF's allmulti entry is enabled, + * Set RSS action for that entry as well + */ + npc_update_rx_action_with_alg_idx(rvu, action, pfvf, index, blkaddr, + alg_idx); } void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c index 34fa59575fa9..54e0dfdc9d98 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c @@ -1999,7 +1999,9 @@ int 
rvu_npc_exact_init(struct rvu *rvu) /* Install SDP drop rule */ drop_mcam_idx = &table->num_drop_rules; - max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE; + max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx + + PF_CGXMAP_BASE; + for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) { if (rvu->pf2cgxlmac_map[i] == 0xFF) continue; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index bfddbff7bcdf..28fb643d2917 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -399,9 +399,10 @@ static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct otx2_nic *pfvf = netdev_priv(dev); + u8 old_pfc_en; int err; - /* Save PFC configuration to interface */ + old_pfc_en = pfvf->pfc_en; pfvf->pfc_en = pfc->pfc_en; if (pfvf->hw.tx_queues >= NIX_PF_PFC_PRIO_MAX) @@ -411,13 +412,17 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) * supported by the tx queue configuration */ err = otx2_check_pfc_config(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } process_pfc: err = otx2_config_priority_flow_ctrl(pfvf); - if (err) + if (err) { + pfvf->pfc_en = old_pfc_en; return err; + } /* Request Per channel Bpids */ if (pfc->pfc_en) @@ -425,6 +430,12 @@ static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) err = otx2_pfc_txschq_update(pfvf); if (err) { + if (pfc->pfc_en) + otx2_nix_config_bp(pfvf, false); + + otx2_pfc_txschq_stop(pfvf); + pfvf->pfc_en = old_pfc_en; + otx2_config_priority_flow_ctrl(pfvf); dev_err(pfvf->dev, "%s failed to update TX schedulers\n", __func__); return err; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 55807e2043ed..a2d8ac620405 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1638,6 +1638,21 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) mutex_unlock(&mbox->lock); } +static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf) +{ + int vf; + + /* The AF driver will determine whether to allow the VF netdev or not */ + if (is_otx2_vf(pfvf->pcifunc)) + return true; + + /* check if there are any trusted VFs associated with the PF netdev */ + for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++) + if (pfvf->vf_configs[vf].trusted) + return true; + return false; +} + static void otx2_do_set_rx_mode(struct otx2_nic *pf) { struct net_device *netdev = pf->netdev; @@ -1670,7 +1685,8 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST)) req->mode |= NIX_RX_MODE_ALLMULTI; - req->mode |= NIX_RX_MODE_USE_MCE; + if (otx2_promisc_use_mce_list(pf)) + req->mode |= NIX_RX_MODE_USE_MCE; otx2_sync_mbox_msg(&pf->mbox); mutex_unlock(&pf->mbox.lock); @@ -2634,11 +2650,14 @@ static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf, pf->vf_configs[vf].trusted = enable; rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF); - if (rc) + if (rc) { pf->vf_configs[vf].trusted = !enable; - else + } else { netdev_info(pf->netdev, "VF %d is %strusted\n", vf, enable ? 
"" : "not "); + otx2_set_rx_mode(netdev); + } + return rc; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index b3253e263ebc..ac6a0785b10d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -48,6 +48,25 @@ #define CREATE_TRACE_POINTS #include "diag/cmd_tracepoint.h" +struct mlx5_ifc_mbox_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_mbox_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + enum { CMD_IF_REV = 5, }; @@ -71,6 +90,26 @@ enum { MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, }; +static u16 in_to_opcode(void *in) +{ + return MLX5_GET(mbox_in, in, opcode); +} + +/* Returns true for opcodes that might be triggered very frequently and throttle + * the command interface. Limit their command slots usage. + */ +static bool mlx5_cmd_is_throttle_opcode(u16 op) +{ + switch (op) { + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: + case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: + case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: + return true; + } + return false; +} + static struct mlx5_cmd_work_ent * cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, struct mlx5_cmd_msg *out, void *uout, int uout_size, @@ -92,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, ent->context = context; ent->cmd = cmd; ent->page_queue = page_queue; + ent->op = in_to_opcode(in->first.data); refcount_set(&ent->refcnt, 1); return ent; @@ -116,24 +156,27 @@ static u8 alloc_token(struct mlx5_cmd *cmd) return token; } -static int cmd_alloc_index(struct mlx5_cmd *cmd) +static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent) { unsigned long flags; int ret; spin_lock_irqsave(&cmd->alloc_lock, flags); - ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); - if (ret < cmd->max_reg_cmds) - clear_bit(ret, &cmd->bitmask); + ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); + if (ret < cmd->vars.max_reg_cmds) { + clear_bit(ret, &cmd->vars.bitmask); + ent->idx = ret; + cmd->ent_arr[ent->idx] = ent; + } spin_unlock_irqrestore(&cmd->alloc_lock, flags); - return ret < cmd->max_reg_cmds ? ret : -ENOMEM; + return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM; } static void cmd_free_index(struct mlx5_cmd *cmd, int idx) { lockdep_assert_held(&cmd->alloc_lock); - set_bit(idx, &cmd->bitmask); + set_bit(idx, &cmd->vars.bitmask); } static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) @@ -152,7 +195,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) if (ent->idx >= 0) { cmd_free_index(cmd, ent->idx); - up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); + up(ent->page_queue ? 
&cmd->vars.pages_sem : &cmd->vars.sem); } cmd_free_ent(ent); @@ -162,7 +205,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) { - return cmd->cmd_buf + (idx << cmd->log_stride); + return cmd->cmd_buf + (idx << cmd->vars.log_stride); } static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) @@ -753,25 +796,6 @@ static int cmd_status_to_err(u8 status) } } -struct mlx5_ifc_mbox_out_bits { - u8 status[0x8]; - u8 reserved_at_8[0x18]; - - u8 syndrome[0x20]; - - u8 reserved_at_40[0x40]; -}; - -struct mlx5_ifc_mbox_in_bits { - u8 opcode[0x10]; - u8 uid[0x10]; - - u8 reserved_at_20[0x10]; - u8 op_mod[0x10]; - - u8 reserved_at_40[0x40]; -}; - void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) { u32 syndrome = MLX5_GET(mbox_out, out, syndrome); @@ -789,7 +813,7 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) u16 opcode, op_mod; u16 uid; - opcode = MLX5_GET(mbox_in, in, opcode); + opcode = in_to_opcode(in); op_mod = MLX5_GET(mbox_in, in, op_mod); uid = MLX5_GET(mbox_in, in, uid); @@ -801,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) { /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ if (err == -ENXIO) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); + u16 opcode = in_to_opcode(in); u32 syndrome; u8 status; @@ -830,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent, int input) { struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; - u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); struct mlx5_cmd_mailbox *next = msg->next; int n = mlx5_calc_cmd_blocks(msg); + u16 op = ent->op; int data_only; u32 offset = 0; int dump_len; @@ -884,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev, mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); } -static u16 msg_to_opcode(struct mlx5_cmd_msg *in) -{ - return MLX5_GET(mbox_in, in->first.data, opcode); -} - static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); static void cb_timeout_handler(struct work_struct *work) @@ -906,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work) /* Maybe got handled by eq recover ? */ if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); goto out; /* phew, already handled */ } ent->ret = -ETIMEDOUT; mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", - ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + ent->idx, mlx5_command_str(ent->op), ent->op); mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); out: @@ -955,10 +974,10 @@ static void cmd_work_handler(struct work_struct *work) cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); complete(&ent->handling); - sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; + sem = ent->page_queue ? 
&cmd->vars.pages_sem : &cmd->vars.sem; down(sem); if (!ent->page_queue) { - alloc_ret = cmd_alloc_index(cmd); + alloc_ret = cmd_alloc_index(cmd, ent); if (alloc_ret < 0) { mlx5_core_err_rl(dev, "failed to allocate command entry\n"); if (ent->callback) { @@ -973,20 +992,18 @@ static void cmd_work_handler(struct work_struct *work) up(sem); return; } - ent->idx = alloc_ret; } else { - ent->idx = cmd->max_reg_cmds; + ent->idx = cmd->vars.max_reg_cmds; spin_lock_irqsave(&cmd->alloc_lock, flags); - clear_bit(ent->idx, &cmd->bitmask); + clear_bit(ent->idx, &cmd->vars.bitmask); + cmd->ent_arr[ent->idx] = ent; spin_unlock_irqrestore(&cmd->alloc_lock, flags); } - cmd->ent_arr[ent->idx] = ent; lay = get_inst(cmd, ent->idx); ent->lay = lay; memset(lay, 0, sizeof(*lay)); memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); - ent->op = be32_to_cpu(lay->in[0]) >> 16; if (ent->in->next) lay->in_ptr = cpu_to_be64(ent->in->next->dma); lay->inlen = cpu_to_be32(ent->in->len); @@ -1099,12 +1116,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, */ if (wait_for_completion_timeout(&ent->done, timeout)) { mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); return; } mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); ent->ret = -ETIMEDOUT; mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); @@ -1131,12 +1148,10 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) if (err == -ETIMEDOUT) { mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", - mlx5_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); } else if (err == -ECANCELED) { mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", - mlx5_command_str(msg_to_opcode(ent->in)), - msg_to_opcode(ent->in)); + mlx5_command_str(ent->op), ent->op); } mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err, deliv_status_to_str(ent->status), ent->status); @@ -1170,7 +1185,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, u8 status = 0; int err = 0; s64 ds; - u16 op; if (callback && page_queue) return -EINVAL; @@ -1210,9 +1224,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, goto out_free; ds = ent->ts2 - ent->ts1; - op = MLX5_GET(mbox_in, in->first.data, opcode); - if (op < MLX5_CMD_OP_MAX) { - stats = &cmd->stats[op]; + if (ent->op < MLX5_CMD_OP_MAX) { + stats = &cmd->stats[ent->op]; spin_lock_irq(&stats->lock); stats->sum += ds; ++stats->n; @@ -1220,7 +1233,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, } mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, "fw exec time for %s is %lld nsec\n", - mlx5_command_str(op), ds); + mlx5_command_str(ent->op), ds); out_free: status = ent->status; @@ -1558,15 +1571,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - down(&cmd->pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + down(&cmd->vars.sem); + down(&cmd->vars.pages_sem); cmd->allowed_opcode = opcode; - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < 
cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) @@ -1574,15 +1587,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - down(&cmd->sem); - down(&cmd->pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + down(&cmd->vars.sem); + down(&cmd->vars.pages_sem); cmd->mode = mode; - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static int cmd_comp_notifier(struct notifier_block *nb, @@ -1641,7 +1654,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force /* there can be at most 32 command queues */ vector = vec & 0xffffffff; - for (i = 0; i < (1 << cmd->log_sz); i++) { + for (i = 0; i < (1 << cmd->vars.log_sz); i++) { if (test_bit(i, &vector)) { ent = cmd->ent_arr[i]; @@ -1730,7 +1743,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) /* wait for pending handlers to complete */ mlx5_eq_synchronize_cmd_irq(dev); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); - vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); + vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1); if (!vector) goto no_trig; @@ -1739,14 +1752,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) * to guarantee pending commands will not get freed in the meanwhile. * For that reason, it also has to be done inside the alloc_lock. */ - for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz)) cmd_ent_get(cmd->ent_arr[i]); vector |= MLX5_TRIGGERED_CMD_COMP; spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); mlx5_cmd_comp_handler(dev, vector, true); - for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) + for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz)) cmd_ent_put(cmd->ent_arr[i]); return; @@ -1759,22 +1772,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) { - while (down_trylock(&cmd->sem)) { + for (i = 0; i < cmd->vars.max_reg_cmds; i++) { + while (down_trylock(&cmd->vars.sem)) { mlx5_cmd_trigger_completions(dev); cond_resched(); } } - while (down_trylock(&cmd->pages_sem)) { + while (down_trylock(&cmd->vars.pages_sem)) { mlx5_cmd_trigger_completions(dev); cond_resched(); } /* Unlock cmdif */ - up(&cmd->pages_sem); - for (i = 0; i < cmd->max_reg_cmds; i++) - up(&cmd->sem); + up(&cmd->vars.pages_sem); + for (i = 0; i < cmd->vars.max_reg_cmds; i++) + up(&cmd->vars.sem); } static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, @@ -1817,7 +1830,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, static int is_manage_pages(void *in) { - return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; + return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; } /* Notes: @@ -1828,8 +1841,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size, mlx5_cmd_cbk_t callback, void *context, bool force_polling) { - u16 opcode = MLX5_GET(mbox_in, in, opcode); struct mlx5_cmd_msg *inb, *outb; + u16 opcode = in_to_opcode(in); + bool throttle_op; int pages_queue; gfp_t gfp; u8 token; @@ -1838,13 +1852,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, 
void *in, int in_size, void *out, if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) return -ENXIO; + throttle_op = mlx5_cmd_is_throttle_opcode(opcode); + if (throttle_op) { + /* atomic context may not sleep */ + if (callback) + return -EINVAL; + down(&dev->cmd.vars.throttle_sem); + } + pages_queue = is_manage_pages(in); gfp = callback ? GFP_ATOMIC : GFP_KERNEL; inb = alloc_msg(dev, in_size, gfp); if (IS_ERR(inb)) { err = PTR_ERR(inb); - return err; + goto out_up; } token = alloc_token(&dev->cmd); @@ -1878,6 +1900,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, mlx5_free_cmd_msg(dev, outb); out_in: free_msg(dev, inb); +out_up: + if (throttle_op) + up(&dev->cmd.vars.throttle_sem); return err; } @@ -1952,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); - u16 opcode = MLX5_GET(mbox_in, in, opcode); u16 op_mod = MLX5_GET(mbox_in, in, op_mod); + u16 opcode = in_to_opcode(in); return cmd_status_err(dev, err, opcode, op_mod, out); } @@ -1998,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) { int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); - u16 opcode = MLX5_GET(mbox_in, in, opcode); u16 op_mod = MLX5_GET(mbox_in, in, op_mod); + u16 opcode = in_to_opcode(in); err = cmd_status_err(dev, err, opcode, op_mod, out); return mlx5_cmd_check(dev, err, in, out); @@ -2051,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, work->ctx = ctx; work->user_callback = callback; - work->opcode = MLX5_GET(mbox_in, in, opcode); + work->opcode = in_to_opcode(in); work->op_mod = MLX5_GET(mbox_in, in, op_mod); work->out = out; if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) @@ -2187,16 +2212,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) goto err_free_pool; cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; - cmd->log_sz = cmd_l >> 4 & 0xf; - cmd->log_stride = cmd_l & 0xf; - if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { + cmd->vars.log_sz = cmd_l >> 4 & 0xf; + cmd->vars.log_stride = cmd_l & 0xf; + if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) { mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n", - 1 << cmd->log_sz); + 1 << cmd->vars.log_sz); err = -EINVAL; goto err_free_page; } - if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { + if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) { mlx5_core_err(dev, "command queue size overflow\n"); err = -EINVAL; goto err_free_page; @@ -2204,13 +2229,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) cmd->state = MLX5_CMDIF_STATE_DOWN; cmd->checksum_disabled = 1; - cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; - cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; + cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1; + cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1; - cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; - if (cmd->cmdif_rev > CMD_IF_REV) { + cmd->vars.cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; + if (cmd->vars.cmdif_rev > CMD_IF_REV) { mlx5_core_err(dev, "driver does not support command interface version. 
driver %d, firmware %d\n", - CMD_IF_REV, cmd->cmdif_rev); + CMD_IF_REV, cmd->vars.cmdif_rev); err = -EOPNOTSUPP; goto err_free_page; } @@ -2220,8 +2245,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev) for (i = 0; i < MLX5_CMD_OP_MAX; i++) spin_lock_init(&cmd->stats[i].lock); - sema_init(&cmd->sem, cmd->max_reg_cmds); - sema_init(&cmd->pages_sem, 1); + sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds); + sema_init(&cmd->vars.pages_sem, 1); + sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2)); cmd_h = (u32)((u64)(cmd->dma) >> 32); cmd_l = (u32)(cmd->dma); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index bb95b40d25eb..e0b0729e238c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count, int ret; cmd = filp->private_data; - weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds); - field = cmd->max_reg_cmds - weight; + weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds); + field = cmd->vars.max_reg_cmds - weight; ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field); return simple_read_from_buffer(buf, count, pos, tbuf, ret); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index 374c0011a127..3ba54ffa54bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -691,7 +691,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) while (block_timestamp > tracer->last_timestamp) { /* Check block override if it's not the first block */ - if (!tracer->last_timestamp) { + if (tracer->last_timestamp) { u64 *ts_event; /* To avoid block override be the HW in case of buffer * wraparound, the time stamp of the previous block diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index bc76fe6b0623..0ee456480a48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -847,6 +847,7 @@ enum { MLX5E_STATE_DESTROYING, MLX5E_STATE_XDP_TX_ENABLED, MLX5E_STATE_XDP_ACTIVE, + MLX5E_STATE_CHANNELS_ACTIVE, }; struct mlx5e_modify_sq_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index be83ad9db82a..e1283531e0b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -154,6 +154,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty in = kvzalloc(inlen, GFP_KERNEL); if (!in || !ft->g) { kfree(ft->g); + ft->g = NULL; kvfree(in); return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 4db0483c066a..83bb0811e774 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -300,6 +300,9 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, if (err) goto destroy_neigh_entry; + e->encap_size = ipv4_encap_size; + e->encap_header = encap_header; + if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event @@ -319,8 +322,6 @@ int 
mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, goto destroy_neigh_entry; } - e->encap_size = ipv4_encap_size; - e->encap_header = encap_header; e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); mlx5e_route_lookup_ipv4_put(&attr); @@ -403,12 +404,16 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, if (err) goto free_encap; + e->encap_size = ipv4_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event * and not used before that. */ - goto free_encap; + goto release_neigh; } memset(&reformat_params, 0, sizeof(reformat_params)); @@ -422,10 +427,6 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv, goto free_encap; } - e->encap_size = ipv4_encap_size; - kfree(e->encap_header); - e->encap_header = encap_header; - e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); mlx5e_route_lookup_ipv4_put(&attr); @@ -567,6 +568,9 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, if (err) goto destroy_neigh_entry; + e->encap_size = ipv6_encap_size; + e->encap_header = encap_header; + if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event @@ -586,8 +590,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, goto destroy_neigh_entry; } - e->encap_size = ipv6_encap_size; - e->encap_header = encap_header; e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); mlx5e_route_lookup_ipv6_put(&attr); @@ -669,12 +671,16 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, if (err) goto free_encap; + e->encap_size = ipv6_encap_size; + kfree(e->encap_header); + e->encap_header = encap_header; + if (!(nud_state & NUD_VALID)) { neigh_event_send(attr.n, NULL); /* the encap entry will be made valid on neigh update event * and not used before that. 
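* (At this point the encap entry already owns encap_header: the old buffer
* was freed and the new one assigned just above, so the early exit below has
* to use release_neigh; going through free_encap would free memory that
* e->encap_header still points to.)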
*/ - goto free_encap; + goto release_neigh; } memset(&reformat_params, 0, sizeof(reformat_params)); @@ -688,10 +694,6 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv, goto free_encap; } - e->encap_size = ipv6_encap_size; - kfree(e->encap_header); - e->encap_header = encap_header; - e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev)); mlx5e_route_lookup_ipv6_put(&attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index eeba91d9c521..ceeb23f478e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -49,7 +49,7 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); - if (count == sizeof(drvinfo->fw_version)) + if (count >= sizeof(drvinfo->fw_version)) snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 42e6f2fcf5f5..9910a0480f58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2586,6 +2586,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) { int i; + ASSERT_RTNL(); if (chs->ptp) { mlx5e_ptp_close(chs->ptp); chs->ptp = NULL; @@ -2865,17 +2866,29 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) if (mlx5e_is_vport_rep(priv)) mlx5e_rep_activate_channels(priv); + set_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state); + mlx5e_wait_channels_min_rx_wqes(&priv->channels); if (priv->rx_res) mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels); } +static void mlx5e_cancel_tx_timeout_work(struct mlx5e_priv *priv) +{ + WARN_ON_ONCE(test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)); + if (current_work() != &priv->tx_timeout_work) + cancel_work_sync(&priv->tx_timeout_work); +} + void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { if (priv->rx_res) mlx5e_rx_res_channels_deactivate(priv->rx_res); + clear_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state); + mlx5e_cancel_tx_timeout_work(priv); + if (mlx5e_is_vport_rep(priv)) mlx5e_rep_deactivate_channels(priv); @@ -4617,8 +4630,17 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) struct net_device *netdev = priv->netdev; int i; - rtnl_lock(); - mutex_lock(&priv->state_lock); + /* Take rtnl_lock to ensure no change in netdev->real_num_tx_queues + * through this flow. However, channel closing flows have to wait for + * this work to finish while holding rtnl lock too. So either get the + * lock or find that channels are being closed for other reason and + * this work is not relevant anymore. 
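+ * Termination relies on the closing side clearing
+ * MLX5E_STATE_CHANNELS_ACTIVE while still holding the rtnl lock and only
+ * then waiting for this work, so every retry here either takes the lock
+ * or sees the bit cleared and returns.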
+ */ + while (!rtnl_trylock()) { + if (!test_bit(MLX5E_STATE_CHANNELS_ACTIVE, &priv->state)) + return; + msleep(20); + } if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; @@ -4637,7 +4659,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) } unlock: - mutex_unlock(&priv->state_lock); rtnl_unlock(); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2653cb96c310..5aeca9534f15 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -76,7 +76,7 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev, count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d (%.16s)", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id); - if (count == sizeof(drvinfo->fw_version)) + if (count >= sizeof(drvinfo->fw_version)) snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%04d", fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index d136360ac6a9..a6d3fc96e168 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -25,7 +25,7 @@ struct mlx5_irq { struct atomic_notifier_head nh; cpumask_var_t mask; - char name[MLX5_MAX_IRQ_NAME]; + char name[MLX5_MAX_IRQ_FORMATTED_NAME]; struct mlx5_irq_pool *pool; int refcount; u32 index; @@ -236,8 +236,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i, else irq_sf_set_name(pool, name, i); ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh); - snprintf(irq->name, MLX5_MAX_IRQ_NAME, - "%s@pci:%s", name, pci_name(dev->pdev)); + snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME, + MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev)); err = request_irq(irq->irqn, irq_int_handler, 0, irq->name, &irq->nh); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h index 5c7e68bee43a..4047179307c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.h @@ -7,6 +7,9 @@ #include #define MLX5_MAX_IRQ_NAME (32) +#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s") +#define MLX5_MAX_IRQ_FORMATTED_NAME \ + (MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR)) /* max irq_index is 2047, so four chars */ #define MLX5_MAX_IRQ_IDX_CHARS (4) #define MLX5_EQ_REFS_PER_IRQ (2) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d5c317325030..3f68e3198aa6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -277,7 +277,7 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, req_list_size = max_list_size; } - out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_in) + + out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) + req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout); out = kvzalloc(out_sz, GFP_KERNEL); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 2292d63a279c..83c4659390fd 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -130,9 +130,15 @@ static int mlxbf_gige_open(struct net_device *netdev) { struct mlxbf_gige *priv = netdev_priv(netdev); struct 
phy_device *phydev = netdev->phydev; + u64 control; u64 int_en; int err; + /* Perform general init of GigE block */ + control = readq(priv->base + MLXBF_GIGE_CONTROL); + control |= MLXBF_GIGE_CONTROL_PORT_EN; + writeq(control, priv->base + MLXBF_GIGE_CONTROL); + err = mlxbf_gige_request_irqs(priv); if (err) return err; @@ -147,14 +153,14 @@ static int mlxbf_gige_open(struct net_device *netdev) */ priv->valid_polarity = 0; - err = mlxbf_gige_rx_init(priv); - if (err) - goto free_irqs; + phy_start(phydev); + err = mlxbf_gige_tx_init(priv); if (err) - goto rx_deinit; - - phy_start(phydev); + goto free_irqs; + err = mlxbf_gige_rx_init(priv); + if (err) + goto tx_deinit; netif_napi_add(netdev, &priv->napi, mlxbf_gige_poll); napi_enable(&priv->napi); @@ -176,8 +182,8 @@ static int mlxbf_gige_open(struct net_device *netdev) return 0; -rx_deinit: - mlxbf_gige_rx_deinit(priv); +tx_deinit: + mlxbf_gige_tx_deinit(priv); free_irqs: mlxbf_gige_free_irqs(priv); @@ -279,7 +285,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev) void __iomem *plu_base; void __iomem *base; int addr, phy_irq; - u64 control; int err; base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC); @@ -294,11 +299,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev) if (IS_ERR(plu_base)) return PTR_ERR(plu_base); - /* Perform general init of GigE block */ - control = readq(base + MLXBF_GIGE_CONTROL); - control |= MLXBF_GIGE_CONTROL_PORT_EN; - writeq(control, base + MLXBF_GIGE_CONTROL); - netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv)); if (!netdev) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c index 0d5a41a2ae01..699984358493 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c @@ -142,6 +142,9 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv) writeq(MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS_EN, priv->base + MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS); + writeq(ilog2(priv->rx_q_entries), + priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2); + /* Clear MLXBF_GIGE_INT_MASK 'receive pkt' bit to * indicate readiness to receive interrupts */ @@ -154,9 +157,6 @@ int mlxbf_gige_rx_init(struct mlxbf_gige *priv) data |= MLXBF_GIGE_RX_DMA_EN; writeq(data, priv->base + MLXBF_GIGE_RX_DMA); - writeq(ilog2(priv->rx_q_entries), - priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2); - return 0; free_wqe_and_skb: @@ -267,6 +267,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) priv->stats.rx_truncate_errors++; } + /* Read receive consumer index before replenish so that this routine + * returns accurate return value even if packet is received into + * just-replenished buffer prior to exiting this routine. 
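+ * (The remainders computed from PI and CI below produce the boolean this
+ * routine returns, which the NAPI poll loop uses to decide whether more
+ * packets are still pending.)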
+ */ + rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); + rx_ci_rem = rx_ci % priv->rx_q_entries; + /* Let hardware know we've replenished one buffer */ rx_pi++; @@ -279,8 +286,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts) rx_pi_rem = rx_pi % priv->rx_q_entries; if (rx_pi_rem == 0) priv->valid_polarity ^= 1; - rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI); - rx_ci_rem = rx_ci % priv->rx_q_entries; if (skb) netif_receive_skb(skb); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 4c98950380d5..d231f4d2888b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -301,6 +301,7 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core, unsigned long *p_index) { unsigned int num_rows, entry_size; + unsigned long index; /* We only allow allocations of entire rows */ if (num_erps % erp_core->num_erp_banks != 0) @@ -309,10 +310,11 @@ mlxsw_sp_acl_erp_table_alloc(struct mlxsw_sp_acl_erp_core *erp_core, entry_size = erp_core->erpt_entries_size[region_type]; num_rows = num_erps / erp_core->num_erp_banks; - *p_index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size); - if (*p_index == 0) + index = gen_pool_alloc(erp_core->erp_tables, num_rows * entry_size); + if (!index) return -ENOBUFS; - *p_index -= MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; + + *p_index = index - MLXSW_SP_ACL_ERP_GENALLOC_OFFSET; return 0; } diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h index fecd43754cea..e5ec0a363aff 100644 --- a/drivers/net/ethernet/micrel/ks8851.h +++ b/drivers/net/ethernet/micrel/ks8851.h @@ -350,6 +350,8 @@ union ks8851_tx_hdr { * @rxd: Space for receiving SPI data, in DMA-able space. * @txd: Space for transmitting SPI data, in DMA-able space. * @msg_enable: The message flags controlling driver output (see ethtool). + * @tx_space: Free space in the hardware TX buffer (cached copy of KS_TXMIR). + * @queued_len: Space required in hardware TX buffer for queued packets in txq. * @fid: Incrementing frame id tag. * @rc_ier: Cached copy of KS_IER. * @rc_ccr: Cached copy of KS_CCR. 
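For orientation before the ks8851 hunks here and below: the driver stops charging the cached tx_space at queueing time and instead tracks queued_len, the bytes committed to txq but not yet written to the chip, and it wakes the queue from the TXI interrupt only after KS_TXMIR has been re-read. A standalone model of that accounting is sketched below; calc_txlen() mirrors the driver helper of the same name, while struct txq_state, try_queue() and the sizes used in main() are illustrative only, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define ALIGN4(x)	(((x) + 3u) & ~3u)

/* TXFIFO cost of one frame: 4-byte header, padded to a 4-byte boundary */
static unsigned int calc_txlen(unsigned int len)
{
	return ALIGN4(len + 4);
}

struct txq_state {
	unsigned int tx_space;	 /* cached free bytes in the HW TX FIFO (KS_TXMIR) */
	unsigned int queued_len; /* bytes committed to txq but not yet written out */
};

/* Accept a frame only if it fits on top of what is already queued;
 * the driver performs this check under ks->statelock in start_xmit.
 */
static bool try_queue(struct txq_state *s, unsigned int skb_len)
{
	unsigned int needed = calc_txlen(skb_len);

	if (s->queued_len + needed > s->tx_space)
		return false;		/* driver: netif_stop_queue(), NETDEV_TX_BUSY */

	s->queued_len += needed;	/* driver: skb_queue_tail(), schedule tx_work */
	return true;
}

int main(void)
{
	/* 4 KiB of FIFO reported free, one 1514-byte frame (1520 bytes of
	 * FIFO after calc_txlen) already sitting in txq.
	 */
	struct txq_state s = { .tx_space = 4096, .queued_len = calc_txlen(1514) };

	printf("2nd 1514B frame queued: %d\n", try_queue(&s, 1514));	/* 1 */
	printf("3rd 1514B frame queued: %d\n", try_queue(&s, 1514));	/* 0: 4560 > 4096 */
	printf("queued_len=%u tx_space=%u\n", s.queued_len, s.tx_space);
	return 0;
}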
@@ -399,6 +401,7 @@ struct ks8851_net { struct work_struct rxctrl_work; struct sk_buff_head txq; + unsigned int queued_len; struct eeprom_93cx6 eeprom; struct regulator *vdd_reg; diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index cfbc900d4aeb..0bf13b38b8f5 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -362,16 +362,18 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) handled |= IRQ_RXPSI; if (status & IRQ_TXI) { - handled |= IRQ_TXI; - - /* no lock here, tx queue should have been stopped */ - - /* update our idea of how much tx space is available to the - * system */ - ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR); netif_dbg(ks, intr, ks->netdev, - "%s: txspace %d\n", __func__, ks->tx_space); + "%s: txspace %d\n", __func__, tx_space); + + spin_lock(&ks->statelock); + ks->tx_space = tx_space; + if (netif_queue_stopped(ks->netdev)) + netif_wake_queue(ks->netdev); + spin_unlock(&ks->statelock); + + handled |= IRQ_TXI; } if (status & IRQ_RXI) @@ -414,9 +416,6 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (status & IRQ_LCI) mii_check_link(&ks->mii); - if (status & IRQ_TXI) - netif_wake_queue(ks->netdev); - return IRQ_HANDLED; } @@ -500,6 +499,7 @@ static int ks8851_net_open(struct net_device *dev) ks8851_wrreg16(ks, KS_ISR, ks->rc_ier); ks8851_wrreg16(ks, KS_IER, ks->rc_ier); + ks->queued_len = 0; netif_start_queue(ks->netdev); netif_dbg(ks, ifup, ks->netdev, "network device up\n"); diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c index 70bc7253454f..88e26c120b48 100644 --- a/drivers/net/ethernet/micrel/ks8851_spi.c +++ b/drivers/net/ethernet/micrel/ks8851_spi.c @@ -286,6 +286,18 @@ static void ks8851_wrfifo_spi(struct ks8851_net *ks, struct sk_buff *txp, netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__); } +/** + * calc_txlen - calculate size of message to send packet + * @len: Length of data + * + * Returns the size of the TXFIFO message needed to send + * this packet. + */ +static unsigned int calc_txlen(unsigned int len) +{ + return ALIGN(len + 4, 4); +} + /** * ks8851_rx_skb_spi - receive skbuff * @ks: The device state @@ -305,7 +317,9 @@ static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb) */ static void ks8851_tx_work(struct work_struct *work) { + unsigned int dequeued_len = 0; struct ks8851_net_spi *kss; + unsigned short tx_space; struct ks8851_net *ks; unsigned long flags; struct sk_buff *txb; @@ -322,6 +336,8 @@ static void ks8851_tx_work(struct work_struct *work) last = skb_queue_empty(&ks->txq); if (txb) { + dequeued_len += calc_txlen(txb->len); + ks8851_wrreg16_spi(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); ks8851_wrfifo_spi(ks, txb, last); @@ -332,6 +348,13 @@ static void ks8851_tx_work(struct work_struct *work) } } + tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); + + spin_lock(&ks->statelock); + ks->queued_len -= dequeued_len; + ks->tx_space = tx_space; + spin_unlock(&ks->statelock); + ks8851_unlock_spi(ks, &flags); } @@ -346,18 +369,6 @@ static void ks8851_flush_tx_work_spi(struct ks8851_net *ks) flush_work(&kss->tx_work); } -/** - * calc_txlen - calculate size of message to send packet - * @len: Length of data - * - * Returns the size of the TXFIFO message needed to send - * this packet. 
- */ -static unsigned int calc_txlen(unsigned int len) -{ - return ALIGN(len + 4, 4); -} - /** * ks8851_start_xmit_spi - transmit packet using SPI * @skb: The buffer to transmit @@ -386,16 +397,17 @@ static netdev_tx_t ks8851_start_xmit_spi(struct sk_buff *skb, spin_lock(&ks->statelock); - if (needed > ks->tx_space) { + if (ks->queued_len + needed > ks->tx_space) { netif_stop_queue(dev); ret = NETDEV_TX_BUSY; } else { - ks->tx_space -= needed; + ks->queued_len += needed; skb_queue_tail(&ks->txq, skb); } spin_unlock(&ks->statelock); - schedule_work(&kss->tx_work); + if (ret == NETDEV_TX_OK) + schedule_work(&kss->tx_work); return ret; } diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index fe4e7a7d9c0b..8b6c4cc37c53 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -19,6 +19,7 @@ config MICROSOFT_MANA tristate "Microsoft Azure Network Adapter (MANA) support" depends on PCI_MSI && X86_64 depends on PCI_HYPERV + select PAGE_POOL help This driver supports Microsoft Azure Network Adapter (MANA). So far, the driver is only supported on X86_64. diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c index 0066219bb0e8..6b95262dad90 100644 --- a/drivers/net/ethernet/mscc/ocelot_stats.c +++ b/drivers/net/ethernet/mscc/ocelot_stats.c @@ -216,10 +216,10 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_64]; rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_65_127]; rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_128_255]; - rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_256_511]; - rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_512_1023]; - rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_256_511]; + rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_512_1023]; + rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_1024_1526]; + rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1527_MAX]; } void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port, diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index de8d54b23f73..c005a9df59d1 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1862,7 +1862,7 @@ netxen_tso_check(struct net_device *netdev, if (protocol == cpu_to_be16(ETH_P_8021Q)) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 65e20693c549..33f4f58ee51c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -933,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) p_dma->virt_addr = NULL; } kfree(p_mngr->ilt_shadow); + p_mngr->ilt_shadow = NULL; } static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 0d57ffcedf0c..fc78bc959ded 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -2591,6 +2591,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { netdev_err(qdev->ndev, "lBufQ failed\n"); + kfree(qdev->lrg_buf); return -ENOMEM; } 
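/* qdev->lrg_buf is allocated with kmalloc_array() near the top of this
 * function; every error return after that point has to free it, otherwise
 * the array leaks when one of the descriptor-queue allocations fails.
 */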
qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; @@ -2615,6 +2616,7 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) qdev->lrg_buf_q_alloc_size, qdev->lrg_buf_q_alloc_virt_addr, qdev->lrg_buf_q_alloc_phy_addr); + kfree(qdev->lrg_buf); return -ENOMEM; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 92930a055cbc..41894d154013 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -318,7 +318,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, if (adapter->flags & QLCNIC_VLAN_FILTERING) { if (protocol == ETH_P_8021Q) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); vlan_id = ntohs(vh->h_vlan_TCI); } else if (skb_vlan_tag_present(skb)) { vlan_id = skb_vlan_tag_get(skb); @@ -468,7 +468,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, u32 producer = tx_ring->producer; if (protocol == ETH_P_8021Q) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index f62c39544e08..a739c06ede4e 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -30,6 +30,8 @@ #define QCASPI_MAX_REGS 0x20 +#define QCASPI_RX_MAX_FRAMES 4 + static const u16 qcaspi_spi_regs[] = { SPI_REG_BFR_SIZE, SPI_REG_WRBUF_SPC_AVA, @@ -252,9 +254,9 @@ qcaspi_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, { struct qcaspi *qca = netdev_priv(dev); - ring->rx_max_pending = 4; + ring->rx_max_pending = QCASPI_RX_MAX_FRAMES; ring->tx_max_pending = TX_RING_MAX_LEN; - ring->rx_pending = 4; + ring->rx_pending = QCASPI_RX_MAX_FRAMES; ring->tx_pending = qca->txr.count; } @@ -263,22 +265,21 @@ qcaspi_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { - const struct net_device_ops *ops = dev->netdev_ops; struct qcaspi *qca = netdev_priv(dev); - if ((ring->rx_pending) || + if (ring->rx_pending != QCASPI_RX_MAX_FRAMES || (ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - if (netif_running(dev)) - ops->ndo_stop(dev); + if (qca->spi_thread) + kthread_park(qca->spi_thread); qca->txr.count = max_t(u32, ring->tx_pending, TX_RING_MIN_LEN); qca->txr.count = min_t(u16, qca->txr.count, TX_RING_MAX_LEN); - if (netif_running(dev)) - ops->ndo_open(dev); + if (qca->spi_thread) + kthread_unpark(qca->spi_thread); return 0; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 4a1b94e5a8ea..82f5173a2cfd 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -581,6 +581,18 @@ qcaspi_spi_thread(void *data) netdev_info(qca->net_dev, "SPI thread created\n"); while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_park()) { + netif_tx_disable(qca->net_dev); + netif_carrier_off(qca->net_dev); + qcaspi_flush_tx_ring(qca); + kthread_parkme(); + if (qca->sync == QCASPI_SYNC_READY) { + netif_carrier_on(qca->net_dev); + netif_wake_queue(qca->net_dev); + } + continue; + } + if ((qca->intr_req == qca->intr_svc) && !qca->txr.skb[qca->txr.head]) schedule(); @@ -609,11 +621,17 @@ qcaspi_spi_thread(void *data) if (intr_cause & 
SPI_INT_CPU_ON) { qcaspi_qca7k_sync(qca, QCASPI_EVENT_CPUON); + /* Frame decoding in progress */ + if (qca->frm_handle.state != qca->frm_handle.init) + qca->net_dev->stats.rx_dropped++; + + qcafrm_fsm_init_spi(&qca->frm_handle); + qca->stats.device_reset++; + /* not synced. */ if (qca->sync != QCASPI_SYNC_READY) continue; - qca->stats.device_reset++; netif_wake_queue(qca->net_dev); netif_carrier_on(qca->net_dev); } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 27b1663c476e..64b209a0ad21 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -391,7 +391,7 @@ static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev) struct rtnl_link_ops rmnet_link_ops __read_mostly = { .kind = "rmnet", - .maxtype = __IFLA_RMNET_MAX, + .maxtype = IFLA_RMNET_MAX, .priv_size = sizeof(struct rmnet_priv), .setup = rmnet_vnd_setup, .validate = rmnet_rtnl_validate, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d22457f2cf9c..06663c11ca96 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1145,7 +1145,7 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp) { r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); r8168ep_ocp_write(tp, 0x01, 0x30, r8168ep_ocp_read(tp, 0x30) | 0x01); - rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 10); + rtl_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10000, 30); } static void rtl8168_driver_start(struct rtl8169_private *tp) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 68cb5616ef99..e7b70006261f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -68,16 +68,27 @@ int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) return -ETIMEDOUT; } -static int ravb_config(struct net_device *ndev) +static int ravb_set_opmode(struct net_device *ndev, u32 opmode) { + u32 csr_ops = 1U << (opmode & CCC_OPC); + u32 ccc_mask = CCC_OPC; int error; - /* Set config mode */ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); - /* Check if the operating mode is changed to the config mode */ - error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); - if (error) - netdev_err(ndev, "failed to switch device to config mode\n"); + /* If gPTP active in config mode is supported it needs to be configured + * along with CSEL and operating mode in the same access. This is a + * hardware limitation. + */ + if (opmode & CCC_GAC) + ccc_mask |= CCC_GAC | CCC_CSEL; + + /* Set operating mode */ + ravb_modify(ndev, CCC, ccc_mask, opmode); + /* Check if the operating mode is changed to the requested one */ + error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); + if (error) { + netdev_err(ndev, "failed to switch device to requested mode (%u)\n", + opmode & CCC_OPC); + } return error; } @@ -675,7 +686,7 @@ static int ravb_dmac_init(struct net_device *ndev) int error; /* Set CONFIG mode */ - error = ravb_config(ndev); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); if (error) return error; @@ -684,9 +695,7 @@ static int ravb_dmac_init(struct net_device *ndev) return error; /* Setting the control will start the AVB-DMAC process. 
*/ - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); - - return 0; + return ravb_set_opmode(ndev, CCC_OPC_OPERATION); } static void ravb_get_tx_tstamp(struct net_device *ndev) @@ -1048,7 +1057,7 @@ static int ravb_stop_dma(struct net_device *ndev) return error; /* Stop AVB-DMAC process */ - return ravb_config(ndev); + return ravb_set_opmode(ndev, CCC_OPC_CONFIG); } /* E-MAC interrupt handler */ @@ -1956,7 +1965,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; unsigned long flags; - u32 dma_addr; + dma_addr_t dma_addr; void *buffer; u32 entry; u32 len; @@ -2576,21 +2585,25 @@ static int ravb_set_gti(struct net_device *ndev) return 0; } -static void ravb_set_config_mode(struct net_device *ndev) +static int ravb_set_config_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; + int error; if (info->gptp) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); + if (error) + return error; /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else if (info->ccc_gac) { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | - CCC_GAC | CCC_CSEL_HPB); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); } else { - ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); + error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); } + + return error; } /* Set tx and rx clock internal delay modes */ @@ -2810,7 +2823,9 @@ static int ravb_probe(struct platform_device *pdev) ndev->ethtool_ops = &ravb_ethtool_ops; /* Set AVB config mode */ - ravb_set_config_mode(ndev); + error = ravb_set_config_mode(ndev); + if (error) + goto out_disable_gptp_clk; if (info->gptp || info->ccc_gac) { /* Set GTI value */ @@ -2933,8 +2948,7 @@ static int ravb_remove(struct platform_device *pdev) dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, priv->desc_bat_dma); - /* Set reset mode */ - ravb_write(ndev, CCC_OPC_RESET, CCC); + ravb_set_opmode(ndev, CCC_OPC_RESET); clk_disable_unprepare(priv->gptp_clk); clk_disable_unprepare(priv->refclk); @@ -3018,8 +3032,11 @@ static int __maybe_unused ravb_resume(struct device *dev) int ret = 0; /* If WoL is enabled set reset mode to rearm the WoL logic */ - if (priv->wol_enabled) - ravb_write(ndev, CCC_OPC_RESET, CCC); + if (priv->wol_enabled) { + ret = ravb_set_opmode(ndev, CCC_OPC_RESET); + if (ret) + return ret; + } /* All register have been reset to default values. 
* Restore all registers which where setup at probe time and @@ -3027,7 +3044,9 @@ static int __maybe_unused ravb_resume(struct device *dev) */ /* Set AVB config mode */ - ravb_set_config_mode(ndev); + ret = ravb_set_config_mode(ndev); + if (ret) + return ret; if (info->gptp || info->ccc_gac) { /* Set GTI value */ diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 9220afeddee8..3f290791df1c 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -820,8 +820,10 @@ int efx_probe_filters(struct efx_nic *efx) } if (!success) { - efx_for_each_channel(channel, efx) + efx_for_each_channel(channel, efx) { kfree(channel->rps_flow_id); + channel->rps_flow_id = NULL; + } efx->type->filter_table_remove(efx); rc = -ENOMEM; goto out_unlock; diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c index 898e5c61d908..d381d8164f07 100644 --- a/drivers/net/ethernet/sfc/tx_tso.c +++ b/drivers/net/ethernet/sfc/tx_tso.c @@ -147,7 +147,7 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb); protocol = veh->h_vlan_encapsulated_proto; } diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 31ff35174034..58091ee2bfe6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -256,7 +256,7 @@ config DWMAC_INTEL config DWMAC_LOONGSON tristate "Loongson PCI DWMAC support" default MACH_LOONGSON64 - depends on STMMAC_ETH && PCI + depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI depends on COMMON_CLK help This selects the LOONGSON PCI bus support for the stmmac driver, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index a25c187d3185..e129ee1020f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -59,26 +59,19 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id return -ENODEV; } - if (!of_device_is_compatible(np, "loongson, pci-gmac")) { - pr_info("dwmac_loongson_pci: Incompatible OF node\n"); - return -ENODEV; - } - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); if (!plat) return -ENOMEM; + plat->mdio_bus_data = devm_kzalloc(&pdev->dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); + if (!plat->mdio_bus_data) + return -ENOMEM; + plat->mdio_node = of_get_child_by_name(np, "mdio"); if (plat->mdio_node) { dev_info(&pdev->dev, "Found MDIO subnode\n"); - - plat->mdio_bus_data = devm_kzalloc(&pdev->dev, - sizeof(*plat->mdio_bus_data), - GFP_KERNEL); - if (!plat->mdio_bus_data) { - ret = -ENOMEM; - goto err_put_node; - } plat->mdio_bus_data->needs_reset = true; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index bdbf86cb102a..46944c02b45e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -247,6 +247,7 @@ struct stmmac_priv { u32 msg_enable; int wolopts; int wol_irq; + bool wol_irq_disabled; int clk_csr; struct timer_list eee_ctrl_timer; int lpi_irq; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 35c8dd92d369..f03aa8a0b895 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -761,10 +761,16 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts) { pr_info("stmmac: wakeup enable\n"); device_set_wakeup_enable(priv->device, 1); - enable_irq_wake(priv->wol_irq); + /* Avoid unbalanced enable_irq_wake calls */ + if (priv->wol_irq_disabled) + enable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = false; } else { device_set_wakeup_enable(priv->device, 0); - disable_irq_wake(priv->wol_irq); + /* Avoid unbalanced disable_irq_wake calls */ + if (!priv->wol_irq_disabled) + disable_irq_wake(priv->wol_irq); + priv->wol_irq_disabled = true; } mutex_lock(&priv->lock); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 69aac8ed84f6..8f8de14347a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3519,6 +3519,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev) /* Request the Wake IRQ in case of another line * is used for WoL */ + priv->wol_irq_disabled = true; if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { int_name = priv->int_name_wol; sprintf(int_name, "%s:%s", dev->name, "wol"); @@ -4566,13 +4567,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) { - struct vlan_ethhdr *veth; - __be16 vlan_proto; + struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); + __be16 vlan_proto = veth->h_vlan_proto; u16 vlanid; - veth = (struct vlan_ethhdr *)skb->data; - vlan_proto = veth->h_vlan_proto; - if ((vlan_proto == htons(ETH_P_8021Q) && dev->features & NETIF_F_HW_VLAN_CTAG_RX) || (vlan_proto == htons(ETH_P_8021AD) && diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 5f177ea80725..379fc887ddf4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -483,7 +483,11 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); - if (err != 0) { + if (err == -ENODEV) { + err = 0; + dev_info(dev, "MDIO bus is disabled\n"); + goto bus_register_fail; + } else if (err) { dev_err_probe(dev, err, "Cannot register the MDIO bus\n"); goto bus_register_fail; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 9f2553799895..76fabeae512d 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -54,7 +54,7 @@ #define AM65_CPSW_MAX_PORTS 8 #define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN -#define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) +#define AM65_CPSW_MAX_PACKET_SIZE 2024 #define AM65_CPSW_REG_CTL 0x004 #define AM65_CPSW_REG_STAT_PORT_EN 0x014 @@ -1990,7 +1990,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) eth_hw_addr_set(port->ndev, port->slave.mac_addr); port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; - port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; + port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE - + (VLAN_ETH_HLEN + ETH_FCS_LEN); port->ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | diff --git 
a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 54a17b576eac..7cbcf51bae92 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -3302,6 +3302,7 @@ static struct phy_driver ksphy_driver[] = { .flags = PHY_POLL_CABLE_TEST, .driver_data = &ksz9021_type, .probe = kszphy_probe, + .soft_reset = genphy_soft_reset, .config_init = ksz9131_config_init, .config_intr = kszphy_config_intr, .handle_interrupt = kszphy_handle_interrupt, diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 556b2d1cd2ac..293eaf6b3ec9 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -285,8 +285,10 @@ static int __team_options_register(struct team *team, return 0; inst_rollback: - for (i--; i >= 0; i--) + for (i--; i >= 0; i--) { __team_option_inst_del_option(team, dst_opts[i]); + list_del(&dst_opts[i]->list); + } i = option_count; alloc_rollback: diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c index a017e9de2119..7b8afa589a53 100644 --- a/drivers/net/usb/aqc111.c +++ b/drivers/net/usb/aqc111.c @@ -1079,17 +1079,17 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 pkt_count = 0; u64 desc_hdr = 0; u16 vlan_tag = 0; - u32 skb_len = 0; + u32 skb_len; if (!skb) goto err; - if (skb->len == 0) + skb_len = skb->len; + if (skb_len < sizeof(desc_hdr)) goto err; - skb_len = skb->len; /* RX Descriptor Header */ - skb_trim(skb, skb->len - sizeof(desc_hdr)); + skb_trim(skb, skb_len - sizeof(desc_hdr)); desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); /* Check these packets */ diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index 3777c7e2e6fc..e47bb125048d 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -161,7 +161,9 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf) u8 buf[ETH_ALEN]; struct ax88172a_private *priv; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + return ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 4ea0e155bb0d..5a1bf42ce156 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -173,6 +173,7 @@ struct ax88179_data { u8 in_pm; u32 wol_supported; u32 wolopts; + u8 disconnecting; }; struct ax88179_int_data { @@ -208,6 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, { int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, void *, u16); + struct ax88179_data *ax179_data = dev->driver_priv; BUG_ON(!dev); @@ -219,7 +221,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, ret = fn(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting))) netdev_warn(dev->net, "Failed to read reg index 0x%04x: %d\n", index, ret); @@ -231,6 +233,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, { int ret; int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16); + struct ax88179_data *ax179_data = dev->driver_priv; BUG_ON(!dev); @@ -242,7 +245,7 @@ static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index, ret = fn(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, data, size); - if (unlikely(ret < 0)) + if (unlikely((ret < 0) && !(ret == -ENODEV && ax179_data->disconnecting))) netdev_warn(dev->net, "Failed to 
write reg index 0x%04x: %d\n", index, ret); @@ -492,6 +495,20 @@ static int ax88179_resume(struct usb_interface *intf) return usbnet_resume(intf); } +static void ax88179_disconnect(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct ax88179_data *ax179_data; + + if (!dev) + return; + + ax179_data = dev->driver_priv; + ax179_data->disconnecting = 1; + + usbnet_disconnect(intf); +} + static void ax88179_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo) { @@ -1906,7 +1923,7 @@ static struct usb_driver ax88179_178a_driver = { .suspend = ax88179_suspend, .resume = ax88179_resume, .reset_resume = ax88179_resume, - .disconnect = usbnet_disconnect, + .disconnect = ax88179_disconnect, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, }; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 4fb981b8732e..2d82481d34e6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1288,6 +1288,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, + {QMI_FIXED_INTF(0x19d2, 0x0189, 4)}, /* ZTE MF290 */ {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 4d833781294a..958a02b19554 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -8288,43 +8288,6 @@ static bool rtl_check_vendor_ok(struct usb_interface *intf) return true; } -static bool rtl_vendor_mode(struct usb_interface *intf) -{ - struct usb_host_interface *alt = intf->cur_altsetting; - struct usb_device *udev; - struct usb_host_config *c; - int i, num_configs; - - if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) - return rtl_check_vendor_ok(intf); - - /* The vendor mode is not always config #1, so to find it out. 
*/ - udev = interface_to_usbdev(intf); - c = udev->config; - num_configs = udev->descriptor.bNumConfigurations; - if (num_configs < 2) - return false; - - for (i = 0; i < num_configs; (i++, c++)) { - struct usb_interface_descriptor *desc = NULL; - - if (c->desc.bNumInterfaces > 0) - desc = &c->intf_cache[0]->altsetting->desc; - else - continue; - - if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) { - usb_driver_set_configuration(udev, c->desc.bConfigurationValue); - break; - } - } - - if (i == num_configs) - dev_err(&intf->dev, "Unexpected Device\n"); - - return false; -} - static int rtl8152_pre_reset(struct usb_interface *intf) { struct r8152 *tp = usb_get_intfdata(intf); @@ -9556,9 +9519,8 @@ static int rtl_fw_init(struct r8152 *tp) return 0; } -u8 rtl8152_get_version(struct usb_interface *intf) +static u8 __rtl_get_hw_ver(struct usb_device *udev) { - struct usb_device *udev = interface_to_usbdev(intf); u32 ocp_data = 0; __le32 *tmp; u8 version; @@ -9628,10 +9590,19 @@ u8 rtl8152_get_version(struct usb_interface *intf) break; default: version = RTL_VER_UNKNOWN; - dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data); + dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data); break; } + return version; +} + +u8 rtl8152_get_version(struct usb_interface *intf) +{ + u8 version; + + version = __rtl_get_hw_ver(interface_to_usbdev(intf)); + dev_dbg(&intf->dev, "Detected version 0x%04x\n", version); return version; @@ -9675,7 +9646,10 @@ static int rtl8152_probe(struct usb_interface *intf, if (version == RTL_VER_UNKNOWN) return -ENODEV; - if (!rtl_vendor_mode(intf)) + if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC) + return -ENODEV; + + if (!rtl_check_vendor_ok(intf)) return -ENODEV; usb_reset_device(udev); @@ -9875,43 +9849,37 @@ static void rtl8152_disconnect(struct usb_interface *intf) } } -#define REALTEK_USB_DEVICE(vend, prod) { \ - USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \ -}, \ -{ \ - USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \ - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \ -} - /* table of devices that work with this driver */ static const struct usb_device_id rtl8152_table[] = { /* Realtek */ - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155), - REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156), + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) }, + { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) }, /* Microsoft */ - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927), - REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e), - REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e), - REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387), - 
REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041), - REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff), - REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601), + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) }, + { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) }, + { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) }, + { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) }, + { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, + { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, + { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, + { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; @@ -9931,7 +9899,68 @@ static struct usb_driver rtl8152_driver = { .disable_hub_initiated_lpm = 1, }; -module_usb_driver(rtl8152_driver); +static int rtl8152_cfgselector_probe(struct usb_device *udev) +{ + struct usb_host_config *c; + int i, num_configs; + + /* Switch the device to vendor mode, if and only if the vendor mode + * driver supports it. + */ + if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN) + return 0; + + /* The vendor mode is not always config #1, so to find it out. */ + c = udev->config; + num_configs = udev->descriptor.bNumConfigurations; + for (i = 0; i < num_configs; (i++, c++)) { + struct usb_interface_descriptor *desc = NULL; + + if (!c->desc.bNumInterfaces) + continue; + desc = &c->intf_cache[0]->altsetting->desc; + if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) + break; + } + + if (i == num_configs) + return -ENODEV; + + if (usb_set_configuration(udev, c->desc.bConfigurationValue)) { + dev_err(&udev->dev, "Failed to set configuration %d\n", + c->desc.bConfigurationValue); + return -ENODEV; + } + + return 0; +} + +static struct usb_device_driver rtl8152_cfgselector_driver = { + .name = MODULENAME "-cfgselector", + .probe = rtl8152_cfgselector_probe, + .id_table = rtl8152_table, + .generic_subclass = 1, + .supports_autosuspend = 1, +}; + +static int __init rtl8152_driver_init(void) +{ + int ret; + + ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE); + if (ret) + return ret; + return usb_register(&rtl8152_driver); +} + +static void __exit rtl8152_driver_exit(void) +{ + usb_deregister(&rtl8152_driver); + usb_deregister_device_driver(&rtl8152_cfgselector_driver); +} + +module_init(rtl8152_driver_init); +module_exit(rtl8152_driver_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 76f275ca53e9..70d468f01338 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -813,8 +813,8 @@ static int ath11k_core_get_rproc(struct ath11k_base *ab) prproc = rproc_get_by_phandle(rproc_phandle); if (!prproc) { - ath11k_err(ab, "failed to get rproc\n"); - return -EINVAL; + ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n"); + return -EPROBE_DEFER; } ab_ahb->tgt_rproc = prproc; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 157d1f31c487..c5a306b01fe2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h 
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -348,8 +348,8 @@ #define RFIC_REG_RD 0xAD0470 #define WFPM_CTRL_REG 0xA03030 #define WFPM_OTP_CFG1_ADDR 0x00a03098 -#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4) -#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(5) +#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(4) #define WFPM_GP2 0xA030B4 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index a3cefbc43e80..2c14188f34bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -99,17 +99,6 @@ static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm, active_cnt = 2; } - /* - * If the firmware requested it, then we know that it supports - * getting zero for the values to indicate "use one, but pick - * which one yourself", which means it can dynamically pick one - * that e.g. has better RSSI. - */ - if (mvm->fw_static_smps_request && active_cnt == 1 && idle_cnt == 1) { - idle_cnt = 0; - active_cnt = 0; - } - *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) << PHY_RX_CHAIN_VALID_POS); *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index caaf4d52e2c6..76219486b9c2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -2200,7 +2200,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids) WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0) - cmd.flags |= CMD_WANT_SKB; + cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL; IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n", sta_id, tids); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 69b95ad5993b..2ec4ee8ab317 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -745,7 +745,7 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) } } -void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans); +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq); static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { @@ -792,7 +792,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans)); } -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 90a46faaaffd..91b73e7a4113 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1381,7 +1381,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, * if it is true then one of the handlers took the page. 
*/ - if (reclaim) { + if (reclaim && txq) { u16 sequence = le16_to_cpu(pkt->hdr.sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index = iwl_txq_get_cmd_index(txq, index); @@ -1781,7 +1781,7 @@ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) return inta; } -void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) +void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct isr_statistics *isr_stats = &trans_pcie->isr_stats; @@ -1805,7 +1805,7 @@ void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans) isr_stats->rfkill++; if (prev != report) - iwl_trans_pcie_rf_kill(trans, report); + iwl_trans_pcie_rf_kill(trans, report, from_irq); mutex_unlock(&trans_pcie->mutex); if (hw_rfkill) { @@ -1945,7 +1945,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) /* HW RF KILL switch toggled */ if (inta & CSR_INT_BIT_RF_KILL) { - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, true); handled |= CSR_INT_BIT_RF_KILL; } @@ -2362,7 +2362,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* HW RF KILL switch toggled */ if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, true); if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { IWL_ERR(trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 39ab6526e6b8..c7ed35b3dd8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1080,7 +1080,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) report = test_bit(STATUS_RFKILL_OPMODE, &trans->status); if (prev != report) - iwl_trans_pcie_rf_kill(trans, report); + iwl_trans_pcie_rf_kill(trans, report, false); return hw_rfkill; } @@ -1234,7 +1234,7 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) trans_pcie->hw_mask = trans_pcie->hw_init_mask; } -static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) +static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1261,7 +1261,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans) if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); - iwl_pcie_synchronize_irqs(trans); + if (!from_irq) + iwl_pcie_synchronize_irqs(trans); iwl_pcie_rx_napi_sync(trans); iwl_pcie_tx_stop(trans); iwl_pcie_rx_stop(trans); @@ -1451,7 +1452,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, clear_bit(STATUS_RFKILL_OPMODE, &trans->status); } if (hw_rfkill != was_in_rfkill) - iwl_trans_pcie_rf_kill(trans, hw_rfkill); + iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); } static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) @@ -1466,12 +1467,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) mutex_lock(&trans_pcie->mutex); trans_pcie->opmode_down = true; was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); - _iwl_trans_pcie_stop_device(trans); + _iwl_trans_pcie_stop_device(trans, false); iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); mutex_unlock(&trans_pcie->mutex); } -void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) +void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) { struct iwl_trans_pcie __maybe_unused *trans_pcie = 
IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1484,7 +1485,7 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) if (trans->trans_cfg->gen2) _iwl_trans_pcie_gen2_stop_device(trans); else - _iwl_trans_pcie_stop_device(trans); + _iwl_trans_pcie_stop_device(trans, from_irq); } } @@ -2815,7 +2816,7 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file, IWL_WARN(trans, "changing debug rfkill %d->%d\n", trans_pcie->debug_rfkill, new_value); trans_pcie->debug_rfkill = new_value; - iwl_pcie_handle_rfkill_irq(trans); + iwl_pcie_handle_rfkill_irq(trans, false); return count; } @@ -3034,7 +3035,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; - spin_lock(&rxq->lock); + spin_lock_bh(&rxq->lock); r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; @@ -3058,7 +3059,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, *data = iwl_fw_error_next_data(*data); } - spin_unlock(&rxq->lock); + spin_unlock_bh(&rxq->lock); return rb_len; } diff --git a/drivers/net/wireless/marvell/libertas/Kconfig b/drivers/net/wireless/marvell/libertas/Kconfig index 6d62ab49aa8d..c7d02adb3eea 100644 --- a/drivers/net/wireless/marvell/libertas/Kconfig +++ b/drivers/net/wireless/marvell/libertas/Kconfig @@ -2,8 +2,6 @@ config LIBERTAS tristate "Marvell 8xxx Libertas WLAN driver support" depends on CFG80211 - select WIRELESS_EXT - select WEXT_SPY select LIB80211 select FW_LOADER help diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index bcd564dc3554..c907da2a4789 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2046,6 +2046,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, mwifiex_set_sys_config_invalid_data(bss_cfg); + memcpy(bss_cfg->mac_addr, priv->curr_addr, ETH_ALEN); + if (params->beacon_interval) bss_cfg->beacon_period = params->beacon_interval; if (params->dtim_period) diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index b4f945a549f7..863f5f2247a0 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -165,6 +165,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32) #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35) #define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) +#define TLV_TYPE_UAP_MAC_ADDRESS (PROPRIETARY_TLV_BASE_ID + 43) #define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) #define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) #define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48) diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 091e7ca79376..e8825f302de8 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -107,6 +107,7 @@ struct mwifiex_uap_bss_param { u8 qos_info; u8 power_constraint; struct mwifiex_types_wmm_info wmm_info; + u8 mac_addr[ETH_ALEN]; }; enum { diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index e78a201cd150..491e36611909 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -468,6 +468,7 @@ void mwifiex_config_uap_11d(struct mwifiex_private *priv, static int mwifiex_uap_bss_param_prepare(u8 *tlv, void 
*cmd_buf, u16 *param_size) { + struct host_cmd_tlv_mac_addr *mac_tlv; struct host_cmd_tlv_dtim_period *dtim_period; struct host_cmd_tlv_beacon_period *beacon_period; struct host_cmd_tlv_ssid *ssid; @@ -487,6 +488,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) int i; u16 cmd_size = *param_size; + mac_tlv = (struct host_cmd_tlv_mac_addr *)tlv; + mac_tlv->header.type = cpu_to_le16(TLV_TYPE_UAP_MAC_ADDRESS); + mac_tlv->header.len = cpu_to_le16(ETH_ALEN); + memcpy(mac_tlv->mac_addr, bss_cfg->mac_addr, ETH_ALEN); + cmd_size += sizeof(struct host_cmd_tlv_mac_addr); + tlv += sizeof(struct host_cmd_tlv_mac_addr); + if (bss_cfg->ssid.ssid_len) { ssid = (struct host_cmd_tlv_ssid *)tlv; ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 9bc8758573fc..0a88048b8976 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -62,7 +62,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) goto out_put_node; } - offset = be32_to_cpup(list); + offset += be32_to_cpup(list); ret = mtd_read(mtd, offset, len, &retlen, eep); put_mtd_device(mtd); if (mtd_is_bitflip(ret)) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 9c753c6aabef..60c9f9c56a4f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -564,8 +564,7 @@ struct mt76_sdio { struct mt76_worker txrx_worker; struct mt76_worker status_worker; struct mt76_worker net_worker; - - struct work_struct stat_work; + struct mt76_worker stat_worker; u8 *xmit_buf; u32 xmit_buf_sz; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c index 304212f5f8da..d742b22428f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -205,8 +205,8 @@ static int mt7663s_suspend(struct device *dev) mt76_worker_disable(&mdev->mt76.sdio.txrx_worker); mt76_worker_disable(&mdev->mt76.sdio.status_worker); mt76_worker_disable(&mdev->mt76.sdio.net_worker); + mt76_worker_disable(&mdev->mt76.sdio.stat_worker); - cancel_work_sync(&mdev->mt76.sdio.stat_work); clear_bit(MT76_READING_STATS, &mdev->mphy.state); mt76_tx_status_check(&mdev->mt76, true); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index 10dda1693d7d..19640ff76bdc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -1036,21 +1036,26 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2, u8 type[2]; u8 rsvd[64]; } __packed req = { + .ver = 1, .idx = idx, .env = env_cap, }; int ret, valid_cnt = 0; - u8 i, *pos; + u16 buf_len = 0; + u8 *pos; if (!clc) return 0; + buf_len = le16_to_cpu(clc->len) - sizeof(*clc); pos = clc->data; - for (i = 0; i < clc->nr_country; i++) { + while (buf_len > 16) { struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos; u16 len = le16_to_cpu(rule->len); + u16 offset = len + sizeof(*rule); - pos += len + sizeof(*rule); + pos += offset; + buf_len -= offset; if (rule->alpha2[0] != alpha2[0] || rule->alpha2[1] != alpha2[1]) continue; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c index 3b25a06fd946..8898ba69b8e9 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c @@ -222,7 +222,7 @@ static int mt7921s_suspend(struct device *__dev) mt76_txq_schedule_all(&dev->mphy); mt76_worker_disable(&mdev->tx_worker); mt76_worker_disable(&mdev->sdio.status_worker); - cancel_work_sync(&mdev->sdio.stat_work); + mt76_worker_disable(&mdev->sdio.stat_worker); clear_bit(MT76_READING_STATS, &dev->mphy.state); mt76_tx_status_check(mdev, true); @@ -254,6 +254,7 @@ static int mt7921s_suspend(struct device *__dev) restore_worker: mt76_worker_enable(&mdev->tx_worker); mt76_worker_enable(&mdev->sdio.status_worker); + mt76_worker_enable(&mdev->sdio.stat_worker); if (!pm->ds_enable) mt76_connac_mcu_set_deep_sleep(mdev, false); @@ -286,6 +287,7 @@ static int mt7921s_resume(struct device *__dev) mt76_worker_enable(&mdev->sdio.txrx_worker); mt76_worker_enable(&mdev->sdio.status_worker); mt76_worker_enable(&mdev->sdio.net_worker); + mt76_worker_enable(&mdev->sdio.stat_worker); /* restore previous ds setting */ if (!pm->ds_enable) diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c index 1b3adb3d91e8..fd07b6623392 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c @@ -107,7 +107,7 @@ int mt7921s_mac_reset(struct mt7921_dev *dev) mt76_worker_disable(&dev->mt76.sdio.txrx_worker); mt76_worker_disable(&dev->mt76.sdio.status_worker); mt76_worker_disable(&dev->mt76.sdio.net_worker); - cancel_work_sync(&dev->mt76.sdio.stat_work); + mt76_worker_disable(&dev->mt76.sdio.stat_worker); mt7921s_disable_irq(&dev->mt76); mt7921s_wfsys_reset(dev); @@ -115,6 +115,7 @@ int mt7921s_mac_reset(struct mt7921_dev *dev) mt76_worker_enable(&dev->mt76.sdio.txrx_worker); mt76_worker_enable(&dev->mt76.sdio.status_worker); mt76_worker_enable(&dev->mt76.sdio.net_worker); + mt76_worker_enable(&dev->mt76.sdio.stat_worker); dev->fw_assert = false; clear_bit(MT76_MCU_RESET, &dev->mphy.state); diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index 176207f3177c..fc4fb9463564 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -481,21 +481,21 @@ static void mt76s_status_worker(struct mt76_worker *w) if (dev->drv->tx_status_data && ndata_frames > 0 && !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) && !test_bit(MT76_STATE_SUSPEND, &dev->phy.state)) - ieee80211_queue_work(dev->hw, &dev->sdio.stat_work); + mt76_worker_schedule(&sdio->stat_worker); } while (nframes > 0); if (resched) mt76_worker_schedule(&dev->tx_worker); } -static void mt76s_tx_status_data(struct work_struct *work) +static void mt76s_tx_status_data(struct mt76_worker *worker) { struct mt76_sdio *sdio; struct mt76_dev *dev; u8 update = 1; u16 count = 0; - sdio = container_of(work, struct mt76_sdio, stat_work); + sdio = container_of(worker, struct mt76_sdio, stat_worker); dev = container_of(sdio, struct mt76_dev, sdio); while (true) { @@ -508,7 +508,7 @@ static void mt76s_tx_status_data(struct work_struct *work) } if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) - ieee80211_queue_work(dev->hw, &sdio->stat_work); + mt76_worker_schedule(&sdio->status_worker); else clear_bit(MT76_READING_STATS, &dev->phy.state); } @@ -600,8 +600,8 @@ void mt76s_deinit(struct mt76_dev *dev) mt76_worker_teardown(&sdio->txrx_worker); mt76_worker_teardown(&sdio->status_worker); 
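/*
 * Illustrative sketch (not part of the patch): the __mt7921_mcu_set_clc hunk
 * above replaces a rule-count driven loop with one bounded by the remaining
 * buffer length, so a truncated or inconsistent CLC blob cannot be walked
 * past the end of its data.  A generic form of that bounded TLV walk, with
 * hypothetical example_tlv/example_walk names, is sketched below.
 */
#include <linux/kernel.h>
#include <linux/types.h>

struct example_tlv {
	u8 tag;
	u8 rsvd;
	__le16 len;		/* payload length, little endian */
	u8 data[];
} __packed;

/* Walk TLVs in buf[0..buf_len), stopping as soon as a full header plus its
 * declared payload no longer fits in what is left of the buffer.
 */
static const struct example_tlv *example_walk(const u8 *buf, size_t buf_len,
					      u8 wanted_tag)
{
	while (buf_len >= sizeof(struct example_tlv)) {
		const struct example_tlv *tlv = (const void *)buf;
		size_t step = sizeof(*tlv) + le16_to_cpu(tlv->len);

		if (step > buf_len)	/* declared length overruns the buffer */
			break;
		if (tlv->tag == wanted_tag)
			return tlv;

		buf += step;
		buf_len -= step;
	}

	return NULL;
}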
mt76_worker_teardown(&sdio->net_worker); + mt76_worker_teardown(&sdio->stat_worker); - cancel_work_sync(&sdio->stat_work); clear_bit(MT76_READING_STATS, &dev->phy.state); mt76_tx_status_check(dev, true); @@ -644,10 +644,14 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, if (err) return err; + err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data, + "sdio-sta"); + if (err) + return err; + sched_set_fifo_low(sdio->status_worker.task); sched_set_fifo_low(sdio->net_worker.task); - - INIT_WORK(&sdio->stat_work, mt76s_tx_status_data); + sched_set_fifo_low(sdio->stat_worker.task); dev->queue_ops = &sdio_queue_ops; dev->bus = bus_ops; diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 76d0a778636a..311676c1ece0 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -493,9 +493,12 @@ int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, void *context) { struct usb_device *udev = interface_to_usbdev(usb->ez_usb); - struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC); + struct urb *urb; int r; + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + return -ENOMEM; usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), (void *)buffer, buffer_len, complete_fn, context); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index ca79f652fef3..6116c1bec155 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -164,21 +164,29 @@ static bool _rtl_pci_platform_switch_device_pci_aspm( struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) - value |= 0x40; + value &= PCI_EXP_LNKCTL_ASPMC; - pci_write_config_byte(rtlpci->pdev, 0x80, value); + if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE) + value |= PCI_EXP_LNKCTL_CCC; + + pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC | value, + value); return false; } -/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/ -static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value) +/* @value is PCI_EXP_LNKCTL_CLKREQ_EN or 0 to enable/disable clk request. */ +static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u16 value) { struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - pci_write_config_byte(rtlpci->pdev, 0x81, value); + value &= PCI_EXP_LNKCTL_CLKREQ_EN; + + pcie_capability_clear_and_set_word(rtlpci->pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN, + value); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) udelay(100); @@ -192,11 +200,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw) struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; - u8 num4bytes = pcipriv->ndis_adapter.num4bytes; /*Retrieve original configuration settings. */ u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg; - u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter. - pcibridge_linkctrlreg; u16 aspmlevel = 0; u8 tmp_u1b = 0; @@ -221,16 +226,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw) /*Set corresponding value. 
*/ aspmlevel |= BIT(0) | BIT(1); linkctrl_reg &= ~aspmlevel; - pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1)); _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg); - udelay(50); - - /*4 Disable Pci Bridge ASPM */ - pci_write_config_byte(rtlpci->pdev, (num4bytes << 2), - pcibridge_linkctrlreg); - - udelay(50); } /*Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for @@ -245,9 +242,7 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw) struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor; - u8 num4bytes = pcipriv->ndis_adapter.num4bytes; u16 aspmlevel; - u8 u_pcibridge_aspmsetting; u8 u_device_aspmsetting; if (!ppsc->support_aspm) @@ -259,25 +254,6 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw) return; } - /*4 Enable Pci Bridge ASPM */ - - u_pcibridge_aspmsetting = - pcipriv->ndis_adapter.pcibridge_linkctrlreg | - rtlpci->const_hostpci_aspm_setting; - - if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) - u_pcibridge_aspmsetting &= ~BIT(0); - - pci_write_config_byte(rtlpci->pdev, (num4bytes << 2), - u_pcibridge_aspmsetting); - - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "PlatformEnableASPM(): Write reg[%x] = %x\n", - (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10), - u_pcibridge_aspmsetting); - - udelay(50); - /*Get ASPM level (with/without Clock Req) */ aspmlevel = rtlpci->const_devicepci_aspm_setting; u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg; @@ -291,7 +267,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw) if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) { _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level & - RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0); + RT_RF_OFF_LEVL_CLK_REQ) ? + PCI_EXP_LNKCTL_CLKREQ_EN : 0); RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ); } udelay(100); @@ -358,22 +335,6 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, return tpriv != NULL; } -static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw) -{ - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_pci *rtlpci = rtl_pcidev(pcipriv); - u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset; - u8 linkctrl_reg; - u8 num4bbytes; - - num4bbytes = (capabilityoffset + 0x10) / 4; - - /*Read Link Control Register */ - pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg); - - pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg; -} - static void rtl_pci_parse_configuration(struct pci_dev *pdev, struct ieee80211_hw *hw) { @@ -2033,12 +1994,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, PCI_SLOT(bridge_pdev->devfn); pcipriv->ndis_adapter.pcibridge_funcnum = PCI_FUNC(bridge_pdev->devfn); - pcipriv->ndis_adapter.pcibridge_pciehdr_offset = - pci_pcie_cap(bridge_pdev); - pcipriv->ndis_adapter.num4bytes = - (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4; - - rtl_pci_get_linkcontrol_field(hw); if (pcipriv->ndis_adapter.pcibridge_vendor == PCI_BRIDGE_VENDOR_AMD) { @@ -2055,13 +2010,11 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg); rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, - "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n", + "pci_bridge busnumber:devnumber:funcnumber:vendor:amd %d:%d:%d:%x:%x\n", pcipriv->ndis_adapter.pcibridge_busnum, pcipriv->ndis_adapter.pcibridge_devnum, pcipriv->ndis_adapter.pcibridge_funcnum, 
pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor], - pcipriv->ndis_adapter.pcibridge_pciehdr_offset, - pcipriv->ndis_adapter.pcibridge_linkctrlreg, pcipriv->ndis_adapter.amd_l1_patch); rtl_pci_parse_configuration(pdev, hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h index 866861626a0a..d6307197dfea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.h +++ b/drivers/net/wireless/realtek/rtlwifi/pci.h @@ -236,11 +236,6 @@ struct mp_adapter { u16 pcibridge_vendorid; u16 pcibridge_deviceid; - u8 num4bytes; - - u8 pcibridge_pciehdr_offset; - u8 pcibridge_linkctrlreg; - bool amd_l1_patch; }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index 12d0b3a87af7..0fab3a0c7d49 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -16,12 +16,6 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw, static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data); -static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i = ffs(bitmask); - - return i ? i - 1 : 32; -} static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw); static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw, @@ -51,7 +45,7 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, @@ -74,7 +68,7 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw, if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } @@ -99,7 +93,7 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw, original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -127,7 +121,7 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = _rtl88e_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl88e_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 3d29c8dbb255..144ee780e1b6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -17,7 +17,7 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, @@ 
-40,7 +40,7 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } @@ -143,14 +143,6 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, } EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write); -u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i = ffs(bitmask); - - return i ? i - 1 : 32; -} -EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift); - static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw) { rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h index 75afa6253ad0..e64d377dfe9e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h @@ -196,7 +196,6 @@ bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw, void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw); void rtl92c_phy_set_io(struct ieee80211_hw *hw); void rtl92c_bb_block_on(struct ieee80211_hw *hw); -u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw, enum wireless_mode wirelessmode, u8 txpwridx); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c index da54e51badd3..fa70a7d5539f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c @@ -39,7 +39,7 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, rfpath, regaddr); } - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -110,7 +110,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = _rtl92c_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); @@ -122,7 +122,7 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = _rtl92c_phy_fw_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h index 7582a162bd11..c7a0d4c776f0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h @@ -94,7 +94,6 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset); u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset); -u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask); void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw, enum radio_path rfpath, u32 offset, u32 data); void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c index a8d9fe269f31..0b8cb7e61fd8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c @@ -32,7 +32,7 @@ u32 
rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw, original_value = _rtl92c_phy_fw_rf_serial_read(hw, rfpath, regaddr); } - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n", @@ -56,7 +56,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = _rtl92c_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); @@ -67,7 +67,7 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw, original_value = _rtl92c_phy_fw_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92c_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index d18c092b6142..d835a27429f0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -169,13 +169,6 @@ static const u8 channel_all[59] = { 157, 159, 161, 163, 165 }; -static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i = ffs(bitmask); - - return i ? i - 1 : 32; -} - u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -198,7 +191,7 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) } else { originalvalue = rtl_read_dword(rtlpriv, regaddr); } - bitshift = _rtl92d_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n", @@ -230,7 +223,7 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw, dbi_direct); else originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92d_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) @@ -317,7 +310,7 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, regaddr, rfpath, bitmask); spin_lock(&rtlpriv->locks.rf_lock); original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92d_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, @@ -343,7 +336,7 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, if (bitmask != RFREG_OFFSET_MASK) { original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92d_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index cc0bcaf13e96..73ef602bfb01 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -16,7 +16,6 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw, static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, enum 
radio_path rfpath, u32 offset, u32 data); -static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask); static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw); static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw); static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw, @@ -46,7 +45,7 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask); originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, @@ -68,7 +67,7 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } @@ -92,7 +91,7 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw, spin_lock(&rtlpriv->locks.rf_lock); original_value = _rtl92ee_phy_rf_serial_read(hw , rfpath, regaddr); - bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -119,7 +118,7 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw, if (bitmask != RFREG_OFFSET_MASK) { original_value = _rtl92ee_phy_rf_serial_read(hw, rfpath, addr); - bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = (original_value & (~bitmask)) | (data << bitshift); } @@ -201,13 +200,6 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw, pphyreg->rf3wire_offset, data_and_addr); } -static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i = ffs(bitmask); - - return i ? i - 1 : 32; -} - bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw) { return _rtl92ee_phy_config_mac_with_headerfile(hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c index aaa004d4d6d0..0e2b9698088b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c @@ -14,13 +14,6 @@ #include "hw.h" #include "table.h" -static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask) -{ - u32 i = ffs(bitmask); - - return i ? 
i - 1 : 32; -} - u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) { struct rtl_priv *rtlpriv = rtl_priv(hw); @@ -30,7 +23,7 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) regaddr, bitmask); originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92s_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); returnvalue = (originalvalue & bitmask) >> bitshift; rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n", @@ -52,7 +45,7 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, if (bitmask != MASKDWORD) { originalvalue = rtl_read_dword(rtlpriv, regaddr); - bitshift = _rtl92s_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((originalvalue & (~bitmask)) | (data << bitshift)); } @@ -160,7 +153,7 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92s_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); readback_value = (original_value & bitmask) >> bitshift; spin_unlock(&rtlpriv->locks.rf_lock); @@ -191,7 +184,7 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath, if (bitmask != RFREG_OFFSET_MASK) { original_value = _rtl92s_phy_rf_serial_read(hw, rfpath, regaddr); - bitshift = _rtl92s_phy_calculate_bit_shift(bitmask); + bitshift = calculate_bit_shift(bitmask); data = ((original_value & (~bitmask)) | (data << bitshift)); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 5323ead30db0..fa1839d8ee55 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -29,9 +29,10 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw, u32 data); static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask) { - u32 i = ffs(bitmask); + if (WARN_ON_ONCE(!bitmask)) + return 0; - return i ? 
i - 1 : 32; + return __ffs(bitmask); } static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw); /*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/ diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 31f9e9e5c680..0bac788ccd6e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -3105,4 +3105,11 @@ static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw, return ieee80211_find_sta(mac->vif, mac_addr); } +static inline u32 calculate_bit_shift(u32 bitmask) +{ + if (WARN_ON_ONCE(!bitmask)) + return 0; + + return __ffs(bitmask); +} #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index fabca307867a..0970d6bcba43 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -266,9 +266,9 @@ static void rtw_ops_configure_filter(struct ieee80211_hw *hw, if (changed_flags & FIF_ALLMULTI) { if (*new_flags & FIF_ALLMULTI) - rtwdev->hal.rcr |= BIT_AM | BIT_AB; + rtwdev->hal.rcr |= BIT_AM; else - rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB); + rtwdev->hal.rcr &= ~(BIT_AM); } if (changed_flags & FIF_FCSFAIL) { if (*new_flags & FIF_FCSFAIL) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c3a8d78a41a7..271604098574 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; - shinfo->nr_frags++, gop++, nr_slots--) { + nr_slots--) { + if (unlikely(!txp->size)) { + unsigned long flags; + + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, flags); + ++txp; + continue; + } + index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, txp == first ? extra_count : 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + ++shinfo->nr_frags; + ++gop; if (txp == first) txp = txfrags; @@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue, shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, txp++, gop++) { + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { + if (unlikely(!txp->size)) { + unsigned long flags; + + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, + XEN_NETIF_RSP_OKAY); + push_tx_responses(queue); + spin_unlock_irqrestore(&queue->response_lock, + flags); + continue; + } + index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); + ++shinfo->nr_frags; + ++gop; } - skb_shinfo(skb)->frag_list = nskb; - } else if (nskb) { + if (shinfo->nr_frags) { + skb_shinfo(skb)->frag_list = nskb; + nskb = NULL; + } + } + + if (nskb) { /* A frag_list skb was allocated but it is no longer needed - * because enough slots were converted to copy ops above. + * because enough slots were converted to copy ops above or some + * were empty. 
*/ kfree_skb(nskb); } diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 9dfd3d029305..69aef668f105 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -834,6 +834,8 @@ static void nvme_queue_auth_work(struct work_struct *work) } fail2: + if (chap->status == 0) + chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", __func__, chap->qid, chap->status); tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 8f69f83e277d..36531ac0d630 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1511,7 +1511,8 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, if (id->ncap == 0) { /* namespace not allocated or attached */ info->is_removed = true; - return -ENODEV; + ret = -ENODEV; + goto error; } info->anagrpid = id->anagrpid; @@ -1529,8 +1530,10 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, !memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) memcpy(ids->nguid, id->nguid, sizeof(ids->nguid)); } + +error: kfree(id); - return 0; + return ret; } static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, @@ -1845,16 +1848,18 @@ static int nvme_init_ms(struct nvme_ns *ns, struct nvme_id_ns *id) return ret; } -static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) +static int nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) { struct nvme_ctrl *ctrl = ns->ctrl; + int ret; - if (nvme_init_ms(ns, id)) - return; + ret = nvme_init_ms(ns, id); + if (ret) + return ret; ns->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); if (!ns->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) - return; + return 0; if (ctrl->ops->flags & NVME_F_FABRICS) { /* @@ -1863,7 +1868,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) * remap the separate metadata buffer from the block layer. */ if (WARN_ON_ONCE(!(id->flbas & NVME_NS_FLBAS_META_EXT))) - return; + return 0; ns->features |= NVME_NS_EXT_LBAS; @@ -1890,6 +1895,7 @@ static void nvme_configure_metadata(struct nvme_ns *ns, struct nvme_id_ns *id) else ns->features |= NVME_NS_METADATA_SUPPORTED; } + return 0; } static void nvme_set_queue_limits(struct nvme_ctrl *ctrl, @@ -1919,9 +1925,10 @@ static void nvme_update_disk_info(struct gendisk *disk, /* * The block layer can't support LBA sizes larger than the page size - * yet, so catch this early and don't allow block I/O. + * or smaller than a sector size yet, so catch this early and don't + * allow block I/O. */ - if (ns->lba_shift > PAGE_SHIFT) { + if (ns->lba_shift > PAGE_SHIFT || ns->lba_shift < SECTOR_SHIFT) { capacity = 0; bs = (1 << 9); } @@ -2070,7 +2077,11 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, ns->lba_shift = id->lbaf[lbaf].ds; nvme_set_queue_limits(ns->ctrl, ns->queue); - nvme_configure_metadata(ns, id); + ret = nvme_configure_metadata(ns, id); + if (ret < 0) { + blk_mq_unfreeze_queue(ns->disk->queue); + goto out; + } nvme_set_chunk_sectors(ns, id); nvme_update_disk_info(ns->disk, ns, id); @@ -4828,6 +4839,8 @@ static void nvme_fw_act_work(struct work_struct *work) struct nvme_ctrl, fw_act_work); unsigned long fw_act_timeout; + nvme_auth_stop(ctrl); + if (ctrl->mtfa) fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100); @@ -4883,7 +4896,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) * firmware activation. 
*/ if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { - nvme_auth_stop(ctrl); requeue = false; queue_work(nvme_wq, &ctrl->fw_act_work); } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 118bf08a708b..a892d679e338 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -382,6 +382,11 @@ struct nvme_ctrl { enum nvme_dctype dctype; }; +static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) +{ + return READ_ONCE(ctrl->state); +} + enum nvme_iopolicy { NVME_IOPOLICY_NUMA, NVME_IOPOLICY_RR, diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 6a2816f3b4e8..73ae16059a1c 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -16,6 +16,7 @@ #endif #include #include +#include #include "nvmet.h" @@ -508,6 +509,7 @@ static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, down_write(&nvmet_ana_sem); oldgrpid = ns->anagrpid; + newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS); nvmet_ana_group_enabled[newgrpid]++; ns->anagrpid = newgrpid; nvmet_ana_group_enabled[oldgrpid]--; @@ -1580,6 +1582,7 @@ static struct config_group *nvmet_ana_groups_make_group( grp->grpid = grpid; down_write(&nvmet_ana_sem); + grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS); nvmet_ana_group_enabled[grpid]++; up_write(&nvmet_ana_sem); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 355d80323b83..ce42afe8f64e 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -18,6 +18,7 @@ #include "nvmet.h" #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE) +#define NVMET_TCP_MAXH2CDATA 0x400000 /* 16M arbitrary limit */ /* Define the socket priority to use for connections were it is desirable * that the NIC consider performing optimized packet processing or filtering. 
@@ -861,7 +862,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) icresp->hdr.pdo = 0; icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen); icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); - icresp->maxdata = cpu_to_le32(0x400000); /* 16M arbitrary limit */ + icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA); icresp->cpda = 0; if (queue->hdr_digest) icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE; @@ -914,6 +915,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) { struct nvme_tcp_data_pdu *data = &queue->pdu.data; struct nvmet_tcp_cmd *cmd; + unsigned int exp_data_len; if (likely(queue->nr_cmds)) { if (unlikely(data->ttag >= queue->nr_cmds)) { @@ -932,12 +934,24 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue) data->ttag, le32_to_cpu(data->data_offset), cmd->rbytes_done); /* FIXME: use path and transport errors */ - nvmet_req_complete(&cmd->req, - NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_tcp_fatal_error(queue); return -EPROTO; } + exp_data_len = le32_to_cpu(data->hdr.plen) - + nvmet_tcp_hdgst_len(queue) - + nvmet_tcp_ddgst_len(queue) - + sizeof(*data); + cmd->pdu_len = le32_to_cpu(data->data_length); + if (unlikely(cmd->pdu_len != exp_data_len || + cmd->pdu_len == 0 || + cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) { + pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len); + /* FIXME: use proper transport errors */ + nvmet_tcp_fatal_error(queue); + return -EPROTO; + } cmd->pdu_recv = 0; nvmet_tcp_build_pdu_iovec(cmd); queue->cmd = cmd; diff --git a/drivers/nvme/target/trace.h b/drivers/nvme/target/trace.h index 6109b3806b12..974d99d47f51 100644 --- a/drivers/nvme/target/trace.h +++ b/drivers/nvme/target/trace.h @@ -53,8 +53,7 @@ static inline void __assign_req_name(char *name, struct nvmet_req *req) return; } - strncpy(name, req->ns->device_path, - min_t(size_t, DISK_NAME_LEN, strlen(req->ns->device_path))); + strscpy_pad(name, req->ns->device_path, DISK_NAME_LEN); } #endif @@ -85,7 +84,7 @@ TRACE_EVENT(nvmet_req_init, __entry->flags = cmd->common.flags; __entry->nsid = le32_to_cpu(cmd->common.nsid); __entry->metadata = le64_to_cpu(cmd->common.metadata); - memcpy(__entry->cdw10, &cmd->common.cdw10, + memcpy(__entry->cdw10, &cmd->common.cdws, sizeof(__entry->cdw10)); ), TP_printk("nvmet%s: %sqid=%d, cmdid=%u, nsid=%u, flags=%#x, " diff --git a/drivers/of/base.c b/drivers/of/base.c index d5a5c35eba72..f849bbb9ef8c 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1646,6 +1646,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np, out_args->np = new; of_node_put(cur); cur = new; + new = NULL; } put: of_node_put(cur); diff --git a/drivers/of/unittest-data/tests-phandle.dtsi b/drivers/of/unittest-data/tests-phandle.dtsi index 6b33be4c4416..aa0d7027ffa6 100644 --- a/drivers/of/unittest-data/tests-phandle.dtsi +++ b/drivers/of/unittest-data/tests-phandle.dtsi @@ -38,6 +38,13 @@ provider4: provider4 { phandle-map-pass-thru = <0x0 0xf0>; }; + provider5: provider5 { + #phandle-cells = <2>; + phandle-map = <2 7 &provider4 2 3>; + phandle-map-mask = <0xff 0xf>; + phandle-map-pass-thru = <0x0 0xf0>; + }; + consumer-a { phandle-list = <&provider1 1>, <&provider2 2 0>, @@ -64,7 +71,8 @@ consumer-b { <&provider4 4 0x100>, <&provider4 0 0x61>, <&provider0>, - <&provider4 19 0x20>; + <&provider4 19 0x20>, + <&provider5 2 7>; phandle-list-bad-phandle = <12345678 0 0>; phandle-list-bad-args = <&provider2 1 0>, <&provider4 0>; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index edd2342598e4..e541a8960f1d 100644 
--- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -448,6 +448,9 @@ static void __init of_unittest_parse_phandle_with_args(void) unittest(passed, "index %i - data error on node %pOF rc=%i\n", i, args.np, rc); + + if (rc == 0) + of_node_put(args.np); } /* Check for missing list property */ @@ -537,8 +540,9 @@ static void __init of_unittest_parse_phandle_with_args(void) static void __init of_unittest_parse_phandle_with_args_map(void) { - struct device_node *np, *p0, *p1, *p2, *p3; + struct device_node *np, *p[6] = {}; struct of_phandle_args args; + unsigned int prefs[6]; int i, rc; np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-b"); @@ -547,34 +551,24 @@ static void __init of_unittest_parse_phandle_with_args_map(void) return; } - p0 = of_find_node_by_path("/testcase-data/phandle-tests/provider0"); - if (!p0) { - pr_err("missing testcase data\n"); - return; - } - - p1 = of_find_node_by_path("/testcase-data/phandle-tests/provider1"); - if (!p1) { - pr_err("missing testcase data\n"); - return; - } - - p2 = of_find_node_by_path("/testcase-data/phandle-tests/provider2"); - if (!p2) { - pr_err("missing testcase data\n"); - return; - } - - p3 = of_find_node_by_path("/testcase-data/phandle-tests/provider3"); - if (!p3) { - pr_err("missing testcase data\n"); - return; + p[0] = of_find_node_by_path("/testcase-data/phandle-tests/provider0"); + p[1] = of_find_node_by_path("/testcase-data/phandle-tests/provider1"); + p[2] = of_find_node_by_path("/testcase-data/phandle-tests/provider2"); + p[3] = of_find_node_by_path("/testcase-data/phandle-tests/provider3"); + p[4] = of_find_node_by_path("/testcase-data/phandle-tests/provider4"); + p[5] = of_find_node_by_path("/testcase-data/phandle-tests/provider5"); + for (i = 0; i < ARRAY_SIZE(p); ++i) { + if (!p[i]) { + pr_err("missing testcase data\n"); + return; + } + prefs[i] = kref_read(&p[i]->kobj.kref); } rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); - unittest(rc == 7, "of_count_phandle_with_args() returned %i, expected 7\n", rc); + unittest(rc == 8, "of_count_phandle_with_args() returned %i, expected 8\n", rc); - for (i = 0; i < 8; i++) { + for (i = 0; i < 9; i++) { bool passed = true; memset(&args, 0, sizeof(args)); @@ -585,13 +579,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void) switch (i) { case 0: passed &= !rc; - passed &= (args.np == p1); + passed &= (args.np == p[1]); passed &= (args.args_count == 1); passed &= (args.args[0] == 1); break; case 1: passed &= !rc; - passed &= (args.np == p3); + passed &= (args.np == p[3]); passed &= (args.args_count == 3); passed &= (args.args[0] == 2); passed &= (args.args[1] == 5); @@ -602,28 +596,36 @@ static void __init of_unittest_parse_phandle_with_args_map(void) break; case 3: passed &= !rc; - passed &= (args.np == p0); + passed &= (args.np == p[0]); passed &= (args.args_count == 0); break; case 4: passed &= !rc; - passed &= (args.np == p1); + passed &= (args.np == p[1]); passed &= (args.args_count == 1); passed &= (args.args[0] == 3); break; case 5: passed &= !rc; - passed &= (args.np == p0); + passed &= (args.np == p[0]); passed &= (args.args_count == 0); break; case 6: passed &= !rc; - passed &= (args.np == p2); + passed &= (args.np == p[2]); passed &= (args.args_count == 2); passed &= (args.args[0] == 15); passed &= (args.args[1] == 0x20); break; case 7: + passed &= !rc; + passed &= (args.np == p[3]); + passed &= (args.args_count == 3); + passed &= (args.args[0] == 2); + passed &= (args.args[1] == 5); + passed &= (args.args[2] == 3); + 
break; + case 8: passed &= (rc == -ENOENT); break; default: @@ -632,6 +634,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void) unittest(passed, "index %i - data error on node %s rc=%i\n", i, args.np->full_name, rc); + + if (rc == 0) + of_node_put(args.np); } /* Check for missing list property */ @@ -678,6 +683,13 @@ static void __init of_unittest_parse_phandle_with_args_map(void) "OF: /testcase-data/phandle-tests/consumer-b: #phandle-cells = 2 found 1"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + for (i = 0; i < ARRAY_SIZE(p); ++i) { + unittest(prefs[i] == kref_read(&p[i]->kobj.kref), + "provider%d: expected:%d got:%d\n", + i, prefs[i], kref_read(&p[i]->kobj.kref)); + of_node_put(p[i]); + } } static void __init of_unittest_property_string(void) diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 9f5d784cd95d..3644997a8342 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -65,6 +65,10 @@ enum parport_pc_pci_cards { sunix_5069a, sunix_5079a, sunix_5099a, + brainboxes_uc257, + brainboxes_is300, + brainboxes_uc414, + brainboxes_px263, }; /* each element directly indexed from enum list, above */ @@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = { /* sunix_5069a */ { 1, { { 1, 2 }, } }, /* sunix_5079a */ { 1, { { 1, 2 }, } }, /* sunix_5099a */ { 1, { { 1, 2 }, } }, + /* brainboxes_uc257 */ { 1, { { 3, -1 }, } }, + /* brainboxes_is300 */ { 1, { { 3, -1 }, } }, + /* brainboxes_uc414 */ { 1, { { 3, -1 }, } }, + /* brainboxes_px263 */ { 1, { { 3, -1 }, } }, }; static struct pci_device_id parport_serial_pci_tbl[] = { @@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = { { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX, 0x0104, 0, 0, sunix_5099a }, + /* Brainboxes UC-203 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0bc1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0bc2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes UC-257 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0861, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0862, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0863, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes UC-414 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0e61, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 }, + + /* Brainboxes UC-475 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0981, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0982, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes IS-300/IS-500 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0da0, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 }, + + /* Brainboxes PX-263/PX-295 */ + { PCI_VENDOR_ID_INTASHIELD, 0x402c, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 }, + { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl); @@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = { .base_baud = 921600, .uart_offset = 0x8, }, + [brainboxes_uc257] = { + .flags = FL_BASE2, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_is300] = { + .flags = FL_BASE2, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_uc414] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_px263] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 921600, + 
.uart_offset = 8, + }, }; struct parport_serial_private { diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index d2634dafb68e..7ecad72cff7e 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1219,7 +1219,16 @@ static int ks_pcie_probe(struct platform_device *pdev) goto err_link; } + /* Obtain references to the PHYs */ + for (i = 0; i < num_lanes; i++) + phy_pm_runtime_get_sync(ks_pcie->phy[i]); + ret = ks_pcie_enable_phy(ks_pcie); + + /* Release references to the PHYs */ + for (i = 0; i < num_lanes; i++) + phy_pm_runtime_put_sync(ks_pcie->phy[i]); + if (ret) { dev_err(dev, "failed to enable phy\n"); goto err_link; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 83ddb190292e..59c164b5c64a 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -600,6 +600,7 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no, } aligned_offset = msg_addr & (epc->mem->window.page_size - 1); + msg_addr &= ~aligned_offset; ret = dw_pcie_ep_map_addr(epc, func_no, 0, ep->msi_mem_phys, msg_addr, epc->mem->window.page_size); if (ret) diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index fe0f732f6e43..a860f25473df 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -80,13 +80,49 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +/* + * Some Loongson PCIe ports have hardware limitations on their Maximum Read + * Request Size. They can't handle anything larger than this. Sane + * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for + * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly, + * so we have to enforce maximum safe MRRS, which is 256 bytes. + */ +#ifdef CONFIG_MIPS +static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev) +{ + struct pci_bus *bus = pdev->bus; + struct pci_dev *bridge; + static const struct pci_device_id bridge_devids[] = { + { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) }, + { 0, }, + }; + + /* look for the matching bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + + if (pci_match_id(bridge_devids, bridge)) { + if (pcie_get_readrq(pdev) > 256) { + pci_info(pdev, "limiting MRRS to 256\n"); + pcie_set_readrq(pdev, 256); + } + break; + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk); +#endif + static void loongson_mrrs_quirk(struct pci_dev *pdev) { - /* - * Some Loongson PCIe ports have h/w limitations of maximum read - * request size. They can't handle anything larger than this. So - * force this limit on any devices attached under these ports. 
- */ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); bridge->no_inc_mrrs = 1; diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c index b8612ce5f4d0..40c38ca5a42e 100644 --- a/drivers/pci/controller/pcie-mediatek-gen3.c +++ b/drivers/pci/controller/pcie-mediatek-gen3.c @@ -245,35 +245,60 @@ static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie, resource_size_t cpu_addr, resource_size_t pci_addr, resource_size_t size, - unsigned long type, int num) + unsigned long type, int *num) { + resource_size_t remaining = size; + resource_size_t table_size; + resource_size_t addr_align; + const char *range_type; void __iomem *table; u32 val; - if (num >= PCIE_MAX_TRANS_TABLES) { - dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", - (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); - return -ENODEV; + while (remaining && (*num < PCIE_MAX_TRANS_TABLES)) { + /* Table size needs to be a power of 2 */ + table_size = BIT(fls(remaining) - 1); + + if (cpu_addr > 0) { + addr_align = BIT(ffs(cpu_addr) - 1); + table_size = min(table_size, addr_align); + } + + /* Minimum size of translate table is 4KiB */ + if (table_size < 0x1000) { + dev_err(pcie->dev, "illegal table size %#llx\n", + (unsigned long long)table_size); + return -EINVAL; + } + + table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + *num * PCIE_ATR_TLB_SET_OFFSET; + writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(table_size) - 1), table); + writel_relaxed(upper_32_bits(cpu_addr), table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); + writel_relaxed(lower_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); + writel_relaxed(upper_32_bits(pci_addr), table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); + + if (type == IORESOURCE_IO) { + val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; + range_type = "IO"; + } else { + val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; + range_type = "MEM"; + } + + writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); + + dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", + range_type, *num, (unsigned long long)cpu_addr, + (unsigned long long)pci_addr, (unsigned long long)table_size); + + cpu_addr += table_size; + pci_addr += table_size; + remaining -= table_size; + (*num)++; } - table = pcie->base + PCIE_TRANS_TABLE_BASE_REG + - num * PCIE_ATR_TLB_SET_OFFSET; - - writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1), - table); - writel_relaxed(upper_32_bits(cpu_addr), - table + PCIE_ATR_SRC_ADDR_MSB_OFFSET); - writel_relaxed(lower_32_bits(pci_addr), - table + PCIE_ATR_TRSL_ADDR_LSB_OFFSET); - writel_relaxed(upper_32_bits(pci_addr), - table + PCIE_ATR_TRSL_ADDR_MSB_OFFSET); - - if (type == IORESOURCE_IO) - val = PCIE_ATR_TYPE_IO | PCIE_ATR_TLP_TYPE_IO; - else - val = PCIE_ATR_TYPE_MEM | PCIE_ATR_TLP_TYPE_MEM; - - writel_relaxed(val, table + PCIE_ATR_TRSL_PARAM_OFFSET); + if (remaining) + dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n", + (unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES); return 0; } @@ -380,30 +405,20 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) resource_size_t cpu_addr; resource_size_t pci_addr; resource_size_t size; - const char *range_type; - if (type == IORESOURCE_IO) { + if (type == IORESOURCE_IO) cpu_addr = pci_pio_to_address(res->start); - range_type = "IO"; - } else if (type == IORESOURCE_MEM) { + else if (type == IORESOURCE_MEM) cpu_addr = res->start; - range_type = "MEM"; - } 
else { + else continue; - } pci_addr = res->start - entry->offset; size = resource_size(res); err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size, - type, table_index); + type, &table_index); if (err) return err; - - dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n", - range_type, table_index, (unsigned long long)cpu_addr, - (unsigned long long)pci_addr, (unsigned long long)size); - - table_index++; } return 0; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index ae5ad05ddc1d..11bdef206d12 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -617,12 +617,18 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc) if (status & MSI_STATUS){ unsigned long imsi_status; + /* + * The interrupt status can be cleared even if the + * MSI status remains pending. As such, given the + * edge-triggered interrupt type, its status should + * be cleared before being dispatched to the + * handler of the underlying device. + */ + writel(MSI_STATUS, port->base + PCIE_INT_STATUS); while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) generic_handle_domain_irq(port->inner_domain, bit); } - /* Clear MSI interrupt status */ - writel(MSI_STATUS, port->base + PCIE_INT_STATUS); } } diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index ea0195337bab..6efa3d8db9a5 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -504,15 +504,12 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - if (pci_is_root_bus(bus)) - __pci_bus_size_bridges(dev->subordinate, &add_list); + __pci_bus_size_bridges(dev->subordinate, + &add_list); } } } - if (pci_is_root_bus(bus)) - __pci_bus_assign_resources(bus, &add_list, NULL); - else - pci_assign_unassigned_bridge_resources(bus->self); + __pci_bus_assign_resources(bus, &add_list, NULL); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 8df156c28aad..5368a37154cf 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1302,6 +1302,9 @@ static int pci_set_full_power_state(struct pci_dev *dev) pci_restore_bars(dev); } + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self); + return 0; } @@ -1396,6 +1399,9 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) pci_power_name(dev->current_state), pci_power_name(state)); + if (dev->bus->self) + pcie_aspm_pm_state_change(dev->bus->self); + return 0; } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 02effb2c918e..a39889f2b222 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -567,10 +567,12 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); +void pcie_aspm_pm_state_change(struct pci_dev *pdev); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } +static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } #endif diff --git a/drivers/pci/pcie/aspm.c 
b/drivers/pci/pcie/aspm.c index 5d1756f53ba8..25736d408e88 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1055,6 +1055,25 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) up_read(&pci_bus_sem); } +/* @pdev: the root port or switch downstream port */ +void pcie_aspm_pm_state_change(struct pci_dev *pdev) +{ + struct pcie_link_state *link = pdev->link_state; + + if (aspm_disabled || !link) + return; + /* + * Devices changed PM state, we should recheck if latency + * meets all functions' requirement + */ + down_read(&pci_bus_sem); + mutex_lock(&aspm_lock); + pcie_update_aspm_capable(link->root); + pcie_config_aspm_path(link); + mutex_unlock(&aspm_lock); + up_read(&pci_bus_sem); +} + void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { struct pcie_link_state *link = pdev->link_state; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c132839d99dc..8765544bac35 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4602,17 +4602,21 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) * But the implementation could block peer-to-peer transactions between them * and provide ACS-like functionality. */ -static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) +static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) { if (!pci_is_pcie(dev) || ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) return -ENOTTY; + /* + * Future Zhaoxin Root Ports and Switch Downstream Ports will + * implement ACS capability in accordance with the PCIe Spec. + */ switch (dev->device) { case 0x0710 ... 0x071e: case 0x0721: - case 0x0723 ... 0x0732: + case 0x0723 ... 0x0752: return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig index 530426a74f75..b3cea8d56c4f 100644 --- a/drivers/pinctrl/cirrus/Kconfig +++ b/drivers/pinctrl/cirrus/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config PINCTRL_LOCHNAGAR tristate "Cirrus Logic Lochnagar pinctrl driver" - depends on MFD_LOCHNAGAR + # Avoid clash caused by MIPS defining RST, which is used in the driver + depends on MFD_LOCHNAGAR && !MIPS select GPIOLIB select PINMUX select PINCONF diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index f71c6457e350..2425d4813c3c 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -1033,6 +1033,13 @@ static const struct of_device_id atmel_pctrl_of_match[] = { } }; +/* + * This lock class allows to tell lockdep that parent IRQ and children IRQ do + * not share the same class so it does not raise false positive + */ +static struct lock_class_key atmel_lock_key; +static struct lock_class_key atmel_request_key; + static int atmel_pinctrl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1185,6 +1192,7 @@ static int atmel_pinctrl_probe(struct platform_device *pdev) irq_set_chip_and_handler(irq, &atmel_gpio_irq_chip, handle_simple_irq); irq_set_chip_data(irq, atmel_pioctrl); + irq_set_lockdep_class(irq, &atmel_lock_key, &atmel_request_key); dev_dbg(dev, "atmel gpio irq domain: hwirq: %d, linux irq: %d\n", i, irq); diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c index 68509a2301b8..5abab6bc763a 100644 --- a/drivers/pinctrl/pinctrl-cy8c95x0.c +++ b/drivers/pinctrl/pinctrl-cy8c95x0.c @@ -749,6 +749,8 @@ static int 
cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip, ret = regmap_read(chip->regmap, reg, ®_val); if (reg_val & bit) arg = 1; + if (param == PIN_CONFIG_OUTPUT_ENABLE) + arg = !arg; *config = pinconf_to_config_packed(param, (u16)arg); out: @@ -857,7 +859,7 @@ static int cy8c95x0_setup_gpiochip(struct cy8c95x0_pinctrl *chip) gc->get_direction = cy8c95x0_gpio_get_direction; gc->get_multiple = cy8c95x0_gpio_get_multiple; gc->set_multiple = cy8c95x0_gpio_set_multiple; - gc->set_config = gpiochip_generic_config, + gc->set_config = gpiochip_generic_config; gc->can_sleep = true; gc->add_pin_ranges = cy8c95x0_add_pin_ranges; diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c index 5b544fb7f3d8..3b18a03075f4 100644 --- a/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c +++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c @@ -489,7 +489,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev, nmaps = 0; ngroups = 0; - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { int npinmux = of_property_count_u32_elems(child, "pinmux"); int npins = of_property_count_u32_elems(child, "pins"); @@ -524,7 +524,7 @@ static int starfive_dt_node_to_map(struct pinctrl_dev *pctldev, nmaps = 0; ngroups = 0; mutex_lock(&sfp->mutex); - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { int npins; int i; diff --git a/drivers/platform/x86/intel/telemetry/core.c b/drivers/platform/x86/intel/telemetry/core.c index fdf55b5d6948..e4be40f73eeb 100644 --- a/drivers/platform/x86/intel/telemetry/core.c +++ b/drivers/platform/x86/intel/telemetry/core.c @@ -102,7 +102,7 @@ static const struct telemetry_core_ops telm_defpltops = { /** * telemetry_update_events() - Update telemetry Configuration * @pss_evtconfig: PSS related config. No change if num_evts = 0. - * @pss_evtconfig: IOSS related config. No change if num_evts = 0. + * @ioss_evtconfig: IOSS related config. No change if num_evts = 0. * * This API updates the IOSS & PSS Telemetry configuration. Old config * is overwritten. Call telemetry_reset_events when logging is over @@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(telemetry_reset_events); /** * telemetry_get_eventconfig() - Returns the pss and ioss events enabled * @pss_evtconfig: Pointer to PSS related configuration. - * @pss_evtconfig: Pointer to IOSS related configuration. + * @ioss_evtconfig: Pointer to IOSS related configuration. * @pss_len: Number of u32 elements allocated for pss_evtconfig array * @ioss_len: Number of u32 elements allocated for ioss_evtconfig array * diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c index c5e4e35c8d20..8e2b07ed2ce9 100644 --- a/drivers/platform/x86/intel/vbtn.c +++ b/drivers/platform/x86/intel/vbtn.c @@ -73,10 +73,10 @@ struct intel_vbtn_priv { bool wakeup_mode; }; -static void detect_tablet_mode(struct platform_device *device) +static void detect_tablet_mode(struct device *dev) { - struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev); - acpi_handle handle = ACPI_HANDLE(&device->dev); + struct intel_vbtn_priv *priv = dev_get_drvdata(dev); + acpi_handle handle = ACPI_HANDLE(dev); unsigned long long vgbs; acpi_status status; int m; @@ -89,6 +89,8 @@ static void detect_tablet_mode(struct platform_device *device) input_report_switch(priv->switches_dev, SW_TABLET_MODE, m); m = (vgbs & VGBS_DOCK_MODE_FLAG) ? 
1 : 0; input_report_switch(priv->switches_dev, SW_DOCK, m); + + input_sync(priv->switches_dev); } /* @@ -134,7 +136,7 @@ static int intel_vbtn_input_setup(struct platform_device *device) priv->switches_dev->id.bustype = BUS_HOST; if (priv->has_switches) { - detect_tablet_mode(device); + detect_tablet_mode(&device->dev); ret = input_register_device(priv->switches_dev); if (ret) @@ -198,6 +200,9 @@ static void notify_handler(acpi_handle handle, u32 event, void *context) autorelease = val && (!ke_rel || ke_rel->type == KE_IGNORE); sparse_keymap_report_event(input_dev, event, val, autorelease); + + /* Some devices need this to report further events */ + acpi_evaluate_object(handle, "VBDL", NULL, NULL); } /* @@ -358,7 +363,13 @@ static void intel_vbtn_pm_complete(struct device *dev) static int intel_vbtn_pm_resume(struct device *dev) { + struct intel_vbtn_priv *priv = dev_get_drvdata(dev); + intel_vbtn_pm_complete(dev); + + if (priv->has_switches) + detect_tablet_mode(dev); + return 0; } diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index 483bb6565166..40477d1d41b5 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -124,36 +124,60 @@ static void intel_vsec_remove_aux(void *data) auxiliary_device_uninit(data); } +static DEFINE_MUTEX(vsec_ida_lock); + static void intel_vsec_dev_release(struct device *dev) { struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev); + xa_erase(&auxdev_array, intel_vsec_dev->id); + + mutex_lock(&vsec_ida_lock); ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id); + mutex_unlock(&vsec_ida_lock); + kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); } -static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev, - const char *name) +int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, + struct intel_vsec_device *intel_vsec_dev, + const char *name) { struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev; int ret, id; - ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL); + ret = xa_alloc(&auxdev_array, &intel_vsec_dev->id, intel_vsec_dev, + PMT_XA_LIMIT, GFP_KERNEL); if (ret < 0) { kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); return ret; } - auxdev->id = ret; + mutex_lock(&vsec_ida_lock); + id = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL); + mutex_unlock(&vsec_ida_lock); + if (id < 0) { + xa_erase(&auxdev_array, intel_vsec_dev->id); + kfree(intel_vsec_dev->resource); + kfree(intel_vsec_dev); + return id; + } + + if (!parent) + parent = &pdev->dev; + + auxdev->id = id; auxdev->name = name; - auxdev->dev.parent = &pdev->dev; + auxdev->dev.parent = parent; auxdev->dev.release = intel_vsec_dev_release; ret = auxiliary_device_init(auxdev); if (ret < 0) { + mutex_lock(&vsec_ida_lock); ida_free(intel_vsec_dev->ida, auxdev->id); + mutex_unlock(&vsec_ida_lock); kfree(intel_vsec_dev->resource); kfree(intel_vsec_dev); return ret; @@ -165,19 +189,14 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in return ret; } - ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, + ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux, auxdev); if (ret < 0) return ret; - /* Add auxdev to list */ - ret = xa_alloc(&auxdev_array, &id, intel_vsec_dev, PMT_XA_LIMIT, - GFP_KERNEL); - if (ret) - return ret; - return 0; } +EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC); static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header, struct intel_vsec_platform_info *info) @@ 
-235,7 +254,8 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he else intel_vsec_dev->ida = &intel_vsec_ida; - return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id)); + return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev, + intel_vsec_name(header->id)); } static bool intel_vsec_walk_header(struct pci_dev *pdev, diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h index 3deeb05cf394..330672588868 100644 --- a/drivers/platform/x86/intel/vsec.h +++ b/drivers/platform/x86/intel/vsec.h @@ -38,8 +38,15 @@ struct intel_vsec_device { struct ida *ida; struct intel_vsec_platform_info *info; int num_resources; + int id; /* xa */ + void *priv_data; + size_t priv_data_size; }; +int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, + struct intel_vsec_device *intel_vsec_dev, + const char *name); + static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev) { return container_of(dev, struct intel_vsec_device, auxdev.dev); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 05a55bc31c79..6edd2e294750 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8149,8 +8149,19 @@ static struct ibm_struct volume_driver_data = { * TPACPI_FAN_WR_TPEC is also available and should be used to * command the fan. The X31/X40/X41 seems to have 8 fan levels, * but the ACPI tables just mention level 7. + * + * TPACPI_FAN_RD_TPEC_NS: + * This mode is used for a few ThinkPads (L13 Yoga Gen2, X13 Yoga Gen2 etc.) + * that are using non-standard EC locations for reporting fan speeds. + * Currently these platforms only provide fan rpm reporting. + * */ +#define FAN_RPM_CAL_CONST 491520 /* FAN RPM calculation offset for some non-standard ECFW */ + +#define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */ +#define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */ + enum { /* Fan control constants */ fan_status_offset = 0x2f, /* EC register 0x2f */ fan_rpm_offset = 0x84, /* EC register 0x84: LSB, 0x85 MSB (RPM) @@ -8158,6 +8169,11 @@ enum { /* Fan control constants */ fan_select_offset = 0x31, /* EC register 0x31 (Firmware 7M) bit 0 selects which fan is active */ + fan_status_offset_ns = 0x93, /* Special status/control offset for non-standard EC Fan1 */ + fan2_status_offset_ns = 0x96, /* Special status/control offset for non-standard EC Fan2 */ + fan_rpm_status_ns = 0x95, /* Special offset for Fan1 RPM status for non-standard EC */ + fan2_rpm_status_ns = 0x98, /* Special offset for Fan2 RPM status for non-standard EC */ + TP_EC_FAN_FULLSPEED = 0x40, /* EC fan mode: full speed */ TP_EC_FAN_AUTO = 0x80, /* EC fan mode: auto fan control */ @@ -8168,6 +8184,7 @@ enum fan_status_access_mode { TPACPI_FAN_NONE = 0, /* No fan status or control */ TPACPI_FAN_RD_ACPI_GFAN, /* Use ACPI GFAN */ TPACPI_FAN_RD_TPEC, /* Use ACPI EC regs 0x2f, 0x84-0x85 */ + TPACPI_FAN_RD_TPEC_NS, /* Use non-standard ACPI EC regs (eg: L13 Yoga gen2 etc.) 
*/ }; enum fan_control_access_mode { @@ -8195,6 +8212,8 @@ static u8 fan_control_desired_level; static u8 fan_control_resume_level; static int fan_watchdog_maxinterval; +static bool fan_with_ns_addr; + static struct mutex fan_mutex; static void fan_watchdog_fire(struct work_struct *ignored); @@ -8325,6 +8344,15 @@ static int fan_get_status(u8 *status) } break; + case TPACPI_FAN_RD_TPEC_NS: + /* Default mode is AUTO which means controlled by EC */ + if (!acpi_ec_read(fan_status_offset_ns, &s)) + return -EIO; + + if (status) + *status = s; + + break; default: return -ENXIO; @@ -8341,7 +8369,8 @@ static int fan_get_status_safe(u8 *status) if (mutex_lock_killable(&fan_mutex)) return -ERESTARTSYS; rc = fan_get_status(&s); - if (!rc) + /* NS EC doesn't have register with level settings */ + if (!rc && !fan_with_ns_addr) fan_update_desired_level(s); mutex_unlock(&fan_mutex); @@ -8368,7 +8397,13 @@ static int fan_get_speed(unsigned int *speed) if (likely(speed)) *speed = (hi << 8) | lo; + break; + case TPACPI_FAN_RD_TPEC_NS: + if (!acpi_ec_read(fan_rpm_status_ns, &lo)) + return -EIO; + if (speed) + *speed = lo ? FAN_RPM_CAL_CONST / lo : 0; break; default: @@ -8380,7 +8415,7 @@ static int fan_get_speed(unsigned int *speed) static int fan2_get_speed(unsigned int *speed) { - u8 hi, lo; + u8 hi, lo, status; bool rc; switch (fan_status_access_mode) { @@ -8396,7 +8431,21 @@ static int fan2_get_speed(unsigned int *speed) if (likely(speed)) *speed = (hi << 8) | lo; + break; + case TPACPI_FAN_RD_TPEC_NS: + rc = !acpi_ec_read(fan2_status_offset_ns, &status); + if (rc) + return -EIO; + if (!(status & FAN_NS_CTRL_STATUS)) { + pr_info("secondary fan control not supported\n"); + return -EIO; + } + rc = !acpi_ec_read(fan2_rpm_status_ns, &lo); + if (rc) + return -EIO; + if (speed) + *speed = lo ? 
FAN_RPM_CAL_CONST / lo : 0; break; default: @@ -8899,6 +8948,7 @@ static const struct attribute_group fan_driver_attr_group = { #define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ #define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ +#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8917,6 +8967,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */ + TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */ + TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ }; @@ -8951,18 +9003,27 @@ static int __init fan_init(struct ibm_init_struct *iibm) return -ENODEV; } + if (quirks & TPACPI_FAN_NS) { + pr_info("ECFW with non-standard fan reg control found\n"); + fan_with_ns_addr = 1; + /* Fan ctrl support from host is undefined for now */ + tp_features.fan_ctrl_status_undef = 1; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; } else { /* all other ThinkPads: note that even old-style * ThinkPad ECs supports the fan control register */ - if (likely(acpi_ec_read(fan_status_offset, - &fan_control_initial_status))) { + if (fan_with_ns_addr || + likely(acpi_ec_read(fan_status_offset, &fan_control_initial_status))) { int res; unsigned int speed; - fan_status_access_mode = TPACPI_FAN_RD_TPEC; + fan_status_access_mode = fan_with_ns_addr ? + TPACPI_FAN_RD_TPEC_NS : TPACPI_FAN_RD_TPEC; + if (quirks & TPACPI_FAN_Q1) fan_quirk1_setup(); /* Try and probe the 2nd fan */ @@ -8971,7 +9032,8 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (res >= 0 && speed != FAN_NOT_PRESENT) { /* It responded - so let's assume it's there */ tp_features.second_fan = 1; - tp_features.second_fan_ctl = 1; + /* fan control not currently available for ns ECFW */ + tp_features.second_fan_ctl = !fan_with_ns_addr; pr_info("secondary fan control detected & enabled\n"); } else { /* Fan not auto-detected */ @@ -9146,6 +9208,7 @@ static int fan_read(struct seq_file *m) str_enabled_disabled(status), status); break; + case TPACPI_FAN_RD_TPEC_NS: case TPACPI_FAN_RD_TPEC: /* all except 570, 600e/x, 770e, 770x */ rc = fan_get_status_safe(&status); @@ -9160,13 +9223,22 @@ static int fan_read(struct seq_file *m) seq_printf(m, "speed:\t\t%d\n", speed); - if (status & TP_EC_FAN_FULLSPEED) - /* Disengaged mode takes precedence */ - seq_printf(m, "level:\t\tdisengaged\n"); - else if (status & TP_EC_FAN_AUTO) - seq_printf(m, "level:\t\tauto\n"); - else - seq_printf(m, "level:\t\t%d\n", status); + if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) { + /* + * No full speed bit in NS EC + * EC Auto mode is set by default. + * No other levels settings available + */ + seq_printf(m, "level:\t\t%s\n", status & FAN_NS_CTRL ? 
"unknown" : "auto"); + } else { + if (status & TP_EC_FAN_FULLSPEED) + /* Disengaged mode takes precedence */ + seq_printf(m, "level:\t\tdisengaged\n"); + else if (status & TP_EC_FAN_AUTO) + seq_printf(m, "level:\t\tauto\n"); + else + seq_printf(m, "level:\t\t%d\n", status); + } break; case TPACPI_FAN_NONE: diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index 01ad84fd147c..686eb8d86e22 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -1514,13 +1514,16 @@ static int bq256xx_hw_init(struct bq256xx_device *bq) wd_reg_val = i; break; } - if (bq->watchdog_timer > bq256xx_watchdog_time[i] && + if (i + 1 < BQ256XX_NUM_WD_VAL && + bq->watchdog_timer > bq256xx_watchdog_time[i] && bq->watchdog_timer < bq256xx_watchdog_time[i + 1]) wd_reg_val = i; } ret = regmap_update_bits(bq->regmap, BQ256XX_CHARGER_CONTROL_1, BQ256XX_WATCHDOG_MASK, wd_reg_val << BQ256XX_WDT_BIT_SHIFT); + if (ret) + return ret; ret = power_supply_get_battery_info(bq->charger, &bat_info); if (ret == -ENOMEM) diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c index 473522b4326a..9d957cf8edf0 100644 --- a/drivers/power/supply/cw2015_battery.c +++ b/drivers/power/supply/cw2015_battery.c @@ -491,7 +491,7 @@ static int cw_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: if (cw_battery_valid_time_to_empty(cw_bat)) - val->intval = cw_bat->time_to_empty; + val->intval = cw_bat->time_to_empty * 60; else val->intval = 0; break; diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index d333e7422f4a..9726f96bf763 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -171,7 +171,7 @@ of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) pwm->args.period = args->args[0]; pwm->args.polarity = PWM_POLARITY_NORMAL; - if (args->args_count == 2 && args->args[2] & PWM_POLARITY_INVERTED) + if (args->args_count == 2 && args->args[1] & PWM_POLARITY_INVERTED) pwm->args.polarity = PWM_POLARITY_INVERSED; return pwm; diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c index a5fdf97c0d2e..246499128142 100644 --- a/drivers/pwm/pwm-jz4740.c +++ b/drivers/pwm/pwm-jz4740.c @@ -60,9 +60,10 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) snprintf(name, sizeof(name), "timer%u", pwm->hwpwm); clk = clk_get(chip->dev, name); - if (IS_ERR(clk)) - return dev_err_probe(chip->dev, PTR_ERR(clk), - "Failed to get clock\n"); + if (IS_ERR(clk)) { + dev_err(chip->dev, "error %pe: Failed to get clock\n", clk); + return PTR_ERR(clk); + } err = clk_prepare_enable(clk); if (err < 0) { diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 794ca5b02968..bdcdb7f38312 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -115,14 +115,14 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm, int ret; /* Ensure registers have been updated, enable counter and capture */ - regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG); - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN); + regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG); + regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN); /* Use cc1 or cc3 DMA resp for PWM input channels 1 & 2 or 3 & 4 */ dma_id = pwm->hwpwm < 2 ? STM32_TIMERS_DMA_CH1 : STM32_TIMERS_DMA_CH3; ccen = pwm->hwpwm < 2 ? TIM_CCER_CC12E : TIM_CCER_CC34E; ccr = pwm->hwpwm < 2 ? 
TIM_CCR1 : TIM_CCR3; - regmap_update_bits(priv->regmap, TIM_CCER, ccen, ccen); + regmap_set_bits(priv->regmap, TIM_CCER, ccen); /* * Timer DMA burst mode. Request 2 registers, 2 bursts, to get both @@ -160,8 +160,8 @@ static int stm32_pwm_raw_capture(struct stm32_pwm *priv, struct pwm_device *pwm, } stop: - regmap_update_bits(priv->regmap, TIM_CCER, ccen, 0); - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); + regmap_clear_bits(priv->regmap, TIM_CCER, ccen); + regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN); return ret; } @@ -359,7 +359,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch, regmap_write(priv->regmap, TIM_PSC, prescaler); regmap_write(priv->regmap, TIM_ARR, prd - 1); - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, TIM_CR1_ARPE); + regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE); /* Calculate the duty cycles */ dty = prd * duty_ns; @@ -377,7 +377,7 @@ static int stm32_pwm_config(struct stm32_pwm *priv, int ch, else regmap_update_bits(priv->regmap, TIM_CCMR2, mask, ccmr); - regmap_update_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE, TIM_BDTR_MOE); + regmap_set_bits(priv->regmap, TIM_BDTR, TIM_BDTR_MOE); return 0; } @@ -411,13 +411,13 @@ static int stm32_pwm_enable(struct stm32_pwm *priv, int ch) if (priv->have_complementary_output) mask |= TIM_CCER_CC1NE << (ch * 4); - regmap_update_bits(priv->regmap, TIM_CCER, mask, mask); + regmap_set_bits(priv->regmap, TIM_CCER, mask); /* Make sure that registers are updated */ - regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG); + regmap_set_bits(priv->regmap, TIM_EGR, TIM_EGR_UG); /* Enable controller */ - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN); + regmap_set_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN); return 0; } @@ -431,11 +431,11 @@ static void stm32_pwm_disable(struct stm32_pwm *priv, int ch) if (priv->have_complementary_output) mask |= TIM_CCER_CC1NE << (ch * 4); - regmap_update_bits(priv->regmap, TIM_CCER, mask, 0); + regmap_clear_bits(priv->regmap, TIM_CCER, mask); /* When all channels are disabled, we can disable the controller */ if (!active_channels(priv)) - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0); + regmap_clear_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN); clk_disable(priv->clk); } @@ -568,41 +568,30 @@ static void stm32_pwm_detect_complementary(struct stm32_pwm *priv) * If complementary bit doesn't exist writing 1 will have no * effect so we can detect it. */ - regmap_update_bits(priv->regmap, - TIM_CCER, TIM_CCER_CC1NE, TIM_CCER_CC1NE); + regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE); regmap_read(priv->regmap, TIM_CCER, &ccer); - regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE, 0); + regmap_clear_bits(priv->regmap, TIM_CCER, TIM_CCER_CC1NE); priv->have_complementary_output = (ccer != 0); } -static int stm32_pwm_detect_channels(struct stm32_pwm *priv) +static unsigned int stm32_pwm_detect_channels(struct stm32_pwm *priv, + unsigned int *num_enabled) { - u32 ccer; - int npwm = 0; + u32 ccer, ccer_backup; /* * If channels enable bits don't exist writing 1 will have no * effect so we can detect and count them. 
*/ - regmap_update_bits(priv->regmap, - TIM_CCER, TIM_CCER_CCXE, TIM_CCER_CCXE); + regmap_read(priv->regmap, TIM_CCER, &ccer_backup); + regmap_set_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE); regmap_read(priv->regmap, TIM_CCER, &ccer); - regmap_update_bits(priv->regmap, TIM_CCER, TIM_CCER_CCXE, 0); + regmap_write(priv->regmap, TIM_CCER, ccer_backup); - if (ccer & TIM_CCER_CC1E) - npwm++; + *num_enabled = hweight32(ccer_backup & TIM_CCER_CCXE); - if (ccer & TIM_CCER_CC2E) - npwm++; - - if (ccer & TIM_CCER_CC3E) - npwm++; - - if (ccer & TIM_CCER_CC4E) - npwm++; - - return npwm; + return hweight32(ccer & TIM_CCER_CCXE); } static int stm32_pwm_probe(struct platform_device *pdev) @@ -611,6 +600,8 @@ static int stm32_pwm_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent); struct stm32_pwm *priv; + unsigned int num_enabled; + unsigned int i; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); @@ -633,7 +624,11 @@ static int stm32_pwm_probe(struct platform_device *pdev) priv->chip.dev = dev; priv->chip.ops = &stm32pwm_ops; - priv->chip.npwm = stm32_pwm_detect_channels(priv); + priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled); + + /* Initialize clock refcount to number of enabled PWM channels. */ + for (i = 0; i < num_enabled; i++) + clk_enable(priv->clk); ret = pwmchip_add(&priv->chip); if (ret < 0) diff --git a/drivers/reset/core.c b/drivers/reset/core.c index f0a076e94118..92cc13ef3e56 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -807,6 +807,9 @@ static void __reset_control_put_internal(struct reset_control *rstc) { lockdep_assert_held(&reset_list_mutex); + if (IS_ERR_OR_NULL(rstc)) + return; + kref_put(&rstc->refcnt, __reset_control_release); } @@ -1017,11 +1020,8 @@ EXPORT_SYMBOL_GPL(reset_control_put); void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) { mutex_lock(&reset_list_mutex); - while (num_rstcs--) { - if (IS_ERR_OR_NULL(rstcs[num_rstcs].rstc)) - continue; + while (num_rstcs--) __reset_control_put_internal(rstcs[num_rstcs].rstc); - } mutex_unlock(&reset_list_mutex); } EXPORT_SYMBOL_GPL(reset_control_bulk_put); diff --git a/drivers/reset/hisilicon/hi6220_reset.c b/drivers/reset/hisilicon/hi6220_reset.c index 5ca145b64e63..30951914afac 100644 --- a/drivers/reset/hisilicon/hi6220_reset.c +++ b/drivers/reset/hisilicon/hi6220_reset.c @@ -164,7 +164,7 @@ static int hi6220_reset_probe(struct platform_device *pdev) if (!data) return -ENOMEM; - type = (enum hi6220_reset_ctrl_type)of_device_get_match_data(dev); + type = (uintptr_t)of_device_get_match_data(dev); regmap = syscon_node_to_regmap(np); if (IS_ERR(regmap)) { diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 0c1df1d5f1ac..a165b1a59fde 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "scm_blk.h" @@ -130,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq) for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { msb = &scmrq->aob->msb[i]; - aidaw = msb->data_addr; + aidaw = (u64)phys_to_virt(msb->data_addr); if ((msb->flags & MSB_FLAG_IDA) && aidaw && IS_ALIGNED(aidaw, PAGE_SIZE)) @@ -195,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq) msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); msb->oc = (rq_data_dir(req) == READ) ? 
MSB_OC_READ : MSB_OC_WRITE; msb->flags |= MSB_FLAG_IDA; - msb->data_addr = (u64) aidaw; + msb->data_addr = (u64)virt_to_phys(aidaw); rq_for_each_segment(bv, req, iter) { WARN_ON(bv.bv_offset); msb->blk_count += bv.bv_len >> 12; - aidaw->data_addr = (u64) page_address(bv.bv_page); + aidaw->data_addr = virt_to_phys(page_address(bv.bv_page)); aidaw++; } diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 7c6efde75da6..5e115e8b2ba4 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -1678,7 +1678,6 @@ struct aac_dev u32 handle_pci_error; bool init_reset; u8 soft_reset_support; - u8 use_map_queue; }; #define aac_adapter_interrupt(dev) \ diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index 013a9a334972..25cee03d7f97 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -223,12 +223,8 @@ int aac_fib_setup(struct aac_dev * dev) struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd) { struct fib *fibptr; - u32 blk_tag; - int i; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); - i = blk_mq_unique_tag_to_tag(blk_tag); - fibptr = &dev->fibs[i]; + fibptr = &dev->fibs[scsi_cmd_to_rq(scmd)->tag]; /* * Null out fields that depend on being zero at the start of * each I/O diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index bff49b8ab057..5ba5c18b77b4 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -506,15 +505,6 @@ static int aac_slave_configure(struct scsi_device *sdev) return 0; } -static void aac_map_queues(struct Scsi_Host *shost) -{ - struct aac_dev *aac = (struct aac_dev *)shost->hostdata; - - blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], - aac->pdev, 0); - aac->use_map_queue = true; -} - /** * aac_change_queue_depth - alter queue depths * @sdev: SCSI device we are considering @@ -1499,7 +1489,6 @@ static struct scsi_host_template aac_driver_template = { .bios_param = aac_biosparm, .shost_groups = aac_host_groups, .slave_configure = aac_slave_configure, - .map_queues = aac_map_queues, .change_queue_depth = aac_change_queue_depth, .sdev_groups = aac_dev_groups, .eh_abort_handler = aac_eh_abort, @@ -1787,8 +1776,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_lun = AAC_MAX_LUN; pci_set_drvdata(pdev, shost); - shost->nr_hw_queues = aac->max_msix; - shost->host_tagset = 1; error = scsi_add_host(shost, &pdev->dev); if (error) @@ -1921,7 +1908,6 @@ static void aac_remove_one(struct pci_dev *pdev) struct aac_dev *aac = (struct aac_dev *)shost->hostdata; aac_cancel_rescan_worker(aac); - aac->use_map_queue = false; scsi_remove_host(shost); __aac_shutdown(aac); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 61949f374188..11ef58204e96 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -493,10 +493,6 @@ static int aac_src_deliver_message(struct fib *fib) #endif u16 vector_no; - struct scsi_cmnd *scmd; - u32 blk_tag; - struct Scsi_Host *shost = dev->scsi_host_ptr; - struct blk_mq_queue_map *qmap; atomic_inc(&q->numpending); @@ -509,25 +505,8 @@ static int aac_src_deliver_message(struct fib *fib) if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) && dev->sa_firmware) vector_no = aac_get_vector(dev); - else { - if (!fib->vector_no || !fib->callback_data) { - if (shost && dev->use_map_queue) { - qmap = 
&shost->tag_set.map[HCTX_TYPE_DEFAULT]; - vector_no = qmap->mq_map[raw_smp_processor_id()]; - } - /* - * We hardcode the vector_no for - * reserved commands as a valid shost is - * absent during the init - */ - else - vector_no = 0; - } else { - scmd = (struct scsi_cmnd *)fib->callback_data; - blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd)); - vector_no = blk_mq_unique_tag_to_hwq(blk_tag); - } - } + else + vector_no = fib->vector_no; if (native_hba) { if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) { diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 05ddbb9bb7d8..451a58e0fd96 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -429,7 +429,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, struct fcoe_ctlr *ctlr; struct fcoe_rcv_info *fr; struct fcoe_percpu_s *bg; - struct sk_buff *tmp_skb; interface = container_of(ptype, struct bnx2fc_interface, fcoe_packet_type); @@ -441,11 +440,9 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, goto err; } - tmp_skb = skb_share_check(skb, GFP_ATOMIC); - if (!tmp_skb) - goto err; - - skb = tmp_skb; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return -1; if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 6fedc3b7d1ab..eb895a65ea8f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -52,9 +52,10 @@ int fnic_debugfs_init(void) fc_trc_flag->fnic_trace = 2; fc_trc_flag->fc_trace = 3; fc_trc_flag->fc_clear = 4; + return 0; } - return 0; + return -ENOMEM; } /* diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index a8142e2b9643..450a8578157c 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -1502,12 +1502,12 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba) { if (!hisi_hba->hw->soft_reset) - return -1; + return -ENOENT; down(&hisi_hba->sem); if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { up(&hisi_hba->sem); - return -1; + return -EPERM; } if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index c4305ec38ebf..0c80ff9affa3 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -3330,7 +3330,7 @@ static void debugfs_snapshot_global_reg_v3_hw(struct hisi_hba *hisi_hba) u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data; int i; - for (i = 0; i < debugfs_axi_reg.count; i++, databuf++) + for (i = 0; i < debugfs_global_reg.count; i++, databuf++) *databuf = hisi_sas_read32(hisi_hba, 4 * i); } @@ -4946,6 +4946,7 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) { struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct hisi_hba *hisi_hba = sha->lldd_ha; + struct Scsi_Host *shost = hisi_hba->shost; struct device *dev = hisi_hba->dev; int rc; @@ -4954,6 +4955,10 @@ static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) rc = hw_init_v3_hw(hisi_hba); if (rc) { dev_err(dev, "FLR: hw init failed rc=%d\n", rc); + clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); + scsi_unblock_requests(shost); + clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); + up(&hisi_hba->sem); return; } @@ -4981,7 
+4986,7 @@ static int _suspend_v3_hw(struct device *device) } if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) - return -1; + return -EPERM; dev_warn(dev, "entering suspend state\n"); diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index d10c6afb7f9c..8c662d08706f 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -223,6 +223,22 @@ static long mpi3mr_bsg_pel_enable(struct mpi3mr_ioc *mrioc, return rval; } + if (mrioc->unrecoverable) { + dprint_bsg_err(mrioc, "%s: unrecoverable controller\n", + __func__); + return -EFAULT; + } + + if (mrioc->reset_in_progress) { + dprint_bsg_err(mrioc, "%s: reset in progress\n", __func__); + return -EAGAIN; + } + + if (mrioc->stop_bsgs) { + dprint_bsg_err(mrioc, "%s: bsgs are blocked\n", __func__); + return -EAGAIN; + } + sg_copy_to_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, &pel_enable, sizeof(pel_enable)); diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index 6d55698ea4d1..85f5b349c7e4 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -1044,8 +1044,14 @@ void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc) tgtdev = NULL; list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) { if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) && - !tgtdev->is_hidden && !tgtdev->host_exposed) - mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id); + !tgtdev->is_hidden) { + if (!tgtdev->host_exposed) + mpi3mr_report_tgtdev_to_host(mrioc, + tgtdev->perst_id); + else if (tgtdev->starget) + starget_for_each_device(tgtdev->starget, + (void *)tgtdev, mpi3mr_update_sdev); + } } } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 2c53965f903f..4b07bdc4e3b6 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -1118,6 +1118,7 @@ static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd, scsi_log_send(scmd); scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER; + scmd->flags |= SCMD_LAST; /* * Lock sdev->state_mutex to avoid that scsi_device_quiesce() can @@ -2412,6 +2413,7 @@ scsi_ioctl_reset(struct scsi_device *dev, int __user *arg) scsi_init_command(dev, scmd); scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL; + scmd->flags |= SCMD_LAST; memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->cmd_len = 0; diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c index bc400669ee02..16a05143d0d6 100644 --- a/drivers/soc/qcom/llcc-qcom.c +++ b/drivers/soc/qcom/llcc-qcom.c @@ -680,14 +680,14 @@ static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config, u32 disable_cap_alloc, retain_pc; disable_cap_alloc = config->dis_cap_alloc << config->slice_id; - ret = regmap_write(drv_data->bcast_regmap, - LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc); + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_SCID_DIS_CAP_ALLOC, + BIT(config->slice_id), disable_cap_alloc); if (ret) return ret; retain_pc = config->retain_on_pc << config->slice_id; - ret = regmap_write(drv_data->bcast_regmap, - LLCC_TRP_PCB_ACT, retain_pc); + ret = regmap_update_bits(drv_data->bcast_regmap, LLCC_TRP_PCB_ACT, + BIT(config->slice_id), retain_pc); if (ret) return ret; } diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c index b10ea69a638e..2624441d2fa9 100644 --- a/drivers/soundwire/stream.c +++ b/drivers/soundwire/stream.c @@ -744,14 +744,15 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count) * sdw_ml_sync_bank_switch: Multilink 
register bank switch * * @bus: SDW bus instance + * @multi_link: whether this is a multi-link stream with hardware-based sync * * Caller function should free the buffers on error */ -static int sdw_ml_sync_bank_switch(struct sdw_bus *bus) +static int sdw_ml_sync_bank_switch(struct sdw_bus *bus, bool multi_link) { unsigned long time_left; - if (!bus->multi_link) + if (!multi_link) return 0; /* Wait for completion of transfer */ @@ -848,7 +849,7 @@ static int do_bank_switch(struct sdw_stream_runtime *stream) bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT; /* Check if bank switch was successful */ - ret = sdw_ml_sync_bank_switch(bus); + ret = sdw_ml_sync_bank_switch(bus, multi_link); if (ret < 0) { dev_err(bus->dev, "multi link bank switch failed: %d\n", ret); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 946e2186d244..15ea11ebcbe0 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1101,9 +1101,10 @@ config SPI_ZYNQ_QSPI config SPI_ZYNQMP_GQSPI tristate "Xilinx ZynqMP GQSPI controller" - depends on (SPI_MASTER && HAS_DMA) || COMPILE_TEST + depends on (SPI_MEM && HAS_DMA) || COMPILE_TEST help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. + This controller only supports SPI memory interface. config SPI_AMD tristate "AMD SPI controller" diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index c4f22d50dba5..78daf2b2143c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -22,6 +22,7 @@ #include #include #include +#include #include /* SPI register offsets */ @@ -278,6 +279,7 @@ struct atmel_spi { bool keep_cs; u32 fifo_size; + bool last_polarity; u8 native_cs_free; u8 native_cs_for_gpio; }; @@ -290,6 +292,22 @@ struct atmel_spi_device { #define SPI_MAX_DMA_XFER 65535 /* true for both PDC and DMA */ #define INVALID_DMA_ADDRESS 0xffffffff +/* + * This frequency can be anything supported by the controller, but to avoid + * unnecessary delay, the highest possible frequency is chosen. + * + * This frequency is the highest possible which is not interfering with other + * chip select registers (see Note for Serial Clock Bit Rate configuration in + * Atmel-11121F-ATARM-SAMA5D3-Series-Datasheet_02-Feb-16, page 1283) + */ +#define DUMMY_MSG_FREQUENCY 0x02 +/* + * 8 bits is the minimum data the controller is capable of sending. + * + * This message can be anything as it should not be treated by any SPI device. + */ +#define DUMMY_MSG 0xAA + /* * Version 2 of the SPI controller has * - CR.LASTXFER @@ -303,6 +321,43 @@ static bool atmel_spi_is_v2(struct atmel_spi *as) return as->caps.is_spi2; } +/* + * Send a dummy message. + * + * This is sometimes needed when using a CS GPIO to force clock transition when + * switching between devices with different polarities. + */ +static void atmel_spi_send_dummy(struct atmel_spi *as, struct spi_device *spi, int chip_select) +{ + u32 status; + u32 csr; + + /* + * Set a clock frequency to allow sending message on SPI bus. + * The frequency here can be anything, but is needed for + * the controller to send the data. + */ + csr = spi_readl(as, CSR0 + 4 * chip_select); + csr = SPI_BFINS(SCBR, DUMMY_MSG_FREQUENCY, csr); + spi_writel(as, CSR0 + 4 * chip_select, csr); + + /* + * Read all data coming from SPI bus, needed to be able to send + * the message. 
+ */ + spi_readl(as, RDR); + while (spi_readl(as, SR) & SPI_BIT(RDRF)) { + spi_readl(as, RDR); + cpu_relax(); + } + + spi_writel(as, TDR, DUMMY_MSG); + + readl_poll_timeout_atomic(as->regs + SPI_SR, status, + (status & SPI_BIT(TXEMPTY)), 1, 1000); +} + + /* * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby * they assume that spi slave device state will not change on deselect, so @@ -319,11 +374,17 @@ static bool atmel_spi_is_v2(struct atmel_spi *as) * Master on Chip Select 0.") No workaround exists for that ... so for * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, * and (c) will trigger that first erratum in some cases. + * + * When changing the clock polarity, the SPI controller waits for the next + * transmission to enforce the default clock state. This may be an issue when + * using a GPIO as Chip Select: the clock level is applied only when the first + * packet is sent, once the CS has already been asserted. The workaround is to + * avoid this by sending a first (dummy) message before toggling the CS state. */ - static void cs_activate(struct atmel_spi *as, struct spi_device *spi) { struct atmel_spi_device *asd = spi->controller_state; + bool new_polarity; int chip_select; u32 mr; @@ -352,6 +413,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) } mr = spi_readl(as, MR); + + /* + * Ensures the clock polarity is valid before we actually + * assert the CS to avoid spurious clock edges to be + * processed by the spi devices. + */ + if (spi_get_csgpiod(spi, 0)) { + new_polarity = (asd->csr & SPI_BIT(CPOL)) != 0; + if (new_polarity != as->last_polarity) { + /* + * Need to disable the GPIO before sending the dummy + * message because it is already set by the spi core. + */ + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 0); + atmel_spi_send_dummy(as, spi, chip_select); + as->last_polarity = new_polarity; + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), 1); + } + } } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
SPI_BIT(CPOL) : 0; int i; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 9bca3d076f05..51ceaa485724 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -30,12 +30,15 @@ #include +#define SH_MSIOF_FLAG_FIXED_DTDL_200 BIT(0) + struct sh_msiof_chipdata { u32 bits_per_word_mask; u16 tx_fifo_size; u16 rx_fifo_size; u16 ctlr_flags; u16 min_div_pow; + u32 flags; }; struct sh_msiof_spi_priv { @@ -1073,6 +1076,16 @@ static const struct sh_msiof_chipdata rcar_gen3_data = { .min_div_pow = 1, }; +static const struct sh_msiof_chipdata rcar_r8a7795_data = { + .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) | + SPI_BPW_MASK(24) | SPI_BPW_MASK(32), + .tx_fifo_size = 64, + .rx_fifo_size = 64, + .ctlr_flags = SPI_CONTROLLER_MUST_TX, + .min_div_pow = 1, + .flags = SH_MSIOF_FLAG_FIXED_DTDL_200, +}; + static const struct of_device_id sh_msiof_match[] = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data }, @@ -1083,6 +1096,7 @@ static const struct of_device_id sh_msiof_match[] = { { .compatible = "renesas,msiof-r8a7793", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,rcar-gen2-msiof", .data = &rcar_gen2_data }, + { .compatible = "renesas,msiof-r8a7795", .data = &rcar_r8a7795_data }, { .compatible = "renesas,msiof-r8a7796", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen3-msiof", .data = &rcar_gen3_data }, { .compatible = "renesas,rcar-gen4-msiof", .data = &rcar_gen3_data }, @@ -1280,6 +1294,9 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) return -ENXIO; } + if (chipdata->flags & SH_MSIOF_FLAG_FIXED_DTDL_200) + info->dtdl = 200; + if (info->mode == MSIOF_SPI_SLAVE) ctlr = spi_alloc_slave(&pdev->dev, sizeof(struct sh_msiof_spi_priv)); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 5d046be8b2dd..22d227878bc4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -360,6 +360,18 @@ const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev) } EXPORT_SYMBOL_GPL(spi_get_device_id); +const void *spi_get_device_match_data(const struct spi_device *sdev) +{ + const void *match; + + match = device_get_match_data(&sdev->dev); + if (match) + return match; + + return (const void *)spi_get_device_id(sdev)->driver_data; +} +EXPORT_SYMBOL_GPL(spi_get_device_match_data); + static int spi_match_device(struct device *dev, struct device_driver *drv) { const struct spi_device *spi = to_spi_device(dev); @@ -592,7 +604,7 @@ static void spi_dev_set_name(struct spi_device *spi) } dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev), - spi->chip_select); + spi_get_chipselect(spi, 0)); } static int spi_dev_check(struct device *dev, void *data) @@ -601,7 +613,7 @@ static int spi_dev_check(struct device *dev, void *data) struct spi_device *new_spi = data; if (spi->controller == new_spi->controller && - spi->chip_select == new_spi->chip_select) + spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0)) return -EBUSY; return 0; } @@ -626,7 +638,7 @@ static int __spi_add_device(struct spi_device *spi) status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check); if (status) { dev_err(dev, "chipselect %d already in use\n", - spi->chip_select); + spi_get_chipselect(spi, 0)); return status; } @@ -637,7 +649,7 @@ static int __spi_add_device(struct spi_device *spi) } if (ctlr->cs_gpiods) - spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select]; + 
spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]); /* * Drivers may modify this initial i/o setup, but will @@ -680,8 +692,8 @@ int spi_add_device(struct spi_device *spi) int status; /* Chipselects are numbered 0..max; validate. */ - if (spi->chip_select >= ctlr->num_chipselect) { - dev_err(dev, "cs%d >= max %d\n", spi->chip_select, + if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) { + dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0), ctlr->num_chipselect); return -EINVAL; } @@ -702,8 +714,8 @@ static int spi_add_device_locked(struct spi_device *spi) struct device *dev = ctlr->dev.parent; /* Chipselects are numbered 0..max; validate. */ - if (spi->chip_select >= ctlr->num_chipselect) { - dev_err(dev, "cs%d >= max %d\n", spi->chip_select, + if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) { + dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0), ctlr->num_chipselect); return -EINVAL; } @@ -749,7 +761,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr, WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias)); - proxy->chip_select = chip->chip_select; + spi_set_chipselect(proxy, 0, chip->chip_select); proxy->max_speed_hz = chip->max_speed_hz; proxy->mode = chip->mode; proxy->irq = chip->irq; @@ -958,24 +970,23 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * Avoid calling into the driver (or doing delays) if the chip select * isn't actually changing from the last time this was called. */ - if (!force && ((enable && spi->controller->last_cs == spi->chip_select) || - (!enable && spi->controller->last_cs != spi->chip_select)) && + if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) || + (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) && (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH))) return; trace_spi_set_cs(spi, activate); - spi->controller->last_cs = enable ? spi->chip_select : -1; + spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1; spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH; - if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) { + if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate) spi_delay_exec(&spi->cs_hold, NULL); - } if (spi->mode & SPI_CS_HIGH) enable = !enable; - if (spi->cs_gpiod) { + if (spi_get_csgpiod(spi, 0)) { if (!(spi->mode & SPI_NO_CS)) { /* * Historically ACPI has no means of the GPIO polarity and @@ -988,10 +999,10 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) * into account. 
*/ if (has_acpi_companion(&spi->dev)) - gpiod_set_value_cansleep(spi->cs_gpiod, !enable); + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable); else /* Polarity handled by GPIO library */ - gpiod_set_value_cansleep(spi->cs_gpiod, activate); + gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate); } /* Some SPI masters need both GPIO CS & slave_select */ if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && @@ -1001,7 +1012,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force) spi->controller->set_cs(spi, !enable); } - if (spi->cs_gpiod || !spi->controller->set_cs_timing) { + if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) { if (activate) spi_delay_exec(&spi->cs_setup, NULL); else @@ -2291,7 +2302,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi, nc, rc); return rc; } - spi->chip_select = value; + spi_set_chipselect(spi, 0, value); /* Device speed */ if (!of_property_read_u32(nc, "spi-max-frequency", &value)) @@ -2405,7 +2416,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi, strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias)); /* Use provided chip-select for ancillary device */ - ancillary->chip_select = chip_select; + spi_set_chipselect(ancillary, 0, chip_select); /* Take over SPI mode/speed from SPI main device */ ancillary->max_speed_hz = spi->max_speed_hz; @@ -2652,7 +2663,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, spi->mode |= lookup.mode; spi->irq = lookup.irq; spi->bits_per_word = lookup.bits_per_word; - spi->chip_select = lookup.chip_select; + spi_set_chipselect(spi, 0, lookup.chip_select); return spi; } @@ -3611,6 +3622,37 @@ static int __spi_validate_bits_per_word(struct spi_controller *ctlr, return 0; } +/** + * spi_set_cs_timing - configure CS setup, hold, and inactive delays + * @spi: the device that requires specific CS timing configuration + * + * Return: zero on success, else a negative error code. + */ +static int spi_set_cs_timing(struct spi_device *spi) +{ + struct device *parent = spi->controller->dev.parent; + int status = 0; + + if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) { + if (spi->controller->auto_runtime_pm) { + status = pm_runtime_get_sync(parent); + if (status < 0) { + pm_runtime_put_noidle(parent); + dev_err(&spi->controller->dev, "Failed to power device: %d\n", + status); + return status; + } + + status = spi->controller->set_cs_timing(spi); + pm_runtime_mark_last_busy(parent); + pm_runtime_put_autosuspend(parent); + } else { + status = spi->controller->set_cs_timing(spi); + } + } + return status; +} + /** * spi_setup - setup SPI mode and clock rate * @spi: the device whose settings are being modified @@ -3707,6 +3749,12 @@ int spi_setup(struct spi_device *spi) } } + status = spi_set_cs_timing(spi); + if (status) { + mutex_unlock(&spi->controller->io_mutex); + return status; + } + if (spi->controller->auto_runtime_pm && spi->controller->set_cs) { status = pm_runtime_resume_and_get(spi->controller->dev.parent); if (status < 0) { @@ -3790,7 +3838,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) * cs_change is set for each transfer. 
*/ if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || - spi->cs_gpiod)) { + spi_get_csgpiod(spi, 0))) { size_t maxsize; int ret; diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c index ad511f2c3324..01e8851e639d 100644 --- a/drivers/spmi/spmi-mtk-pmif.c +++ b/drivers/spmi/spmi-mtk-pmif.c @@ -50,6 +50,7 @@ struct pmif { struct clk_bulk_data clks[PMIF_MAX_CLKS]; size_t nclks; const struct pmif_data *data; + raw_spinlock_t lock; }; static const char * const pmif_clock_names[] = { @@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, struct ch_reg *inf_reg; int ret; u32 data, cmd; + unsigned long flags; /* Check for argument validation. */ if (sid & ~0xf) { @@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; + raw_spin_lock_irqsave(&arb->lock, flags); /* Wait for Software Interface FSM state to be IDLE. */ inf_reg = &arb->chan; ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], @@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* set channel ready if the data has transferred */ if (pmif_is_fsm_vldclr(arb)) pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); return ret; } @@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* Send the command. */ cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr; pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); /* * Wait for Software Interface FSM state to be WFVLDCLR, @@ -376,7 +381,8 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, struct pmif *arb = spmi_controller_get_drvdata(ctrl); struct ch_reg *inf_reg; int ret; - u32 data, cmd; + u32 data, wdata, cmd; + unsigned long flags; if (len > 4) { dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); @@ -394,6 +400,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; + /* Set the write data. */ + memcpy(&wdata, buf, len); + + raw_spin_lock_irqsave(&arb->lock, flags); /* Wait for Software Interface FSM state to be IDLE. */ inf_reg = &arb->chan; ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], @@ -403,17 +413,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* set channel ready if the data has transferred */ if (pmif_is_fsm_vldclr(arb)) pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); return ret; } - /* Set the write data. */ - memcpy(&data, buf, len); - pmif_writel(arb, data, inf_reg->wdata); + pmif_writel(arb, wdata, inf_reg->wdata); /* Send the command. 
*/ cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr; pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); return 0; } @@ -488,6 +498,8 @@ static int mtk_spmi_probe(struct platform_device *pdev) arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset; arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset; + raw_spin_lock_init(&arb->lock); + platform_set_drvdata(pdev, ctrl); err = spmi_controller_add(ctrl); diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 671ee8843c88..5703a9ddb6d0 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -349,7 +349,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb) /* Get ethernet protocol */ eth = (struct ethhdr *)skb->data; if (ntohs(eth->h_proto) == ETH_P_8021Q) { - vlan_eth = (struct vlan_ethhdr *)skb->data; + vlan_eth = skb_vlan_eth_hdr(skb); mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto); network_data = skb->data + VLAN_ETH_HLEN; nic_type |= NIC_TYPE_F_VLAN; @@ -435,7 +435,7 @@ static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev) * driver based on the NIC mac */ if (nic_type & NIC_TYPE_F_VLAN) { - struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *vlan_eth = skb_vlan_eth_hdr(skb); nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK; data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN); diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c index a9bd1e71ea48..d16cf4115d03 100644 --- a/drivers/staging/media/rkvdec/rkvdec.c +++ b/drivers/staging/media/rkvdec/rkvdec.c @@ -461,6 +461,9 @@ static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = { .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, + + .vidioc_decoder_cmd = v4l2_m2m_ioctl_stateless_decoder_cmd, + .vidioc_try_decoder_cmd = v4l2_m2m_ioctl_stateless_try_decoder_cmd, }; static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 7e81a53dbf3c..8d74e97c9874 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -338,11 +338,13 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, } iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len); - if (is_write) + if (is_write) { + file_start_write(fd); ret = vfs_iter_write(fd, &iter, &pos, 0); - else + file_end_write(fd); + } else { ret = vfs_iter_read(fd, &iter, &pos, 0); - + } if (is_write) { if (ret < 0 || ret != data_length) { pr_err("%s() write returned %d\n", __func__, ret); @@ -474,7 +476,9 @@ fd_execute_write_same(struct se_cmd *cmd) } iov_iter_bvec(&iter, ITER_SOURCE, bvec, nolb, len); + file_start_write(fd_dev->fd_file); ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0); + file_end_write(fd_dev->fd_file); kfree(bvec); if (ret < 0 || ret != len) { diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c index d89f92032c1c..f691bce5c147 100644 --- a/drivers/thunderbolt/debugfs.c +++ b/drivers/thunderbolt/debugfs.c @@ -943,7 +943,7 @@ static void margining_port_remove(struct tb_port *port) snprintf(dir_name, sizeof(dir_name), "port%d", port->port); parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); if (parent) - debugfs_remove_recursive(debugfs_lookup("margining", parent)); + debugfs_lookup_and_remove("margining", parent); kfree(port->usb4->margining); port->usb4->margining = NULL; 
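The thunderbolt hunk above replaces the open-coded debugfs_lookup() + debugfs_remove_recursive() pair with debugfs_lookup_and_remove(). The point is that debugfs_lookup() returns the dentry with an extra reference which the old one-liner never dropped; the combined helper performs the lookup, the removal and the final dput() itself. A minimal sketch of the two patterns, using a hypothetical per-port directory called parent_dir:

#include <linux/dcache.h>
#include <linux/debugfs.h>

/* Old pattern: the caller owns the reference that debugfs_lookup() took. */
static void old_remove_margining(struct dentry *parent_dir)
{
        struct dentry *child = debugfs_lookup("margining", parent_dir);

        if (child) {
                debugfs_remove_recursive(child);
                dput(child);            /* easy to forget -> dentry leak */
        }
}

/* New pattern: lookup, removal and reference drop in a single call. */
static void new_remove_margining(struct dentry *parent_dir)
{
        debugfs_lookup_and_remove("margining", parent_dir);
}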
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c index 15a2387a5b25..4f4502fb5454 100644 --- a/drivers/tty/serial/8250/8250_bcm2835aux.c +++ b/drivers/tty/serial/8250/8250_bcm2835aux.c @@ -119,6 +119,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev) /* get the clock - this also enables the HW */ data->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(data->clk)) + return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n"); /* get the interrupt */ ret = platform_get_irq(pdev, 0); diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index b406cba10b0e..dca1abe36324 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -442,7 +442,7 @@ static int generic_rs485_config(struct uart_port *port, struct ktermios *termios } static const struct serial_rs485 generic_rs485_supported = { - .flags = SER_RS485_ENABLED, + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND, }; static const struct exar8250_platform exar8250_default_platform = { @@ -486,7 +486,8 @@ static int iot2040_rs485_config(struct uart_port *port, struct ktermios *termios } static const struct serial_rs485 iot2040_rs485_supported = { - .flags = SER_RS485_ENABLED | SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS, + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | + SER_RS485_RX_DURING_TX | SER_RS485_TERMINATE_BUS, }; static const struct property_entry iot2040_gpio_properties[] = { diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 0b04d810b3e6..037d613006f5 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1476,7 +1476,7 @@ static int omap8250_remove(struct platform_device *pdev) err = pm_runtime_resume_and_get(&pdev->dev); if (err) - return err; + dev_err(&pdev->dev, "Failed to resume hardware\n"); serial8250_unregister_port(priv->line); priv->line = -ENODEV; diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index d2137f6eff32..f8962a3d4421 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -450,13 +450,13 @@ static void imx_uart_stop_tx(struct uart_port *port) ucr1 = imx_uart_readl(sport, UCR1); imx_uart_writel(sport, ucr1 & ~UCR1_TRDYEN, UCR1); + ucr4 = imx_uart_readl(sport, UCR4); usr2 = imx_uart_readl(sport, USR2); - if (!(usr2 & USR2_TXDC)) { + if ((!(usr2 & USR2_TXDC)) && (ucr4 & UCR4_TCEN)) { /* The shifter is still busy, so retry once TC triggers */ return; } - ucr4 = imx_uart_readl(sport, UCR4); ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); @@ -2229,7 +2229,6 @@ static enum hrtimer_restart imx_trigger_stop_tx(struct hrtimer *t) return HRTIMER_NORESTART; } -static const struct serial_rs485 imx_no_rs485 = {}; /* No RS485 if no RTS */ static const struct serial_rs485 imx_rs485_supported = { .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | SER_RS485_RX_DURING_TX, @@ -2319,8 +2318,6 @@ static int imx_uart_probe(struct platform_device *pdev) /* RTS is required to control the RS485 transmitter */ if (sport->have_rtscts || sport->have_rtsgpio) sport->port.rs485_supported = imx_rs485_supported; - else - sport->port.rs485_supported = imx_no_rs485; sport->port.flags = UPF_BOOT_AUTOCONF; timer_setup(&sport->timer, imx_uart_timeout, 0); @@ -2347,7 +2344,7 @@ static int imx_uart_probe(struct platform_device *pdev) /* For register access, we only need to enable the ipg clock. 
*/ ret = clk_prepare_enable(sport->clk_ipg); if (ret) { - dev_err(&pdev->dev, "failed to enable per clk: %d\n", ret); + dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret); return ret; } @@ -2359,14 +2356,8 @@ static int imx_uart_probe(struct platform_device *pdev) sport->ufcr = readl(sport->port.membase + UFCR); ret = uart_get_rs485_mode(&sport->port); - if (ret) { - clk_disable_unprepare(sport->clk_ipg); - return ret; - } - - if (sport->port.rs485.flags & SER_RS485_ENABLED && - (!sport->have_rtscts && !sport->have_rtsgpio)) - dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); + if (ret) + goto err_clk; /* * If using the i.MX UART RTS/CTS control then the RTS (CTS_B) @@ -2446,8 +2437,6 @@ static int imx_uart_probe(struct platform_device *pdev) imx_uart_writel(sport, ucr3, UCR3); } - clk_disable_unprepare(sport->clk_ipg); - hrtimer_init(&sport->trigger_start_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer_init(&sport->trigger_stop_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); sport->trigger_start_tx.function = imx_trigger_start_tx; @@ -2463,7 +2452,7 @@ static int imx_uart_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to request rx irq: %d\n", ret); - return ret; + goto err_clk; } ret = devm_request_irq(&pdev->dev, txirq, imx_uart_txint, 0, @@ -2471,7 +2460,7 @@ static int imx_uart_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to request tx irq: %d\n", ret); - return ret; + goto err_clk; } ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0, @@ -2479,14 +2468,14 @@ static int imx_uart_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to request rts irq: %d\n", ret); - return ret; + goto err_clk; } } else { ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, dev_name(&pdev->dev), sport); if (ret) { dev_err(&pdev->dev, "failed to request irq: %d\n", ret); - return ret; + goto err_clk; } } @@ -2494,7 +2483,12 @@ static int imx_uart_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sport); - return uart_add_one_port(&imx_uart_uart_driver, &sport->port); + ret = uart_add_one_port(&imx_uart_uart_driver, &sport->port); + +err_clk: + clk_disable_unprepare(sport->clk_ipg); + + return ret; } static int imx_uart_remove(struct platform_device *pdev) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7d0d2718ef59..beb7896ebf8a 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1512,6 +1512,13 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev) return omap_up_info; } +static const struct serial_rs485 serial_omap_rs485_supported = { + .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | + SER_RS485_RX_DURING_TX, + .delay_rts_before_send = 1, + .delay_rts_after_send = 1, +}; + static int serial_omap_probe_rs485(struct uart_omap_port *up, struct device *dev) { @@ -1526,6 +1533,9 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up, if (!np) return 0; + up->port.rs485_config = serial_omap_config_rs485; + up->port.rs485_supported = serial_omap_rs485_supported; + ret = uart_get_rs485_mode(&up->port); if (ret) return ret; @@ -1560,13 +1570,6 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up, return 0; } -static const struct serial_rs485 serial_omap_rs485_supported = { - .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | - SER_RS485_RX_DURING_TX, - .delay_rts_before_send = 1, - .delay_rts_after_send = 1, -}; - 
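The omap-serial change above hoists serial_omap_rs485_supported and assigns up->port.rs485_config and up->port.rs485_supported inside serial_omap_probe_rs485() before uart_get_rs485_mode() runs; the serial_core hunk later in this section makes uart_get_rs485_mode() return 0 early when SER_RS485_ENABLED is absent from rs485_supported.flags. A minimal probe-order sketch under that assumption; the foo_* names and the empty config callback are invented for illustration:

#include <linux/serial_core.h>

/* Placeholder callback: a real driver would program RTS handling here. */
static int foo_rs485_config(struct uart_port *port, struct ktermios *termios,
                            struct serial_rs485 *rs485)
{
        return 0;
}

static const struct serial_rs485 foo_rs485_supported = {
        .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
};

static int foo_probe_rs485(struct uart_port *port)
{
        /*
         * Advertise support first: with the serial_core change,
         * uart_get_rs485_mode() skips the rs485-* firmware properties
         * when SER_RS485_ENABLED is not set in rs485_supported.flags.
         */
        port->rs485_config = foo_rs485_config;
        port->rs485_supported = foo_rs485_supported;

        return uart_get_rs485_mode(port);
}

Calling uart_get_rs485_mode() before these assignments would silently ignore the firmware-described RS485 settings, which is why the driver reorders its probe path.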
static int serial_omap_probe(struct platform_device *pdev) { struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev); @@ -1634,17 +1637,11 @@ static int serial_omap_probe(struct platform_device *pdev) dev_info(up->port.dev, "no wakeirq for uart%d\n", up->port.line); - ret = serial_omap_probe_rs485(up, &pdev->dev); - if (ret < 0) - goto err_rs485; - sprintf(up->name, "OMAP UART%d", up->port.line); up->port.mapbase = mem->start; up->port.membase = base; up->port.flags = omap_up_info->flags; up->port.uartclk = omap_up_info->uartclk; - up->port.rs485_config = serial_omap_config_rs485; - up->port.rs485_supported = serial_omap_rs485_supported; if (!up->port.uartclk) { up->port.uartclk = DEFAULT_CLK_SPEED; dev_warn(&pdev->dev, @@ -1652,6 +1649,10 @@ static int serial_omap_probe(struct platform_device *pdev) DEFAULT_CLK_SPEED); } + ret = serial_omap_probe_rs485(up, &pdev->dev); + if (ret < 0) + goto err_rs485; + up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE; cpu_latency_qos_add_request(&up->pm_qos_request, up->latency); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index b4b849415c50..db33790e6675 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #define SC16IS7XX_NAME "sc16is7xx" @@ -1716,9 +1717,12 @@ static int sc16is7xx_spi_probe(struct spi_device *spi) /* Setup SPI bus */ spi->bits_per_word = 8; - /* only supports mode 0 on SC16IS762 */ + /* For all variants, only mode 0 is supported */ + if ((spi->mode & SPI_MODE_X_MASK) != SPI_MODE_0) + return dev_err_probe(&spi->dev, -EINVAL, "Unsupported SPI mode\n"); + spi->mode = spi->mode ? : SPI_MODE_0; - spi->max_speed_hz = spi->max_speed_hz ? : 15000000; + spi->max_speed_hz = spi->max_speed_hz ? 
: 4 * HZ_PER_MHZ; ret = spi_setup(spi); if (ret) return ret; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index d4e57f9017db..f0ed30d0a697 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1353,20 +1353,28 @@ static void uart_sanitize_serial_rs485(struct uart_port *port, struct serial_rs4 return; } - /* Pick sane settings if the user hasn't */ - if ((supported_flags & (SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND)) && - !(rs485->flags & SER_RS485_RTS_ON_SEND) == - !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) { - dev_warn_ratelimited(port->dev, - "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n", - port->name, port->line); - rs485->flags |= SER_RS485_RTS_ON_SEND; - rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; - supported_flags |= SER_RS485_RTS_ON_SEND|SER_RS485_RTS_AFTER_SEND; - } - rs485->flags &= supported_flags; + /* Pick sane settings if the user hasn't */ + if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == + !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) { + if (supported_flags & SER_RS485_RTS_ON_SEND) { + rs485->flags |= SER_RS485_RTS_ON_SEND; + rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; + + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_ON_SEND instead\n", + port->name, port->line); + } else { + rs485->flags |= SER_RS485_RTS_AFTER_SEND; + rs485->flags &= ~SER_RS485_RTS_ON_SEND; + + dev_warn_ratelimited(port->dev, + "%s (%d): invalid RTS setting, using RTS_AFTER_SEND instead\n", + port->name, port->line); + } + } + uart_sanitize_serial_rs485_delays(port, rs485); /* Return clean padding area to userspace */ @@ -1428,7 +1436,7 @@ static int uart_set_rs485_config(struct tty_struct *tty, struct uart_port *port, int ret; unsigned long flags; - if (!port->rs485_config) + if (!(port->rs485_supported.flags & SER_RS485_ENABLED)) return -ENOTTY; if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user))) @@ -3420,6 +3428,9 @@ int uart_get_rs485_mode(struct uart_port *port) u32 rs485_delay[2]; int ret; + if (!(port->rs485_supported.flags & SER_RS485_ENABLED)) + return 0; + ret = device_property_read_u32_array(dev, "rs485-rts-delay", rs485_delay, 2); if (!ret) { diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h index c5ee21912755..91515e1ebc8d 100644 --- a/drivers/tty/tty.h +++ b/drivers/tty/tty.h @@ -63,7 +63,7 @@ int tty_check_change(struct tty_struct *tty); void __stop_tty(struct tty_struct *tty); void __start_tty(struct tty_struct *tty); void tty_write_unlock(struct tty_struct *tty); -int tty_write_lock(struct tty_struct *tty, int ndelay); +int tty_write_lock(struct tty_struct *tty, bool ndelay); void tty_vhangup_session(struct tty_struct *tty); void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty); int tty_signal_session_leader(struct tty_struct *tty, int exit_session); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 8fb6c6853556..aaf77a5616ff 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -939,7 +939,7 @@ void tty_write_unlock(struct tty_struct *tty) wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT); } -int tty_write_lock(struct tty_struct *tty, int ndelay) +int tty_write_lock(struct tty_struct *tty, bool ndelay) { if (!mutex_trylock(&tty->atomic_write_lock)) { if (ndelay) @@ -1153,7 +1153,7 @@ int tty_send_xchar(struct tty_struct *tty, char ch) return 0; } - if (tty_write_lock(tty, 0) < 0) + if (tty_write_lock(tty, false) < 0) return -ERESTARTSYS; down_read(&tty->termios_rwsem); @@ -2472,22 +2472,25 @@ static int send_break(struct 
tty_struct *tty, unsigned int duration) return 0; if (tty->driver->flags & TTY_DRIVER_HARDWARE_BREAK) - retval = tty->ops->break_ctl(tty, duration); - else { - /* Do the work ourselves */ - if (tty_write_lock(tty, 0) < 0) - return -EINTR; - retval = tty->ops->break_ctl(tty, -1); - if (retval) - goto out; - if (!signal_pending(current)) - msleep_interruptible(duration); + return tty->ops->break_ctl(tty, duration); + + /* Do the work ourselves */ + if (tty_write_lock(tty, false) < 0) + return -EINTR; + + retval = tty->ops->break_ctl(tty, -1); + if (!retval) { + msleep_interruptible(duration); retval = tty->ops->break_ctl(tty, 0); -out: - tty_write_unlock(tty); - if (signal_pending(current)) - retval = -EINTR; + } else if (retval == -EOPNOTSUPP) { + /* some drivers can tell only dynamically */ + retval = 0; } + tty_write_unlock(tty); + + if (signal_pending(current)) + retval = -EINTR; + return retval; } diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index ad1cf51ecd11..8767c504b95d 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -506,7 +506,7 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) if (retval < 0) return retval; - if (tty_write_lock(tty, 0) < 0) + if (tty_write_lock(tty, false) < 0) goto retry_write_wait; /* Racing writer? */ diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 62082d64ece0..2d572f6c8ec8 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -466,13 +466,13 @@ static int uio_open(struct inode *inode, struct file *filep) mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); - mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; + mutex_unlock(&minor_lock); goto out; } - get_device(&idev->dev); + mutex_unlock(&minor_lock); if (!try_module_get(idev->owner)) { ret = -ENODEV; @@ -1064,9 +1064,8 @@ void uio_unregister_device(struct uio_info *info) wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); - device_unregister(&idev->dev); - uio_free_minor(minor); + device_unregister(&idev->dev); return; } diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 69a44bd7e5d0..ccdd525bd7c8 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -1117,6 +1117,8 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, dma_addr_t trb_dma; u32 togle_pcs = 1; int sg_iter = 0; + int num_trb_req; + int trb_burst; int num_trb; int address; u32 control; @@ -1125,15 +1127,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, struct scatterlist *s = NULL; bool sg_supported = !!(request->num_mapped_sgs); - if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) - num_trb = priv_ep->interval; - else - num_trb = sg_supported ? request->num_mapped_sgs : 1; + num_trb_req = sg_supported ? request->num_mapped_sgs : 1; - if (num_trb > priv_ep->free_trbs) { - priv_ep->flags |= EP_RING_FULL; - return -ENOBUFS; - } + /* ISO transfer require each SOF have a TD, each TD include some TRBs */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) + num_trb = priv_ep->interval * num_trb_req; + else + num_trb = num_trb_req; priv_req = to_cdns3_request(request); address = priv_ep->endpoint.desc->bEndpointAddress; @@ -1182,14 +1182,31 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) | TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit); + + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { + /* + * ISO require LINK TRB must be first one of TD. 
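/*
 * Illustrative sketch, not part of any hunk in this series: the cdns3 change
 * above sizes the TRB ring for ISO endpoints as one TD per service interval,
 * with one TRB per mapped scatterlist entry inside each TD, and refuses the
 * request when the ring cannot hold that many TRBs. The struct and function
 * names below are editorial stand-ins, not the driver's own.
 */
#include <stdbool.h>

struct ep_state {
	bool     is_isoc;    /* USB_ENDPOINT_XFER_ISOC */
	unsigned interval;   /* service intervals covered by one request */
	unsigned free_trbs;  /* free slots left in the TRB ring */
};

/* Returns the TRB count a request needs, or 0 if the ring cannot hold it. */
static unsigned trbs_needed(const struct ep_state *ep, unsigned mapped_sgs)
{
	unsigned per_td = mapped_sgs ? mapped_sgs : 1;              /* one TRB per mapped segment */
	unsigned total  = ep->is_isoc ? ep->interval * per_td : per_td;

	return total <= ep->free_trbs ? total : 0;
}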
+ * Fill LINK TRBs for left trb space to simply software process logic. + */ + while (priv_ep->enqueue) { + *trb = *link_trb; + trace_cdns3_prepare_trb(priv_ep, trb); + + cdns3_ep_inc_enq(priv_ep); + trb = priv_ep->trb_pool + priv_ep->enqueue; + priv_req->trb = trb; + } + } + } + + if (num_trb > priv_ep->free_trbs) { + priv_ep->flags |= EP_RING_FULL; + return -ENOBUFS; } if (priv_dev->dev_ver <= DEV_VER_V2) togle_pcs = cdns3_wa1_update_guard(priv_ep, trb); - if (sg_supported) - s = request->sg; - /* set incorrect Cycle Bit for first trb*/ control = priv_ep->pcs ? 0 : TRB_CYCLE; trb->length = 0; @@ -1207,6 +1224,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, do { u32 length; + if (!(sg_iter % num_trb_req) && sg_supported) + s = request->sg; + /* fill TRB */ control |= TRB_TYPE(TRB_NORMAL); if (sg_supported) { @@ -1221,7 +1241,36 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, total_tdl += DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); - trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) | + trb_burst = priv_ep->trb_burst_size; + + /* + * Supposed DMA cross 4k bounder problem should be fixed at DEV_VER_V2, but still + * met problem when do ISO transfer if sg enabled. + * + * Data pattern likes below when sg enabled, package size is 1k and mult is 2 + * [UVC Header(8B) ] [data(3k - 8)] ... + * + * The received data at offset 0xd000 will get 0xc000 data, len 0x70. Error happen + * as below pattern: + * 0xd000: wrong + * 0xe000: wrong + * 0xf000: correct + * 0x10000: wrong + * 0x11000: wrong + * 0x12000: correct + * ... + * + * But it is still unclear about why error have not happen below 0xd000, it should + * cross 4k bounder. But anyway, the below code can fix this problem. + * + * To avoid DMA cross 4k bounder at ISO transfer, reduce burst len according to 16. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_dev->dev_ver <= DEV_VER_V2) + if (ALIGN_DOWN(trb->buffer, SZ_4K) != + ALIGN_DOWN(trb->buffer + length, SZ_4K)) + trb_burst = 16; + + trb->length |= cpu_to_le32(TRB_BURST_LEN(trb_burst) | TRB_LEN(length)); pcs = priv_ep->pcs ? 
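/*
 * Illustrative sketch, not part of any hunk in this series: the workaround
 * above drops the TRB burst length to 16 whenever an ISO buffer starts and
 * ends in different 4 KiB pages on controllers up to DEV_VER_V2. Plain-C
 * stand-ins replace the kernel's ALIGN_DOWN()/SZ_4K helpers; the helper
 * names are editorial.
 */
#include <stdbool.h>
#include <stdint.h>

#define SZ_4K 0x1000u

/* True if addr and addr + len fall in different 4 KiB pages (same test as above). */
static bool in_different_4k_pages(uint64_t addr, uint32_t len)
{
	return (addr & ~(uint64_t)(SZ_4K - 1)) !=
	       ((addr + (uint64_t)len) & ~(uint64_t)(SZ_4K - 1));
}

/* Keep the configured burst unless the buffer spans a 4 KiB boundary. */
static uint32_t pick_trb_burst(uint32_t configured_burst, uint64_t addr, uint32_t len)
{
	return in_different_4k_pages(addr, len) ? 16 : configured_burst;
}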
TRB_CYCLE : 0; @@ -1248,7 +1297,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, if (sg_supported) { trb->control |= cpu_to_le32(TRB_ISP); /* Don't set chain bit for last TRB */ - if (sg_iter < num_trb - 1) + if ((sg_iter % num_trb_req) < num_trb_req - 1) trb->control |= cpu_to_le32(TRB_CHAIN); s = sg_next(s); @@ -1506,6 +1555,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, /* The TRB was changed as link TRB, and the request was handled at ep_dequeue */ while (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { + + /* ISO ep_traddr may stop at LINK TRB */ + if (priv_ep->dequeue == cdns3_get_dma_pos(priv_dev, priv_ep) && + priv_ep->type == USB_ENDPOINT_XFER_ISOC) + break; + trace_cdns3_complete_trb(priv_ep, trb); cdns3_ep_inc_deq(priv_ep); trb = priv_ep->trb_pool + priv_ep->dequeue; @@ -1538,6 +1593,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, } if (request_handled) { + /* TRBs are duplicated by priv_ep->interval time for ISO IN */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && priv_ep->dir) + request->actual /= priv_ep->interval; + cdns3_gadget_giveback(priv_ep, priv_req, 0); request_handled = false; transfer_end = false; @@ -2033,11 +2092,10 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) bool is_iso_ep = (priv_ep->type == USB_ENDPOINT_XFER_ISOC); struct cdns3_device *priv_dev = priv_ep->cdns3_dev; u32 bEndpointAddress = priv_ep->num | priv_ep->dir; - u32 max_packet_size = 0; - u8 maxburst = 0; + u32 max_packet_size = priv_ep->wMaxPacketSize; + u8 maxburst = priv_ep->bMaxBurst; u32 ep_cfg = 0; u8 buffering; - u8 mult = 0; int ret; buffering = priv_dev->ep_buf_size - 1; @@ -2059,8 +2117,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) break; default: ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_ISOC); - mult = priv_dev->ep_iso_burst - 1; - buffering = mult + 1; + buffering = (priv_ep->bMaxBurst + 1) * (priv_ep->mult + 1) - 1; } switch (priv_dev->gadget.speed) { @@ -2071,17 +2128,8 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) max_packet_size = is_iso_ep ? 1024 : 512; break; case USB_SPEED_SUPER: - /* It's limitation that driver assumes in driver. */ - mult = 0; - max_packet_size = 1024; - if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) { - maxburst = priv_dev->ep_iso_burst - 1; - buffering = (mult + 1) * - (maxburst + 1); - - if (priv_ep->interval > 1) - buffering++; - } else { + if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) { + max_packet_size = 1024; maxburst = priv_dev->ep_buf_size - 1; } break; @@ -2110,7 +2158,6 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) if (priv_dev->dev_ver < DEV_VER_V2) priv_ep->trb_burst_size = 16; - mult = min_t(u8, mult, EP_CFG_MULT_MAX); buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX); maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX); @@ -2144,7 +2191,7 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable) } ep_cfg |= EP_CFG_MAXPKTSIZE(max_packet_size) | - EP_CFG_MULT(mult) | + EP_CFG_MULT(priv_ep->mult) | /* must match EP setting */ EP_CFG_BUFFERING(buffering) | EP_CFG_MAXBURST(maxburst); @@ -2234,6 +2281,13 @@ usb_ep *cdns3_gadget_match_ep(struct usb_gadget *gadget, priv_ep->type = usb_endpoint_type(desc); priv_ep->flags |= EP_CLAIMED; priv_ep->interval = desc->bInterval ? 
BIT(desc->bInterval - 1) : 0; + priv_ep->wMaxPacketSize = usb_endpoint_maxp(desc); + priv_ep->mult = USB_EP_MAXP_MULT(priv_ep->wMaxPacketSize); + priv_ep->wMaxPacketSize &= USB_ENDPOINT_MAXP_MASK; + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC && comp_desc) { + priv_ep->mult = USB_SS_MULT(comp_desc->bmAttributes) - 1; + priv_ep->bMaxBurst = comp_desc->bMaxBurst; + } spin_unlock_irqrestore(&priv_dev->lock, flags); return &priv_ep->endpoint; @@ -3015,22 +3069,40 @@ static int cdns3_gadget_check_config(struct usb_gadget *gadget) struct cdns3_endpoint *priv_ep; struct usb_ep *ep; int n_in = 0; + int iso = 0; + int out = 1; int total; + int n; list_for_each_entry(ep, &gadget->ep_list, ep_list) { priv_ep = ep_to_cdns3_ep(ep); - if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN)) - n_in++; + if (!(priv_ep->flags & EP_CLAIMED)) + continue; + + n = (priv_ep->mult + 1) * (priv_ep->bMaxBurst + 1); + if (ep->address & USB_DIR_IN) { + /* + * ISO transfer: DMA start move data when get ISO, only transfer + * data as min(TD size, iso). No benefit for allocate bigger + * internal memory than 'iso'. + */ + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) + iso += n; + else + n_in++; + } else { + if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) + out = max_t(int, out, n); + } } /* 2KB are reserved for EP0, 1KB for out*/ - total = 2 + n_in + 1; + total = 2 + n_in + out + iso; if (total > priv_dev->onchip_buffers) return -ENOMEM; - priv_dev->ep_buf_size = priv_dev->ep_iso_burst = - (priv_dev->onchip_buffers - 2) / (n_in + 1); + priv_dev->ep_buf_size = (priv_dev->onchip_buffers - 2 - iso) / (n_in + out); return 0; } diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h index fbe4a8e3aa89..086a7bb83897 100644 --- a/drivers/usb/cdns3/cdns3-gadget.h +++ b/drivers/usb/cdns3/cdns3-gadget.h @@ -1168,6 +1168,9 @@ struct cdns3_endpoint { u8 dir; u8 num; u8 type; + u8 mult; + u8 bMaxBurst; + u16 wMaxPacketSize; int interval; int free_trbs; diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index b9227f41cf1c..763d6858a8e6 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -523,6 +523,13 @@ static irqreturn_t ci_irq_handler(int irq, void *data) u32 otgsc = 0; if (ci->in_lpm) { + /* + * If we already have a wakeup irq pending there, + * let's just return to wait resume finished firstly. + */ + if (ci->wakeup_int) + return IRQ_HANDLED; + disable_irq_nosync(irq); ci->wakeup_int = true; pm_runtime_get(ci->dev); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 36bf051b345b..2a7eea4e251a 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -892,6 +892,9 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state) struct acm *acm = tty->driver_data; int retval; + if (!(acm->ctrl_caps & USB_CDC_CAP_BRK)) + return -EOPNOTSUPP; + retval = acm_send_break(acm, state ? 0xffff : 0); if (retval < 0) dev_dbg(&acm->control->dev, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index fb3efddd2f29..31da608c8641 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -277,48 +277,11 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) /* * We're resetting only the device side because, if we're in host mode, * XHCI driver will reset the host block. If dwc3 was configured for - * host-only mode or current role is host, then we can return early. + * host-only mode, then we can return early. 
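/*
 * Illustrative sketch, not part of any hunk in this series: the reworked
 * cdns3_gadget_check_config() above budgets the controller's on-chip RAM as
 * 2 KB for EP0, at least 1 KB (or the largest ISO OUT requirement) for OUT,
 * the summed ISO IN requirement, and one share per non-ISO IN endpoint.
 * Types and names here are editorial stand-ins; only the arithmetic mirrors
 * the driver.
 */
#include <stdbool.h>

struct ep_claim {
	bool     in;        /* IN direction */
	bool     isoc;      /* isochronous endpoint */
	unsigned mult;      /* mult, 0-based, as in the descriptor */
	unsigned maxburst;  /* bMaxBurst, 0-based */
};

/* Returns the per-endpoint buffer share in KB, or 0 if the on-chip RAM is too small. */
static unsigned onchip_budget_kb(const struct ep_claim *eps, unsigned n_eps,
				 unsigned onchip_kb)
{
	unsigned n_in = 0, iso = 0, out = 1;	/* at least 1 KB kept for OUT */

	for (unsigned i = 0; i < n_eps; i++) {
		unsigned need = (eps[i].mult + 1) * (eps[i].maxburst + 1);

		if (eps[i].in) {
			if (eps[i].isoc)
				iso += need;
			else
				n_in++;
		} else if (eps[i].isoc && need > out) {
			out = need;
		}
	}

	if (2 + n_in + out + iso > onchip_kb)	/* 2 KB reserved for EP0 */
		return 0;			/* would not fit: the driver returns -ENOMEM */

	return (onchip_kb - 2 - iso) / (n_in + out);
}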
*/ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) return 0; - /* - * If the dr_mode is host and the dwc->current_dr_role is not the - * corresponding DWC3_GCTL_PRTCAP_HOST, then the dwc3_core_init_mode - * isn't executed yet. Ensure the phy is ready before the controller - * updates the GCTL.PRTCAPDIR or other settings by soft-resetting - * the phy. - * - * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n - * is port index. If this is a multiport host, then we need to reset - * all active ports. - */ - if (dwc->dr_mode == USB_DR_MODE_HOST) { - u32 usb3_port; - u32 usb2_port; - - usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); - usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); - - usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); - - /* Small delay for phy reset assertion */ - usleep_range(1000, 2000); - - usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port); - - usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port); - - /* Wait for clock synchronization */ - msleep(50); - return 0; - } - reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg |= DWC3_DCTL_CSFTRST; reg &= ~DWC3_DCTL_RUN_STOP; diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index c4c1fbc12b4c..dc968960769e 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -429,8 +429,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, temp = size; size -= temp; next += temp; - if (temp == size) - goto done; } temp = snprintf(next, size, "\n"); @@ -440,7 +438,6 @@ static void qh_lines(struct fotg210_hcd *fotg210, struct fotg210_qh *qh, size -= temp; next += temp; -done: *sizep = size; *nextp = next; } diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index a0921687444b..bb4f80627cdc 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -7,6 +7,7 @@ * Chunfeng Yun */ +#include #include #include #include @@ -73,6 +74,9 @@ #define FRMCNT_LEV1_RANG (0x12b << 8) #define FRMCNT_LEV1_RANG_MASK GENMASK(19, 8) +#define HSCH_CFG1 0x960 +#define SCH3_RXFIFO_DEPTH_MASK GENMASK(21, 20) + #define SS_GEN2_EOF_CFG 0x990 #define SSG2EOF_OFFSET 0x3c @@ -114,6 +118,8 @@ #define SSC_IP_SLEEP_EN BIT(4) #define SSC_SPM_INT_EN BIT(1) +#define SCH_FIFO_TO_KB(x) ((x) >> 10) + enum ssusb_uwk_vers { SSUSB_UWK_V1 = 1, SSUSB_UWK_V2, @@ -165,6 +171,35 @@ static void xhci_mtk_set_frame_interval(struct xhci_hcd_mtk *mtk) writel(value, hcd->regs + SS_GEN2_EOF_CFG); } +/* + * workaround: usb3.2 gen1 isoc rx hw issue + * host send out unexpected ACK afer device fininsh a burst transfer with + * a short packet. 
+ */ +static void xhci_mtk_rxfifo_depth_set(struct xhci_hcd_mtk *mtk) +{ + struct usb_hcd *hcd = mtk->hcd; + u32 value; + + if (!mtk->rxfifo_depth) + return; + + value = readl(hcd->regs + HSCH_CFG1); + value &= ~SCH3_RXFIFO_DEPTH_MASK; + value |= FIELD_PREP(SCH3_RXFIFO_DEPTH_MASK, + SCH_FIFO_TO_KB(mtk->rxfifo_depth) - 1); + writel(value, hcd->regs + HSCH_CFG1); +} + +static void xhci_mtk_init_quirk(struct xhci_hcd_mtk *mtk) +{ + /* workaround only for mt8195 */ + xhci_mtk_set_frame_interval(mtk); + + /* workaround for SoCs using SSUSB about before IPM v1.6.0 */ + xhci_mtk_rxfifo_depth_set(mtk); +} + static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) { struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; @@ -453,8 +488,7 @@ static int xhci_mtk_setup(struct usb_hcd *hcd) if (ret) return ret; - /* workaround only for mt8195 */ - xhci_mtk_set_frame_interval(mtk); + xhci_mtk_init_quirk(mtk); } ret = xhci_gen_setup(hcd, xhci_mtk_quirks); @@ -531,6 +565,8 @@ static int xhci_mtk_probe(struct platform_device *pdev) of_property_read_u32(node, "mediatek,u2p-dis-msk", &mtk->u2p_dis_msk); + of_property_read_u32(node, "rx-fifo-depth", &mtk->rxfifo_depth); + ret = usb_wakeup_of_property_parse(mtk, node); if (ret) { dev_err(dev, "failed to parse uwk property\n"); diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index 1174a510dd38..2a6a47d0f09a 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -160,6 +160,8 @@ struct xhci_hcd_mtk { struct regmap *uwk; u32 uwk_reg_base; u32 uwk_vers; + /* quirk */ + u32 rxfifo_depth; }; static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd) diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index abb1cd35d8a6..447887f101f5 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -1247,14 +1247,19 @@ static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf) struct mon_reader_bin *rp = vmf->vma->vm_private_data; unsigned long offset, chunk_idx; struct page *pageptr; + unsigned long flags; + spin_lock_irqsave(&rp->b_lock, flags); offset = vmf->pgoff << PAGE_SHIFT; - if (offset >= rp->b_size) + if (offset >= rp->b_size) { + spin_unlock_irqrestore(&rp->b_lock, flags); return VM_FAULT_SIGBUS; + } chunk_idx = offset / CHUNK_SIZE; pageptr = rp->b_vec[chunk_idx].pg; get_page(pageptr); vmf->page = pageptr; + spin_unlock_irqrestore(&rp->b_lock, flags); return 0; } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index 9299df53eb9d..160c9264339f 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -388,8 +388,7 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect) static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy) { - return IS_ENABLED(CONFIG_USB_OTG) && - mxs_phy->phy.last_event == USB_EVENT_ID; + return mxs_phy->phy.last_event == USB_EVENT_ID; } static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 05e28a5ce42b..fe2173e37b06 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1033,9 +1033,9 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, ACTISENSE_USG_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGT_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_NGW_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AC_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AD_PID) }, - { USB_DEVICE(FTDI_VID, ACTISENSE_D9AE_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_UID_PID) 
}, + { USB_DEVICE(FTDI_VID, ACTISENSE_USA_PID) }, + { USB_DEVICE(FTDI_VID, ACTISENSE_NGX_PID) }, { USB_DEVICE(FTDI_VID, ACTISENSE_D9AF_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEAGAUGE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASWITCH_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index e2099445db70..21a2b5a25fc0 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1568,9 +1568,9 @@ #define ACTISENSE_USG_PID 0xD9A9 /* USG USB Serial Adapter */ #define ACTISENSE_NGT_PID 0xD9AA /* NGT NMEA2000 Interface */ #define ACTISENSE_NGW_PID 0xD9AB /* NGW NMEA2000 Gateway */ -#define ACTISENSE_D9AC_PID 0xD9AC /* Actisense Reserved */ -#define ACTISENSE_D9AD_PID 0xD9AD /* Actisense Reserved */ -#define ACTISENSE_D9AE_PID 0xD9AE /* Actisense Reserved */ +#define ACTISENSE_UID_PID 0xD9AC /* USB Isolating Device */ +#define ACTISENSE_USA_PID 0xD9AD /* USB to Serial Adapter */ +#define ACTISENSE_NGX_PID 0xD9AE /* NGX NMEA2000 Gateway */ #define ACTISENSE_D9AF_PID 0xD9AF /* Actisense Reserved */ #define CHETCO_SEAGAUGE_PID 0xA548 /* SeaGauge USB Adapter */ #define CHETCO_SEASWITCH_PID 0xA549 /* SeaSwitch USB Adapter */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7f2aa72d52e6..4adef9259870 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -272,6 +272,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200U 0x0901 +#define QUECTEL_PRODUCT_EG912Y 0x6001 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200A 0x6005 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 @@ -1232,6 +1233,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, 0x0700, 0xff), /* BG95 */ .driver_info = RSVD(3) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, @@ -1244,6 +1246,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG912Y, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, @@ -2242,6 +2245,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, { USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0da, 0xff), /* Foxconn T99W265 MBIM variant */ + .driver_info = RSVD(3) | RSVD(5) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */ .driver_info = RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */ diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 20dcbccb290b..fd68204374f2 100644 --- 
a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1305,6 +1305,17 @@ UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_INITIAL_READ10 ), +/* + * Patch by Tasos Sahanidis + * This flash drive always shows up with write protect enabled + * during the first mode sense. + */ +UNUSUAL_DEV(0x0951, 0x1697, 0x0100, 0x0100, + "Kingston", + "DT Ultimate G3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_WP_DETECT), + /* * This Pentax still camera is not conformant * to the USB storage specification: - diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c index 5a09a09cca70..cce3d1837104 100644 --- a/drivers/vdpa/alibaba/eni_vdpa.c +++ b/drivers/vdpa/alibaba/eni_vdpa.c @@ -497,7 +497,7 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!eni_vdpa->vring) { ret = -ENOMEM; ENI_ERR(pdev, "failed to allocate virtqueues\n"); - goto err; + goto err_remove_vp_legacy; } for (i = 0; i < eni_vdpa->queues; i++) { @@ -509,11 +509,13 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues); if (ret) { ENI_ERR(pdev, "failed to register to vdpa bus\n"); - goto err; + goto err_remove_vp_legacy; } return 0; +err_remove_vp_legacy: + vp_legacy_remove(&eni_vdpa->ldev); err: put_device(&eni_vdpa->vdpa.dev); return ret; diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index ed5e4a5699b8..fc8d5ad1ee47 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -132,11 +132,7 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy return 0; inode_lock(inode); - /* Kill off the delayed work */ - cancel_delayed_work_sync(&info->deferred_work); - - /* Run it immediately */ - schedule_delayed_work(&info->deferred_work, 0); + flush_delayed_work(&info->deferred_work); inode_unlock(inode); return 0; @@ -321,7 +317,7 @@ static void fb_deferred_io_lastclose(struct fb_info *info) struct page *page; int i; - cancel_delayed_work_sync(&info->deferred_work); + flush_delayed_work(&info->deferred_work); /* clear out the mapping that we setup */ for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c index b194e71f07bf..aa51cb72cbba 100644 --- a/drivers/video/fbdev/imsttfb.c +++ b/drivers/video/fbdev/imsttfb.c @@ -1419,7 +1419,6 @@ static int init_imstt(struct fb_info *info) if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) { printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel); - framebuffer_release(info); return -ENODEV; } @@ -1452,10 +1451,11 @@ static int init_imstt(struct fb_info *info) FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_YPAN; - fb_alloc_cmap(&info->cmap, 0, 0); + if (fb_alloc_cmap(&info->cmap, 0, 0)) + return -ENODEV; if (register_framebuffer(info) < 0) { - framebuffer_release(info); + fb_dealloc_cmap(&info->cmap); return -ENODEV; } diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index 36ada87b49a4..32b8374abeca 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -42,6 +42,7 @@ #include
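/*
 * Illustrative sketch, not part of any hunk in this series: the xhci-mtk hunk
 * earlier in this diff programs the new rx-fifo-depth quirk into a two-bit
 * field of HSCH_CFG1, encoded as (depth in KB) - 1, and skips the register
 * entirely when no depth was given. The macros below are plain-C stand-ins
 * for the kernel's GENMASK()/FIELD_PREP(); the function name is editorial.
 */
#include <stdint.h>

#define HSCH_CFG1_RXFIFO_MASK	(0x3u << 20)	/* stands in for GENMASK(21, 20) */
#define FIFO_BYTES_TO_KB(x)	((x) >> 10)

/* Returns the HSCH_CFG1 value with the RX FIFO depth field updated. */
static uint32_t encode_rxfifo_depth(uint32_t hsch_cfg1, uint32_t depth_bytes)
{
	uint32_t field;

	if (!depth_bytes)		/* quirk not requested: leave the register alone */
		return hsch_cfg1;

	field = FIFO_BYTES_TO_KB(depth_bytes) - 1;	/* hardware stores depth-in-KB minus one */
	hsch_cfg1 &= ~HSCH_CFG1_RXFIFO_MASK;
	hsch_cfg1 |= (field << 20) & HSCH_CFG1_RXFIFO_MASK;
	return hsch_cfg1;
}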