Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	arch/s390/net/bpf_jit_comp.c
	drivers/net/ethernet/ti/netcp_ethss.c
	net/bridge/br_multicast.c
	net/ipv4/ip_fragment.c

All four conflicts were cases of simple overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5510b3c2a1
@@ -35,3 +35,6 @@ the PCIe specification.

NOTE: this only applies to the SMMU itself, not
masters connected upstream of the SMMU.

- hisilicon,broken-prefetch-cmd
: Avoid sending CMD_PREFETCH_* commands to the SMMU.
@@ -17,7 +17,6 @@ Required properties:
"fsl,imx6sx-usdhc"

Optional properties:
- fsl,cd-controller : Indicate to use controller internal card detection
- fsl,wp-controller : Indicate to use controller internal write protection
- fsl,delay-line : Specify the number of delay cells for override mode.
This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70004000 0x4000>;
interrupts = <1>;
fsl,cd-controller;
fsl,wp-controller;
};
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
Required properties:
- compatible : "mediatek,mt8173-max98090"
- mediatek,audio-codec: the phandle of the MAX98090 audio codec
- mediatek,platform: the phandle of MT8173 ASoC platform

Example:

sound {
compatible = "mediatek,mt8173-max98090";
mediatek,audio-codec = <&max98090>;
mediatek,platform = <&afe>;
};
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
Required properties:
- compatible : "mediatek,mt8173-rt5650-rt5676"
- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
- mediatek,platform: the phandle of MT8173 ASoC platform

Example:

sound {
compatible = "mediatek,mt8173-rt5650-rt5676";
mediatek,audio-codec = <&rt5650 &rt5676>;
mediatek,platform = <&afe>;
};
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
Required properties:
- compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
- reg: Base address and size of the controllers memory area
- clocks: phandle to the AHB clock.
- clocks: phandle of the AHB clock.
- clock-names: has to be "ahb".
- #address-cells: <1>, as required by generic SPI binding.
- #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.

Example:

spi@1F000000 {
spi@1f000000 {
compatible = "qca,ar9132-spi", "qca,ar7100-spi";
reg = <0x1F000000 0x10>;
reg = <0x1f000000 0x10>;

clocks = <&pll 2>;
clock-names = "ahb";
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
temp[2-9]_input CPU temperatures (1/1000 degree,
0.125 degree resolution)

fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode
pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
Setting SmartFan mode is supported only if it has been
previously configured by BIOS (or configuration EEPROM)

fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode
pwm[1-4] R/O in SmartFan mode, R/W in manual control mode

The driver checks sensor control registers and does not export the sensors
that are not enabled. Anyway, a sensor that is enabled may actually be not
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi_proto.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n"
buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"

if proto_ident == "FC":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
elif proto_ident == "SAS":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
elif proto_ident == "iSCSI":
buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"

buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):

buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .module = THIS_MODULE,\n"
buf += " .name = " + fabric_mod_name + ",\n"
buf += " .name = \"" + fabric_mod_name + "\",\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += "\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
buf += "};\n\n"

buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " return target_register_template(" + fabric_mod_name + "_ops);\n"
buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"

buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n"
buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
buf += "};\n\n"

buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
MAINTAINERS | 16
@@ -5899,7 +5899,6 @@ S: Supported
F: Documentation/s390/kvm.txt
F: arch/s390/include/asm/kvm*
F: arch/s390/kvm/
F: drivers/s390/kvm/

KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -6848,6 +6847,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/usb/msi2500/

MSYSTEMS DISKONCHIP G3 MTD DRIVER
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/devices/docg3*

MT9M032 APTINA SENSOR DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
@@ -10912,6 +10917,15 @@ F: drivers/block/virtio_blk.c
F: include/linux/virtio_*.h
F: include/uapi/linux/virtio_*.h

VIRTIO DRIVERS FOR S390
M: Christian Borntraeger <borntraeger@de.ibm.com>
M: Cornelia Huck <cornelia.huck@de.ibm.com>
L: linux-s390@vger.kernel.org
L: virtualization@lists.linux-foundation.org
L: kvm@vger.kernel.org
S: Supported
F: drivers/s390/virtio/

VIRTIO GPU DRIVER
M: David Airlie <airlied@linux.ie>
M: Gerd Hoffmann <kraxel@redhat.com>
Makefile | 2
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma sheep

# *DOCUMENTATION*
@@ -10,6 +10,7 @@
*/

/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "imx25.dtsi"

@@ -114,8 +115,8 @@ &can1 {
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
cd-gpios = <&gpio2 1 0>;
wp-gpios = <&gpio2 0 0>;
cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -98,7 +98,7 @@ &ecspi2 {
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
bus-width = <4>;
status = "okay";
};
@@ -103,8 +103,8 @@ volume-down {
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
cd-gpios = <&gpio1 1 0>;
wp-gpios = <&gpio1 9 0>;
cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -124,8 +124,8 @@ &can2 {
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
cd-gpios = <&gpio1 1 0>;
wp-gpios = <&gpio1 9 0>;
cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -147,8 +147,8 @@ &ssi2 {
&esdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc3>;
cd-gpios = <&gpio3 11 0>;
wp-gpios = <&gpio3 12 0>;
cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
bus-width = <8>;
status = "okay";
};
@@ -41,8 +41,8 @@ volume-down {
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
cd-gpios = <&gpio3 13 0>;
wp-gpios = <&gpio4 11 0>;
cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -41,8 +41,8 @@ &esdhc2 {
pinctrl-0 = <&pinctrl_esdhc2>,
<&pinctrl_esdhc2_cdwp>;
vmmc-supply = <&reg_3p3v>;
wp-gpios = <&gpio1 2 0>;
cd-gpios = <&gpio1 4 0>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "disabled";
};
@@ -183,7 +183,7 @@ spidev1: spi@1 {
};

&esdhc1 {
cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@ &esdhc1 {
};

&esdhc2 {
cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
@@ -119,8 +119,8 @@ &audmux {
&esdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
cd-gpios = <&gpio3 25 0>;
wp-gpios = <&gpio2 19 0>;
cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -305,8 +305,8 @@ &usbotg {
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio1 4 0>;
wp-gpios = <&gpio1 2 0>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -314,8 +314,8 @@ &usdhc2 {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 0>;
wp-gpios = <&gpio7 1 0>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -11,6 +11,7 @@
*/

/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include "imx6q.dtsi"

/ {
@@ -196,8 +197,8 @@ &usbotg {
};

&usdhc3 {
cd-gpios = <&gpio6 11 0>;
wp-gpios = <&gpio6 14 0>;
cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3
@@ -7,6 +7,7 @@
*/

/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include "imx6q.dtsi"

/ {
@@ -161,7 +162,7 @@ &usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
cd-gpios = <&gpio6 11 0>;
cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -251,7 +251,7 @@ &usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -260,7 +260,7 @@ &usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
@@ -173,7 +173,7 @@ &usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
vmmc-supply = <&reg_3p3v>;
cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
status = "okay";
};

@@ -181,7 +181,7 @@ &usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
vmmc-supply = <&reg_3p3v>;
cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -392,7 +392,7 @@ &usbotg {
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
no-1-8-v;
status = "okay";
};
@@ -400,7 +400,7 @@ &usdhc1 {
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
no-1-8-v;
status = "okay";
@@ -258,6 +258,6 @@ &usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
vmmc-supply = <&reg_3p3v>;
cd-gpios = <&gpio1 4 0>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -1,3 +1,5 @@
#include <dt-bindings/gpio/gpio.h>

/ {
regulators {
compatible = "simple-bus";
@@ -181,7 +183,7 @@ &usbotg {
&usdhc2 { /* module slot */
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio2 2 0>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -318,7 +318,7 @@ &usbh1 {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -324,7 +324,7 @@ &usbh1 {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -417,7 +417,7 @@ &usbh1 {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -299,6 +299,6 @@ &pinctrl_hummingboard_usdhc2_aux
&pinctrl_hummingboard_usdhc2
>;
vmmc-supply = <&reg_3p3v>;
cd-gpios = <&gpio1 4 0>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -453,7 +453,7 @@ &usbotg {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 0>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -461,7 +461,7 @@ &usdhc3 {
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
cd-gpios = <&gpio2 6 0>;
cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -409,8 +409,8 @@ &usbotg {
&usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio1 4 0>;
wp-gpios = <&gpio1 2 0>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
status = "disabled";
};

@@ -418,7 +418,7 @@ &usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3
&pinctrl_usdhc3_cdwp>;
cd-gpios = <&gpio1 27 0>;
wp-gpios = <&gpio1 29 0>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
status = "disabled";
};
@@ -342,7 +342,7 @@ &usdhc2 {
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@@ -351,6 +351,6 @@ &usdhc3 {
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <4>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -467,8 +467,8 @@ &usdhc3 {
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
cd-gpios = <&gpio6 15 0>;
wp-gpios = <&gpio1 13 0>;
cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -448,8 +448,8 @@ &usbotg {
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio7 0 0>;
wp-gpios = <&gpio7 1 0>;
cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -457,7 +457,7 @@ &usdhc3 {
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
cd-gpios = <&gpio2 6 0>;
cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
@@ -562,8 +562,8 @@ &usdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <8>;
cd-gpios = <&gpio2 2 0>;
wp-gpios = <&gpio2 3 0>;
cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@@ -571,8 +571,8 @@ &usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
bus-width = <8>;
cd-gpios = <&gpio2 0 0>;
wp-gpios = <&gpio2 1 0>;
cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -680,7 +680,7 @@ &usdhc1 {
pinctrl-0 = <&pinctrl_usdhc1>;
bus-width = <4>;
no-1-8-v;
cd-gpios = <&gpio7 2 0>;
cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
status = "okay";
};
@@ -690,7 +690,7 @@ &usdhc2 {
pinctrl-0 = <&pinctrl_usdhc2>;
bus-width = <4>;
no-1-8-v;
cd-gpios = <&gpio7 3 0>;
cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
status = "okay";
};
@@ -9,6 +9,8 @@
*
*/

#include <dt-bindings/gpio/gpio.h>

/ {
regulators {
compatible = "simple-bus";
@@ -250,13 +252,13 @@ &usbotg {
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
cd-gpios = <&gpio1 2 0>;
cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
status = "okay";
};

&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
cd-gpios = <&gpio3 9 0>;
cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -617,8 +617,8 @@ &usdhc1 {
pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
bus-width = <8>;
cd-gpios = <&gpio4 7 0>;
wp-gpios = <&gpio4 6 0>;
cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@@ -627,8 +627,8 @@ &usdhc2 {
pinctrl-0 = <&pinctrl_usdhc2>;
pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
cd-gpios = <&gpio5 0 0>;
wp-gpios = <&gpio4 29 0>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
status = "okay";
};

@@ -637,6 +637,6 @@ &usdhc3 {
pinctrl-0 = <&pinctrl_usdhc3>;
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
cd-gpios = <&gpio3 22 0>;
cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -49,7 +49,7 @@ &usdhc3 {
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
bus-width = <8>;
cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
keep-power-in-suspend;
enable-sdio-wakeup;
@@ -61,7 +61,7 @@ &usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
no-1-8-v;
keep-power-in-suspend;
enable-sdio-wakup;
@@ -293,7 +293,7 @@ &usdhc3 {
pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
bus-width = <8>;
cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
keep-power-in-suspend;
enable-sdio-wakeup;
@@ -304,7 +304,7 @@ &usdhc3 {
&usdhc4 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc4>;
cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
status = "okay";
};
@@ -234,8 +234,8 @@ &uart1 {
&usdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
cd-gpios = <&gpio5 0 0>;
wp-gpios = <&gpio5 1 0>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
enable-sdio-wakeup;
keep-power-in-suspend;
status = "okay";
@ -122,12 +122,12 @@ static int __init uefi_init(void)
|
||||
|
||||
/* Show what we know for posterity */
|
||||
c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
|
||||
sizeof(vendor));
|
||||
sizeof(vendor) * sizeof(efi_char16_t));
|
||||
if (c16) {
|
||||
for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
|
||||
vendor[i] = c16[i];
|
||||
vendor[i] = '\0';
|
||||
early_memunmap(c16, sizeof(vendor));
|
||||
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
|
||||
}
|
||||
|
||||
pr_info("EFI v%u.%.02u by %s\n",
|
||||
|
@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
__clk_enable(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
if (IS_ERR_OR_NULL(clk))
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
__clk_disable(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
|
||||
unsigned long flags;
|
||||
unsigned long rate;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&clk_lock, flags);
|
||||
rate = clk->get_rate(clk);
|
||||
spin_unlock_irqrestore(&clk_lock, flags);
|
||||
@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
|
||||
{
|
||||
unsigned long flags, actual_rate;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_rate)
|
||||
return -ENOSYS;
|
||||
|
||||
@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
|
||||
unsigned long flags;
|
||||
long ret;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_rate)
|
||||
return -ENOSYS;
|
||||
|
||||
@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (!clk)
|
||||
return 0;
|
||||
|
||||
if (!clk->set_parent)
|
||||
return -ENOSYS;
|
||||
|
||||
@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
|
||||
|
||||
struct clk *clk_get_parent(struct clk *clk)
|
||||
{
|
||||
return clk->parent;
|
||||
return !clk ? NULL : clk->parent;
|
||||
}
|
||||
EXPORT_SYMBOL(clk_get_parent);
|
||||
|
||||
|
@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
|
||||
#define iowrite16 writew
|
||||
#define iowrite32 writel
|
||||
|
||||
#define ioread16be(addr) be16_to_cpu(readw(addr))
|
||||
#define ioread32be(addr) be32_to_cpu(readl(addr))
|
||||
#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
|
||||
#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
|
||||
|
||||
#define mmiowb()
|
||||
|
||||
#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
|
||||
|
@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
|
||||
union cache_topology ct;
|
||||
enum cache_type ctype;
|
||||
|
||||
if (!test_facility(34))
|
||||
return -EOPNOTSUPP;
|
||||
if (!this_cpu_ci)
|
||||
return -EINVAL;
|
||||
ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
|
||||
|
@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
|
||||
|
||||
void __init free_initrd_mem(unsigned long begin, unsigned long end)
|
||||
{
|
||||
free_bootmem(__pa(begin), end - begin);
|
||||
free_bootmem_late(__pa(begin), end - begin);
|
||||
}
|
||||
|
||||
static int __init setup_initrd(char *str)
|
||||
|
@ -205,7 +205,6 @@ sysexit_from_sys_call:
|
||||
movl RDX(%rsp), %edx /* arg3 */
|
||||
movl RSI(%rsp), %ecx /* arg4 */
|
||||
movl RDI(%rsp), %r8d /* arg5 */
|
||||
movl %ebp, %r9d /* arg6 */
|
||||
.endm
|
||||
|
||||
.macro auditsys_exit exit
|
||||
@ -236,6 +235,7 @@ sysexit_from_sys_call:
|
||||
|
||||
sysenter_auditsys:
|
||||
auditsys_entry_common
|
||||
movl %ebp, %r9d /* reload 6th syscall arg */
|
||||
jmp sysenter_dispatch
|
||||
|
||||
sysexit_audit:
|
||||
@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
|
||||
* 32-bit zero extended:
|
||||
*/
|
||||
ASM_STAC
|
||||
1: movl (%r8), %ebp
|
||||
1: movl (%r8), %r9d
|
||||
_ASM_EXTABLE(1b, ia32_badarg)
|
||||
ASM_CLAC
|
||||
orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
|
||||
@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
|
||||
cstar_do_call:
|
||||
/* 32-bit syscall -> 64-bit C ABI argument conversion */
|
||||
movl %edi, %r8d /* arg5 */
|
||||
movl %ebp, %r9d /* arg6 */
|
||||
/* r9 already loaded */ /* arg6 */
|
||||
xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
|
||||
movl %ebx, %edi /* arg1 */
|
||||
movl %edx, %edx /* arg3 (zero extension) */
|
||||
@ -358,7 +358,6 @@ cstar_dispatch:
|
||||
call *ia32_sys_call_table(, %rax, 8)
|
||||
movq %rax, RAX(%rsp)
|
||||
1:
|
||||
movl RCX(%rsp), %ebp
|
||||
DISABLE_INTERRUPTS(CLBR_NONE)
|
||||
TRACE_IRQS_OFF
|
||||
testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
|
||||
@ -392,7 +391,9 @@ sysretl_from_sys_call:
|
||||
|
||||
#ifdef CONFIG_AUDITSYSCALL
|
||||
cstar_auditsys:
|
||||
movl %r9d, R9(%rsp) /* register to be clobbered by call */
|
||||
auditsys_entry_common
|
||||
movl R9(%rsp), %r9d /* reload 6th syscall arg */
|
||||
jmp cstar_dispatch
|
||||
|
||||
sysretl_audit:
|
||||
@ -404,14 +405,16 @@ cstar_tracesys:
|
||||
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
|
||||
jz cstar_auditsys
|
||||
#endif
|
||||
xchgl %r9d, %ebp
|
||||
SAVE_EXTRA_REGS
|
||||
xorl %eax, %eax /* Do not leak kernel information */
|
||||
movq %rax, R11(%rsp)
|
||||
movq %rax, R10(%rsp)
|
||||
movq %rax, R9(%rsp)
|
||||
movq %r9, R9(%rsp)
|
||||
movq %rax, R8(%rsp)
|
||||
movq %rsp, %rdi /* &pt_regs -> arg1 */
|
||||
call syscall_trace_enter
|
||||
movl R9(%rsp), %r9d
|
||||
|
||||
/* Reload arg registers from stack. (see sysenter_tracesys) */
|
||||
movl RCX(%rsp), %ecx
|
||||
@ -421,6 +424,7 @@ cstar_tracesys:
|
||||
movl %eax, %eax /* zero extension */
|
||||
|
||||
RESTORE_EXTRA_REGS
|
||||
xchgl %ebp, %r9d
|
||||
jmp cstar_do_call
|
||||
END(entry_SYSCALL_compat)
|
||||
|
||||
|
@ -354,7 +354,7 @@ struct kvm_xcrs {
|
||||
struct kvm_sync_regs {
|
||||
};
|
||||
|
||||
#define KVM_QUIRK_LINT0_REENABLED (1 << 0)
|
||||
#define KVM_QUIRK_CD_NW_CLEARED (1 << 1)
|
||||
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
|
||||
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
|
||||
|
||||
#endif /* _ASM_X86_KVM_H */
|
||||
|
@ -951,6 +951,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
|
||||
if (!cqm_group_leader(event))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Getting up-to-date values requires an SMP IPI which is not
|
||||
* possible if we're being called in interrupt context. Return
|
||||
* the cached values instead.
|
||||
*/
|
||||
if (unlikely(in_interrupt()))
|
||||
goto out;
|
||||
|
||||
/*
|
||||
* Notice that we don't perform the reading of an RMID
|
||||
* atomically, because we can't hold a spin lock across the
|
||||
|
@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
|
||||
|
||||
setup_clear_cpu_cap(X86_FEATURE_XSAVE);
|
||||
setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
|
||||
setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
|
||||
setup_clear_cpu_cap(X86_FEATURE_XSAVES);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX2);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX512F);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
|
||||
setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
|
||||
setup_clear_cpu_cap(X86_FEATURE_MPX);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
|
||||
for (i = 0; i < APIC_LVT_NUM; i++)
|
||||
apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
|
||||
apic_update_lvtt(apic);
|
||||
if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
|
||||
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
|
||||
apic_set_reg(apic, APIC_LVT0,
|
||||
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
|
||||
apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
|
||||
|
@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
|
||||
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
|
||||
}
|
||||
|
||||
static u8 mtrr_disabled_type(void)
|
||||
{
|
||||
/*
|
||||
* Intel SDM 11.11.2.2: all MTRRs are disabled when
|
||||
* IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
|
||||
* memory type is applied to all of physical memory.
|
||||
*/
|
||||
return MTRR_TYPE_UNCACHABLE;
|
||||
}
|
||||
|
||||
/*
|
||||
* Three terms are used in the following code:
|
||||
* - segment, it indicates the address segments covered by fixed MTRRs.
|
||||
@ -434,6 +444,8 @@ struct mtrr_iter {
|
||||
|
||||
/* output fields. */
|
||||
int mem_type;
|
||||
/* mtrr is completely disabled? */
|
||||
bool mtrr_disabled;
|
||||
/* [start, end) is not fully covered in MTRRs? */
|
||||
bool partial_map;
|
||||
|
||||
@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
|
||||
static void mtrr_lookup_start(struct mtrr_iter *iter)
|
||||
{
|
||||
if (!mtrr_is_enabled(iter->mtrr_state)) {
|
||||
iter->partial_map = true;
|
||||
iter->mtrr_disabled = true;
|
||||
return;
|
||||
}
|
||||
|
||||
@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
|
||||
iter->mtrr_state = mtrr_state;
|
||||
iter->start = start;
|
||||
iter->end = end;
|
||||
iter->mtrr_disabled = false;
|
||||
iter->partial_map = false;
|
||||
iter->fixed = false;
|
||||
iter->range = NULL;
|
||||
@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
|
||||
return MTRR_TYPE_WRBACK;
|
||||
}
|
||||
|
||||
/* It is not covered by MTRRs. */
|
||||
if (iter.partial_map) {
|
||||
/*
|
||||
* We just check one page, partially covered by MTRRs is
|
||||
* impossible.
|
||||
*/
|
||||
WARN_ON(type != -1);
|
||||
type = mtrr_default_type(mtrr_state);
|
||||
}
|
||||
if (iter.mtrr_disabled)
|
||||
return mtrr_disabled_type();
|
||||
|
||||
/*
|
||||
* We just check one page, partially covered by MTRRs is
|
||||
* impossible.
|
||||
*/
|
||||
WARN_ON(iter.partial_map);
|
||||
|
||||
/* not contained in any MTRRs. */
|
||||
if (type == -1)
|
||||
return mtrr_default_type(mtrr_state);
|
||||
|
||||
return type;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
|
||||
@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
|
||||
return false;
|
||||
}
|
||||
|
||||
if (iter.mtrr_disabled)
|
||||
return true;
|
||||
|
||||
if (!iter.partial_map)
|
||||
return true;
|
||||
|
||||
|
@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
|
||||
* does not do it - this results in some delay at
|
||||
* reboot
|
||||
*/
|
||||
if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
|
||||
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
|
||||
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
|
||||
svm->vmcb->save.cr0 = cr0;
|
||||
mark_dirty(svm->vmcb, VMCB_CR);
|
||||
|
@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
|
||||
|
||||
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
|
||||
ipat = VMX_EPT_IPAT_BIT;
|
||||
cache = MTRR_TYPE_UNCACHABLE;
|
||||
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
|
||||
cache = MTRR_TYPE_WRBACK;
|
||||
else
|
||||
cache = MTRR_TYPE_UNCACHABLE;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
|
@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
|
||||
return kvm_register_write(vcpu, reg, val);
|
||||
}
|
||||
|
||||
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
|
||||
{
|
||||
return !(kvm->arch.disabled_quirks & quirk);
|
||||
}
|
||||
|
||||
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
|
||||
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
|
||||
void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
|
||||
|
@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
|
||||
!PageReserved(pfn_to_page(start_pfn + i)))
|
||||
return 1;
|
||||
|
||||
WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
|
||||
pgprot_t prot;
|
||||
int retval;
|
||||
void __iomem *ret_addr;
|
||||
int ram_region;
|
||||
|
||||
/* Don't allow wraparound or zero size */
|
||||
last_addr = phys_addr + size - 1;
|
||||
@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
|
||||
/*
|
||||
* Don't allow anybody to remap normal RAM that we're using..
|
||||
*/
|
||||
/* First check if whole region can be identified as RAM or not */
|
||||
ram_region = region_is_ram(phys_addr, size);
|
||||
if (ram_region > 0) {
|
||||
WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
|
||||
(unsigned long int)phys_addr,
|
||||
(unsigned long int)last_addr);
|
||||
pfn = phys_addr >> PAGE_SHIFT;
|
||||
last_pfn = last_addr >> PAGE_SHIFT;
|
||||
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
|
||||
__ioremap_check_ram) == 1) {
|
||||
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
|
||||
&phys_addr, &last_addr);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* If could not be identified(-1), check page by page */
|
||||
if (ram_region < 0) {
|
||||
pfn = phys_addr >> PAGE_SHIFT;
|
||||
last_pfn = last_addr >> PAGE_SHIFT;
|
||||
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
|
||||
__ioremap_check_ram) == 1)
|
||||
return NULL;
|
||||
}
|
||||
/*
|
||||
* Mappings have to be page-aligned
|
||||
*/
|
||||
|
@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
|
||||
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
||||
}
|
||||
}
|
||||
|
||||
const char *arch_vma_name(struct vm_area_struct *vma)
|
||||
{
|
||||
if (vma->vm_flags & VM_MPX)
|
||||
return "[mpx]";
|
||||
return NULL;
|
||||
}
|
||||
|
@ -20,20 +20,6 @@
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <asm/trace/mpx.h>
|
||||
|
||||
static const char *mpx_mapping_name(struct vm_area_struct *vma)
|
||||
{
|
||||
return "[mpx]";
|
||||
}
|
||||
|
||||
static struct vm_operations_struct mpx_vma_ops = {
|
||||
.name = mpx_mapping_name,
|
||||
};
|
||||
|
||||
static int is_mpx_vma(struct vm_area_struct *vma)
|
||||
{
|
||||
return (vma->vm_ops == &mpx_vma_ops);
|
||||
}
|
||||
|
||||
static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
|
||||
{
|
||||
if (is_64bit_mm(mm))
|
||||
@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
|
||||
/*
|
||||
* This is really a simplified "vm_mmap". it only handles MPX
|
||||
* bounds tables (the bounds directory is user-allocated).
|
||||
*
|
||||
* Later on, we use the vma->vm_ops to uniquely identify these
|
||||
* VMAs.
|
||||
*/
|
||||
static unsigned long mpx_mmap(unsigned long len)
|
||||
{
|
||||
@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
vma->vm_ops = &mpx_vma_ops;
|
||||
|
||||
if (vm_flags & VM_LOCKED) {
|
||||
up_write(&mm->mmap_sem);
|
||||
@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
|
||||
* so stop immediately and return an error. This
|
||||
* probably results in a SIGSEGV.
|
||||
*/
|
||||
if (!is_mpx_vma(vma))
|
||||
if (!(vma->vm_flags & VM_MPX))
|
||||
return -EINVAL;
|
||||
|
||||
len = min(vma->vm_end, end) - addr;
|
||||
@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
|
||||
* lots of tables even though we have no actual table
|
||||
* entries in use.
|
||||
*/
|
||||
while (next && is_mpx_vma(next))
|
||||
while (next && (next->vm_flags & VM_MPX))
|
||||
next = next->vm_next;
|
||||
while (prev && is_mpx_vma(prev))
|
||||
while (prev && (prev->vm_flags & VM_MPX))
|
||||
prev = prev->vm_prev;
|
||||
/*
|
||||
* We know 'start' and 'end' lie within an area controlled
|
||||
|
@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
|
||||
} else {
|
||||
unsigned long addr;
|
||||
unsigned long nr_pages =
|
||||
f->flush_end - f->flush_start / PAGE_SIZE;
|
||||
(f->flush_end - f->flush_start) / PAGE_SIZE;
|
||||
addr = f->flush_start;
|
||||
while (addr < f->flush_end) {
|
||||
__flush_tlb_single(addr);
|
||||
|
@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
|
||||
EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
|
||||
offsetof(struct bpf_array, map.max_entries));
|
||||
EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
|
||||
#define OFFSET1 44 /* number of bytes to jump */
|
||||
#define OFFSET1 47 /* number of bytes to jump */
|
||||
EMIT2(X86_JBE, OFFSET1); /* jbe out */
|
||||
label1 = cnt;
|
||||
|
||||
@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
|
||||
*/
|
||||
EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
|
||||
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
|
||||
#define OFFSET2 33
|
||||
#define OFFSET2 36
|
||||
EMIT2(X86_JA, OFFSET2); /* ja out */
|
||||
label2 = cnt;
|
||||
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
|
||||
EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
|
||||
|
||||
/* prog = array->prog[index]; */
|
||||
EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */
|
||||
EMIT1(offsetof(struct bpf_array, prog));
|
||||
EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
|
||||
offsetof(struct bpf_array, prog));
|
||||
EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
|
||||
|
||||
/* if (prog == NULL)
|
||||
|
17
block/bio.c
17
block/bio.c
@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
|
||||
* Allocates and returns a new bio which represents @sectors from the start of
|
||||
* @bio, and updates @bio to represent the remaining sectors.
|
||||
*
|
||||
* The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
|
||||
* responsibility to ensure that @bio is not freed before the split.
|
||||
* Unless this is a discard request the newly allocated bio will point
|
||||
* to @bio's bi_io_vec; it is the caller's responsibility to ensure that
|
||||
* @bio is not freed before the split.
|
||||
*/
|
||||
struct bio *bio_split(struct bio *bio, int sectors,
|
||||
gfp_t gfp, struct bio_set *bs)
|
||||
@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
|
||||
BUG_ON(sectors <= 0);
|
||||
BUG_ON(sectors >= bio_sectors(bio));
|
||||
|
||||
split = bio_clone_fast(bio, gfp, bs);
|
||||
/*
|
||||
* Discards need a mutable bio_vec to accommodate the payload
|
||||
* required by the DSM TRIM and UNMAP commands.
|
||||
*/
|
||||
if (bio->bi_rw & REQ_DISCARD)
|
||||
split = bio_clone_bioset(bio, gfp, bs);
|
||||
else
|
||||
split = bio_clone_fast(bio, gfp, bs);
|
||||
|
||||
if (!split)
|
||||
return NULL;
|
||||
|
||||
@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
|
||||
bio->bi_css = blkcg_css;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bio_associate_blkcg);
|
||||
|
||||
/**
|
||||
* bio_associate_current - associate a bio with %current
|
||||
@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
|
||||
bio->bi_css = task_get_css(current, blkio_cgrp_id);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bio_associate_current);
|
||||
|
||||
/**
|
||||
* bio_disassociate_task - undo bio_associate_current()
|
||||
|
@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
|
||||
return -EINVAL;
|
||||
|
||||
disk = get_gendisk(MKDEV(major, minor), &part);
|
||||
if (!disk || part)
|
||||
if (!disk)
|
||||
return -EINVAL;
|
||||
if (part) {
|
||||
put_disk(disk);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
spin_lock_irq(disk->queue->queue_lock);
|
||||
|
@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
|
||||
dev_warn(&device->dev, "Failed to change power state to %s\n",
|
||||
acpi_power_state_string(state));
|
||||
} else {
|
||||
device->power.state = state;
|
||||
device->power.state = target_state;
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
"Device [%s] transitioned to %s\n",
|
||||
device->pnp.bus_id,
|
||||
|
@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
|
||||
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
|
||||
dev->max_sectors);
|
||||
|
||||
if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
|
||||
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
|
||||
dev->max_sectors);
|
||||
|
||||
if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
|
||||
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
|
||||
|
||||
@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
|
||||
{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
|
||||
|
||||
/*
|
||||
* Causes silent data corruption with higher max sects.
|
||||
* http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
|
||||
*/
|
||||
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
|
||||
|
||||
/* Devices we expect to fail diagnostics */
|
||||
|
||||
/* Devices where NCQ should be avoided */
|
||||
@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
|
||||
ATA_HORKAGE_FIRMWARE_WARN },
|
||||
|
||||
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
|
||||
/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
|
||||
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
|
||||
|
||||
/* Blacklist entries taken from Silicon Image 3124/3132
|
||||
Windows driver .inf file - also several Linux problem reports */
|
||||
@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
|
||||
{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
|
||||
ATA_HORKAGE_ZERO_AFTER_TRIM, },
|
||||
|
||||
/* devices that don't properly handle TRIM commands */
|
||||
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
|
||||
|
||||
/*
|
||||
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
|
||||
* (Return Zero After Trim) flags in the ATA Command Set are
|
||||
@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
|
||||
else /* In the ancient relic department - skip all of this */
|
||||
return 0;
|
||||
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
|
||||
/* On some disks, this command causes spin-up, so we need longer timeout */
|
||||
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
|
||||
|
||||
DPRINTK("EXIT, err_mask=%x\n", err_mask);
|
||||
return err_mask;
|
||||
|
@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
|
||||
ATA_LFLAG_NO_SRST |
|
||||
ATA_LFLAG_ASSUME_ATA;
|
||||
}
|
||||
} else if (vendor == 0x11ab && devid == 0x4140) {
|
||||
/* Marvell 4140 quirks */
|
||||
ata_for_each_link(link, ap, EDGE) {
|
||||
/* port 4 is for SEMB device and it doesn't like SRST */
|
||||
if (link->pmp == 4)
|
||||
link->flags |= ATA_LFLAG_DISABLED;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
|
||||
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
|
||||
rbuf[15] = lowest_aligned;
|
||||
|
||||
if (ata_id_has_trim(args->id)) {
|
||||
if (ata_id_has_trim(args->id) &&
|
||||
!(dev->horkage & ATA_HORKAGE_NOTRIM)) {
|
||||
rbuf[14] |= 0x80; /* LBPME */
|
||||
|
||||
if (ata_id_has_zero_after_trim(args->id) &&
|
||||
|
@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
|
||||
|
||||
if (!ata_id_has_trim(ata_dev->id))
|
||||
mode = "unsupported";
|
||||
else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
|
||||
mode = "forced_unsupported";
|
||||
else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
|
||||
mode = "forced_unqueued";
|
||||
else if (ata_fpdma_dsm_supported(ata_dev))
|
||||
|
@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
|
||||
while ((entry = llist_del_all(&cq->list)) != NULL) {
|
||||
entry = llist_reverse_order(entry);
|
||||
do {
|
||||
struct request_queue *q = NULL;
|
||||
|
||||
cmd = container_of(entry, struct nullb_cmd, ll_list);
|
||||
entry = entry->next;
|
||||
if (cmd->rq)
|
||||
q = cmd->rq->q;
|
||||
end_cmd(cmd);
|
||||
|
||||
if (cmd->rq) {
|
||||
struct request_queue *q = cmd->rq->q;
|
||||
|
||||
if (!q->mq_ops && blk_queue_stopped(q)) {
|
||||
spin_lock(q->queue_lock);
|
||||
if (blk_queue_stopped(q))
|
||||
blk_start_queue(q);
|
||||
spin_unlock(q->queue_lock);
|
||||
}
|
||||
if (q && !q->mq_ops && blk_queue_stopped(q)) {
|
||||
spin_lock(q->queue_lock);
|
||||
if (blk_queue_stopped(q))
|
||||
blk_start_queue(q);
|
||||
spin_unlock(q->queue_lock);
|
||||
}
|
||||
} while (entry);
|
||||
}
|
||||
|
@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
|
||||
int ret = 0;
|
||||
|
||||
/* Some related CPUs might not be present (physically hotplugged) */
|
||||
for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
|
||||
for_each_cpu(j, policy->real_cpus) {
|
||||
if (j == policy->kobj_cpu)
|
||||
continue;
|
||||
|
||||
@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
|
||||
unsigned int j;
|
||||
|
||||
/* Some related CPUs might not be present (physically hotplugged) */
|
||||
for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) {
|
||||
for_each_cpu(j, policy->real_cpus) {
|
||||
if (j == policy->kobj_cpu)
|
||||
continue;
|
||||
|
||||
@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
|
||||
if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
|
||||
goto err_free_cpumask;
|
||||
|
||||
if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
|
||||
goto err_free_rcpumask;
|
||||
|
||||
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
|
||||
"cpufreq");
|
||||
if (ret) {
|
||||
pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
|
||||
goto err_free_rcpumask;
|
||||
goto err_free_real_cpus;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&policy->policy_list);
|
||||
@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
|
||||
|
||||
return policy;
|
||||
|
||||
err_free_real_cpus:
|
||||
free_cpumask_var(policy->real_cpus);
|
||||
err_free_rcpumask:
|
||||
free_cpumask_var(policy->related_cpus);
|
||||
err_free_cpumask:
|
||||
@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
|
||||
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
|
||||
|
||||
cpufreq_policy_put_kobj(policy, notify);
|
||||
free_cpumask_var(policy->real_cpus);
|
||||
free_cpumask_var(policy->related_cpus);
|
||||
free_cpumask_var(policy->cpus);
|
||||
kfree(policy);
|
||||
@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
||||
|
||||
pr_debug("adding CPU %u\n", cpu);
|
||||
|
||||
/*
|
||||
* Only possible if 'cpu' wasn't physically present earlier and we are
|
||||
* here from subsys_interface add callback. A hotplug notifier will
|
||||
* follow and we will handle it like logical CPU hotplug then. For now,
|
||||
* just create the sysfs link.
|
||||
*/
|
||||
if (cpu_is_offline(cpu))
|
||||
return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu);
|
||||
if (cpu_is_offline(cpu)) {
|
||||
/*
|
||||
* Only possible if we are here from the subsys_interface add
|
||||
* callback. A hotplug notifier will follow and we will handle
|
||||
* it as CPU online then. For now, just create the sysfs link,
|
||||
* unless there is no policy or the link is already present.
|
||||
*/
|
||||
policy = per_cpu(cpufreq_cpu_data, cpu);
|
||||
return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
|
||||
? add_cpu_dev_symlink(policy, cpu) : 0;
|
||||
}
|
||||
|
||||
if (!down_read_trylock(&cpufreq_rwsem))
|
||||
return 0;
|
||||
@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
|
||||
/* related cpus should atleast have policy->cpus */
|
||||
cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
|
||||
|
||||
/* Remember which CPUs have been present at the policy creation time. */
|
||||
if (!recover_policy)
|
||||
cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);

/*
* affected cpus must always be the one, which are online. We aren't
* managing offline cpus here.

@ -1420,8 +1433,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
return ret;
}

static int __cpufreq_remove_dev_prepare(struct device *dev,
struct subsys_interface *sif)
static int __cpufreq_remove_dev_prepare(struct device *dev)
{
unsigned int cpu = dev->id;
int ret = 0;

@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,

if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
if (ret)
pr_err("%s: Failed to stop governor\n", __func__);
return ret;
}
}

down_write(&policy->rwsem);

@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
return ret;
}

static int __cpufreq_remove_dev_finish(struct device *dev,
struct subsys_interface *sif)
static int __cpufreq_remove_dev_finish(struct device *dev)
{
unsigned int cpu = dev->id;
int ret;

@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
/* If cpu is last user of policy, free policy */
if (has_target()) {
ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
return ret;
}
}

/*

@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);

/* Free the policy only if the driver is getting removed. */
if (sif)
cpufreq_policy_free(policy, true);

return 0;
}

@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
unsigned int cpu = dev->id;
int ret;
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

/*
* Only possible if 'cpu' is getting physically removed now. A hotplug
* notifier should have already been called and we just need to remove
* link or free policy here.
*/
if (cpu_is_offline(cpu)) {
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
struct cpumask mask;
if (!policy)
return 0;

if (!policy)
return 0;
if (cpu_online(cpu)) {
__cpufreq_remove_dev_prepare(dev);
__cpufreq_remove_dev_finish(dev);
}

cpumask_copy(&mask, policy->related_cpus);
cpumask_clear_cpu(cpu, &mask);

/*
* Free policy only if all policy->related_cpus are removed
* physically.
*/
if (cpumask_intersects(&mask, cpu_present_mask)) {
remove_cpu_dev_symlink(policy, cpu);
return 0;
}
cpumask_clear_cpu(cpu, policy->real_cpus);

if (cpumask_empty(policy->real_cpus)) {
cpufreq_policy_free(policy, true);
return 0;
}

ret = __cpufreq_remove_dev_prepare(dev, sif);
if (cpu != policy->kobj_cpu) {
remove_cpu_dev_symlink(policy, cpu);
} else {
/*
* The CPU owning the policy object is going away. Move it to
* another suitable CPU.
*/
unsigned int new_cpu = cpumask_first(policy->real_cpus);
struct device *new_dev = get_cpu_device(new_cpu);

if (!ret)
ret = __cpufreq_remove_dev_finish(dev, sif);
dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);

return ret;
sysfs_remove_link(&new_dev->kobj, "cpufreq");
policy->kobj_cpu = new_cpu;
WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
}

return 0;
}

static void handle_update(struct work_struct *work)

@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
break;

case CPU_DOWN_PREPARE:
__cpufreq_remove_dev_prepare(dev, NULL);
__cpufreq_remove_dev_prepare(dev);
break;

case CPU_POST_DEAD:
__cpufreq_remove_dev_finish(dev, NULL);
__cpufreq_remove_dev_finish(dev);
break;

case CPU_DOWN_FAILED:

@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
.get_max = core_get_max_pstate,
.get_min = core_get_min_pstate,
.get_turbo = knl_get_turbo_pstate,
.get_scaling = core_get_scaling,
.set = core_set_pstate,
},
};

@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
return ret;
}

static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
int len)
{
struct cper_mem_err_compact cmem;

/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
if (len == sizeof(struct cper_sec_mem_err_old) &&
(mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
pr_err(FW_WARN "valid bits set for fields beyond structure\n");
return;
}
if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
if (mem->validation_bits & CPER_MEM_VALID_PA)

@ -405,8 +412,10 @@ static void cper_estatus_print_section(
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
printk("%s""section_type: memory error\n", newpfx);
if (gdata->error_data_length >= sizeof(*mem_err))
cper_print_mem(newpfx, mem_err);
if (gdata->error_data_length >=
sizeof(struct cper_sec_mem_err_old))
cper_print_mem(newpfx, mem_err,
gdata->error_data_length);
else
goto err_section_too_small;
} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {

@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;

@ -1626,6 +1629,7 @@ struct amdgpu_vce {
const struct firmware *fw; /* VCE firmware */
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
};

/*

@ -1862,6 +1866,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
bool valid;
bool sw;
bool hw;
};

struct amdgpu_device {
struct device *dev;
struct drm_device *ddev;

@ -2004,7 +2014,7 @@ struct amdgpu_device {

const struct amdgpu_ip_block_version *ip_blocks;
int num_ip_blocks;
bool *ip_block_enabled;
struct amdgpu_ip_block_status *ip_block_status;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);

@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return -EINVAL;
}

adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
if (adev->ip_block_enabled == NULL)
adev->ip_block_status = kcalloc(adev->num_ip_blocks,
sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
if (adev->ip_block_status == NULL)
return -ENOMEM;

if (adev->ip_blocks == NULL) {

@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d\n", i);
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].valid = false;
} else {
if (adev->ip_blocks[i].funcs->early_init) {
r = adev->ip_blocks[i].funcs->early_init((void *)adev);
if (r == -ENOENT)
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].valid = false;
else if (r)
return r;
else
adev->ip_block_enabled[i] = true;
adev->ip_block_status[i].valid = true;
} else {
adev->ip_block_enabled[i] = true;
adev->ip_block_status[i].valid = true;
}
}
}

@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
int i, r;

for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r)
return r;
adev->ip_block_status[i].sw = true;
/* need to do gmc hw init early so we can allocate gpu mem */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
r = amdgpu_vram_scratch_init(adev);

@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = amdgpu_wb_init(adev);
if (r)
return r;
adev->ip_block_status[i].hw = true;
}
}

for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].sw)
continue;
/* gmc hw init is done early */
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)

@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r)
return r;
adev->ip_block_status[i].hw = true;
}

return 0;

@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
int i = 0, r;

for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
/* enable clockgating to save power */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,

@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
int i, r;

for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].hw)
continue;
if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_wb_fini(adev);

@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
return r;
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */
adev->ip_block_status[i].hw = false;
}

for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].sw)
continue;
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */
adev->ip_block_enabled[i] = false;
adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false;
}

return 0;

@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
int i, r;

for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
/* ungate blocks so that suspend can properly shut them down */
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,

@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
int i, r;

for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_block_enabled[i])
if (!adev->ip_block_status[i].valid)
continue;
r = adev->ip_blocks[i].funcs->resume(adev);
if (r)

@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
kfree(adev->ip_block_enabled);
adev->ip_block_enabled = NULL;
kfree(adev->ip_block_status);
adev->ip_block_status = NULL;
adev->accel_working = false;
/* free i2c buses */
amdgpu_i2c_fini(adev);

@ -449,7 +449,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
* vital here, so they are not reported back to userspace.
*/
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
struct amdgpu_bo_va *bo_va, uint32_t operation)
{
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;

@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (r)
goto error_unlock;

r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

if (operation == AMDGPU_VA_OP_MAP)
r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
mutex_unlock(&bo_va->vm->mutex);

@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}

if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
amdgpu_gem_va_update_vm(adev, bo_va);
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

drm_gem_object_unreference_unlocked(gobj);
return r;

@ -180,17 +180,17 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);

if (ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);

if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
}

if (vm && ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);

if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);

old_ctx = ring->current_ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];

@ -235,7 +235,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file

for (i = 0; i < adev->num_ip_blocks; i++) {
if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i]) {
adev->ip_block_status[i].valid) {
ip.hw_ip_version_major = adev->ip_blocks[i].major;
ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
ip.capabilities_flags = 0;

@ -274,7 +274,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file

for (i = 0; i < adev->num_ip_blocks; i++)
if (adev->ip_blocks[i].type == type &&
adev->ip_block_enabled[i] &&
adev->ip_block_status[i].valid &&
count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
count++;

@ -416,7 +416,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info;
struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;

dev_info.device_id = dev->pdev->device;

@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config;

return copy_to_user(out, &dev_info,
min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;

@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}

#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24

static void
cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m)
{
struct cz_power_info *pi = cz_get_pi(adev);
struct amdgpu_clock_voltage_dependency_table *table =
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
u32 sclk, tmp;
u16 vddc;
struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
u32 sclk, vclk, dclk, ecclk, tmp;
u16 vddnb, vddgfx;

if (current_index >= NUM_SCLK_LEVELS) {
seq_printf(m, "invalid dpm profile %d\n", current_index);
if (sclk_index >= NUM_SCLK_LEVELS) {
seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
} else {
sclk = table->entries[current_index].clk;
tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
seq_printf(m, "power level %d sclk: %u vddc: %u\n",
current_index, sclk, vddc);
sclk = table->entries[sclk_index].clk;
seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
}

tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);

seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
if (!pi->uvd_power_gated) {
if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
} else {
vclk = uvd_table->entries[uvd_index].vclk;
dclk = uvd_table->entries[uvd_index].dclk;
seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
}
}

seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
if (!pi->vce_power_gated) {
if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
seq_printf(m, "invalid vce dpm level %d\n", vce_index);
} else {
ecclk = vce_table->entries[vce_index].ecclk;
seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
}
}
}

@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;

switch (mode) {
case DRM_MODE_DPMS_ON:

@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v10_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
/* Make sure VBLANK interrupt is still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v10_0_crtc_load_lut(crtc);
break;

@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;

switch (mode) {
case DRM_MODE_DPMS_ON:

@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
dce_v11_0_vga_enable(crtc, true);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v11_0_vga_enable(crtc, false);
/* Make sure VBLANK interrupt is still enabled */
type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
dce_v11_0_crtc_load_lut(crtc);
break;

@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
* sheduling on the ring. This function schedules the IB
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;

@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;

/* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;

if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;

if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
if (need_ctx_switch)
next_rptr += 2;

next_rptr += 4;

@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);

/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}

static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;

control |= INDIRECT_BUFFER_VALID;
next_rptr += 4;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);

header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);

amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}

/**
* gfx_v7_0_ring_test_ib - basic ring IB test
*

@ -5555,7 +5579,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib,
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,

@ -5571,7 +5595,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.get_wptr = gfx_v7_0_ring_get_wptr_compute,
.set_wptr = gfx_v7_0_ring_set_wptr_compute,
.parse_cs = NULL,
.emit_ib = gfx_v7_0_ring_emit_ib,
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v7_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,

@ -3753,7 +3753,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0x20); /* poll interval */
}

static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
bool need_ctx_switch = ring->current_ctx != ib->ctx;

@ -3761,15 +3761,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
u32 next_rptr = ring->wptr + 5;

/* drop the CE preamble IB for the same context */
if ((ring->type == AMDGPU_RING_TYPE_GFX) &&
(ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
!need_ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;

if (ring->type == AMDGPU_RING_TYPE_COMPUTE)
control |= INDIRECT_BUFFER_VALID;

if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
if (need_ctx_switch)
next_rptr += 2;

next_rptr += 4;

@ -3780,7 +3775,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr);

/* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) {
if (need_ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0);
}

@ -3803,6 +3798,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, control);
}

static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 header, control = 0;
u32 next_rptr = ring->wptr + 5;

control |= INDIRECT_BUFFER_VALID;

next_rptr += 4;
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
amdgpu_ring_write(ring, next_rptr);

header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

control |= ib->length_dw |
(ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);

amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
amdgpu_ring_write(ring, control);
}

static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
u64 seq, unsigned flags)
{

@ -4224,7 +4249,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.get_wptr = gfx_v8_0_ring_get_wptr_gfx,
.set_wptr = gfx_v8_0_ring_set_wptr_gfx,
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib,
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,

@ -4240,7 +4265,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.get_wptr = gfx_v8_0_ring_get_wptr_compute,
.set_wptr = gfx_v8_0_ring_set_wptr_compute,
.parse_cs = NULL,
.emit_ib = gfx_v8_0_ring_emit_ib,
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
.emit_semaphore = gfx_v8_0_ring_emit_semaphore,
.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,

@ -35,6 +35,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10

@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)

mutex_lock(&adev->grbm_idx_mutex);
for (idx = 0; idx < 2; ++idx) {

if (adev->vce.harvest_config & (1 << idx))
continue;

if(idx == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);

@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
return 0;
}

#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000

static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
unsigned ret;

if (adev->flags & AMDGPU_IS_APU)
tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
VCE_HARVEST_FUSE_MACRO__MASK) >>
VCE_HARVEST_FUSE_MACRO__SHIFT;
else
tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;

switch (tmp) {
case 1:
ret = AMDGPU_VCE_HARVEST_VCE0;
break;
case 2:
ret = AMDGPU_VCE_HARVEST_VCE1;
break;
case 3:
ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
break;
default:
ret = 0;
}

return ret;
}

static int vce_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);

if ((adev->vce.harvest_config &
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;

vce_v3_0_set_ring_funcs(adev);
vce_v3_0_set_irq_funcs(adev);

@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;

drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
drm_crtc_vblank_reset(&crtc->base);

dc->crtc = &crtc->base;

@ -313,6 +313,12 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)

pm_runtime_enable(dev->dev);

ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}

ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");

@ -321,12 +327,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)

drm_mode_config_reset(dev);

ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}

pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);

@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev)
if (encoder->funcs->reset)
encoder->funcs->reset(encoder);

list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
connector->status = connector_status_unknown;

list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->funcs->reset)
connector->funcs->reset(connector);
}
}
EXPORT_SYMBOL(drm_mode_config_reset);

@ -3303,15 +3303,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
u32 upper = I915_READ(upper_reg); \
u32 lower = I915_READ(lower_reg); \
u32 tmp = I915_READ(upper_reg); \
if (upper != tmp) { \
upper = tmp; \
lower = I915_READ(lower_reg); \
WARN_ON(I915_READ(upper_reg) != upper); \
} \
(u64)upper << 32 | lower; })
u32 upper, lower, tmp; \
tmp = I915_READ(upper_reg); \
do { \
upper = tmp; \
lower = I915_READ(lower_reg); \
tmp = I915_READ(upper_reg); \
} while (upper != tmp); \
(u64)upper << 32 | lower; })

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
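The I915_READ64_2x32 rework above replaces a single-retry check with a loop: the upper half is re-read until it is stable across the lower read, so a carry between the two 32-bit halves cannot produce a torn 64-bit value. Below is a minimal stand-alone sketch of the same retry idea; read_upper32() and read_lower32() are hypothetical accessors standing in for the MMIO reads, not part of the i915 code.

```c
#include <stdint.h>

/* Hypothetical 32-bit register accessors standing in for I915_READ(). */
extern uint32_t read_upper32(void);
extern uint32_t read_lower32(void);

/*
 * Read a 64-bit counter exposed as two 32-bit registers without tearing:
 * keep re-reading until the upper half is unchanged across the lower read.
 */
static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, tmp;

	tmp = read_upper32();
	do {
		upper = tmp;
		lower = read_lower32();
		tmp = read_upper32();
	} while (upper != tmp);

	return ((uint64_t)upper << 32) | lower;
}
```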
@ -1923,6 +1923,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
vma->vm->insert_entries(vma->vm, pages,
vma->node.start,
cache_level, pte_flags);

/* Note the inconsistency here is due to absence of the
* aliasing ppgtt on gen4 and earlier. Though we always
* request PIN_USER for execbuffer (translated to LOCAL_BIND),
* without the appgtt, we cannot honour that request and so
* must substitute it with a global binding. Since we do this
* behind the upper layers back, we need to explicitly set
* the bound flag ourselves.
*/
vma->bound |= GLOBAL_BIND;

}

if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {

@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
}

/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
args->phys_swizzle_mode = args->swizzle_mode;
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
else
args->phys_swizzle_mode = args->swizzle_mode;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)

@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_reg_read *reg = data;
struct register_whitelist const *entry = whitelist;
unsigned size;
u64 offset;
int i, ret = 0;

for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
if (entry->offset == reg->offset &&
if (entry->offset == (reg->offset & -entry->size) &&
(1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
break;
}

@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
if (i == ARRAY_SIZE(whitelist))
return -EINVAL;

/* We use the low bits to encode extra flags as the register should
* be naturally aligned (and those that are not so aligned merely
* limit the available flags for that register).
*/
offset = entry->offset;
size = entry->size;
size |= reg->offset ^ offset;

intel_runtime_pm_get(dev_priv);

switch (entry->size) {
switch (size) {
case 8 | 1:
reg->val = I915_READ64_2x32(offset, offset+4);
break;
case 8:
reg->val = I915_READ64(reg->offset);
reg->val = I915_READ64(offset);
break;
case 4:
reg->val = I915_READ(reg->offset);
reg->val = I915_READ(offset);
break;
case 2:
reg->val = I915_READ16(reg->offset);
reg->val = I915_READ16(offset);
break;
case 1:
reg->val = I915_READ8(reg->offset);
reg->val = I915_READ8(offset);
break;
default:
MISSING_CASE(entry->size);
ret = -EINVAL;
goto out;
}

@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
uint32_t op_mode = 0;
uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
enum mdp4_frame_format frame_type;

if (!(crtc && fb)) {
DBG("%s: disabled!", mdp4_plane->name);
return 0;
}

frame_type = mdp4_get_frame_format(fb);

/* src values are in Q16 fixed point, convert to integer: */
src_x = src_x >> 16;
src_y = src_y >> 16;

@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st

static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
int nplanes = mdp5_kms->dev->mode_config.num_total_plane;

for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
struct drm_plane_state *plane_state = state->plane_states[i];

if (!plane)
continue;

mdp5_plane_complete_commit(plane, plane_state);
}

mdp5_disable(mdp5_kms);
}

@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
struct drm_mode_object *obj);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
void mdp5_plane_complete_flip(struct drm_plane *plane);
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);

@ -31,8 +31,6 @@ struct mdp5_plane {

uint32_t nformats;
uint32_t formats[32];

bool enabled;
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
return state->fb && state->crtc;
}

static int mdp5_plane_disable(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;

DBG("%s: disable", mdp5_plane->name);

if (mdp5_kms) {
/* Release the memory we requested earlier from the SMP: */
mdp5_smp_release(mdp5_kms->smp, pipe);
}

return 0;
}

static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,

if (!plane_enabled(state)) {
to_mdp5_plane_state(state)->pending = true;
mdp5_plane_disable(plane);
} else if (to_mdp5_plane_state(state)->mode_changed) {
int ret;
to_mdp5_plane_state(state)->pending = true;

@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return mdp5_plane->flush_mask;
}

/* called after vsync in thread context */
void mdp5_plane_complete_commit(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct mdp5_kms *mdp5_kms = get_kms(plane);
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
enum mdp5_pipe pipe = mdp5_plane->pipe;

if (!plane_enabled(plane->state)) {
DBG("%s: free SMP", mdp5_plane->name);
mdp5_smp_release(mdp5_kms->smp, pipe);
}
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)

@ -34,22 +34,44 @@
* and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
*
* For each block that can be dynamically allocated, it can be either
* free, or pending/in-use by a client. The updates happen in three steps:
* free:
* The block is free.
*
* pending:
* The block is allocated to some client and not free.
*
* configured:
* The block is allocated to some client, and assigned to that
* client in MDP5_MDP_SMP_ALLOC registers.
*
* inuse:
* The block is being actively used by a client.
*
* The updates happen in the following steps:
*
* 1) mdp5_smp_request():
* When plane scanout is setup, calculate required number of
* blocks needed per client, and request. Blocks not inuse or
* pending by any other client are added to client's pending
* set.
* blocks needed per client, and request. Blocks neither inuse nor
* configured nor pending by any other client are added to client's
* pending set.
* For shrinking, blocks in pending but not in configured can be freed
* directly, but those already in configured will be freed later by
* mdp5_smp_commit.
*
* 2) mdp5_smp_configure():
* As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
* are configured for the union(pending, inuse)
* Current pending is copied to configured.
* It is assumed that mdp5_smp_request and mdp5_smp_configure not run
* concurrently for the same pipe.
*
* 3) mdp5_smp_commit():
* After next vblank, copy pending -> inuse. Optionally update
* After next vblank, copy configured -> inuse. Optionally update
* MDP5_SMP_ALLOC registers if there are newly unused blocks
*
* 4) mdp5_smp_release():
* Must be called after the pipe is disabled and no longer uses any SMB
*
* On the next vblank after changes have been committed to hw, the
* client's pending blocks become it's in-use blocks (and no-longer
* in-use blocks become available to other clients).

@ -77,6 +99,9 @@ struct mdp5_smp {
struct mdp5_client_smp_state client_state[MAX_CLIENTS];
};

static void update_smp_state(struct mdp5_smp *smp,
u32 cid, mdp5_smp_state_t *assigned);

static inline
struct mdp5_kms *get_kms(struct mdp5_smp *smp)
{

@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
for (i = cur_nblks; i > nblks; i--) {
int blk = find_first_bit(ps->pending, cnt);
clear_bit(blk, ps->pending);
/* don't clear in global smp_state until _commit() */

/* clear in global smp_state if not in configured
* otherwise until _commit()
*/
if (!test_bit(blk, ps->configured))
clear_bit(blk, smp->state);
}
}

@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
/* Release SMP blocks for all clients of the pipe */
void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int i, nblks;
int i;
unsigned long flags;
int cnt = smp->blk_cnt;

for (i = 0; i < pipe2nclients(pipe); i++) {
mdp5_smp_state_t assigned;
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];

spin_lock_irqsave(&smp->state_lock, flags);

/* clear hw assignment */
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, CID_UNUSED, &assigned);

/* free to global pool */
bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
bitmap_andnot(smp->state, smp->state, assigned, cnt);

/* clear client's infor */
bitmap_zero(ps->pending, cnt);
bitmap_zero(ps->configured, cnt);
bitmap_zero(ps->inuse, cnt);

spin_unlock_irqrestore(&smp->state_lock, flags);
}

for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
smp_request_block(smp, pipe2client(pipe, i), 0);
set_fifo_thresholds(smp, pipe, 0);
}

@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
u32 cid = pipe2client(pipe, i);
struct mdp5_client_smp_state *ps = &smp->client_state[cid];

bitmap_or(assigned, ps->inuse, ps->pending, cnt);
/*
* if vblank has not happened since last smp_configure
* skip the configure for now
*/
if (!bitmap_equal(ps->inuse, ps->configured, cnt))
continue;

bitmap_copy(ps->configured, ps->pending, cnt);
bitmap_or(assigned, ps->inuse, ps->configured, cnt);
update_smp_state(smp, cid, &assigned);
}
}

/* step #3: after vblank, copy pending -> inuse: */
/* step #3: after vblank, copy configured -> inuse: */
void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
{
int cnt = smp->blk_cnt;

@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
* using, which can be released and made available to other
* clients:
*/
if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
unsigned long flags;

spin_lock_irqsave(&smp->state_lock, flags);

@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
update_smp_state(smp, CID_UNUSED, &released);
}

bitmap_copy(ps->inuse, ps->pending, cnt);
bitmap_copy(ps->inuse, ps->configured, cnt);
}
}
|
||||
|
||||
struct mdp5_client_smp_state {
|
||||
mdp5_smp_state_t inuse;
|
||||
mdp5_smp_state_t configured;
|
||||
mdp5_smp_state_t pending;
|
||||
};
|
||||
|
||||
|
@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
|
||||
|
||||
timeout = ktime_add_ms(ktime_get(), 1000);
|
||||
|
||||
ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
|
||||
if (ret) {
|
||||
WARN_ON(ret); // TODO unswap state back? or??
|
||||
commit_destroy(c);
|
||||
return ret;
|
||||
}
|
||||
/* uninterruptible wait */
|
||||
msm_wait_fence(dev, c->fence, &timeout, false);
|
||||
|
||||
complete_commit(c);
|
||||
|
||||
|
@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
|
||||
* Fences:
|
||||
*/
|
||||
|
||||
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
|
||||
ktime_t *timeout)
|
||||
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
|
||||
ktime_t *timeout , bool interruptible)
|
||||
{
|
||||
struct msm_drm_private *priv = dev->dev_private;
|
||||
int ret;
|
||||
@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
|
||||
remaining_jiffies = timespec_to_jiffies(&ts);
|
||||
}
|
||||
|
||||
ret = wait_event_interruptible_timeout(priv->fence_event,
|
||||
if (interruptible)
|
||||
ret = wait_event_interruptible_timeout(priv->fence_event,
|
||||
fence_completed(dev, fence),
|
||||
remaining_jiffies);
|
||||
else
|
||||
ret = wait_event_timeout(priv->fence_event,
|
||||
fence_completed(dev, fence),
|
||||
remaining_jiffies);
|
||||
|
||||
@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return msm_wait_fence_interruptable(dev, args->fence, &timeout);
|
||||
return msm_wait_fence(dev, args->fence, &timeout, true);
|
||||
}
|
||||
|
||||
static const struct drm_ioctl_desc msm_ioctls[] = {
|
||||
|
@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
|
||||
|
||||
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
|
||||
|
||||
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
|
||||
ktime_t *timeout);
|
||||
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
|
||||
ktime_t *timeout, bool interruptible);
|
||||
int msm_queue_fence_cb(struct drm_device *dev,
|
||||
struct msm_fence_cb *cb, uint32_t fence);
|
||||
void msm_update_fence(struct drm_device *dev, uint32_t fence);
|
||||
|
@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
|
||||
if (op & MSM_PREP_NOSYNC)
|
||||
timeout = NULL;
|
||||
|
||||
ret = msm_wait_fence_interruptable(dev, fence, timeout);
|
||||
ret = msm_wait_fence(dev, fence, timeout, true);
|
||||
}
|
||||
|
||||
/* TODO cache maintenance */
|
||||
|
Some files were not shown because too many files have changed in this diff.