author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-04 10:15:34 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-04 10:15:34 +0000
commit    f68548b6d10e317aeedb009bab2cdc53aa196bd8 (patch)
tree      612907d41d90d34f497c4bfbbfd9fbd368d2a1d7 /arch
parent    Adding debian version 6.1.82-1. (diff)
Merging upstream version 6.1.85.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
180 files changed, 1697 insertions, 943 deletions
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts index efed325af..d99bac022 100644 --- a/arch/arm/boot/dts/arm-realview-pb1176.dts +++ b/arch/arm/boot/dts/arm-realview-pb1176.dts @@ -451,7 +451,7 @@ /* Direct-mapped development chip ROM */ pb1176_rom@10200000 { - compatible = "direct-mapped"; + compatible = "mtd-rom"; reg = <0x10200000 0x4000>; bank-width = <1>; }; diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi index aacbf317f..4b7aee895 100644 --- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi +++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi @@ -106,8 +106,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet>; phy-mode = "rgmii-id"; - phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; - phy-reset-duration = <20>; phy-supply = <&sw2_reg>; status = "okay"; @@ -120,17 +118,10 @@ #address-cells = <1>; #size-cells = <0>; - phy_port2: phy@1 { - reg = <1>; - }; - - phy_port3: phy@2 { - reg = <2>; - }; - switch@10 { compatible = "qca,qca8334"; - reg = <10>; + reg = <0x10>; + reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>; switch_ports: ports { #address-cells = <1>; @@ -151,15 +142,30 @@ eth2: port@2 { reg = <2>; label = "eth2"; + phy-mode = "internal"; phy-handle = <&phy_port2>; }; eth1: port@3 { reg = <3>; label = "eth1"; + phy-mode = "internal"; phy-handle = <&phy_port3>; }; }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + + phy_port2: ethernet-phy@1 { + reg = <1>; + }; + + phy_port3: ethernet-phy@2 { + reg = <2>; + }; + }; }; }; }; diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts index 04f1ae138..bc64348b8 100644 --- a/arch/arm/boot/dts/mmp2-brownstone.dts +++ b/arch/arm/boot/dts/mmp2-brownstone.dts @@ -28,7 +28,7 @@ &twsi1 { status = "okay"; pmic: max8925@3c { - compatible = "maxium,max8925"; + compatible = "maxim,max8925"; reg = <0x3c>; interrupts = <1>; interrupt-parent = <&intcmux4>; diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index c4b2e9ac2..5ea45e486 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -1134,7 +1134,7 @@ qfprom: qfprom@fc4bc000 { compatible = "qcom,msm8974-qfprom", "qcom,qfprom"; - reg = <0xfc4bc000 0x1000>; + reg = <0xfc4bc000 0x2100>; #address-cells = <1>; #size-cells = <1>; tsens_calib: calib@d0 { diff --git a/arch/arm/boot/dts/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/r8a73a4-ape6evm.dts index e81a7213d..4282bafbb 100644 --- a/arch/arm/boot/dts/r8a73a4-ape6evm.dts +++ b/arch/arm/boot/dts/r8a73a4-ape6evm.dts @@ -209,6 +209,18 @@ status = "okay"; }; +&extal1_clk { + clock-frequency = <26000000>; +}; + +&extal2_clk { + clock-frequency = <48000000>; +}; + +&extalr_clk { + clock-frequency = <32768>; +}; + &pfc { scifa0_pins: scifa0 { groups = "scifa0_data"; diff --git a/arch/arm/boot/dts/r8a73a4.dtsi b/arch/arm/boot/dts/r8a73a4.dtsi index c39066967..d1f4cbd09 100644 --- a/arch/arm/boot/dts/r8a73a4.dtsi +++ b/arch/arm/boot/dts/r8a73a4.dtsi @@ -450,17 +450,20 @@ extalr_clk: extalr { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <32768>; + /* This value must be overridden by the board. */ + clock-frequency = <0>; }; extal1_clk: extal1 { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <25000000>; + /* This value must be overridden by the board. 
*/ + clock-frequency = <0>; }; extal2_clk: extal2 { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <48000000>; + /* This value must be overridden by the board. */ + clock-frequency = <0>; }; fsiack_clk: fsiack { compatible = "fixed-clock"; diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c index 433ee4ddc..f85933fde 100644 --- a/arch/arm/crypto/sha256_glue.c +++ b/arch/arm/crypto/sha256_glue.c @@ -24,8 +24,8 @@ #include "sha256_glue.h" -asmlinkage void sha256_block_data_order(u32 *digest, const void *data, - unsigned int num_blks); +asmlinkage void sha256_block_data_order(struct sha256_state *state, + const u8 *data, int num_blks); int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) @@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, /* make sure casting to sha256_block_fn() is safe */ BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0); - return sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + return sha256_base_do_update(desc, data, len, sha256_block_data_order); } EXPORT_SYMBOL(crypto_sha256_arm_update); static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out) { - sha256_base_do_finalize(desc, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_finalize(desc, sha256_block_data_order); return sha256_base_finish(desc, out); } int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha256_base_do_update(desc, data, len, - (sha256_block_fn *)sha256_block_data_order); + sha256_base_do_update(desc, data, len, sha256_block_data_order); return crypto_sha256_arm_final(desc, out); } EXPORT_SYMBOL(crypto_sha256_arm_finup); diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c index 0635a65aa..1be5bd498 100644 --- a/arch/arm/crypto/sha512-glue.c +++ b/arch/arm/crypto/sha512-glue.c @@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512"); MODULE_ALIAS_CRYPTO("sha384-arm"); MODULE_ALIAS_CRYPTO("sha512-arm"); -asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks); +asmlinkage void sha512_block_data_order(struct sha512_state *state, + u8 const *src, int blocks); int sha512_arm_update(struct shash_desc *desc, const u8 *data, unsigned int len) { - return sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + return sha512_base_do_update(desc, data, len, sha512_block_data_order); } static int sha512_arm_final(struct shash_desc *desc, u8 *out) { - sha512_base_do_finalize(desc, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_finalize(desc, sha512_block_data_order); return sha512_base_finish(desc, out); } int sha512_arm_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) { - sha512_base_do_update(desc, data, len, - (sha512_block_fn *)sha512_block_data_order); + sha512_base_do_update(desc, data, len, sha512_block_data_order); return sha512_arm_final(desc, out); } diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 9ec49ac2f..381d58cea 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -291,6 +291,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&spdif_tx_pin>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi index 
4903d6358..855b7d43b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi @@ -166,6 +166,8 @@ }; &spdif { + pinctrl-names = "default"; + pinctrl-0 = <&spdif_tx_pin>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi index ca1d287a0..d11e5041b 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi @@ -406,6 +406,7 @@ function = "spi1"; }; + /omit-if-no-ref/ spdif_tx_pin: spdif-tx-pin { pins = "PH7"; function = "spdif"; @@ -655,10 +656,8 @@ clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>; clock-names = "apb", "spdif"; resets = <&ccu RST_BUS_SPDIF>; - dmas = <&dma 2>; - dma-names = "tx"; - pinctrl-names = "default"; - pinctrl-0 = <&spdif_tx_pin>; + dmas = <&dma 2>, <&dma 2>; + dma-names = "rx", "tx"; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi index 4eb2cd14e..9b6da84de 100644 --- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi +++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi @@ -145,7 +145,6 @@ msix: msix@fbe00000 { compatible = "al,alpine-msix"; reg = <0x0 0xfbe00000 0x0 0x100000>; - interrupt-controller; msi-controller; al,msi-base-spi = <160>; al,msi-num-spis = <160>; diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi index 73a352ea8..b30014d4d 100644 --- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi +++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi @@ -351,7 +351,6 @@ msix: msix@fbe00000 { compatible = "al,alpine-msix"; reg = <0x0 0xfbe00000 0x0 0x100000>; - interrupt-controller; msi-controller; al,msi-base-spi = <336>; al,msi-num-spis = <959>; diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi index df7134854..a4c5a3890 100644 --- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi @@ -180,9 +180,6 @@ brcm,num-gphy = <5>; brcm,num-rgmii-ports = <2>; - #address-cells = <1>; - #size-cells = <0>; - ports: ports { #address-cells = <1>; #size-cells = <0>; diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi index fda97c47f..d57784174 100644 --- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi @@ -584,6 +584,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>; }; diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi index 8f8c25e51..473d7d0dd 100644 --- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi +++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi @@ -442,6 +442,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>; gpio-ranges = <&pinmux 0 0 16>, <&pinmux 16 71 2>, diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts index 8b16bd685..d9fa0deea 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts @@ -294,8 +294,8 @@ pinctrl_i2c4: i2c4grp { fsl,pins = < - MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 - MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + 
MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083 >; }; @@ -313,19 +313,19 @@ pinctrl_uart1: uart1grp { fsl,pins = < - MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140 - MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140 - MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0 + MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0 + MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0 >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140 - MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140 - MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0 + MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0 + MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0 >; }; @@ -337,40 +337,40 @@ pinctrl_usdhc2: usdhc2grp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts index a079322a3..d54cddd65 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts +++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts @@ -277,8 +277,8 @@ pinctrl_i2c4: i2c4grp { fsl,pins = < - MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3 - MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3 + MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083 + MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083 >; }; @@ -290,19 +290,19 @@ pinctrl_uart1: uart1grp { fsl,pins = < - MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140 - MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140 - MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0 + MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0 + MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0 >; }; pinctrl_uart2: uart2grp { fsl,pins = < - MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140 - MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140 - 
MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140 - MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140 + MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0 + MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0 + MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0 + MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0 >; }; @@ -314,40 +314,40 @@ pinctrl_usdhc2: usdhc2grp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { fsl,pins = < - MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196 + MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96 MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6 MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6 MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6 MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6 MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6 - MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019 - MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0 + MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19 + MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0 >; }; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi index 8d10f5b41..d5199ecb3 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi @@ -205,7 +205,7 @@ reg = <0x52>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rtc>; - interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_HIGH>; + interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_LOW>; trickle-diode-disable; }; }; @@ -247,8 +247,8 @@ pinctrl_i2c1: i2c1grp { fsl,pins = < - MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3 - MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3 + MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083 + MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi index 0679728d2..884ae2ad3 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi @@ -237,8 +237,8 @@ pinctrl_i2c1: i2c1grp { fsl,pins = < - MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3 - MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3 + MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083 + MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083 >; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi index c557dbf4d..2e90466db 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi @@ -47,17 +47,6 @@ gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>; status = "okay"; }; - - reg_usb_otg1_vbus: regulator-usb-otg1 { - pinctrl-names = "default"; - pinctrl-0 = 
<&pinctrl_reg_usb1_en>; - compatible = "regulator-fixed"; - regulator-name = "usb_otg1_vbus"; - gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>; - enable-active-high; - regulator-min-microvolt = <5000000>; - regulator-max-microvolt = <5000000>; - }; }; /* off-board header */ @@ -146,9 +135,10 @@ }; &usbotg1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usbotg1>; dr_mode = "otg"; over-current-active-low; - vbus-supply = <®_usb_otg1_vbus>; status = "okay"; }; @@ -206,14 +196,6 @@ >; }; - pinctrl_reg_usb1_en: regusb1grp { - fsl,pins = < - MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41 - MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141 - MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41 - >; - }; - pinctrl_spi2: spi2grp { fsl,pins = < MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6 @@ -236,4 +218,11 @@ MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140 >; }; + + pinctrl_usbotg1: usbotg1grp { + fsl,pins = < + MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141 + MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41 + >; + }; }; diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi index 78ae73d0c..98ff17b14 100644 --- a/arch/arm64/boot/dts/lg/lg1312.dtsi +++ b/arch/arm64/boot/dts/lg/lg1312.dtsi @@ -124,7 +124,6 @@ amba { #address-cells = <2>; #size-cells = <1>; - #interrupt-cells = <3>; compatible = "simple-bus"; interrupt-parent = <&gic>; diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi index 217331657..8e9410d8f 100644 --- a/arch/arm64/boot/dts/lg/lg1313.dtsi +++ b/arch/arm64/boot/dts/lg/lg1313.dtsi @@ -124,7 +124,6 @@ amba { #address-cells = <2>; #size-cells = <1>; - #interrupt-cells = <3>; compatible = "simple-bus"; interrupt-parent = <&gic>; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index df152c722..cd28e1c45 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -426,14 +426,14 @@ crypto: crypto@90000 { compatible = "inside-secure,safexcel-eip97ies"; reg = <0x90000 0x20000>; - interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, + interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "mem", "ring0", "ring1", - "ring2", "ring3", "eip"; + <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "ring0", "ring1", "ring2", + "ring3", "eip", "mem"; clocks = <&nb_periph_clk 15>; }; diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi index a06a0a889..73d8803b5 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi @@ -133,7 +133,6 @@ odmi: odmi@300000 { compatible = "marvell,odmi-controller"; - interrupt-controller; msi-controller; marvell,odmi-frames = <4>; reg = <0x300000 0x4000>, diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi index d6c0990a2..218c059b1 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi @@ -506,14 +506,14 @@ CP11X_LABEL(crypto): crypto@800000 { compatible = "inside-secure,safexcel-eip197b"; reg = <0x800000 0x200000>; - interrupts = <87 IRQ_TYPE_LEVEL_HIGH>, - <88 IRQ_TYPE_LEVEL_HIGH>, + interrupts = <88 IRQ_TYPE_LEVEL_HIGH>, <89 IRQ_TYPE_LEVEL_HIGH>, <90 IRQ_TYPE_LEVEL_HIGH>, <91 IRQ_TYPE_LEVEL_HIGH>, - <92 
IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "mem", "ring0", "ring1", - "ring2", "ring3", "eip"; + <92 IRQ_TYPE_LEVEL_HIGH>, + <87 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "ring0", "ring1", "ring2", "ring3", + "eip", "mem"; clock-names = "core", "reg"; clocks = <&CP11X_LABEL(clk) 1 26>, <&CP11X_LABEL(clk) 1 17>; diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts index 2c35ed073..b1ddc491d 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts @@ -74,6 +74,7 @@ memory@40000000 { reg = <0 0x40000000 0 0x40000000>; + device_type = "memory"; }; reg_1p8v: regulator-1p8v { diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts index f9313b697..527dcb279 100644 --- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts +++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts @@ -56,6 +56,7 @@ memory@40000000 { reg = <0 0x40000000 0 0x20000000>; + device_type = "memory"; }; reg_1p8v: regulator-1p8v { diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi index fc338bd49..108931e79 100644 --- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi @@ -110,6 +110,7 @@ compatible = "mediatek,mt7986-infracfg", "syscon"; reg = <0 0x10001000 0 0x1000>; #clock-cells = <1>; + #reset-cells = <1>; }; wed_pcie: wed-pcie@10003000 { diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi index dccf367c7..3d95625f1 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -4,6 +4,8 @@ */ #include "mt8183-kukui.dtsi" +/* Must come after mt8183-kukui.dtsi to modify cros_ec */ +#include <arm/cros-ec-keyboard.dtsi> / { panel: panel { diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi index 50a0dd36b..0d3c7b816 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi @@ -372,6 +372,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "GO_KAKADU"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi index 06f8c80bf..e73113cb5 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi @@ -339,6 +339,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "GO_KODAMA"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi index a7b0cb3ff..181da69d1 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi @@ -343,6 +343,16 @@ }; }; +&cros_ec { + cbas { + compatible = "google,cros-cbas"; + }; + + keyboard-controller { + compatible = "google,cros-ec-keyb-switches"; + }; +}; + &qca_wifi { qcom,ath10k-calibration-variant = "LE_Krane"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi 
b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi index a428a581c..1db97d946 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi @@ -896,10 +896,6 @@ google,usb-port-id = <0>; }; - cbas { - compatible = "google,cros-cbas"; - }; - typec { compatible = "google,cros-ec-typec"; #address-cells = <1>; @@ -999,5 +995,4 @@ }; }; -#include <arm/cros-ec-keyboard.dtsi> #include <arm/cros-ec-sbs.dtsi> diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi index 50367da93..c6080af1e 100644 --- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi @@ -819,10 +819,6 @@ #address-cells = <1>; #size-cells = <0>; - base_detection: cbas { - compatible = "google,cros-cbas"; - }; - cros_ec_pwm: pwm { compatible = "google,cros-ec-pwm"; #pwm-cells = <1>; diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi index 2f40c6cc4..4ed8a0f18 100644 --- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi @@ -1539,7 +1539,7 @@ mediatek,scp = <&scp>; power-domains = <&spm MT8192_POWER_DOMAIN_VENC>; clocks = <&vencsys CLK_VENC_SET1_VENC>; - clock-names = "venc-set1"; + clock-names = "venc_sel"; assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>; assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D4>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts index 3348ba69f..d86d193e5 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts @@ -13,3 +13,7 @@ &ts_10 { status = "okay"; }; + +&watchdog { + /delete-property/ mediatek,disable-extrst; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts index 4669e9d91..5356f5330 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts @@ -33,3 +33,7 @@ &ts_10 { status = "okay"; }; + +&watchdog { + /delete-property/ mediatek,disable-extrst; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts index 5021edd02..fca3606cb 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts @@ -34,3 +34,7 @@ &ts_10 { status = "okay"; }; + +&watchdog { + /delete-property/ mediatek,disable-extrst; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts index 5117b2e79..998c2e781 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts @@ -111,6 +111,7 @@ compatible = "mediatek,mt6360"; reg = <0x34>; interrupt-controller; + #interrupt-cells = <1>; interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>; interrupt-names = "IRQB"; diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts index f094011be..8099dc04e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts +++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts @@ -2024,7 +2024,7 @@ status = "okay"; phy-handle = <&mgbe0_phy>; - phy-mode = "usxgmii"; + phy-mode = "10gbase-r"; mdio { #address-cells = <1>; diff --git 
a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index eae22e6e9..f55ce6f2f 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -923,6 +923,8 @@ ap_spi_fp: &spi10 { vddrf-supply = <&pp1300_l2c>; vddch0-supply = <&pp3300_l10c>; max-speed = <3200000>; + + qcom,local-bd-address-broken; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index 04106d725..b5cd24d59 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -2028,8 +2028,16 @@ ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>, <0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>; - interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "msi"; + interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "msi0", "msi1", "msi2", "msi3", + "msi4", "msi5", "msi6", "msi7"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0x7>; interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts index 135ff4368..5c04c91b0 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts @@ -532,7 +532,7 @@ &pcie0 { status = "okay"; perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>; - enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>; + wake-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>; vddpe-3v3-supply = <&pcie0_3p3v_dual>; diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi index eb1a93699..9dccecd9f 100644 --- a/arch/arm64/boot/dts/qcom/sm8150.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi @@ -1822,8 +1822,8 @@ phys = <&pcie0_lane>; phy-names = "pciephy"; - perst-gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>; - enable-gpio = <&tlmm 37 GPIO_ACTIVE_HIGH>; + perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>; + wake-gpios = <&tlmm 37 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; pinctrl-0 = <&pcie0_default_state>; @@ -1925,7 +1925,7 @@ phys = <&pcie1_lane>; phy-names = "pciephy"; - perst-gpio = <&tlmm 102 GPIO_ACTIVE_HIGH>; + perst-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>; enable-gpio = <&tlmm 104 GPIO_ACTIVE_HIGH>; pinctrl-names = "default"; diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi index ed9400f90..b677ef670 100644 --- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi @@ -656,8 +656,8 @@ avb0: ethernet@e6800000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; - reg = <0 0xe6800000 0 0x800>; + "renesas,etheravb-rcar-gen4"; + reg = <0 0xe6800000 0 0x1000>; interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>, @@ -704,8 +704,8 @@ avb1: ethernet@e6810000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; - reg = <0 0xe6810000 0 0x800>; + "renesas,etheravb-rcar-gen4"; + reg = <0 0xe6810000 0 0x1000>; interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>, @@ -752,7 +752,7 @@ avb2: ethernet@e6820000 { compatible = "renesas,etheravb-r8a779a0", - 
"renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6820000 0 0x1000>; interrupts = <GIC_SPI 306 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>, @@ -800,7 +800,7 @@ avb3: ethernet@e6830000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6830000 0 0x1000>; interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>, @@ -848,7 +848,7 @@ avb4: ethernet@e6840000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6840000 0 0x1000>; interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>, @@ -896,7 +896,7 @@ avb5: ethernet@e6850000 { compatible = "renesas,etheravb-r8a779a0", - "renesas,etheravb-rcar-gen3"; + "renesas,etheravb-rcar-gen4"; reg = <0 0xe6850000 0 0x1000>; interrupts = <GIC_SPI 381 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 382 IRQ_TYPE_LEVEL_HIGH>, @@ -1019,7 +1019,7 @@ msiof0: spi@e6e90000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6e90000 0 0x0064>; interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 618>; @@ -1034,7 +1034,7 @@ msiof1: spi@e6ea0000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6ea0000 0 0x0064>; interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 619>; @@ -1049,7 +1049,7 @@ msiof2: spi@e6c00000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c00000 0 0x0064>; interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 620>; @@ -1064,7 +1064,7 @@ msiof3: spi@e6c10000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c10000 0 0x0064>; interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 621>; @@ -1079,7 +1079,7 @@ msiof4: spi@e6c20000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c20000 0 0x0064>; interrupts = <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 622>; @@ -1094,7 +1094,7 @@ msiof5: spi@e6c28000 { compatible = "renesas,msiof-r8a779a0", - "renesas,rcar-gen3-msiof"; + "renesas,rcar-gen4-msiof"; reg = <0 0xe6c28000 0 0x0064>; interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>; clocks = <&cpg CPG_MOD 623>; diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi index d58b18802..868d1a3cb 100644 --- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi @@ -337,7 +337,7 @@ avb0: ethernet@e6800000 { compatible = "renesas,etheravb-r8a779g0", "renesas,etheravb-rcar-gen4"; - reg = <0 0xe6800000 0 0x800>; + reg = <0 0xe6800000 0 0x1000>; interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>, @@ -384,7 +384,7 @@ avb1: ethernet@e6810000 { compatible = "renesas,etheravb-r8a779g0", "renesas,etheravb-rcar-gen4"; - reg = <0 0xe6810000 0 0x800>; + reg = <0 0xe6810000 0 0x1000>; interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi index a4738842f..7f88395ff 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043.dtsi @@ -1,6 +1,6 @@ 
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* - * Device Tree Source for the RZ/G2UL SoC + * Device Tree Source for the RZ/Five and RZ/G2UL SoCs * * Copyright (C) 2022 Renesas Electronics Corp. */ @@ -68,36 +68,8 @@ }; }; - cpus { - #address-cells = <1>; - #size-cells = <0>; - - cpu0: cpu@0 { - compatible = "arm,cortex-a55"; - reg = <0>; - device_type = "cpu"; - #cooling-cells = <2>; - next-level-cache = <&L3_CA55>; - enable-method = "psci"; - clocks = <&cpg CPG_CORE R9A07G043_CLK_I>; - operating-points-v2 = <&cluster0_opp>; - }; - - L3_CA55: cache-controller-0 { - compatible = "cache"; - cache-unified; - cache-size = <0x40000>; - }; - }; - - psci { - compatible = "arm,psci-1.0", "arm,psci-0.2"; - method = "smc"; - }; - soc: soc { compatible = "simple-bus"; - interrupt-parent = <&gic>; #address-cells = <2>; #size-cells = <2>; ranges; @@ -545,12 +517,6 @@ sysc: system-controller@11020000 { compatible = "renesas,r9a07g043-sysc"; reg = <0 0x11020000 0 0x10000>; - interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>, - <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>, - <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>, - <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "lpm_int", "ca55stbydone_int", - "cm33stbyr_int", "ca55_deny"; status = "disabled"; }; @@ -603,16 +569,6 @@ dma-channels = <16>; }; - gic: interrupt-controller@11900000 { - compatible = "arm,gic-v3"; - #interrupt-cells = <3>; - #address-cells = <0>; - interrupt-controller; - reg = <0x0 0x11900000 0 0x40000>, - <0x0 0x11940000 0 0x60000>; - interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; - }; - sdhi0: mmc@11c00000 { compatible = "renesas,sdhi-r9a07g043", "renesas,rcar-gen3-sdhi"; @@ -893,12 +849,4 @@ }; }; }; - - timer { - compatible = "arm,armv8-timer"; - interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, - <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, - <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, - <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>; - }; }; diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi index 96f935bc2..011d4c88f 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi @@ -10,3 +10,139 @@ #define SOC_PERIPHERAL_IRQ(nr) GIC_SPI nr #include "r9a07g043.dtsi" + +/ { + cpus { + #address-cells = <1>; + #size-cells = <0>; + + cpu0: cpu@0 { + compatible = "arm,cortex-a55"; + reg = <0>; + device_type = "cpu"; + #cooling-cells = <2>; + next-level-cache = <&L3_CA55>; + enable-method = "psci"; + clocks = <&cpg CPG_CORE R9A07G043_CLK_I>; + operating-points-v2 = <&cluster0_opp>; + }; + + L3_CA55: cache-controller-0 { + compatible = "cache"; + cache-unified; + cache-size = <0x40000>; + }; + }; + + psci { + compatible = "arm,psci-1.0", "arm,psci-0.2"; + method = "smc"; + }; + + timer { + compatible = "arm,armv8-timer"; + interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>, + <&gic GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>; + }; +}; + +&soc { + interrupt-parent = <&gic>; + + irqc: interrupt-controller@110a0000 { + compatible = "renesas,r9a07g043u-irqc", + "renesas,rzg2l-irqc"; + reg = <0 0x110a0000 0 0x10000>; + #interrupt-cells = <2>; + #address-cells = <0>; + interrupt-controller; + interrupts = <SOC_PERIPHERAL_IRQ(0) 
IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(1) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(2) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(3) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(4) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(5) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(6) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(7) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(8) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(444) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(445) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(446) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(447) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(448) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(449) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(450) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(451) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(452) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(453) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(454) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(455) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(456) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(457) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(458) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(459) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(460) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(461) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(462) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(463) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(464) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(465) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(466) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(467) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(468) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(469) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(470) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(471) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(472) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(473) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(474) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(475) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_EDGE_RISING>, + <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_EDGE_RISING>; + interrupt-names = "nmi", + "irq0", "irq1", "irq2", "irq3", + "irq4", "irq5", "irq6", "irq7", + "tint0", "tint1", "tint2", "tint3", + "tint4", "tint5", "tint6", "tint7", + "tint8", "tint9", "tint10", "tint11", + "tint12", "tint13", "tint14", "tint15", + "tint16", "tint17", "tint18", "tint19", + "tint20", "tint21", "tint22", "tint23", + "tint24", "tint25", "tint26", "tint27", + "tint28", "tint29", "tint30", "tint31", + "bus-err", "ec7tie1-0", "ec7tie2-0", + "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1", + "ec7tiovf-1"; + clocks = <&cpg CPG_MOD R9A07G043_IA55_CLK>, + <&cpg CPG_MOD R9A07G043_IA55_PCLK>; + clock-names = "clk", "pclk"; + power-domains = <&cpg>; + resets = <&cpg R9A07G043_IA55_RESETN>; + }; + + gic: interrupt-controller@11900000 { + compatible = "arm,gic-v3"; + #interrupt-cells = <3>; + #address-cells = <0>; + interrupt-controller; + reg = <0x0 0x11900000 0 0x40000>, + <0x0 0x11940000 0 0x60000>; + interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>; + }; +}; + +&sysc { + interrupts = <SOC_PERIPHERAL_IRQ(42) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(43) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(44) IRQ_TYPE_LEVEL_HIGH>, + <SOC_PERIPHERAL_IRQ(45) IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "lpm_int", "ca55stbydone_int", + 
"cm33stbyr_int", "ca55_deny"; +}; diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi index 7dbf6a629..d26488b5a 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi @@ -698,7 +698,27 @@ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>; + <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3", + "irq4", "irq5", "irq6", "irq7", + "tint0", "tint1", "tint2", "tint3", + "tint4", "tint5", "tint6", "tint7", + "tint8", "tint9", "tint10", "tint11", + "tint12", "tint13", "tint14", "tint15", + "tint16", "tint17", "tint18", "tint19", + "tint20", "tint21", "tint22", "tint23", + "tint24", "tint25", "tint26", "tint27", + "tint28", "tint29", "tint30", "tint31", + "bus-err", "ec7tie1-0", "ec7tie2-0", + "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1", + "ec7tiovf-1"; clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>, <&cpg CPG_MOD R9A07G044_IA55_PCLK>; clock-names = "clk", "pclk"; diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi index e000510b9..b3d37ca94 100644 --- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi +++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi @@ -704,7 +704,27 @@ <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>, - <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>; + <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>, + <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3", + "irq4", "irq5", "irq6", "irq7", + "tint0", "tint1", "tint2", "tint3", + "tint4", "tint5", "tint6", "tint7", + "tint8", "tint9", "tint10", "tint11", + "tint12", "tint13", "tint14", "tint15", + "tint16", "tint17", "tint18", "tint19", + "tint20", "tint21", "tint22", "tint23", + "tint24", "tint25", "tint26", "tint27", + "tint28", "tint29", "tint30", "tint31", + "bus-err", "ec7tie1-0", "ec7tie2-0", + "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1", + "ec7tiovf-1"; clocks = <&cpg CPG_MOD R9A07G054_IA55_CLK>, <&cpg CPG_MOD R9A07G054_IA55_PCLK>; clock-names = "clk", "pclk"; diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi index 588b14b66..f37abfc13 100644 --- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi +++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi @@ -251,6 +251,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupt-parent = <&gpio6>; interrupts = <8 IRQ_TYPE_EDGE_FALLING>; @@ -311,6 +312,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupt-parent = <&gpio6>; interrupts = <4 IRQ_TYPE_EDGE_FALLING>; }; @@ -331,6 +333,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupt-parent = <&gpio7>; interrupts = <3 IRQ_TYPE_EDGE_FALLING>; }; @@ -341,6 +344,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; + #interrupt-cells = <2>; interrupt-parent = <&gpio5>; 
interrupts = <9 IRQ_TYPE_EDGE_FALLING>; }; diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi index f4d6dbbbd..99ad6fc51 100644 --- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi @@ -596,6 +596,7 @@ compatible = "rockchip,rk3568-vpu"; reg = <0x0 0xfdea0000 0x0 0x800>; interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "vdpu"; clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>; clock-names = "aclk", "hclk"; iommus = <&vdpu_mmu>; diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index da1841371..930b0e6c9 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -36,13 +36,13 @@ * When we defined the maximum SVE vector length we defined the ABI so * that the maximum vector length included all the reserved for future * expansion bits in ZCR rather than those just currently defined by - * the architecture. While SME follows a similar pattern the fact that - * it includes a square matrix means that any allocations that attempt - * to cover the maximum potential vector length (such as happen with - * the regset used for ptrace) end up being extremely large. Define - * the much lower actual limit for use in such situations. + * the architecture. Using this length to allocate worst size buffers + * results in excessively large allocations, and this effect is even + * more pronounced for SME due to ZA. Define more suitable VLs for + * these situations. */ -#define SME_VQ_MAX 16 +#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1) +#define SME_VQ_MAX ((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1) struct task_struct; diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index e1f6366b7..d02dd2be1 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -1450,7 +1450,8 @@ static const struct user_regset aarch64_regsets[] = { #ifdef CONFIG_ARM64_SVE [REGSET_SVE] = { /* Scalable Vector Extension */ .core_note_type = NT_ARM_SVE, - .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE), + .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX, + SVE_PT_REGS_SVE), SVE_VQ_BYTES), .size = SVE_VQ_BYTES, .align = SVE_VQ_BYTES, diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S index 57465bff1..df7f349c8 100644 --- a/arch/hexagon/kernel/vmlinux.lds.S +++ b/arch/hexagon/kernel/vmlinux.lds.S @@ -64,6 +64,7 @@ SECTIONS STABS_DEBUG DWARF_DEBUG ELF_DETAILS + .hexagon.attributes 0 : { *(.hexagon.attributes) } DISCARDS } diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h index 402a7d9e3..427d147f3 100644 --- a/arch/loongarch/include/asm/io.h +++ b/arch/loongarch/include/asm/io.h @@ -72,6 +72,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t #define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l)) #define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l)) +#define __io_aw() mmiowb() + #include <asm-generic/io.h> #define ARCH_HAS_VALID_PHYS_ADDR_RANGE diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index 302f0e339..c90c56094 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -25,7 +25,12 @@ static inline void set_my_cpu_offset(unsigned long off) __my_cpu_offset = off; csr_write64(off, PERCPU_BASE_KS); } -#define __my_cpu_offset __my_cpu_offset + +#define __my_cpu_offset \ +({ \ + __asm__ __volatile__("":"+r"(__my_cpu_offset)); \ + 
__my_cpu_offset; \ +}) #define PERCPU_OP(op, asm_op, c_op) \ static __always_inline unsigned long __percpu_##op(void *ptr, \ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index daf3cf244..b3e4dd6be 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { regs->cp0_epc = val; + regs->cp0_cause &= ~CAUSEF_BD; } /* Query offset/name of register from its name/offset */ diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h index 5937d5eda..000a28e1c 100644 --- a/arch/parisc/include/asm/assembly.h +++ b/arch/parisc/include/asm/assembly.h @@ -97,26 +97,28 @@ * version takes two arguments: a src and destination register. * However, the source and destination registers can not be * the same register. + * + * We use add,l to avoid clobbering the C/B bits in the PSW. */ .macro tophys grvirt, grphys - ldil L%(__PAGE_OFFSET), \grphys - sub \grvirt, \grphys, \grphys + ldil L%(-__PAGE_OFFSET), \grphys + addl \grvirt, \grphys, \grphys .endm - + .macro tovirt grphys, grvirt ldil L%(__PAGE_OFFSET), \grvirt - add \grphys, \grvirt, \grvirt + addl \grphys, \grvirt, \grvirt .endm .macro tophys_r1 gr - ldil L%(__PAGE_OFFSET), %r1 - sub \gr, %r1, \gr + ldil L%(-__PAGE_OFFSET), %r1 + addl \gr, %r1, \gr .endm - + .macro tovirt_r1 gr ldil L%(__PAGE_OFFSET), %r1 - add \gr, %r1, \gr + addl \gr, %r1, \gr .endm .macro delay value diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h index 3c43baca7..2aceebcd6 100644 --- a/arch/parisc/include/asm/checksum.h +++ b/arch/parisc/include/asm/checksum.h @@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) " addc %0, %5, %0\n" " addc %0, %3, %0\n" "1: ldws,ma 4(%1), %3\n" -" addib,< 0, %2, 1b\n" +" addib,> -1, %2, 1b\n" " addc %0, %3, %0\n" "\n" " extru %0, 31, 16, %4\n" @@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ** Try to keep 4 registers with "live" values ahead of the ALU. 
*/ +" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */ " ldd,ma 8(%1), %4\n" /* get 1st saddr word */ " ldd,ma 8(%2), %5\n" /* get 1st daddr word */ " add %4, %0, %0\n" @@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, " add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */ " extrd,u %0, 31, 32, %4\n"/* copy upper half down */ " depdi 0, 31, 32, %0\n"/* clear upper half */ -" add %4, %0, %0\n" /* fold into 32-bits */ -" addc 0, %0, %0\n" /* add carry */ +" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */ +" addc 0, %0, %0\n" /* add final carry */ #else @@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, " ldw,ma 4(%2), %7\n" /* 4th daddr */ " addc %6, %0, %0\n" " addc %7, %0, %0\n" -" addc %3, %0, %0\n" /* fold in proto+len, catch carry */ +" addc %3, %0, %0\n" /* fold in proto+len */ +" addc 0, %0, %0\n" /* add carry */ #endif : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len), diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 4d392e4ed..ac7253891 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -78,7 +78,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent, #endif } -#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER) int ftrace_enable_ftrace_graph_caller(void) { static_key_enable(&ftrace_graph_enable.key); diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index 8a8e7d722..782ee05e2 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c @@ -167,6 +167,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) { unsigned long saddr = regs->ior; + unsigned long shift, temp1; __u64 val = 0; ASM_EXCEPTIONTABLE_VAR(ret); @@ -178,25 +179,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) #ifdef CONFIG_64BIT __asm__ __volatile__ ( -" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */ -" mtsp %4, %%sr1\n" -" depd %%r0,63,3,%3\n" -"1: ldd 0(%%sr1,%3),%0\n" -"2: ldd 8(%%sr1,%3),%%r20\n" -" subi 64,%%r19,%%r19\n" -" mtsar %%r19\n" -" shrpd %0,%%r20,%%sar,%0\n" +" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */ +" mtsp %5, %%sr1\n" +" depd %%r0,63,3,%2\n" +"1: ldd 0(%%sr1,%2),%0\n" +"2: ldd 8(%%sr1,%2),%4\n" +" subi 64,%3,%3\n" +" mtsar %3\n" +" shrpd %0,%4,%%sar,%0\n" "3: \n" ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") - : "=r" (val), "+r" (ret) - : "0" (val), "r" (saddr), "r" (regs->isr) - : "r19", "r20" ); + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); #else - { - unsigned long shift, temp1; __asm__ __volatile__ ( -" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */ +" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */ " mtsp %5, %%sr1\n" " dep %%r0,31,2,%2\n" "1: ldw 0(%%sr1,%2),%0\n" @@ -212,7 +210,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) : "r" (regs->isr) ); - } #endif DPRINTF("val = 0x%llx\n", val); diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h index a21f529c4..8359c06d9 100644 --- a/arch/powerpc/include/asm/reg_fsl_emb.h +++ b/arch/powerpc/include/asm/reg_fsl_emb.h @@ -12,9 +12,16 @@ #ifndef __ASSEMBLY__ /* Performance Monitor Registers */ #define 
mfpmr(rn) ({unsigned int rval; \ - asm volatile("mfpmr %0," __stringify(rn) \ + asm volatile(".machine push; " \ + ".machine e300; " \ + "mfpmr %0," __stringify(rn) ";" \ + ".machine pop; " \ : "=r" (rval)); rval;}) -#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v)) +#define mtpmr(rn, v) asm volatile(".machine push; " \ + ".machine e300; " \ + "mtpmr " __stringify(rn) ",%0; " \ + ".machine pop; " \ + : : "r" (v)) #endif /* __ASSEMBLY__ */ /* Freescale Book E Performance Monitor APU Registers */ diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index 4c69ece52..59ed89890 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -7,14 +7,14 @@ #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP #define arch_vmap_pud_supported arch_vmap_pud_supported -static inline bool arch_vmap_pud_supported(pgprot_t prot) +static __always_inline bool arch_vmap_pud_supported(pgprot_t prot) { /* HPT does not cope with large pages in the vmalloc area */ return radix_enabled(); } #define arch_vmap_pmd_supported arch_vmap_pmd_supported -static inline bool arch_vmap_pmd_supported(pgprot_t prot) +static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot) { return radix_enabled(); } diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 8537c354c..9531ab90f 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -369,6 +369,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node, if (IS_ENABLED(CONFIG_PPC64)) boot_cpu_hwid = be32_to_cpu(intserv[found_thread]); + if (nr_cpu_ids % nthreads != 0) { + set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads)); + pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n", + nr_cpu_ids); + } + + if (boot_cpuid >= nr_cpu_ids) { + set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads))); + pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n", + boot_cpuid, nr_cpu_ids); + } + /* * PAPR defines "logical" PVR values for cpus that * meet various levels of the architecture: diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 9b394bab1..374b82cf1 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -72,7 +72,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o -CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec) +CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec) # Enable <altivec.h> CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include) diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index 7ff8ff350..943248a0e 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c @@ -164,6 +164,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index, ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE); + + /* + * ret value as 'H_PARAMETER' with detail_rc as 'GEN_BUF_TOO_SMALL', + * specifies that the current buffer size cannot accommodate + * all the information and a partial buffer returned. + * Since in this function we are only accessing data for a given starting index, + * we don't need to accommodate whole data and can get required count by + * accessing first entry data. + * Hence hcall fails only incase the ret value is other than H_SUCCESS or + * H_PARAMETER with detail_rc value as GEN_BUF_TOO_SMALL(0x1B). 
+ */ + if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B) + ret = 0; + if (ret) { pr_devel("hcall failed: 0x%lx\n", ret); goto out; @@ -228,6 +242,7 @@ static int h_gpci_event_init(struct perf_event *event) { u64 count; u8 length; + unsigned long ret; /* Not our event */ if (event->attr.type != event->pmu->type) @@ -258,13 +273,23 @@ static int h_gpci_event_init(struct perf_event *event) } /* check if the request works... */ - if (single_gpci_request(event_get_request(event), + ret = single_gpci_request(event_get_request(event), event_get_starting_index(event), event_get_secondary_index(event), event_get_counter_info_version(event), event_get_offset(event), length, - &count)) { + &count); + + /* + * ret value as H_AUTHORITY implies that partition is not permitted to retrieve + * performance information, and required to set + * "Enable Performance Information Collection" option. + */ + if (ret == H_AUTHORITY) + return -EPERM; + + if (ret) { pr_devel("gpci hcall failed\n"); return -EINVAL; } diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c index 1830e1ac1..107a8b60a 100644 --- a/arch/powerpc/platforms/embedded6xx/linkstation.c +++ b/arch/powerpc/platforms/embedded6xx/linkstation.c @@ -99,9 +99,6 @@ static void __init linkstation_init_IRQ(void) mpic_init(mpic); } -extern void avr_uart_configure(void); -extern void avr_uart_send(const char); - static void __noreturn linkstation_restart(char *cmd) { local_irq_disable(); diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h index 5ad12023e..ebc258fa4 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc10x.h +++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h @@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose); /* For MPC107 boards that use the built-in openpic */ void mpc10x_set_openpic(void); +void avr_uart_configure(void); +void avr_uart_send(const char c); + #endif /* __PPC_KERNEL_MPC10X_H */ diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c index 526c621b0..eea2041b2 100644 --- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c +++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c @@ -101,10 +101,12 @@ retry: esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs); temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL); - if (temp_buf) + if (temp_buf) { buf = temp_buf; - else - return -ENOMEM; + } else { + ret = -ENOMEM; + goto out_buf; + } goto retry; } diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts index 07387f9c1..72b87b08a 100644 --- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts +++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts @@ -123,6 +123,7 @@ interrupt-parent = <&gpio>; interrupts = <1 IRQ_TYPE_LEVEL_LOW>; interrupt-controller; + #interrupt-cells = <2>; onkey { compatible = "dlg,da9063-onkey"; diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index ec0cab9fb..72ec1d9bd 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ - long __kr_err; \ + long __kr_err = 0; \ \ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \ if (unlikely(__kr_err)) \ @@ -328,7 
+328,7 @@ do { \ #define __put_kernel_nofault(dst, src, type, err_label) \ do { \ - long __kr_err; \ + long __kr_err = 0; \ \ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \ if (unlikely(__kr_err)) \ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 8955f2432..6906cc0e5 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -25,8 +25,6 @@ #include <asm/thread_info.h> #include <asm/cpuidle.h> -register unsigned long gp_in_global __asm__("gp"); - #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; @@ -170,7 +168,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (unlikely(args->fn)) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); - childregs->gp = gp_in_global; /* Supervisor/Machine, irqs on: */ childregs->status = SR_PP | SR_PIE; diff --git a/arch/s390/include/uapi/asm/dasd.h b/arch/s390/include/uapi/asm/dasd.h index 93d1ccd33..9c49c3d67 100644 --- a/arch/s390/include/uapi/asm/dasd.h +++ b/arch/s390/include/uapi/asm/dasd.h @@ -78,6 +78,7 @@ typedef struct dasd_information2_t { * 0x040: give access to raw eckd data * 0x080: enable discard support * 0x100: enable autodisable for IFCC errors (default) + * 0x200: enable requeue of all requests on autoquiesce */ #define DASD_FEATURE_READONLY 0x001 #define DASD_FEATURE_USEDIAG 0x002 @@ -88,6 +89,7 @@ typedef struct dasd_information2_t { #define DASD_FEATURE_USERAW 0x040 #define DASD_FEATURE_DISCARD 0x080 #define DASD_FEATURE_PATH_AUTODISABLE 0x100 +#define DASD_FEATURE_REQUEUEQUIESCE 0x200 #define DASD_FEATURE_DEFAULT DASD_FEATURE_PATH_AUTODISABLE #define DASD_PARTN_BITS 2 diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index 7ee3651d0..732024ca0 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu) ci_leaf_init(this_leaf++, pvt, ctype, level, cpu); } } + this_cpu_ci->cpu_map_populated = true; return 0; } diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index d2a1f2f4f..c9799dec9 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -699,6 +699,7 @@ ENDPROC(stack_overflow) .Lthis_cpu: .short 0 .Lstosm_tmp: .byte 0 .section .rodata, "a" + .balign 8 #define SYSCALL(esame,emu) .quad __s390x_ ## esame .globl sys_call_table sys_call_table: diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 6826e2a69..f61a65204 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -647,7 +647,7 @@ static int __init attr_event_init(void) for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) { ret = attr_event_init_one(attrs, i); if (ret) { - attr_event_free(attrs, i - 1); + attr_event_free(attrs, i); return ret; } } diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index 74b53c531..b4d896541 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -612,7 +612,7 @@ static int __init attr_event_init(void) for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) { ret = attr_event_init_one(attrs, i); if (ret) { - attr_event_free(attrs, i - 1); + attr_event_free(attrs, i); return ret; } } diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index 245bddfe9..cc513add4 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -22,7 +22,7 @@ 
KBUILD_AFLAGS_32 += -m31 -s KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \ +LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \ --hash-style=both --build-id=sha1 -melf_s390 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 1605ba45a..42d918d50 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -26,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS)) KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \ +ldflags-y := -shared -soname=linux-vdso64.so.1 \ --hash-style=both --build-id=sha1 -T $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 9436f3053..003c926a0 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -210,13 +210,13 @@ void vtime_flush(struct task_struct *tsk) virt_timer_expire(); steal = S390_lowcore.steal_timer; - avg_steal = S390_lowcore.avg_steal_timer / 2; + avg_steal = S390_lowcore.avg_steal_timer; if ((s64) steal > 0) { S390_lowcore.steal_timer = 0; account_steal_time(cputime_to_nsecs(steal)); avg_steal += steal; } - S390_lowcore.avg_steal_timer = avg_steal; + S390_lowcore.avg_steal_timer = avg_steal / 2; } static u64 vtime_delta(void) diff --git a/arch/sparc/crypto/crop_devid.c b/arch/sparc/crypto/crop_devid.c index 83fc4536d..93f4e0fdd 100644 --- a/arch/sparc/crypto/crop_devid.c +++ b/arch/sparc/crypto/crop_devid.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of_device.h> /* This is a dummy device table linked into all of the crypto * opcode drivers. It serves to trigger the module autoloading diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h index e10ab9ad3..836f6575a 100644 --- a/arch/sparc/include/asm/floppy_32.h +++ b/arch/sparc/include/asm/floppy_32.h @@ -8,7 +8,7 @@ #define __ASM_SPARC_FLOPPY_H #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/pgtable.h> #include <asm/idprom.h> diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h index 070c8c1f5..6efeb24b0 100644 --- a/arch/sparc/include/asm/floppy_64.h +++ b/arch/sparc/include/asm/floppy_64.h @@ -11,7 +11,7 @@ #define __ASM_SPARC64_FLOPPY_H #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <asm/auxio.h> diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h index 03b27090c..e2eed8f97 100644 --- a/arch/sparc/include/asm/parport.h +++ b/arch/sparc/include/asm/parport.h @@ -1,255 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* parport.h: sparc64 specific parport initialization and dma. - * - * Copyright (C) 1999 Eddie C. 
Dost (ecd@skynet.be) - */ +#ifndef ___ASM_SPARC_PARPORT_H +#define ___ASM_SPARC_PARPORT_H -#ifndef _ASM_SPARC64_PARPORT_H -#define _ASM_SPARC64_PARPORT_H 1 - -#include <linux/of_device.h> - -#include <asm/ebus_dma.h> -#include <asm/ns87303.h> -#include <asm/prom.h> - -#define PARPORT_PC_MAX_PORTS PARPORT_MAX - -/* - * While sparc64 doesn't have an ISA DMA API, we provide something that looks - * close enough to make parport_pc happy - */ -#define HAS_DMA - -#ifdef CONFIG_PARPORT_PC_FIFO -static DEFINE_SPINLOCK(dma_spin_lock); - -#define claim_dma_lock() \ -({ unsigned long flags; \ - spin_lock_irqsave(&dma_spin_lock, flags); \ - flags; \ -}) - -#define release_dma_lock(__flags) \ - spin_unlock_irqrestore(&dma_spin_lock, __flags); +#if defined(__sparc__) && defined(__arch64__) +#include <asm/parport_64.h> +#else +#include <asm-generic/parport.h> +#endif #endif -static struct sparc_ebus_info { - struct ebus_dma_info info; - unsigned int addr; - unsigned int count; - int lock; - - struct parport *port; -} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; - -static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); - -static inline int request_dma(unsigned int dmanr, const char *device_id) -{ - if (dmanr >= PARPORT_PC_MAX_PORTS) - return -EINVAL; - if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) - return -EBUSY; - return 0; -} - -static inline void free_dma(unsigned int dmanr) -{ - if (dmanr >= PARPORT_PC_MAX_PORTS) { - printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); - return; - } - if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { - printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); - return; - } -} - -static inline void enable_dma(unsigned int dmanr) -{ - ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); - - if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, - sparc_ebus_dmas[dmanr].addr, - sparc_ebus_dmas[dmanr].count)) - BUG(); -} - -static inline void disable_dma(unsigned int dmanr) -{ - ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); -} - -static inline void clear_dma_ff(unsigned int dmanr) -{ - /* nothing */ -} - -static inline void set_dma_mode(unsigned int dmanr, char mode) -{ - ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); -} - -static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) -{ - sparc_ebus_dmas[dmanr].addr = addr; -} - -static inline void set_dma_count(unsigned int dmanr, unsigned int count) -{ - sparc_ebus_dmas[dmanr].count = count; -} - -static inline unsigned int get_dma_residue(unsigned int dmanr) -{ - return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); -} - -static int ecpp_probe(struct platform_device *op) -{ - unsigned long base = op->resource[0].start; - unsigned long config = op->resource[1].start; - unsigned long d_base = op->resource[2].start; - unsigned long d_len; - struct device_node *parent; - struct parport *p; - int slot, err; - - parent = op->dev.of_node->parent; - if (of_node_name_eq(parent, "dma")) { - p = parport_pc_probe_port(base, base + 0x400, - op->archdata.irqs[0], PARPORT_DMA_NOFIFO, - op->dev.parent->parent, 0); - if (!p) - return -ENOMEM; - dev_set_drvdata(&op->dev, p); - return 0; - } - - for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { - if (!test_and_set_bit(slot, dma_slot_map)) - break; - } - err = -ENODEV; - if (slot >= PARPORT_PC_MAX_PORTS) - goto out_err; - - spin_lock_init(&sparc_ebus_dmas[slot].info.lock); - - d_len = (op->resource[2].end - d_base) + 1UL; - sparc_ebus_dmas[slot].info.regs = - of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); - - if 
(!sparc_ebus_dmas[slot].info.regs) - goto out_clear_map; - - sparc_ebus_dmas[slot].info.flags = 0; - sparc_ebus_dmas[slot].info.callback = NULL; - sparc_ebus_dmas[slot].info.client_cookie = NULL; - sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; - strcpy(sparc_ebus_dmas[slot].info.name, "parport"); - if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) - goto out_unmap_regs; - - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); - - /* Configure IRQ to Push Pull, Level Low */ - /* Enable ECP, set bit 2 of the CTR first */ - outb(0x04, base + 0x02); - ns87303_modify(config, PCR, - PCR_EPP_ENABLE | - PCR_IRQ_ODRAIN, - PCR_ECP_ENABLE | - PCR_ECP_CLK_ENA | - PCR_IRQ_POLAR); - - /* CTR bit 5 controls direction of port */ - ns87303_modify(config, PTR, - 0, PTR_LPT_REG_DIR); - - p = parport_pc_probe_port(base, base + 0x400, - op->archdata.irqs[0], - slot, - op->dev.parent, - 0); - err = -ENOMEM; - if (!p) - goto out_disable_irq; - - dev_set_drvdata(&op->dev, p); - - return 0; - -out_disable_irq: - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); - ebus_dma_unregister(&sparc_ebus_dmas[slot].info); - -out_unmap_regs: - of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); - -out_clear_map: - clear_bit(slot, dma_slot_map); - -out_err: - return err; -} - -static int ecpp_remove(struct platform_device *op) -{ - struct parport *p = dev_get_drvdata(&op->dev); - int slot = p->dma; - - parport_pc_unregister_port(p); - - if (slot != PARPORT_DMA_NOFIFO) { - unsigned long d_base = op->resource[2].start; - unsigned long d_len; - - d_len = (op->resource[2].end - d_base) + 1UL; - - ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); - ebus_dma_unregister(&sparc_ebus_dmas[slot].info); - of_iounmap(&op->resource[2], - sparc_ebus_dmas[slot].info.regs, - d_len); - clear_bit(slot, dma_slot_map); - } - - return 0; -} - -static const struct of_device_id ecpp_match[] = { - { - .name = "ecpp", - }, - { - .name = "parallel", - .compatible = "ecpp", - }, - { - .name = "parallel", - .compatible = "ns87317-ecpp", - }, - { - .name = "parallel", - .compatible = "pnpALI,1533,3", - }, - {}, -}; - -static struct platform_driver ecpp_driver = { - .driver = { - .name = "ecpp", - .of_match_table = ecpp_match, - }, - .probe = ecpp_probe, - .remove = ecpp_remove, -}; - -static int parport_pc_find_nonpci_ports(int autoirq, int autodma) -{ - return platform_driver_register(&ecpp_driver); -} - -#endif /* !(_ASM_SPARC64_PARPORT_H */ diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h new file mode 100644 index 000000000..0a7ffcfd5 --- /dev/null +++ b/arch/sparc/include/asm/parport_64.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* parport.h: sparc64 specific parport initialization and dma. + * + * Copyright (C) 1999 Eddie C. 
Dost (ecd@skynet.be) + */ + +#ifndef _ASM_SPARC64_PARPORT_H +#define _ASM_SPARC64_PARPORT_H 1 + +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <asm/ebus_dma.h> +#include <asm/ns87303.h> +#include <asm/prom.h> + +#define PARPORT_PC_MAX_PORTS PARPORT_MAX + +/* + * While sparc64 doesn't have an ISA DMA API, we provide something that looks + * close enough to make parport_pc happy + */ +#define HAS_DMA + +#ifdef CONFIG_PARPORT_PC_FIFO +static DEFINE_SPINLOCK(dma_spin_lock); + +#define claim_dma_lock() \ +({ unsigned long flags; \ + spin_lock_irqsave(&dma_spin_lock, flags); \ + flags; \ +}) + +#define release_dma_lock(__flags) \ + spin_unlock_irqrestore(&dma_spin_lock, __flags); +#endif + +static struct sparc_ebus_info { + struct ebus_dma_info info; + unsigned int addr; + unsigned int count; + int lock; + + struct parport *port; +} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS]; + +static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS); + +static inline int request_dma(unsigned int dmanr, const char *device_id) +{ + if (dmanr >= PARPORT_PC_MAX_PORTS) + return -EINVAL; + if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0) + return -EBUSY; + return 0; +} + +static inline void free_dma(unsigned int dmanr) +{ + if (dmanr >= PARPORT_PC_MAX_PORTS) { + printk(KERN_WARNING "Trying to free DMA%d\n", dmanr); + return; + } + if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) { + printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); + return; + } +} + +static inline void enable_dma(unsigned int dmanr) +{ + ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1); + + if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info, + sparc_ebus_dmas[dmanr].addr, + sparc_ebus_dmas[dmanr].count)) + BUG(); +} + +static inline void disable_dma(unsigned int dmanr) +{ + ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0); +} + +static inline void clear_dma_ff(unsigned int dmanr) +{ + /* nothing */ +} + +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE)); +} + +static inline void set_dma_addr(unsigned int dmanr, unsigned int addr) +{ + sparc_ebus_dmas[dmanr].addr = addr; +} + +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + sparc_ebus_dmas[dmanr].count = count; +} + +static inline unsigned int get_dma_residue(unsigned int dmanr) +{ + return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info); +} + +static int ecpp_probe(struct platform_device *op) +{ + unsigned long base = op->resource[0].start; + unsigned long config = op->resource[1].start; + unsigned long d_base = op->resource[2].start; + unsigned long d_len; + struct device_node *parent; + struct parport *p; + int slot, err; + + parent = op->dev.of_node->parent; + if (of_node_name_eq(parent, "dma")) { + p = parport_pc_probe_port(base, base + 0x400, + op->archdata.irqs[0], PARPORT_DMA_NOFIFO, + op->dev.parent->parent, 0); + if (!p) + return -ENOMEM; + dev_set_drvdata(&op->dev, p); + return 0; + } + + for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) { + if (!test_and_set_bit(slot, dma_slot_map)) + break; + } + err = -ENODEV; + if (slot >= PARPORT_PC_MAX_PORTS) + goto out_err; + + spin_lock_init(&sparc_ebus_dmas[slot].info.lock); + + d_len = (op->resource[2].end - d_base) + 1UL; + sparc_ebus_dmas[slot].info.regs = + of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA"); + + if (!sparc_ebus_dmas[slot].info.regs) + goto out_clear_map; + + sparc_ebus_dmas[slot].info.flags = 0; + sparc_ebus_dmas[slot].info.callback = NULL; + 
sparc_ebus_dmas[slot].info.client_cookie = NULL; + sparc_ebus_dmas[slot].info.irq = 0xdeadbeef; + strcpy(sparc_ebus_dmas[slot].info.name, "parport"); + if (ebus_dma_register(&sparc_ebus_dmas[slot].info)) + goto out_unmap_regs; + + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1); + + /* Configure IRQ to Push Pull, Level Low */ + /* Enable ECP, set bit 2 of the CTR first */ + outb(0x04, base + 0x02); + ns87303_modify(config, PCR, + PCR_EPP_ENABLE | + PCR_IRQ_ODRAIN, + PCR_ECP_ENABLE | + PCR_ECP_CLK_ENA | + PCR_IRQ_POLAR); + + /* CTR bit 5 controls direction of port */ + ns87303_modify(config, PTR, + 0, PTR_LPT_REG_DIR); + + p = parport_pc_probe_port(base, base + 0x400, + op->archdata.irqs[0], + slot, + op->dev.parent, + 0); + err = -ENOMEM; + if (!p) + goto out_disable_irq; + + dev_set_drvdata(&op->dev, p); + + return 0; + +out_disable_irq: + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); + ebus_dma_unregister(&sparc_ebus_dmas[slot].info); + +out_unmap_regs: + of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len); + +out_clear_map: + clear_bit(slot, dma_slot_map); + +out_err: + return err; +} + +static int ecpp_remove(struct platform_device *op) +{ + struct parport *p = dev_get_drvdata(&op->dev); + int slot = p->dma; + + parport_pc_unregister_port(p); + + if (slot != PARPORT_DMA_NOFIFO) { + unsigned long d_base = op->resource[2].start; + unsigned long d_len; + + d_len = (op->resource[2].end - d_base) + 1UL; + + ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0); + ebus_dma_unregister(&sparc_ebus_dmas[slot].info); + of_iounmap(&op->resource[2], + sparc_ebus_dmas[slot].info.regs, + d_len); + clear_bit(slot, dma_slot_map); + } + + return 0; +} + +static const struct of_device_id ecpp_match[] = { + { + .name = "ecpp", + }, + { + .name = "parallel", + .compatible = "ecpp", + }, + { + .name = "parallel", + .compatible = "ns87317-ecpp", + }, + { + .name = "parallel", + .compatible = "pnpALI,1533,3", + }, + {}, +}; + +static struct platform_driver ecpp_driver = { + .driver = { + .name = "ecpp", + .of_match_table = ecpp_match, + }, + .probe = ecpp_probe, + .remove = ecpp_remove, +}; + +static int parport_pc_find_nonpci_ports(int autoirq, int autodma) +{ + return platform_driver_register(&ecpp_driver); +} + +#endif /* !(_ASM_SPARC64_PARPORT_H */ diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index ecd05bc0a..d44725d37 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -13,7 +13,7 @@ #include <linux/miscdevice.h> #include <linux/pm.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/module.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c index a32d58817..989860e89 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c @@ -8,7 +8,6 @@ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/export.h> #include <asm/oplib.h> diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 774a82b0c..2a2800d21 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -10,7 +10,8 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <asm/prom.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 23f8838dd..a1a6485c9 100644 --- a/arch/sparc/kernel/central.c +++ 
b/arch/sparc/kernel/central.c @@ -10,7 +10,7 @@ #include <linux/export.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <asm/fhc.h> diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 6ff43df74..d5fad5fb0 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -15,7 +15,8 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/spitfire.h> #include <asm/chmctrl.h> #include <asm/cpudata.h> diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 4e4f3d326..e5a327799 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -39,7 +39,7 @@ #include <linux/seq_file.h> #include <linux/scatterlist.h> #include <linux/dma-map-ops.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <asm/io.h> #include <asm/vaddrs.h> diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 39229940d..4c61da491 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -8,9 +8,7 @@ #include <linux/errno.h> #include <linux/mutex.h> #include <linux/of.h> -#include <linux/of_platform.h> #include <linux/interrupt.h> -#include <linux/of_device.h> #include <linux/clocksource.h> #include <linux/clockchips.h> diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c index e5e5ff6b9..3a73bc466 100644 --- a/arch/sparc/kernel/leon_pci.c +++ b/arch/sparc/kernel/leon_pci.c @@ -7,7 +7,8 @@ * Code is partially derived from pcic.c */ -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/export.h> diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index e6935d0ac..b2b639bee 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -13,10 +13,11 @@ * Contributors: Daniel Hellstrom <daniel@gaisler.com> */ -#include <linux/of_device.h> #include <linux/export.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include <linux/delay.h> #include <linux/pci.h> @@ -696,7 +697,7 @@ err1: return err; } -static const struct of_device_id grpci1_of_match[] __initconst = { +static const struct of_device_id grpci1_of_match[] = { { .name = "GAISLER_PCIFBRG", }, diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index ca22f93d9..ac2acd62a 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -6,12 +6,14 @@ * */ -#include <linux/of_device.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/export.h> +#include <linux/of.h> +#include <linux/platform_device.h> + #include <asm/io.h> #include <asm/leon.h> #include <asm/vaddrs.h> @@ -887,7 +889,7 @@ err1: return err; } -static const struct of_device_id grpci2_of_match[] __initconst = { +static const struct of_device_id grpci2_of_match[] = { { .name = "GAISLER_GRPCI2", }, diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 060fff95a..fbf25e926 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -274,7 +274,7 @@ static int __init setup_nmi_watchdog(char *str) if (!strncmp(str, "panic", 5)) panic_on_timeout = 1; - return 0; + 
return 1; } __setup("nmi_watchdog=", setup_nmi_watchdog); diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 4ebf51e6e..9ac6853b3 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -7,8 +7,8 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/irq.h> -#include <linux/of_device.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <asm/leon.h> #include <asm/leon_amba.h> diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 5a9f86b1d..a8ccd7260 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/string.h> #include <linux/kernel.h> -#include <linux/of.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/export.h> @@ -9,8 +8,9 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/irq.h> -#include <linux/of_device.h> +#include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/spitfire.h> #include "of_device_common.h" diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c index e717a56ef..a09724381 100644 --- a/arch/sparc/kernel/of_device_common.c +++ b/arch/sparc/kernel/of_device_common.c @@ -1,15 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/string.h> #include <linux/kernel.h> -#include <linux/of.h> #include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/errno.h> #include <linux/irq.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_address.h> -#include <linux/of_device.h> #include <linux/of_irq.h> +#include <linux/platform_device.h> #include "of_device_common.h" diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index cb1ef2511..5637b37ba 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -20,8 +20,9 @@ #include <linux/irq.h> #include <linux/init.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/pgtable.h> +#include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/irq.h> diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 4759ccd54..5eeec9ad6 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -8,7 +8,8 @@ #include <linux/slab.h> #include <linux/pci.h> #include <linux/device.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <asm/prom.h> #include <asm/oplib.h> diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index 0ca08d455..0b91bde80 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -10,7 +10,8 @@ #include <linux/msi.h> #include <linux/export.h> #include <linux/irq.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/numa.h> #include <asm/prom.h> diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h index 4e3d15189..f31761f51 100644 --- a/arch/sparc/kernel/pci_impl.h +++ b/arch/sparc/kernel/pci_impl.h @@ -11,7 +11,6 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/msi.h> -#include <linux/of_device.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/iommu.h> diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index 9ed119857..fc7402948 100644 --- 
a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -5,6 +5,8 @@ */ #include <linux/kernel.h> #include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/irq.h> diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c index f413371da..1efc98305 100644 --- a/arch/sparc/kernel/pci_psycho.c +++ b/arch/sparc/kernel/pci_psycho.c @@ -13,7 +13,9 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/interrupt.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/iommu.h> #include <asm/irq.h> diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 384480971..0ddef827e 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -15,7 +15,8 @@ #include <linux/msi.h> #include <linux/export.h> #include <linux/log2.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/dma-map-ops.h> #include <asm/iommu-common.h> diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index b5c1eb33b..69a0206e5 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -11,7 +11,7 @@ #include <linux/init.h> #include <linux/pm.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/module.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index d941875dd..2f6c909e1 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c @@ -9,7 +9,8 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/reboot.h> -#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <asm/prom.h> #include <asm/io.h> diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c index 28aff1c52..426bd08cb 100644 --- a/arch/sparc/kernel/prom_irqtrans.c +++ b/arch/sparc/kernel/prom_irqtrans.c @@ -4,6 +4,7 @@ #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/oplib.h> #include <asm/prom.h> diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c index e90bcb6ba..5ee74b4c0 100644 --- a/arch/sparc/kernel/psycho_common.c +++ b/arch/sparc/kernel/psycho_common.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/numa.h> +#include <linux/platform_device.h> #include <asm/upa.h> diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 32141e100..0bababf6f 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -14,7 +14,8 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/numa.h> #include <asm/page.h> diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 8a08830e4..79934beba 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -33,7 +33,6 @@ #include <linux/ioport.h> #include <linux/profile.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <asm/mc146818rtc.h> diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c index bf3e6d2fe..3afbbe5fb 100644 --- a/arch/sparc/mm/io-unit.c +++ b/arch/sparc/mm/io-unit.c @@ -13,7 +13,8 @@ #include <linux/bitops.h> #include 
<linux/dma-map-ops.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/io-unit.h> diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c index 9e3f6933c..14e178bfe 100644 --- a/arch/sparc/mm/iommu.c +++ b/arch/sparc/mm/iommu.c @@ -7,14 +7,15 @@ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ - + #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/dma-map-ops.h> #include <linux/of.h> -#include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <asm/io.h> #include <asm/mxcc.h> diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c index ae9a86cb6..2b97df085 100644 --- a/arch/sparc/vdso/vma.c +++ b/arch/sparc/vdso/vma.c @@ -449,9 +449,8 @@ static __init int vdso_setup(char *s) unsigned long val; err = kstrtoul(s, 10, &val); - if (err) - return err; - vdso_enabled = val; - return 0; + if (!err) + vdso_enabled = val; + return 1; } __setup("vdso=", vdso_setup); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5caa023e9..ba815ac47 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1553,19 +1553,6 @@ config AMD_MEM_ENCRYPT This requires an AMD processor that supports Secure Memory Encryption (SME). -config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT - bool "Activate AMD Secure Memory Encryption (SME) by default" - depends on AMD_MEM_ENCRYPT - help - Say yes to have system memory encrypted by default if running on - an AMD processor that supports Secure Memory Encryption (SME). - - If set to Y, then the encryption of system memory can be - deactivated with the mem_encrypt=off command line option. - - If set to N, then the encryption of system memory can be - activated with the mem_encrypt=on command line option. - # Common NUMA Features config NUMA bool "NUMA Memory Allocation and Scheduler Support" @@ -2576,6 +2563,31 @@ config MITIGATION_RFDS stored in floating point, vector and integer registers. See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst> +choice + prompt "Clear branch history" + depends on CPU_SUP_INTEL + default SPECTRE_BHI_ON + help + Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks + where the branch history buffer is poisoned to speculatively steer + indirect branches. + See <file:Documentation/admin-guide/hw-vuln/spectre.rst> + +config SPECTRE_BHI_ON + bool "on" + help + Equivalent to setting spectre_bhi=on command line parameter. +config SPECTRE_BHI_OFF + bool "off" + help + Equivalent to setting spectre_bhi=off command line parameter. +config SPECTRE_BHI_AUTO + bool "auto" + help + Equivalent to setting spectre_bhi=auto command line parameter. 
+ +endchoice + endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S index 8232c5b2a..fb6d60dcd 100644 --- a/arch/x86/boot/compressed/efi_mixed.S +++ b/arch/x86/boot/compressed/efi_mixed.S @@ -15,10 +15,12 @@ */ #include <linux/linkage.h> +#include <asm/asm-offsets.h> #include <asm/msr.h> #include <asm/page_types.h> #include <asm/processor-flags.h> #include <asm/segment.h> +#include <asm/setup.h> .code64 .text @@ -49,6 +51,11 @@ SYM_FUNC_START(startup_64_mixed_mode) lea efi32_boot_args(%rip), %rdx mov 0(%rdx), %edi mov 4(%rdx), %esi + + /* Switch to the firmware's stack */ + movl efi32_boot_sp(%rip), %esp + andl $~7, %esp + #ifdef CONFIG_EFI_HANDOVER_PROTOCOL mov 8(%rdx), %edx // saved bootparams pointer test %edx, %edx @@ -150,6 +157,7 @@ SYM_FUNC_END(__efi64_thunk) SYM_FUNC_START(efi32_stub_entry) call 1f 1: popl %ecx + leal (efi32_boot_args - 1b)(%ecx), %ebx /* Clear BSS */ xorl %eax, %eax @@ -164,6 +172,7 @@ SYM_FUNC_START(efi32_stub_entry) popl %ecx popl %edx popl %esi + movl %esi, 8(%ebx) jmp efi32_entry SYM_FUNC_END(efi32_stub_entry) #endif @@ -240,8 +249,6 @@ SYM_FUNC_END(efi_enter32) * * Arguments: %ecx image handle * %edx EFI system table pointer - * %esi struct bootparams pointer (or NULL when not using - * the EFI handover protocol) * * Since this is the point of no return for ordinary execution, no registers * are considered live except for the function parameters. [Note that the EFI @@ -260,13 +267,25 @@ SYM_FUNC_START_LOCAL(efi32_entry) /* Store firmware IDT descriptor */ sidtl (efi32_boot_idt - 1b)(%ebx) + /* Store firmware stack pointer */ + movl %esp, (efi32_boot_sp - 1b)(%ebx) + /* Store boot arguments */ leal (efi32_boot_args - 1b)(%ebx), %ebx movl %ecx, 0(%ebx) movl %edx, 4(%ebx) - movl %esi, 8(%ebx) movb $0x0, 12(%ebx) // efi_is64 + /* + * Allocate some memory for a temporary struct boot_params, which only + * needs the minimal pieces that startup_32() relies on. + */ + subl $PARAM_SIZE, %esp + movl %esp, %esi + movl $PAGE_SIZE, BP_kernel_alignment(%esi) + movl $_end - 1b, BP_init_size(%esi) + subl $startup_32 - 1b, BP_init_size(%esi) + /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax @@ -292,8 +311,7 @@ SYM_FUNC_START(efi32_pe_entry) movl 8(%ebp), %ecx // image_handle movl 12(%ebp), %edx // sys_table - xorl %esi, %esi - jmp efi32_entry // pass %ecx, %edx, %esi + jmp efi32_entry // pass %ecx, %edx // no other registers remain live 2: popl %edi // restore callee-save registers @@ -324,5 +342,6 @@ SYM_DATA_END(efi32_boot_idt) SYM_DATA_LOCAL(efi32_boot_cs, .word 0) SYM_DATA_LOCAL(efi32_boot_ds, .word 0) +SYM_DATA_LOCAL(efi32_boot_sp, .long 0) SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0) SYM_DATA(efi_is64, .byte 1) diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c index 49b44f881..801e943fd 100644 --- a/arch/x86/coco/core.c +++ b/arch/x86/coco/core.c @@ -3,18 +3,22 @@ * Confidential Computing Platform Capability checks * * Copyright (C) 2021 Advanced Micro Devices, Inc. + * Copyright (C) 2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. 
* * Author: Tom Lendacky <thomas.lendacky@amd.com> */ #include <linux/export.h> #include <linux/cc_platform.h> +#include <linux/string.h> +#include <linux/random.h> +#include <asm/archrandom.h> #include <asm/coco.h> #include <asm/processor.h> -static enum cc_vendor vendor __ro_after_init; -static u64 cc_mask __ro_after_init; +enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE; +u64 cc_mask __ro_after_init; static bool intel_cc_platform_has(enum cc_attr attr) { @@ -83,7 +87,7 @@ static bool hyperv_cc_platform_has(enum cc_attr attr) bool cc_platform_has(enum cc_attr attr) { - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: return amd_cc_platform_has(attr); case CC_VENDOR_INTEL: @@ -105,7 +109,7 @@ u64 cc_mkenc(u64 val) * - for AMD, bit *set* means the page is encrypted * - for Intel *clear* means encrypted. */ - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: return val | cc_mask; case CC_VENDOR_INTEL: @@ -118,7 +122,7 @@ u64 cc_mkenc(u64 val) u64 cc_mkdec(u64 val) { /* See comment in cc_mkenc() */ - switch (vendor) { + switch (cc_vendor) { case CC_VENDOR_AMD: return val & ~cc_mask; case CC_VENDOR_INTEL: @@ -129,12 +133,39 @@ u64 cc_mkdec(u64 val) } EXPORT_SYMBOL_GPL(cc_mkdec); -__init void cc_set_vendor(enum cc_vendor v) +__init void cc_random_init(void) { - vendor = v; -} + /* + * The seed is 32 bytes (in units of longs), which is 256 bits, which + * is the security level that the RNG is targeting. + */ + unsigned long rng_seed[32 / sizeof(long)]; + size_t i, longs; -__init void cc_set_mask(u64 mask) -{ - cc_mask = mask; + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return; + + /* + * Since the CoCo threat model includes the host, the only reliable + * source of entropy that can be neither observed nor manipulated is + * RDRAND. Usually, RDRAND failure is considered tolerable, but since + * CoCo guests have no other unobservable source of entropy, it's + * important to at least ensure the RNG gets some initial random seeds. + */ + for (i = 0; i < ARRAY_SIZE(rng_seed); i += longs) { + longs = arch_get_random_longs(&rng_seed[i], ARRAY_SIZE(rng_seed) - i); + + /* + * A zero return value means that the guest doesn't have RDRAND + * or the CPU is physically broken, and in both cases that + * means most crypto inside of the CoCo instance will be + * broken, defeating the purpose of CoCo in the first place. So + * just panic here because it's absolutely unsafe to continue + * executing. 
+ */ + if (longs == 0) + panic("RDRAND is defective."); + } + add_device_randomness(rng_seed, sizeof(rng_seed)); + memzero_explicit(rng_seed, sizeof(rng_seed)); } diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c index d0565a9e7..4692450ae 100644 --- a/arch/x86/coco/tdx/tdx.c +++ b/arch/x86/coco/tdx/tdx.c @@ -793,7 +793,7 @@ void __init tdx_early_init(void) setup_force_cpu_cap(X86_FEATURE_TDX_GUEST); - cc_set_vendor(CC_VENDOR_INTEL); + cc_vendor = CC_VENDOR_INTEL; tdx_parse_tdinfo(&cc_mask); cc_set_mask(cc_mask); diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 9c0b26ae5..e72dac092 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -48,7 +48,7 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr) if (likely(unr < NR_syscalls)) { unr = array_index_nospec(unr, NR_syscalls); - regs->ax = sys_call_table[unr](regs); + regs->ax = x64_sys_call(regs, unr); return true; } return false; @@ -65,7 +65,7 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr) if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) { xnr = array_index_nospec(xnr, X32_NR_syscalls); - regs->ax = x32_sys_call_table[xnr](regs); + regs->ax = x32_sys_call(regs, xnr); return true; } return false; @@ -114,7 +114,7 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr) if (likely(unr < IA32_NR_syscalls)) { unr = array_index_nospec(unr, IA32_NR_syscalls); - regs->ax = ia32_sys_call_table[unr](regs); + regs->ax = ia32_sys_call(regs, unr); } else if (nr != -1) { regs->ax = __ia32_sys_ni_syscall(regs); } @@ -141,7 +141,7 @@ static __always_inline bool int80_is_external(void) } /** - * int80_emulation - 32-bit legacy syscall entry + * do_int80_emulation - 32-bit legacy syscall C entry from asm * * This entry point can be used by 32-bit and 64-bit programs to perform * 32-bit system calls. Instances of INT $0x80 can be found inline in @@ -159,7 +159,7 @@ static __always_inline bool int80_is_external(void) * eax: system call number * ebx, ecx, edx, esi, edi, ebp: arg1 - arg 6 */ -DEFINE_IDTENTRY_RAW(int80_emulation) +__visible noinstr void do_int80_emulation(struct pt_regs *regs) { int nr; diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index c2383c288..6624806e6 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -116,6 +116,7 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) /* clobbers %rax, make sure it is after saving the syscall nr */ IBRS_ENTER UNTRAIN_RET + CLEAR_BRANCH_HISTORY call do_syscall_64 /* returns with IRQs disabled */ @@ -1539,3 +1540,63 @@ SYM_CODE_START(rewind_stack_and_make_dead) call make_task_dead SYM_CODE_END(rewind_stack_and_make_dead) .popsection + +/* + * This sequence executes branches in order to remove user branch information + * from the branch history tracker in the Branch Predictor, therefore removing + * user influence on subsequent BTB lookups. + * + * It should be used on parts prior to Alder Lake. Newer parts should use the + * BHI_DIS_S hardware control instead. If a pre-Alder Lake part is being + * virtualized on newer hardware the VMM should protect against BHI attacks by + * setting BHI_DIS_S for the guests. + * + * CALLs/RETs are necessary to prevent Loop Stream Detector(LSD) from engaging + * and not clearing the branch history. 
The call tree looks like: + * + * call 1 + * call 2 + * call 2 + * call 2 + * call 2 + * call 2 + * ret + * ret + * ret + * ret + * ret + * ret + * + * This means that the stack is non-constant and ORC can't unwind it with %rsp + * alone. Therefore we unconditionally set up the frame pointer, which allows + * ORC to unwind properly. + * + * The alignment is for performance and not for safety, and may be safely + * refactored in the future if needed. + */ +SYM_FUNC_START(clear_bhb_loop) + push %rbp + mov %rsp, %rbp + movl $5, %ecx + ANNOTATE_INTRA_FUNCTION_CALL + call 1f + jmp 5f + .align 64, 0xcc + ANNOTATE_INTRA_FUNCTION_CALL +1: call 2f + RET + .align 64, 0xcc +2: movl $5, %eax +3: jmp 4f + nop +4: sub $1, %eax + jnz 3b + sub $1, %ecx + jnz 1b + RET +5: lfence + pop %rbp + RET +SYM_FUNC_END(clear_bhb_loop) +EXPORT_SYMBOL_GPL(clear_bhb_loop) +STACK_FRAME_NON_STANDARD(clear_bhb_loop) diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S index 4bcd009a2..b14b8cd85 100644 --- a/arch/x86/entry/entry_64_compat.S +++ b/arch/x86/entry/entry_64_compat.S @@ -92,6 +92,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) IBRS_ENTER UNTRAIN_RET + CLEAR_BRANCH_HISTORY /* * SYSENTER doesn't filter flags, so we need to clear NT and AC @@ -210,6 +211,7 @@ SYM_INNER_LABEL(entry_SYSCALL_compat_after_hwframe, SYM_L_GLOBAL) IBRS_ENTER UNTRAIN_RET + CLEAR_BRANCH_HISTORY movq %rsp, %rdi call do_fast_syscall_32 @@ -278,3 +280,17 @@ SYM_INNER_LABEL(entry_SYSRETL_compat_end, SYM_L_GLOBAL) ANNOTATE_NOENDBR int3 SYM_CODE_END(entry_SYSCALL_compat) + +/* + * int 0x80 is used by 32 bit mode as a system call entry. Normally idt entries + * point to C routines, however since this is a system call interface the branch + * history needs to be scrubbed to protect against BHI attacks, and that + * scrubbing needs to take place in assembly code prior to entering any C + * routines. + */ +SYM_CODE_START(int80_emulation) + ANNOTATE_NOENDBR + UNWIND_HINT_FUNC + CLEAR_BRANCH_HISTORY + jmp do_int80_emulation +SYM_CODE_END(int80_emulation) diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c index 8cfc9bc73..c2235bae1 100644 --- a/arch/x86/entry/syscall_32.c +++ b/arch/x86/entry/syscall_32.c @@ -18,8 +18,25 @@ #include <asm/syscalls_32.h> #undef __SYSCALL +/* + * The sys_call_table[] is no longer used for system calls, but + * kernel/trace/trace_syscalls.c still wants to know the system + * call address. + */ +#ifdef CONFIG_X86_32 #define __SYSCALL(nr, sym) __ia32_##sym, - -__visible const sys_call_ptr_t ia32_sys_call_table[] = { +const sys_call_ptr_t sys_call_table[] = { #include <asm/syscalls_32.h> }; +#undef __SYSCALL +#endif + +#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs); + +long ia32_sys_call(const struct pt_regs *regs, unsigned int nr) +{ + switch (nr) { + #include <asm/syscalls_32.h> + default: return __ia32_sys_ni_syscall(regs); + } +}; diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c index be120eec1..33b3f09e6 100644 --- a/arch/x86/entry/syscall_64.c +++ b/arch/x86/entry/syscall_64.c @@ -11,8 +11,23 @@ #include <asm/syscalls_64.h> #undef __SYSCALL +/* + * The sys_call_table[] is no longer used for system calls, but + * kernel/trace/trace_syscalls.c still wants to know the system + * call address. 
+ */ #define __SYSCALL(nr, sym) __x64_##sym, - -asmlinkage const sys_call_ptr_t sys_call_table[] = { +const sys_call_ptr_t sys_call_table[] = { #include <asm/syscalls_64.h> }; +#undef __SYSCALL + +#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); + +long x64_sys_call(const struct pt_regs *regs, unsigned int nr) +{ + switch (nr) { + #include <asm/syscalls_64.h> + default: return __x64_sys_ni_syscall(regs); + } +}; diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index bdd0e03a1..03de4a932 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -11,8 +11,12 @@ #include <asm/syscalls_x32.h> #undef __SYSCALL -#define __SYSCALL(nr, sym) __x64_##sym, +#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs); -asmlinkage const sys_call_ptr_t x32_sys_call_table[] = { -#include <asm/syscalls_x32.h> +long x32_sys_call(const struct pt_regs *regs, unsigned int nr) +{ + switch (nr) { + #include <asm/syscalls_x32.h> + default: return __x64_sys_ni_syscall(regs); + } }; diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 04f4b96de..3ac069a45 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -604,7 +604,6 @@ static void amd_pmu_cpu_dead(int cpu) kfree(cpuhw->lbr_sel); cpuhw->lbr_sel = NULL; - amd_pmu_cpu_reset(cpu); if (!x86_pmu.amd_nb_constraints) return; @@ -905,8 +904,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) if (!status) goto done; - /* Read branch records before unfreezing */ - if (status & GLOBAL_STATUS_LBRS_FROZEN) { + /* Read branch records */ + if (x86_pmu.lbr_nr) { amd_pmu_lbr_read(); status &= ~GLOBAL_STATUS_LBRS_FROZEN; } diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c index 38a75216c..b8fe74e8e 100644 --- a/arch/x86/events/amd/lbr.c +++ b/arch/x86/events/amd/lbr.c @@ -400,10 +400,12 @@ void amd_pmu_lbr_enable_all(void) wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select); } - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); - rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); + if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { + rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); + wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + } - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN); } @@ -416,10 +418,12 @@ void amd_pmu_lbr_disable_all(void) return; rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg); - rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); - wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN); - wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + + if (cpu_feature_enabled(X86_FEATURE_AMD_LBR_PMC_FREEZE)) { + rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl); + wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); + } } __init int amd_pmu_lbr_init(void) diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index 8f80de627..5cdccea45 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h @@ -12,6 +12,7 @@ #include <asm/special_insns.h> #include <asm/preempt.h> #include <asm/asm.h> +#include <asm/nospec-branch.h> #ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index fbcfec4dc..ca8eed1d4 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -113,6 +113,20 @@ #endif +#ifndef __ASSEMBLY__ +#ifndef __pic__ +static 
__always_inline __pure void *rip_rel_ptr(void *p) +{ + asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p)); + + return p; +} +#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var))) +#else +#define RIP_REL_REF(var) (var) +#endif +#endif + /* * Macros to generate condition code outputs from inline assembly, * The output operand must be type "bool". diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h index 3d98c3a60..1f97d00ad 100644 --- a/arch/x86/include/asm/coco.h +++ b/arch/x86/include/asm/coco.h @@ -2,6 +2,7 @@ #ifndef _ASM_X86_COCO_H #define _ASM_X86_COCO_H +#include <asm/asm.h> #include <asm/types.h> enum cc_vendor { @@ -11,12 +12,18 @@ enum cc_vendor { CC_VENDOR_INTEL, }; -void cc_set_vendor(enum cc_vendor v); -void cc_set_mask(u64 mask); +extern enum cc_vendor cc_vendor; +extern u64 cc_mask; #ifdef CONFIG_ARCH_HAS_CC_PLATFORM +static inline void cc_set_mask(u64 mask) +{ + RIP_REL_REF(cc_mask) = mask; +} + u64 cc_mkenc(u64 val); u64 cc_mkdec(u64 val); +void cc_random_init(void); #else static inline u64 cc_mkenc(u64 val) { @@ -27,6 +34,7 @@ static inline u64 cc_mkdec(u64 val) { return val; } +static inline void cc_random_init(void) { } #endif #endif /* _ASM_X86_COCO_H */ diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index f835b328b..16051c6f3 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -33,6 +33,8 @@ enum cpuid_leafs CPUID_7_EDX, CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, + CPUID_LNX_5, + NR_CPUID_WORDS, }; #define X86_CAP_FMT_NUM "%d:%d" @@ -96,8 +98,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 22)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -121,8 +124,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 22)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index b97a70aa4..7ded92672 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NCAPINTS 22 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -427,11 +427,24 @@ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ /* + * Extended auxiliary flags: Linux defined - for features scattered in various + * CPUID levels like 0x80000022, etc and Linux defined features. + * + * Reuse free bits when adding new feature flags! + */ +#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */ +#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */ +#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */ +#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */ +#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ + +/* * BUG word(s) */ #define X86_BUG(x) (NCAPINTS*32 + (x)) @@ -478,4 +491,5 @@ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ #define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */ +#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 000037078..380e96314 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -112,6 +112,7 @@ #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define DISABLED_MASK21 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index c91326593..41d06822b 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -15,7 +15,8 @@ #include <linux/init.h> #include <linux/cc_platform.h> -#include <asm/bootparam.h> +#include <asm/asm.h> +struct boot_params; #ifdef CONFIG_X86_MEM_ENCRYPT void __init mem_encrypt_init(void); @@ -57,6 +58,11 @@ void __init mem_encrypt_free_decrypted_mem(void); void __init sev_es_init_vc_handling(void); +static inline u64 sme_get_me_mask(void) +{ + return RIP_REL_REF(sme_me_mask); +} + #define __bss_decrypted __section(".bss..decrypted") #else /* !CONFIG_AMD_MEM_ENCRYPT */ @@ -88,6 +94,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en static inline void mem_encrypt_free_decrypted_mem(void) { } +static inline u64 sme_get_me_mask(void) { return 0; } + #define __bss_decrypted #endif /* CONFIG_AMD_MEM_ENCRYPT */ @@ -105,11 +113,6 @@ void 
add_encrypt_protection_map(void); extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; -static inline u64 sme_get_me_mask(void) -{ - return sme_me_mask; -} - #endif /* __ASSEMBLY__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 005e41dc7..681e8401b 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -30,6 +30,7 @@ #define _EFER_SVME 12 /* Enable virtualization */ #define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */ #define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */ +#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */ #define EFER_SCE (1<<_EFER_SCE) #define EFER_LME (1<<_EFER_LME) @@ -38,6 +39,7 @@ #define EFER_SVME (1<<_EFER_SVME) #define EFER_LMSLE (1<<_EFER_LMSLE) #define EFER_FFXSR (1<<_EFER_FFXSR) +#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS) /* Intel MSRs. Some also available on other CPUs */ @@ -53,10 +55,13 @@ #define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ #define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */ #define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT) +#define SPEC_CTRL_BHI_DIS_S_SHIFT 10 /* Disable Branch History Injection behavior */ +#define SPEC_CTRL_BHI_DIS_S BIT(SPEC_CTRL_BHI_DIS_S_SHIFT) /* A mask for bits which the kernel toggles when controlling mitigations */ #define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \ - | SPEC_CTRL_RRSBA_DIS_S) + | SPEC_CTRL_RRSBA_DIS_S \ + | SPEC_CTRL_BHI_DIS_S) #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ @@ -155,6 +160,10 @@ * are restricted to targets in * kernel. */ +#define ARCH_CAP_BHI_NO BIT(20) /* + * CPU is not affected by Branch + * History Injection. + */ #define ARCH_CAP_PBRSB_NO BIT(24) /* * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 8f6f17a86..1e481d308 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -167,11 +167,20 @@ .Lskip_rsb_\@: .endm +/* + * The CALL to srso_alias_untrain_ret() must be patched in directly at + * the spot where untraining must be done, ie., srso_alias_untrain_ret() + * must be the target of a CALL instruction instead of indirectly + * jumping to a wrapper which then calls it. Therefore, this macro is + * called outside of __UNTRAIN_RET below, for the time being, before the + * kernel can support nested alternatives with arbitrary nesting. + */ +.macro CALL_UNTRAIN_RET #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_UNTRAIN_RET "call entry_untrain_ret" -#else -#define CALL_UNTRAIN_RET "" + ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \ + "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS #endif +.endm /* * Mitigate RETBleed for AMD/Hygon Zen uarch. 
Requires KERNEL CR3 because the @@ -188,9 +197,8 @@ #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END - ALTERNATIVE_2 "", \ - CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ - "call entry_ibpb", X86_FEATURE_ENTRY_IBPB + CALL_UNTRAIN_RET + ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif .endm @@ -207,6 +215,19 @@ .Lskip_verw_\@: .endm +#ifdef CONFIG_X86_64 +.macro CLEAR_BRANCH_HISTORY + ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP +.endm + +.macro CLEAR_BRANCH_HISTORY_VMEXIT + ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT +.endm +#else +#define CLEAR_BRANCH_HISTORY +#define CLEAR_BRANCH_HISTORY_VMEXIT +#endif + #else /* __ASSEMBLY__ */ #define ANNOTATE_RETPOLINE_SAFE \ @@ -235,6 +256,10 @@ extern void srso_alias_untrain_ret(void); extern void entry_untrain_ret(void); extern void entry_ibpb(void); +#ifdef CONFIG_X86_64 +extern void clear_bhb_loop(void); +#endif + extern void (*x86_return_thunk)(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 7ba1726b7..e9187ddd3 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -99,6 +99,7 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h index cf98fc286..c57dd2115 100644 --- a/arch/x86/include/asm/sev.h +++ b/arch/x86/include/asm/sev.h @@ -196,12 +196,12 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd unsigned long npages); void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages); -void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op); void snp_set_memory_shared(unsigned long vaddr, unsigned long npages); void snp_set_memory_private(unsigned long vaddr, unsigned long npages); void snp_set_wakeup_secondary_cpu(void); bool snp_init(struct boot_params *bp); void __init __noreturn snp_abort(void); +void snp_dmi_setup(void); int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio); u64 snp_get_unsupported_features(u64 status); u64 sev_get_status(void); @@ -219,12 +219,12 @@ static inline void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } static inline void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { } -static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { } static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { } static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { } static inline void snp_set_wakeup_secondary_cpu(void) { } static inline bool snp_init(struct boot_params *bp) { return false; } static inline void snp_abort(void) { } +static inline void snp_dmi_setup(void) { } static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio) { return -ENOTTY; diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index a800abb1a..d8416b3bf 100644 --- 
a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h @@ -12,11 +12,6 @@ /* image of the saved processor state */ struct saved_context { - /* - * On x86_32, all segment registers except gs are saved at kernel - * entry in pt_regs. - */ - u16 gs; unsigned long cr0, cr2, cr3, cr4; u64 misc_enable; struct saved_msrs saved_msrs; @@ -27,6 +22,11 @@ struct saved_context { unsigned long tr; unsigned long safety; unsigned long return_address; + /* + * On x86_32, all segment registers except gs are saved at kernel + * entry in pt_regs. + */ + u16 gs; bool misc_enable_saved; } __attribute__((packed)); diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 5b85987a5..2725a4502 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h @@ -16,19 +16,17 @@ #include <asm/thread_info.h> /* for TS_COMPAT */ #include <asm/unistd.h> +/* This is used purely for kernel/trace/trace_syscalls.c */ typedef long (*sys_call_ptr_t)(const struct pt_regs *); extern const sys_call_ptr_t sys_call_table[]; -#if defined(CONFIG_X86_32) -#define ia32_sys_call_table sys_call_table -#else /* * These may not exist, but still put the prototypes in so we * can use IS_ENABLED(). */ -extern const sys_call_ptr_t ia32_sys_call_table[]; -extern const sys_call_ptr_t x32_sys_call_table[]; -#endif +extern long ia32_sys_call(const struct pt_regs *, unsigned int nr); +extern long x32_sys_call(const struct pt_regs *, unsigned int nr); +extern long x64_sys_call(const struct pt_regs *, unsigned int nr); /* * Only the low 32 bits of orig_ax are meaningful, so we return int. @@ -129,6 +127,7 @@ static inline int syscall_get_arch(struct task_struct *task) void do_syscall_64(struct pt_regs *regs, int nr); void do_int80_syscall_32(struct pt_regs *regs); long do_fast_syscall_32(struct pt_regs *regs); +void do_int80_emulation(struct pt_regs *regs); #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index ab60a71a8..472f0263d 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h @@ -4,6 +4,7 @@ #include <linux/seqlock.h> #include <uapi/asm/vsyscall.h> +#include <asm/page_types.h> #ifdef CONFIG_X86_VSYSCALL_EMULATION extern void map_vsyscall(void); @@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code, } #endif +/* + * The (legacy) vsyscall page is the long page in the kernel portion + * of the address space that has user-accessible permissions. 
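As a usage sketch for the helper introduced here (the caller below is hypothetical), a fault path can cheaply test whether a faulting address hits the vsyscall page:

static bool fault_hits_vsyscall(unsigned long address)
{
	/* page-align and compare against the fixed vsyscall address */
	return IS_ENABLED(CONFIG_X86_VSYSCALL_EMULATION) &&
	       is_vsyscall_vaddr(address);
}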
+ */ +static inline bool is_vsyscall_vaddr(unsigned long vaddr) +{ + return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); +} + #endif /* _ASM_X86_VSYSCALL_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 034e62838..c3e910b1d 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -30,12 +30,13 @@ struct x86_init_mpparse { * @reserve_resources: reserve the standard resources for the * platform * @memory_setup: platform specific memory setup - * + * @dmi_setup: platform specific DMI setup */ struct x86_init_resources { void (*probe_roms)(void); void (*reserve_resources)(void); char *(*memory_setup)(void); + void (*dmi_setup)(void); }; /** diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c index 8d8752b44..ff8f25fac 100644 --- a/arch/x86/kernel/acpi/cppc.c +++ b/arch/x86/kernel/acpi/cppc.c @@ -20,7 +20,7 @@ bool cpc_supported_by_cpu(void) (boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f))) return true; else if (boot_cpu_data.x86 == 0x17 && - boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f) + boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f) return true; return boot_cpu_has(X86_FEATURE_CPPC); } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c1d09c884..425092806 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -997,11 +997,11 @@ static bool cpu_has_zenbleed_microcode(void) u32 good_rev = 0; switch (boot_cpu_data.x86_model) { - case 0x30 ... 0x3f: good_rev = 0x0830107a; break; - case 0x60 ... 0x67: good_rev = 0x0860010b; break; - case 0x68 ... 0x6f: good_rev = 0x08608105; break; - case 0x70 ... 0x7f: good_rev = 0x08701032; break; - case 0xa0 ... 0xaf: good_rev = 0x08a00008; break; + case 0x30 ... 0x3f: good_rev = 0x0830107b; break; + case 0x60 ... 0x67: good_rev = 0x0860010c; break; + case 0x68 ... 0x6f: good_rev = 0x08608107; break; + case 0x70 ... 0x7f: good_rev = 0x08701033; break; + case 0xa0 ... 0xaf: good_rev = 0x08a00009; break; default: return false; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c68789fdc..96bd3ee83 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1354,19 +1354,21 @@ spectre_v2_user_select_mitigation(void) } /* - * If no STIBP, enhanced IBRS is enabled, or SMT impossible, STIBP + * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP * is not required. * - * Enhanced IBRS also protects against cross-thread branch target + * Intel's Enhanced IBRS also protects against cross-thread branch target * injection in user-mode as the IBRS bit remains always set which * implicitly enables cross-thread protections. However, in legacy IBRS * mode, the IBRS bit is set only on kernel entry and cleared on return - * to userspace. This disables the implicit cross-thread protection, - * so allow for STIBP to be selected in that case. + * to userspace. AMD Automatic IBRS also does not protect userspace. + * These modes therefore disable the implicit cross-thread protection, + * so allow for STIBP to be selected in those cases. 
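Boiled down, the condition changed in the hunk that follows is equivalent to the sketch below (illustrative only): STIBP is only redundant when the always-on IBRS flavour is Intel eIBRS, because AMD Automatic IBRS leaves user space unprotected:

static bool stibp_redundant(void)
{
	return spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	       !boot_cpu_has(X86_FEATURE_AUTOIBRS);
}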
*/ if (!boot_cpu_has(X86_FEATURE_STIBP) || !smt_possible || - spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && + !boot_cpu_has(X86_FEATURE_AUTOIBRS))) return; /* @@ -1396,9 +1398,9 @@ static const char * const spectre_v2_strings[] = { [SPECTRE_V2_NONE] = "Vulnerable", [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", - [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS", - [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE", - [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines", + [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", + [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", + [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", [SPECTRE_V2_IBRS] = "Mitigation: IBRS", }; @@ -1467,7 +1469,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { - pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n", + pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", mitigation_options[i].option); return SPECTRE_V2_CMD_AUTO; } @@ -1582,6 +1584,79 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_ dump_stack(); } +/* + * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by + * branch history in userspace. Not needed if BHI_NO is set. + */ +static bool __init spec_ctrl_bhi_dis(void) +{ + if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) + return false; + + x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; + update_spec_ctrl(x86_spec_ctrl_base); + setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); + + return true; +} + +enum bhi_mitigations { + BHI_MITIGATION_OFF, + BHI_MITIGATION_ON, + BHI_MITIGATION_AUTO, +}; + +static enum bhi_mitigations bhi_mitigation __ro_after_init = + IS_ENABLED(CONFIG_SPECTRE_BHI_ON) ? BHI_MITIGATION_ON : + IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? 
BHI_MITIGATION_OFF : + BHI_MITIGATION_AUTO; + +static int __init spectre_bhi_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + bhi_mitigation = BHI_MITIGATION_OFF; + else if (!strcmp(str, "on")) + bhi_mitigation = BHI_MITIGATION_ON; + else if (!strcmp(str, "auto")) + bhi_mitigation = BHI_MITIGATION_AUTO; + else + pr_err("Ignoring unknown spectre_bhi option (%s)", str); + + return 0; +} +early_param("spectre_bhi", spectre_bhi_parse_cmdline); + +static void __init bhi_select_mitigation(void) +{ + if (bhi_mitigation == BHI_MITIGATION_OFF) + return; + + /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */ + if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) && + !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA)) + return; + + if (spec_ctrl_bhi_dis()) + return; + + if (!IS_ENABLED(CONFIG_X86_64)) + return; + + /* Mitigate KVM by default */ + setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT); + pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n"); + + if (bhi_mitigation == BHI_MITIGATION_AUTO) + return; + + /* Mitigate syscalls when the mitigation is forced =on */ + setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); + pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n"); +} + static void __init spectre_v2_select_mitigation(void) { enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -1652,8 +1727,12 @@ static void __init spectre_v2_select_mitigation(void) pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); if (spectre_v2_in_ibrs_mode(mode)) { - x86_spec_ctrl_base |= SPEC_CTRL_IBRS; - update_spec_ctrl(x86_spec_ctrl_base); + if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); + } else { + x86_spec_ctrl_base |= SPEC_CTRL_IBRS; + update_spec_ctrl(x86_spec_ctrl_base); + } } switch (mode) { @@ -1688,6 +1767,9 @@ static void __init spectre_v2_select_mitigation(void) mode == SPECTRE_V2_RETPOLINE) spec_ctrl_disable_kernel_rrsba(); + if (boot_cpu_has(X86_BUG_BHI)) + bhi_select_mitigation(); + spectre_v2_enabled = mode; pr_info("%s\n", spectre_v2_strings[mode]); @@ -1737,8 +1819,8 @@ static void __init spectre_v2_select_mitigation(void) /* * Retpoline protects the kernel, but doesn't protect firmware. IBRS * and Enhanced IBRS protect firmware too, so enable IBRS around - * firmware calls only when IBRS / Enhanced IBRS aren't otherwise - * enabled. + * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't + * otherwise enabled. * * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because * the user might select retpoline on the kernel command line and if @@ -2568,74 +2650,74 @@ static const char * const l1tf_vmx_states[] = { static ssize_t l1tf_show_state(char *buf) { if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) - return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && sched_smt_active())) { - return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, - l1tf_vmx_states[l1tf_vmx_mitigation]); + return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation]); } - return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, - l1tf_vmx_states[l1tf_vmx_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, + l1tf_vmx_states[l1tf_vmx_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t itlb_multihit_show_state(char *buf) { if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || !boot_cpu_has(X86_FEATURE_VMX)) - return sprintf(buf, "KVM: Mitigation: VMX unsupported\n"); + return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); else if (!(cr4_read_shadow() & X86_CR4_VMXE)) - return sprintf(buf, "KVM: Mitigation: VMX disabled\n"); + return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); else if (itlb_multihit_kvm_mitigation) - return sprintf(buf, "KVM: Mitigation: Split huge pages\n"); + return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); else - return sprintf(buf, "KVM: Vulnerable\n"); + return sysfs_emit(buf, "KVM: Vulnerable\n"); } #else static ssize_t l1tf_show_state(char *buf) { - return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG); + return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); } static ssize_t itlb_multihit_show_state(char *buf) { - return sprintf(buf, "Processor vulnerable\n"); + return sysfs_emit(buf, "Processor vulnerable\n"); } #endif static ssize_t mds_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { - return sprintf(buf, "%s; SMT Host state unknown\n", - mds_strings[mds_mitigation]); + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + mds_strings[mds_mitigation]); } if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { - return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], - (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : - sched_smt_active() ? "mitigated" : "disabled")); + return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : + sched_smt_active() ? "mitigated" : "disabled")); } - return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], + sched_smt_active() ? "vulnerable" : "disabled"); } static ssize_t tsx_async_abort_show_state(char *buf) { if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || (taa_mitigation == TAA_MITIGATION_OFF)) - return sprintf(buf, "%s\n", taa_strings[taa_mitigation]); + return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { - return sprintf(buf, "%s; SMT Host state unknown\n", - taa_strings[taa_mitigation]); + return sysfs_emit(buf, "%s; SMT Host state unknown\n", + taa_strings[taa_mitigation]); } - return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], - sched_smt_active() ? "vulnerable" : "disabled"); + return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], + sched_smt_active() ? 
"vulnerable" : "disabled"); } static ssize_t mmio_stale_data_show_state(char *buf) @@ -2662,20 +2744,21 @@ static ssize_t rfds_show_state(char *buf) static char *stibp_state(void) { - if (spectre_v2_in_eibrs_mode(spectre_v2_enabled)) + if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && + !boot_cpu_has(X86_FEATURE_AUTOIBRS)) return ""; switch (spectre_v2_user_stibp) { case SPECTRE_V2_USER_NONE: - return ", STIBP: disabled"; + return "; STIBP: disabled"; case SPECTRE_V2_USER_STRICT: - return ", STIBP: forced"; + return "; STIBP: forced"; case SPECTRE_V2_USER_STRICT_PREFERRED: - return ", STIBP: always-on"; + return "; STIBP: always-on"; case SPECTRE_V2_USER_PRCTL: case SPECTRE_V2_USER_SECCOMP: if (static_key_enabled(&switch_to_cond_stibp)) - return ", STIBP: conditional"; + return "; STIBP: conditional"; } return ""; } @@ -2684,10 +2767,10 @@ static char *ibpb_state(void) { if (boot_cpu_has(X86_FEATURE_IBPB)) { if (static_key_enabled(&switch_mm_always_ibpb)) - return ", IBPB: always-on"; + return "; IBPB: always-on"; if (static_key_enabled(&switch_mm_cond_ibpb)) - return ", IBPB: conditional"; - return ", IBPB: disabled"; + return "; IBPB: conditional"; + return "; IBPB: disabled"; } return ""; } @@ -2697,58 +2780,76 @@ static char *pbrsb_eibrs_state(void) if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) - return ", PBRSB-eIBRS: SW sequence"; + return "; PBRSB-eIBRS: SW sequence"; else - return ", PBRSB-eIBRS: Vulnerable"; + return "; PBRSB-eIBRS: Vulnerable"; } else { - return ", PBRSB-eIBRS: Not affected"; + return "; PBRSB-eIBRS: Not affected"; } } +static const char * const spectre_bhi_state(void) +{ + if (!boot_cpu_has_bug(X86_BUG_BHI)) + return "; BHI: Not affected"; + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) + return "; BHI: BHI_DIS_S"; + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) + return "; BHI: SW loop, KVM: SW loop"; + else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && + !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA)) + return "; BHI: Retpoline"; + else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT)) + return "; BHI: Syscall hardening, KVM: SW loop"; + + return "; BHI: Vulnerable (Syscall hardening enabled)"; +} + static ssize_t spectre_v2_show_state(char *buf) { if (spectre_v2_enabled == SPECTRE_V2_LFENCE) - return sprintf(buf, "Vulnerable: LFENCE\n"); + return sysfs_emit(buf, "Vulnerable: LFENCE\n"); if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) - return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); + return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); if (sched_smt_active() && unprivileged_ebpf_enabled() && spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) - return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); + return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); - return sprintf(buf, "%s%s%s%s%s%s%s\n", - spectre_v2_strings[spectre_v2_enabled], - ibpb_state(), - boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", - stibp_state(), - boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", - pbrsb_eibrs_state(), - spectre_v2_module_string()); + return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", + spectre_v2_strings[spectre_v2_enabled], + ibpb_state(), + boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", + stibp_state(), + boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? 
"; RSB filling" : "", + pbrsb_eibrs_state(), + spectre_bhi_state(), + /* this should always be at the end */ + spectre_v2_module_string()); } static ssize_t srbds_show_state(char *buf) { - return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]); + return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); } static ssize_t retbleed_show_state(char *buf) { if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && - boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) - return sprintf(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); - return sprintf(buf, "%s; SMT %s\n", - retbleed_strings[retbleed_mitigation], - !sched_smt_active() ? "disabled" : - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? - "enabled with STIBP protection" : "vulnerable"); + return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], + !sched_smt_active() ? "disabled" : + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? + "enabled with STIBP protection" : "vulnerable"); } - return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); + return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } static ssize_t gds_show_state(char *buf) @@ -2770,26 +2871,26 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr char *buf, unsigned int bug) { if (!boot_cpu_has_bug(bug)) - return sprintf(buf, "Not affected\n"); + return sysfs_emit(buf, "Not affected\n"); switch (bug) { case X86_BUG_CPU_MELTDOWN: if (boot_cpu_has(X86_FEATURE_PTI)) - return sprintf(buf, "Mitigation: PTI\n"); + return sysfs_emit(buf, "Mitigation: PTI\n"); if (hypervisor_is_type(X86_HYPER_XEN_PV)) - return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); + return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); break; case X86_BUG_SPECTRE_V1: - return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); + return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); case X86_BUG_SPECTRE_V2: return spectre_v2_show_state(buf); case X86_BUG_SPEC_STORE_BYPASS: - return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); + return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); case X86_BUG_L1TF: if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) @@ -2828,7 +2929,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr break; } - return sprintf(buf, "Vulnerable\n"); + return sysfs_emit(buf, "Vulnerable\n"); } ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 758938c94..08fe77d2a 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1144,6 +1144,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) #define NO_SPECTRE_V2 BIT(8) #define NO_MMIO BIT(9) #define NO_EIBRS_PBRSB BIT(10) +#define NO_BHI BIT(11) #define VULNWL(vendor, family, model, whitelist) \ X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) @@ -1206,18 +1207,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { 
VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), /* AMD Family 0xf - 0x12 */ - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI), + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI), + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI), + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI), /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), - VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI), + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI), /* Zhaoxin Family 7 */ - VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), - VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI), + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI), {} }; @@ -1362,8 +1363,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) !cpu_has(c, X86_FEATURE_AMD_SSB_NO)) setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS); - if (ia32_cap & ARCH_CAP_IBRS_ALL) + /* + * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature + * flag and protect from vendor-specific bugs via the whitelist. 
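The detection added further down in this file condenses to roughly the following (the flag and helper names match the kernel, the standalone function is hypothetical): the synthetic eIBRS flag comes from either vendor's signal, and X86_BUG_BHI is set unless the CPU is whitelisted or reports BHI_NO:

static void example_bhi_detect(struct cpuinfo_x86 *c, u64 ia32_cap)
{
	if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS))
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

	if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
	    (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) ||
	     cpu_has(c, X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);
}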
+ */ + if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) { setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED); + if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && + !(ia32_cap & ARCH_CAP_PBRSB_NO)) + setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); + } if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) && !(ia32_cap & ARCH_CAP_MDS_NO)) { @@ -1425,11 +1434,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) setup_force_cpu_bug(X86_BUG_RETBLEED); } - if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) && - !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) && - !(ia32_cap & ARCH_CAP_PBRSB_NO)) - setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB); - if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); @@ -1451,6 +1455,13 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (vulnerable_to_rfds(ia32_cap)) setup_force_cpu_bug(X86_BUG_RFDS); + /* When virtualized, eIBRS could be hidden, assume vulnerable */ + if (!(ia32_cap & ARCH_CAP_BHI_NO) && + !cpu_matches(cpu_vuln_whitelist, NO_BHI) && + (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) || + boot_cpu_has(X86_FEATURE_HYPERVISOR))) + setup_force_cpu_bug(X86_BUG_BHI); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index cad6ea191..359218bc1 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2471,12 +2471,14 @@ static ssize_t set_bank(struct device *s, struct device_attribute *attr, return -EINVAL; b = &per_cpu(mce_banks_array, s->id)[bank]; - if (!b->init) return -ENODEV; b->ctl = new; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); return size; } diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 34d9e899e..9b039e963 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -344,7 +344,7 @@ static void __init ms_hyperv_init_platform(void) /* Isolation VMs are unenlightened SEV-based VMs, thus this check: */ if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) { if (hv_get_isolation_type() != HV_ISOLATION_TYPE_NONE) - cc_set_vendor(CC_VENDOR_HYPERV); + cc_vendor = CC_VENDOR_HYPERV; } } diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 0b5c6c76f..4761d489a 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -281,14 +281,10 @@ struct rftype { * struct mbm_state - status for each MBM counter in each domain * @prev_bw_bytes: Previous bytes value read for bandwidth calculation * @prev_bw: The most recent bandwidth in MBps - * @delta_bw: Difference between the current and previous bandwidth - * @delta_comp: Indicates whether to compute the delta_bw */ struct mbm_state { u64 prev_bw_bytes; u32 prev_bw; - u32 delta_bw; - bool delta_comp; }; /** diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index 77538abeb..b9adb7077 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -428,9 +428,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr) cur_bw = bytes / SZ_1M; - if (m->delta_comp) - m->delta_bw = abs(cur_bw - m->prev_bw); - m->delta_comp = false; m->prev_bw = cur_bw; } @@ -508,11 +505,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) { u32 closid, rmid, cur_msr_val, new_msr_val; struct mbm_state *pmbm_data, *cmbm_data; - u32 cur_bw, delta_bw, user_bw; struct rdt_resource *r_mba; struct rdt_domain 
*dom_mba; struct list_head *head; struct rdtgroup *entry; + u32 cur_bw, user_bw; if (!is_mbm_local_enabled()) return; @@ -531,7 +528,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) cur_bw = pmbm_data->prev_bw; user_bw = dom_mba->mbps_val[closid]; - delta_bw = pmbm_data->delta_bw; /* MBA resource doesn't support CDP */ cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); @@ -543,49 +539,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) list_for_each_entry(entry, head, mon.crdtgrp_list) { cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; cur_bw += cmbm_data->prev_bw; - delta_bw += cmbm_data->delta_bw; } /* * Scale up/down the bandwidth linearly for the ctrl group. The * bandwidth step is the bandwidth granularity specified by the * hardware. - * - * The delta_bw is used when increasing the bandwidth so that we - * dont alternately increase and decrease the control values - * continuously. - * - * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if - * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep - * switching between 90 and 110 continuously if we only check - * cur_bw < user_bw. + * Always increase throttling if current bandwidth is above the + * target set by user. + * But avoid thrashing up and down on every poll by checking + * whether a decrease in throttling is likely to push the group + * back over target. E.g. if currently throttling to 30% of bandwidth + * on a system with 10% granularity steps, check whether moving to + * 40% would go past the limit by multiplying current bandwidth by + * "(30 + 10) / 30". */ if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { new_msr_val = cur_msr_val - r_mba->membw.bw_gran; } else if (cur_msr_val < MAX_MBA_BW && - (user_bw > (cur_bw + delta_bw))) { + (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { new_msr_val = cur_msr_val + r_mba->membw.bw_gran; } else { return; } resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); - - /* - * Delta values are updated dynamically package wise for each - * rdtgrp every time the throttle MSR changes value. - * - * This is because (1)the increase in bandwidth is not perfectly - * linear and only "approximately" linear even when the hardware - * says it is linear.(2)Also since MBA is a core specific - * mechanism, the delta values vary based on number of cores used - * by the rdtgrp. 
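Plugging hypothetical numbers into the estimate used above (the helper name is made up): measuring 90 MBps while throttled to 30% with 10% steps predicts 120 MBps after loosening one step, which already exceeds a 100 MBps user target, so the MSR is left alone:

static unsigned int mba_next_bw_estimate(unsigned int cur_bw,
					 unsigned int cur_msr_val,
					 unsigned int step)
{
	/* e.g. 90 MBps at 30% with 10% steps: 90 * (30 + 10) / 30 = 120 MBps,
	 * above a 100 MBps target, so throttling would not be relaxed */
	return cur_bw * (cur_msr_val + step) / cur_msr_val;
}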
- */ - pmbm_data->delta_comp = true; - list_for_each_entry(entry, head, mon.crdtgrp_list) { - cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; - cmbm_data->delta_comp = true; - } } static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index fc01f81f6..28c357cf7 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c @@ -28,6 +28,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 }, { X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 }, { X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 }, + { X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 }, { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 }, { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 }, { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 }, @@ -46,6 +47,7 @@ static const struct cpuid_bit cpuid_bits[] = { { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, + { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, { 0, 0, 0, 0, 0 } }; diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c index e963344b0..53935b4d6 100644 --- a/arch/x86/kernel/eisa.c +++ b/arch/x86/kernel/eisa.c @@ -2,6 +2,7 @@ /* * EISA specific code */ +#include <linux/cc_platform.h> #include <linux/ioport.h> #include <linux/eisa.h> #include <linux/io.h> @@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void) { void __iomem *p; - if (xen_pv_domain() && !xen_initial_domain()) + if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return 0; p = ioremap(0x0FFFD9, 4); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index ebe698f8a..2aa849705 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -177,10 +177,11 @@ void fpu__init_cpu_xstate(void) * Must happen after CR4 setup and before xsetbv() to allow KVM * lazy passthrough. Write independent of the dynamic state static * key as that does not work on the boot CPU. This also ensures - * that any stale state is wiped out from XFD. + * that any stale state is wiped out from XFD. Reset the per CPU + * xfd cache too. */ if (cpu_feature_enabled(X86_FEATURE_XFD)) - wrmsrl(MSR_IA32_XFD, init_fpstate.xfd); + xfd_set_state(init_fpstate.xfd); /* * XCR_XFEATURE_ENABLED_MASK (aka. 
XCR0) sets user features diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h index 3518fb26d..19ca623ff 100644 --- a/arch/x86/kernel/fpu/xstate.h +++ b/arch/x86/kernel/fpu/xstate.h @@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs #endif #ifdef CONFIG_X86_64 +static inline void xfd_set_state(u64 xfd) +{ + wrmsrl(MSR_IA32_XFD, xfd); + __this_cpu_write(xfd_state, xfd); +} + static inline void xfd_update_state(struct fpstate *fpstate) { if (fpu_state_size_dynamic()) { u64 xfd = fpstate->xfd; - if (__this_cpu_read(xfd_state) != xfd) { - wrmsrl(MSR_IA32_XFD, xfd); - __this_cpu_write(xfd_state, xfd); - } + if (__this_cpu_read(xfd_state) != xfd) + xfd_set_state(xfd); } } extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu); #else +static inline void xfd_set_state(u64 xfd) { } + static inline void xfd_update_state(struct fpstate *fpstate) { } static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) { diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6120f25b0..991f00c81 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -301,7 +301,16 @@ static int can_probe(unsigned long paddr) kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset, bool *on_func_entry) { - if (is_endbr(*(u32 *)addr)) { + u32 insn; + + /* + * Since 'addr' is not guaranteed to be safe to access, use + * copy_from_kernel_nofault() to read the instruction: + */ + if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32))) + return NULL; + + if (is_endbr(insn)) { *on_func_entry = !offset || offset == 4; if (*on_func_entry) offset = 4; diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c index 319fef37d..cc2c34ba7 100644 --- a/arch/x86/kernel/probe_roms.c +++ b/arch/x86/kernel/probe_roms.c @@ -203,16 +203,6 @@ void __init probe_roms(void) unsigned char c; int i; - /* - * The ROM memory range is not part of the e820 table and is therefore not - * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted - * memory, and SNP requires encrypted memory to be validated before access. - * Do that here. - */ - snp_prep_memory(video_rom_resource.start, - ((system_rom_resource.end + 1) - video_rom_resource.start), - SNP_PAGE_STATE_PRIVATE); - /* video rom */ upper = adapter_rom_resources[0].start; for (start = video_rom_resource.start; start < upper; start += 2048) { diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 804a25238..18a034613 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -9,7 +9,6 @@ #include <linux/console.h> #include <linux/crash_dump.h> #include <linux/dma-map-ops.h> -#include <linux/dmi.h> #include <linux/efi.h> #include <linux/ima.h> #include <linux/init_ohci1394_dma.h> @@ -34,6 +33,7 @@ #include <asm/numa.h> #include <asm/bios_ebda.h> #include <asm/bugs.h> +#include <asm/coco.h> #include <asm/cpu.h> #include <asm/efi.h> #include <asm/gart.h> @@ -1032,7 +1032,7 @@ void __init setup_arch(char **cmdline_p) if (efi_enabled(EFI_BOOT)) efi_init(); - dmi_setup(); + x86_init.resources.dmi_setup(); /* * VMware detection requires dmi to be available, so this @@ -1133,6 +1133,7 @@ void __init setup_arch(char **cmdline_p) * memory size. 
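The xfd_set_state()/xfd_update_state() pair above is the usual write-through-cache pattern for an expensive MSR; a generic sketch with hypothetical names:

static DEFINE_PER_CPU(u64, example_msr_shadow);

static inline void example_msr_set(u64 val)
{
	wrmsrl(MSR_IA32_XFD, val);		/* always hits the hardware */
	__this_cpu_write(example_msr_shadow, val);
}

static inline void example_msr_update(u64 val)
{
	if (__this_cpu_read(example_msr_shadow) != val)
		example_msr_set(val);		/* skip redundant WRMSRs */
}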
*/ sev_setup_arch(); + cc_random_init(); efi_fake_memmap(); efi_find_mirror(); diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c index 71d869870..271e70d57 100644 --- a/arch/x86/kernel/sev-shared.c +++ b/arch/x86/kernel/sev-shared.c @@ -553,9 +553,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0; /* Skip post-processing for out-of-range zero leafs. */ - if (!(leaf->fn <= cpuid_std_range_max || - (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) || - (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max))) + if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) || + (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) || + (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max)))) return 0; } @@ -1060,10 +1060,10 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info) const struct snp_cpuid_fn *fn = &cpuid_table->fn[i]; if (fn->eax_in == 0x0) - cpuid_std_range_max = fn->eax; + RIP_REL_REF(cpuid_std_range_max) = fn->eax; else if (fn->eax_in == 0x40000000) - cpuid_hyp_range_max = fn->eax; + RIP_REL_REF(cpuid_hyp_range_max) = fn->eax; else if (fn->eax_in == 0x80000000) - cpuid_ext_range_max = fn->eax; + RIP_REL_REF(cpuid_ext_range_max) = fn->eax; } } diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index c8dfb0fdd..e35fcc8d4 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -23,6 +23,7 @@ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/psp-sev.h> +#include <linux/dmi.h> #include <uapi/linux/sev-guest.h> #include <asm/cpu_entry_area.h> @@ -736,7 +737,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd * This eliminates worries about jump tables or checking boot_cpu_data * in the cc_platform_has() function. */ - if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED)) return; /* @@ -758,7 +759,7 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr * This eliminates worries about jump tables or checking boot_cpu_data * in the cc_platform_has() function. */ - if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED)) return; /* Invalidate the memory pages before they are marked shared in the RMP table. */ @@ -768,21 +769,6 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED); } -void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) -{ - unsigned long vaddr, npages; - - vaddr = (unsigned long)__va(paddr); - npages = PAGE_ALIGN(sz) >> PAGE_SHIFT; - - if (op == SNP_PAGE_STATE_PRIVATE) - early_snp_set_memory_private(vaddr, paddr, npages); - else if (op == SNP_PAGE_STATE_SHARED) - early_snp_set_memory_shared(vaddr, paddr, npages); - else - WARN(1, "invalid memory op %d\n", op); -} - static int vmgexit_psc(struct snp_psc_desc *desc) { int cur_entry, end_entry, ret = 0; @@ -2152,6 +2138,17 @@ void __init __noreturn snp_abort(void) sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); } +/* + * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are + * enabled, as the alternative (fallback) logic for DMI probing in the legacy + * ROM region can cause a crash since this region is not pre-validated. 
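All of the RIP_REL_REF() conversions above follow one pattern: code that may run before kernel relocations are applied must not take the absolute address of a global. A minimal usage sketch (the wrapper function is hypothetical, sev_status is the real variable):

static bool snp_active_early(void)
{
	/* forces a RIP-relative load of sev_status */
	return RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED;
}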
+ */ +void __init snp_dmi_setup(void) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + dmi_setup(); +} + static void dump_cpuid_table(void) { const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table(); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 41e5b4cb8..a4a921b9e 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -3,6 +3,7 @@ * * For licencing details see kernel-base/COPYING */ +#include <linux/dmi.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/export.h> @@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = { .probe_roms = probe_roms, .reserve_resources = reserve_standard_io_resources, .memory_setup = e820__memory_setup_default, + .dmi_setup = dmi_setup, }, .mpparse = { diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index c3ef1fc60..62a44455c 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -535,9 +535,9 @@ static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf) } static __always_inline -void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) +void kvm_cpu_cap_init_kvm_defined(enum kvm_only_cpuid_leafs leaf, u32 mask) { - /* Use kvm_cpu_cap_mask for non-scattered leafs. */ + /* Use kvm_cpu_cap_mask for leafs that aren't KVM-only. */ BUILD_BUG_ON(leaf < NCAPINTS); kvm_cpu_caps[leaf] = mask; @@ -547,7 +547,7 @@ void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask) static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask) { - /* Use kvm_cpu_cap_init_scattered for scattered leafs. */ + /* Use kvm_cpu_cap_init_kvm_defined for KVM-only leafs. */ BUILD_BUG_ON(leaf >= NCAPINTS); kvm_cpu_caps[leaf] &= mask; @@ -652,11 +652,16 @@ void kvm_set_cpu_caps(void) F(AVX_VNNI) | F(AVX512_BF16) ); + kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX, + F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) | + F(BHI_CTRL) | F(MCDT_NO) + ); + kvm_cpu_cap_mask(CPUID_D_1_EAX, F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd ); - kvm_cpu_cap_init_scattered(CPUID_12_EAX, + kvm_cpu_cap_init_kvm_defined(CPUID_12_EAX, SF(SGX1) | SF(SGX2) ); @@ -902,13 +907,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) break; /* function 7 has additional index. */ case 7: - entry->eax = min(entry->eax, 1u); + max_idx = entry->eax = min(entry->eax, 2u); cpuid_entry_override(entry, CPUID_7_0_EBX); cpuid_entry_override(entry, CPUID_7_ECX); cpuid_entry_override(entry, CPUID_7_EDX); - /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */ - if (entry->eax == 1) { + /* KVM only supports up to 0x7.2, capped above via min(). 
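For reference, CPUID leaf 0x7 advertises its highest valid subleaf in EAX of subleaf 0, which is what the min(entry->eax, 2u) cap above relies on; a small user-space probe (illustrative only) can walk the subleafs the same way:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, sub, max_sub;

	__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
	max_sub = eax;				/* highest supported subleaf */
	for (sub = 0; sub <= max_sub; sub++) {
		__get_cpuid_count(7, sub, &eax, &ebx, &ecx, &edx);
		printf("0x7.%u: ebx=%08x ecx=%08x edx=%08x\n", sub, ebx, ecx, edx);
	}
	return 0;
}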
*/ + if (max_idx >= 1) { entry = do_host_cpuid(array, function, 1); if (!entry) goto out; @@ -918,6 +923,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) entry->ecx = 0; entry->edx = 0; } + if (max_idx >= 2) { + entry = do_host_cpuid(array, function, 2); + if (!entry) + goto out; + + cpuid_entry_override(entry, CPUID_7_2_EDX); + entry->ecx = 0; + entry->ebx = 0; + entry->eax = 0; + } break; case 0xa: { /* Architectural Performance Monitoring */ union cpuid10_eax eax; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index edcf45e31..bfeafe485 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -40,6 +40,7 @@ #include "ioapic.h" #include "trace.h" #include "x86.h" +#include "xen.h" #include "cpuid.h" #include "hyperv.h" @@ -338,8 +339,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val) } /* Check if there are APF page ready requests pending */ - if (enabled) + if (enabled) { kvm_make_request(KVM_REQ_APF_READY, apic->vcpu); + kvm_xen_sw_enable_lapic(apic->vcpu); + } } static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index 7eeade35a..e43909d65 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -7,23 +7,44 @@ #include <asm/cpufeatures.h> /* - * Hardware-defined CPUID leafs that are scattered in the kernel, but need to - * be directly used by KVM. Note, these word values conflict with the kernel's - * "bug" caps, but KVM doesn't use those. + * Hardware-defined CPUID leafs that are either scattered by the kernel or are + * unknown to the kernel, but need to be directly used by KVM. Note, these + * word values conflict with the kernel's "bug" caps, but KVM doesn't use those. */ enum kvm_only_cpuid_leafs { CPUID_12_EAX = NCAPINTS, + CPUID_7_2_EDX, NR_KVM_CPU_CAPS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, }; +/* + * Define a KVM-only feature flag. + * + * For features that are scattered by cpufeatures.h, __feature_translate() also + * needs to be updated to translate the kernel-defined feature into the + * KVM-defined feature. + * + * For features that are 100% KVM-only, i.e. not defined by cpufeatures.h, + * forego the intermediate KVM_X86_FEATURE and directly define X86_FEATURE_* so + * that X86_FEATURE_* can be used in KVM. No __feature_translate() handling is + * needed in this case. + */ #define KVM_X86_FEATURE(w, f) ((w)*32 + (f)) /* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). 
*/ #define KVM_X86_FEATURE_SGX1 KVM_X86_FEATURE(CPUID_12_EAX, 0) #define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1) +/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */ +#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0) +#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1) +#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2) +#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3) +#define KVM_X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4) +#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5) + struct cpuid_reg { u32 function; u32 index; @@ -49,6 +70,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, + [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, }; /* @@ -61,10 +83,12 @@ static const struct cpuid_reg reverse_cpuid[] = { */ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) { + BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS); BUILD_BUG_ON(x86_leaf == CPUID_LNX_1); BUILD_BUG_ON(x86_leaf == CPUID_LNX_2); BUILD_BUG_ON(x86_leaf == CPUID_LNX_3); BUILD_BUG_ON(x86_leaf == CPUID_LNX_4); + BUILD_BUG_ON(x86_leaf == CPUID_LNX_5); BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid)); BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0); } @@ -75,12 +99,17 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf) */ static __always_inline u32 __feature_translate(int x86_feature) { - if (x86_feature == X86_FEATURE_SGX1) - return KVM_X86_FEATURE_SGX1; - else if (x86_feature == X86_FEATURE_SGX2) - return KVM_X86_FEATURE_SGX2; - - return x86_feature; +#define KVM_X86_TRANSLATE_FEATURE(f) \ + case X86_FEATURE_##f: return KVM_X86_FEATURE_##f + + switch (x86_feature) { + KVM_X86_TRANSLATE_FEATURE(SGX1); + KVM_X86_TRANSLATE_FEATURE(SGX2); + KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); + KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); + default: + return x86_feature; + } } static __always_inline u32 __feature_leaf(int x86_feature) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 3060fe4e9..d8e192ad5 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -76,9 +76,10 @@ struct enc_region { }; /* Called with the sev_bitmap_lock held, or on shutdown */ -static int sev_flush_asids(int min_asid, int max_asid) +static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid) { - int ret, asid, error = 0; + int ret, error = 0; + unsigned int asid; /* Check if there are any ASIDs to reclaim before performing a flush */ asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid); @@ -108,7 +109,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm) } /* Must be called with the sev_bitmap_lock held */ -static bool __sev_recycle_asids(int min_asid, int max_asid) +static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid) { if (sev_flush_asids(min_asid, max_asid)) return false; @@ -135,8 +136,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) static int sev_asid_new(struct kvm_sev_info *sev) { - int asid, min_asid, max_asid, ret; + /* + * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. + * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. + * Note: min ASID can end up larger than the max if basic SEV support is + * effectively disabled by disallowing use of ASIDs for SEV guests. + */ + unsigned int min_asid = sev->es_active ? 
1 : min_sev_asid; + unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; + unsigned int asid; bool retry = true; + int ret; + + if (min_asid > max_asid) + return -ENOTTY; WARN_ON(sev->misc_cg); sev->misc_cg = get_current_misc_cg(); ret = sev_misc_cg_try_charge(sev); @@ -149,12 +162,6 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); - /* - * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. - * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. - */ - min_asid = sev->es_active ? 1 : min_sev_asid; - max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); if (asid > max_asid) { @@ -179,7 +186,7 @@ e_uncharge: return ret; } -static int sev_get_asid(struct kvm *kvm) +static unsigned int sev_get_asid(struct kvm *kvm) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -276,8 +283,8 @@ e_no_asid: static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) { + unsigned int asid = sev_get_asid(kvm); struct sev_data_activate activate; - int asid = sev_get_asid(kvm); int ret; /* activate ASID on the given handle */ @@ -1958,20 +1965,22 @@ int sev_mem_enc_register_region(struct kvm *kvm, goto e_free; } - region->uaddr = range->addr; - region->size = range->size; - - list_add_tail(&region->list, &sev->regions_list); - mutex_unlock(&kvm->lock); - /* * The guest may change the memory encryption attribute from C=0 -> C=1 * or vice versa for this memory range. Lets make sure caches are * flushed to ensure that guest data gets written into memory with - * correct C-bit. + * correct C-bit. Note, this must be done before dropping kvm->lock, + * as region and its array of pages can be freed by a different task + * once kvm->lock is released. */ sev_clflush_pages(region->pages, region->npages); + region->uaddr = range->addr; + region->size = range->size; + + list_add_tail(&region->list, &sev->regions_list); + mutex_unlock(&kvm->lock); + return ret; e_free: @@ -2211,11 +2220,10 @@ void __init sev_hardware_setup(void) goto out; } - sev_asid_count = max_sev_asid - min_sev_asid + 1; - if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)) - goto out; - - pr_info("SEV supported: %u ASIDs\n", sev_asid_count); + if (min_sev_asid <= max_sev_asid) { + sev_asid_count = max_sev_asid - min_sev_asid + 1; + WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); + } sev_supported = true; /* SEV-ES support requested? */ @@ -2240,13 +2248,21 @@ void __init sev_hardware_setup(void) goto out; sev_es_asid_count = min_sev_asid - 1; - if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)) - goto out; - - pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count); + WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)); sev_es_supported = true; out: + if (boot_cpu_has(X86_FEATURE_SEV)) + pr_info("SEV %s (ASIDs %u - %u)\n", + sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" : + "unusable" : + "disabled", + min_sev_asid, max_sev_asid); + if (boot_cpu_has(X86_FEATURE_SEV_ES)) + pr_info("SEV-ES %s (ASIDs %u - %u)\n", + sev_es_supported ? "enabled" : "disabled", + min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); + sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; #endif @@ -2285,7 +2301,7 @@ int sev_cpu_init(struct svm_cpu_data *sd) */ static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va) { - int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid; + unsigned int asid = sev_get_asid(vcpu->kvm); /* * Note!
The address must be a kernel address, as regular page walk @@ -2606,7 +2622,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) void pre_sev_run(struct vcpu_svm *svm, int cpu) { struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); - int asid = sev_get_asid(svm->vcpu.kvm); + unsigned int asid = sev_get_asid(svm->vcpu.kvm); /* Assign the asid allocated with this SEV guest */ svm->asid = asid; diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index bc25589ad..6c1dcf44c 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -729,13 +729,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit, * Tracepoint for nested #vmexit because of interrupt pending */ TRACE_EVENT(kvm_invlpga, - TP_PROTO(__u64 rip, int asid, u64 address), + TP_PROTO(__u64 rip, unsigned int asid, u64 address), TP_ARGS(rip, asid, address), TP_STRUCT__entry( - __field( __u64, rip ) - __field( int, asid ) - __field( __u64, address ) + __field( __u64, rip ) + __field( unsigned int, asid ) + __field( __u64, address ) ), TP_fast_assign( @@ -744,7 +744,7 @@ TRACE_EVENT(kvm_invlpga, __entry->address = address; ), - TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", + TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx", __entry->rip, __entry->asid, __entry->address) ); diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index 0b2cad66d..b4f893722 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -242,6 +242,8 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) call vmx_spec_ctrl_restore_host + CLEAR_BRANCH_HISTORY_VMEXIT + /* Put return value in AX */ mov %_ASM_BX, %_ASM_AX diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 688bc7b72..f72476503 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1614,7 +1614,7 @@ static unsigned int num_msr_based_features; ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \ - ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR) + ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO) static u64 kvm_get_arch_capabilities(void) { @@ -7758,6 +7758,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, if (r < 0) return X86EMUL_UNHANDLEABLE; + + /* + * Mark the page dirty _before_ checking whether or not the CMPXCHG was + * successful, as the old value is written back on failure. Note, for + * live migration, this is unnecessarily conservative as CMPXCHG writes + * back the original value and the access is atomic, but KVM's ABI is + * that all writes are dirty logged, regardless of the value written. 
+ */ + kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa)); + if (r) return X86EMUL_CMPXCHG_FAILED; diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c index a58a426e6..684a39df6 100644 --- a/arch/x86/kvm/xen.c +++ b/arch/x86/kvm/xen.c @@ -314,7 +314,7 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state) mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT); } -static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) +void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v) { struct kvm_lapic_irq irq = { }; int r; diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index 532a535a9..500d9593a 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -16,6 +16,7 @@ extern struct static_key_false_deferred kvm_xen_enabled; int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu); void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu); +void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu); int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data); int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data); @@ -33,6 +34,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue); +static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) +{ + /* + * The local APIC is being enabled. If the per-vCPU upcall vector is + * set and the vCPU's evtchn_upcall_pending flag is set, inject the + * interrupt. + */ + if (static_branch_unlikely(&kvm_xen_enabled.key) && + vcpu->arch.xen.vcpu_info_cache.active && + vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu)) + kvm_xen_inject_vcpu_vector(vcpu); +} + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) { return static_branch_unlikely(&kvm_xen_enabled.key) && @@ -98,6 +112,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu) { } +static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu) +{ +} + static inline bool kvm_xen_msr_enabled(struct kvm *kvm) { return false; diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 65c5c44f0..055955c9b 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -110,6 +110,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ret int3 SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) #endif SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) @@ -252,9 +253,7 @@ SYM_CODE_START(srso_return_thunk) SYM_CODE_END(srso_return_thunk) SYM_FUNC_START(entry_untrain_ret) - ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ - "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ - "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS + ALTERNATIVE "jmp retbleed_untrain_ret", "jmp srso_untrain_ret", X86_FEATURE_SRSO SYM_FUNC_END(entry_untrain_ret) __EXPORT_THUNK(entry_untrain_ret) @@ -262,6 +261,7 @@ SYM_CODE_START(__x86_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR ANNOTATE_UNRET_SAFE + ANNOTATE_NOENDBR ret int3 SYM_CODE_END(__x86_return_thunk) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 1dbbad731..f20636510 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -818,15 +818,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code, show_opcodes(regs, loglvl); } -/* - * The (legacy) vsyscall page is the long page in the kernel portion - * of the address space that has user-accessible permissions. 
- */ -static bool is_vsyscall_vaddr(unsigned long vaddr) -{ - return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR); -} - static void __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, unsigned long address, u32 pkey, int si_code) diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index f50cc210a..968d7005f 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -26,31 +26,18 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, for (; addr < end; addr = next) { pud_t *pud = pud_page + pud_index(addr); pmd_t *pmd; - bool use_gbpage; next = (addr & PUD_MASK) + PUD_SIZE; if (next > end) next = end; - /* if this is already a gbpage, this portion is already mapped */ - if (pud_large(*pud)) - continue; - - /* Is using a gbpage allowed? */ - use_gbpage = info->direct_gbpages; - - /* Don't use gbpage if it maps more than the requested region. */ - /* at the begining: */ - use_gbpage &= ((addr & ~PUD_MASK) == 0); - /* ... or at the end: */ - use_gbpage &= ((next & ~PUD_MASK) == 0); - - /* Never overwrite existing mappings */ - use_gbpage &= !pud_present(*pud); - - if (use_gbpage) { + if (info->direct_gbpages) { pud_t pudval; + if (pud_present(*pud)) + continue; + + addr &= PUD_MASK; pudval = __pud((addr - info->offset) | info->page_flag); set_pud(pud, pudval); continue; diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c index 6993f026a..42115ac07 100644 --- a/arch/x86/mm/maccess.c +++ b/arch/x86/mm/maccess.c @@ -3,6 +3,8 @@ #include <linux/uaccess.h> #include <linux/kernel.h> +#include <asm/vsyscall.h> + #ifdef CONFIG_X86_64 bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) { @@ -16,6 +18,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) return false; /* + * Reading from the vsyscall page may cause an unhandled fault in + * certain cases. Though it is at an address above TASK_SIZE_MAX, it is + * usually considered as a user space address. + */ + if (is_vsyscall_vaddr(vaddr)) + return false; + + /* * Allow everything during early boot before 'x86_virt_bits' * is initialized. Needed for instruction decoding in early * exception handlers. diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 3e93af083..d4957eefe 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -513,6 +513,24 @@ void __init sme_early_init(void) */ if (sev_status & MSR_AMD64_SEV_ENABLED) ia32_disable(); + + /* + * Override init functions that scan the ROM region in SEV-SNP guests, + * as this memory is not pre-validated and would thus cause a crash. + */ + if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) { + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.pci.init_irq = x86_init_noop; + x86_init.resources.probe_roms = x86_init_noop; + + /* + * DMI setup behavior for SEV-SNP guests depends on + * efi_enabled(EFI_CONFIG_TABLES), which hasn't been + * parsed yet. snp_dmi_setup() will run after that + * parsing has happened. 
+ */ + x86_init.resources.dmi_setup = snp_dmi_setup; + } } void __init mem_encrypt_free_decrypted_mem(void) diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index d94ebd8ac..06ccbd36e 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -97,7 +97,6 @@ static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch"); static char sme_cmdline_arg[] __initdata = "mem_encrypt"; static char sme_cmdline_on[] __initdata = "on"; -static char sme_cmdline_off[] __initdata = "off"; static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) { @@ -305,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp) * instrumentation or checking boot_cpu_data in the cc_platform_has() * function. */ - if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED) + if (!sme_get_me_mask() || + RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED) return; /* @@ -504,10 +504,9 @@ void __init sme_encrypt_kernel(struct boot_params *bp) void __init sme_enable(struct boot_params *bp) { - const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off; + const char *cmdline_ptr, *cmdline_arg, *cmdline_on; unsigned int eax, ebx, ecx, edx; unsigned long feature_mask; - bool active_by_default; unsigned long me_mask; char buffer[16]; bool snp; @@ -543,11 +542,11 @@ void __init sme_enable(struct boot_params *bp) me_mask = 1UL << (ebx & 0x3f); /* Check the SEV MSR whether SEV or SME is enabled */ - sev_status = __rdmsr(MSR_AMD64_SEV); - feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; + RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV); + feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT; /* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */ - if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) + if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED)) snp_abort(); /* Check if memory encryption is enabled */ @@ -573,7 +572,6 @@ void __init sme_enable(struct boot_params *bp) return; } else { /* SEV state cannot be controlled by a command line option */ - sme_me_mask = me_mask; goto out; } @@ -588,31 +586,17 @@ void __init sme_enable(struct boot_params *bp) asm ("lea sme_cmdline_on(%%rip), %0" : "=r" (cmdline_on) : "p" (sme_cmdline_on)); - asm ("lea sme_cmdline_off(%%rip), %0" - : "=r" (cmdline_off) - : "p" (sme_cmdline_off)); - - if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) - active_by_default = true; - else - active_by_default = false; cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | ((u64)bp->ext_cmd_line_ptr << 32)); - if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0) + if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 || + strncmp(buffer, cmdline_on, sizeof(buffer))) return; - if (!strncmp(buffer, cmdline_on, sizeof(buffer))) - sme_me_mask = me_mask; - else if (!strncmp(buffer, cmdline_off, sizeof(buffer))) - sme_me_mask = 0; - else - sme_me_mask = active_by_default ? 
me_mask : 0; out: - if (sme_me_mask) { - physical_mask &= ~sme_me_mask; - cc_set_vendor(CC_VENDOR_AMD); - cc_set_mask(sme_me_mask); - } + RIP_REL_REF(sme_me_mask) = me_mask; + physical_mask &= ~me_mask; + cc_vendor = CC_VENDOR_AMD; + cc_set_mask(me_mask); } diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 66a209f7e..d6fe9093e 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -997,6 +997,38 @@ static void free_pfn_range(u64 paddr, unsigned long size) memtype_free(paddr, paddr + size); } +static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, + pgprot_t *pgprot) +{ + unsigned long prot; + + VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT)); + + /* + * We need the starting PFN and cachemode used for track_pfn_remap() + * that covered the whole VMA. For most mappings, we can obtain that + * information from the page tables. For COW mappings, we might now + * suddenly have anon folios mapped and follow_phys() will fail. + * + * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to + * detect the PFN. If we need the cachemode as well, we're out of luck + * for now and have to fail fork(). + */ + if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) { + if (pgprot) + *pgprot = __pgprot(prot); + return 0; + } + if (is_cow_mapping(vma->vm_flags)) { + if (pgprot) + return -EINVAL; + *paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT; + return 0; + } + WARN_ON_ONCE(1); + return -EINVAL; +} + /* * track_pfn_copy is called when vma that is covering the pfnmap gets * copied through copy_page_range(). @@ -1007,20 +1039,13 @@ static void free_pfn_range(u64 paddr, unsigned long size) int track_pfn_copy(struct vm_area_struct *vma) { resource_size_t paddr; - unsigned long prot; unsigned long vma_size = vma->vm_end - vma->vm_start; pgprot_t pgprot; if (vma->vm_flags & VM_PAT) { - /* - * reserve the whole chunk covered by vma. We need the - * starting address and protection from pte. - */ - if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { - WARN_ON_ONCE(1); + if (get_pat_info(vma, &paddr, &pgprot)) return -EINVAL; - } - pgprot = __pgprot(prot); + /* reserve the whole chunk covered by vma. */ return reserve_pfn_range(paddr, vma_size, &pgprot, 1); } @@ -1095,7 +1120,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, unsigned long size) { resource_size_t paddr; - unsigned long prot; if (vma && !(vma->vm_flags & VM_PAT)) return; @@ -1103,11 +1127,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, /* free the chunk starting from pfn or the whole chunk */ paddr = (resource_size_t)pfn << PAGE_SHIFT; if (!paddr && !size) { - if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) { - WARN_ON_ONCE(1); + if (get_pat_info(vma, &paddr, NULL)) return; - } - size = vma->vm_end - vma->vm_start; } free_pfn_range(paddr, size); diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 2925074b9..9a5b101c4 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -653,6 +653,14 @@ static void print_absolute_relocs(void) if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) { continue; } + /* + * Do not perform relocations in .notes section; any + * values there are meant for pre-boot consumption (e.g. + * startup_xen). 
+ */ + if (sec_applies->shdr.sh_type == SHT_NOTE) { + continue; + } sh_symtab = sec_symtab->symtab; sym_strtab = sec_symtab->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 4b0d6fff8..1fb9a1644 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu) char *resched_name, *callfunc_name, *debug_name; resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + if (!resched_name) + goto fail_mem; per_cpu(xen_resched_irq, cpu).name = resched_name; rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu, @@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu) per_cpu(xen_resched_irq, cpu).irq = rc; callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + if (!callfunc_name) + goto fail_mem; per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu, @@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu) if (!xen_fifo_events) { debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + if (!debug_name) + goto fail_mem; + per_cpu(xen_debug_irq, cpu).name = debug_name; rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt, @@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu) } callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + if (!callfunc_name) + goto fail_mem; + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu, @@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu) return 0; + fail_mem: + rc = -ENOMEM; fail: xen_smp_intr_free(cpu); return rc; |
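
Editor's note: the final hunk above (arch/x86/xen/smp.c) adds NULL checks after each kasprintf() call and routes failures through a new fail_mem label that sets -ENOMEM before falling into the existing cleanup path. The sketch below is a minimal userspace analogue of that pattern, not kernel code: alloc_name(), setup_cpu_names(), and their parameters are invented for illustration, with strdup()/snprintf() standing in for kasprintf() and plain free() standing in for the shared cleanup that xen_smp_intr_free() performs.

	/*
	 * Userspace sketch (assumed names) of the allocation-failure pattern:
	 * check every string allocation, and on failure jump to a single
	 * cleanup label that sets -ENOMEM and releases what was already set up.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Rough stand-in for kasprintf(): returns a heap copy or NULL. */
	static char *alloc_name(const char *fmt, unsigned int cpu)
	{
		char buf[32];
		int n = snprintf(buf, sizeof(buf), fmt, cpu);

		if (n < 0 || (size_t)n >= sizeof(buf))
			return NULL;
		return strdup(buf);
	}

	static int setup_cpu_names(unsigned int cpu, char **resched, char **callfunc)
	{
		int rc;

		*resched = NULL;
		*callfunc = NULL;

		*resched = alloc_name("resched%u", cpu);
		if (!*resched)
			goto fail_mem;

		*callfunc = alloc_name("callfunc%u", cpu);
		if (!*callfunc)
			goto fail_mem;

		return 0;

	fail_mem:
		/* Mirror the patch: report -ENOMEM, then undo partial setup. */
		rc = -ENOMEM;
		free(*resched);
		free(*callfunc);
		*resched = *callfunc = NULL;
		return rc;
	}

	int main(void)
	{
		char *resched = NULL, *callfunc = NULL;

		if (setup_cpu_names(1, &resched, &callfunc) == 0)
			printf("%s / %s\n", resched, callfunc);

		free(resched);
		free(callfunc);
		return 0;
	}

The design point the hunk makes is that the error value is assigned once at fail_mem rather than at every call site, so the existing fail path (which frees any IRQs already bound) can be reused unchanged for allocation failures.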