author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:47:50 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:47:50 +0000
commit    7c0639a3af697d4ae7a5db4d2ecc09eed43cad35 (patch)
tree      b28a6eef28064256422bed5e477ee51f2cbb0c0b
parent    Adding debian version 6.7.9-2. (diff)
download  linux-7c0639a3af697d4ae7a5db4d2ecc09eed43cad35.tar.xz
linux-7c0639a3af697d4ae7a5db4d2ecc09eed43cad35.zip
Merging upstream version 6.7.12.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/admin-guide/hw-vuln/index.rst1
-rw-r--r--Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst104
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt25
-rw-r--r--Documentation/arch/x86/amd-memory-encryption.rst16
-rw-r--r--Documentation/conf.py6
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml1
-rw-r--r--Documentation/netlink/specs/devlink.yaml2
-rw-r--r--Documentation/netlink/specs/dpll.yaml1
-rw-r--r--Documentation/userspace-api/media/mediactl/media-types.rst11
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/boot/dts/amazon/alpine.dtsi1
-rw-r--r--arch/arm/boot/dts/arm/arm-realview-pb1176.dts2
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g4.dtsi14
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g5.dtsi15
-rw-r--r--arch/arm/boot/dts/aspeed/aspeed-g6.dtsi18
-rw-r--r--arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi3
-rw-r--r--arch/arm/boot/dts/broadcom/bcm-hr2.dtsi1
-rw-r--r--arch/arm/boot/dts/broadcom/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts2
-rw-r--r--arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts2
-rw-r--r--arch/arm/boot/dts/marvell/kirkwood-l-50.dts2
-rw-r--r--arch/arm/boot/dts/marvell/mmp2-brownstone.dts2
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi2
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi1
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi1
-rw-r--r--arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi25
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts3
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi1
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts1
-rw-r--r--arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts1
-rw-r--r--arch/arm/boot/dts/qcom/qcom-msm8974.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/qcom-sdx55.dtsi8
-rw-r--r--arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts12
-rw-r--r--arch/arm/boot/dts/renesas/r8a73a4.dtsi9
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790-lager.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7790-stout.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791-koelsch.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7791-porter.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7792-blanche.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7793-gose.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794-alt.dts1
-rw-r--r--arch/arm/boot/dts/renesas/r8a7794-silk.dts1
-rw-r--r--arch/arm/boot/dts/rockchip/rv1108.dtsi8
-rw-r--r--arch/arm/boot/dts/st/stm32429i-eval.dts1
-rw-r--r--arch/arm/boot/dts/st/stm32mp157c-dk2.dts1
-rw-r--r--arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts1
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/crypto/sha256_glue.c13
-rw-r--r--arch/arm/crypto/sha512-glue.c12
-rw-r--r--arch/arm/include/asm/mman.h14
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/iwmmxt.S51
-rw-r--r--arch/arm/kernel/pj4-cp0.c135
-rw-r--r--arch/arm/mm/fault.c32
-rw-r--r--arch/arm/mm/flush.c3
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi7
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v2.dtsi1
-rw-r--r--arch/arm64/boot/dts/amazon/alpine-v3.dtsi1
-rw-r--r--arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi3
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi1
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts38
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts38
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi29
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-evk.dts33
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi29
-rw-r--r--arch/arm64/boot/dts/lg/lg1312.dtsi1
-rw-r--r--arch/arm64/boot/dts/lg/lg1313.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-37xx.dtsi10
-rw-r--r--arch/arm64/boot/dts/marvell/armada-ap80x.dtsi1
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp11x.dtsi10
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts1
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts7
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986a.dtsi3
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts7
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8186.dtsi18
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8192.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts4
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195-demo.dts1
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/ipq6018.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi16
-rw-r--r--arch/arm64/boot/dts/qcom/qcm2290.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/sa8540p.dtsi3
-rw-r--r--arch/arm64/boot/dts/qcom/sc7280.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sc8180x.dtsi26
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-db845c.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/sdm845.dtsi4
-rw-r--r--arch/arm64/boot/dts/qcom/sm6115.dtsi7
-rw-r--r--arch/arm64/boot/dts/qcom/sm8150.dtsi6
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450-hdk.dts4
-rw-r--r--arch/arm64/boot/dts/qcom/sm8450.dtsi12
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-mtp.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550-qrd.dts2
-rw-r--r--arch/arm64/boot/dts/qcom/sm8550.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779a0.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi88
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g043u.dtsi12
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g044.dtsi22
-rw-r--r--arch/arm64/boot/dts/renesas/r9a07g054.dtsi22
-rw-r--r--arch/arm64/boot/dts/renesas/ulcb-kf.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk356x.dtsi3
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3588s.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/Makefile1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-main.dtsi9
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62p5-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-main.dtsi11
-rw-r--r--arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-evm.dts6
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-sk.dts4
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi1
-rw-r--r--arch/arm64/boot/dts/ti/k3-am65-main.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am69-sk.dts8
-rw-r--r--arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts18
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-evm.dts2
-rw-r--r--arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi2
-rw-r--r--arch/arm64/include/asm/fpsimd.h12
-rw-r--r--arch/arm64/kernel/ptrace.c3
-rw-r--r--arch/hexagon/kernel/vmlinux.lds.S1
-rw-r--r--arch/loongarch/crypto/crc32-loongarch.c2
-rw-r--r--arch/loongarch/include/asm/Kbuild1
-rw-r--r--arch/loongarch/include/asm/io.h2
-rw-r--r--arch/loongarch/include/asm/percpu.h7
-rw-r--r--arch/loongarch/include/asm/qspinlock.h18
-rw-r--r--arch/mips/include/asm/ptrace.h1
-rw-r--r--arch/parisc/include/asm/assembly.h18
-rw-r--r--arch/parisc/include/asm/checksum.h10
-rw-r--r--arch/parisc/include/asm/mman.h14
-rw-r--r--arch/parisc/kernel/ftrace.c2
-rw-r--r--arch/parisc/kernel/unaligned.c27
-rw-r--r--arch/powerpc/include/asm/reg_fsl_emb.h11
-rw-r--r--arch/powerpc/include/asm/vmalloc.h4
-rw-r--r--arch/powerpc/kernel/prom.c12
-rw-r--r--arch/powerpc/lib/Makefile2
-rw-r--r--arch/powerpc/perf/hv-gpci.c29
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc10x.h3
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig2
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/papr_platform_attributes.c8
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts1
-rw-r--r--arch/riscv/include/asm/pgtable.h2
-rw-r--r--arch/riscv/kernel/traps_misaligned.c2
-rw-r--r--arch/s390/kernel/cache.c1
-rw-r--r--arch/s390/kernel/perf_pai_crypto.c2
-rw-r--r--arch/s390/kernel/perf_pai_ext.c2
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/Makefile2
-rw-r--r--arch/s390/kernel/vtime.c4
-rw-r--r--arch/sparc/include/asm/parport.h259
-rw-r--r--arch/sparc/include/asm/parport_64.h256
-rw-r--r--arch/sparc/kernel/leon_pci_grpci1.c2
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c2
-rw-r--r--arch/sparc/kernel/nmi.c2
-rw-r--r--arch/sparc/vdso/vma.c7
-rw-r--r--arch/x86/Kconfig24
-rw-r--r--arch/x86/boot/compressed/efi_mixed.S29
-rw-r--r--arch/x86/coco/core.c7
-rw-r--r--arch/x86/events/amd/core.c1
-rw-r--r--arch/x86/hyperv/hv_vtl.c26
-rw-r--r--arch/x86/include/asm/asm-prototypes.h1
-rw-r--r--arch/x86/include/asm/asm.h14
-rw-r--r--arch/x86/include/asm/coco.h8
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/crash_core.h2
-rw-r--r--arch/x86/include/asm/mem_encrypt.h15
-rw-r--r--arch/x86/include/asm/msr-index.h8
-rw-r--r--arch/x86/include/asm/nospec-branch.h21
-rw-r--r--arch/x86/include/asm/page.h6
-rw-r--r--arch/x86/include/asm/sev.h4
-rw-r--r--arch/x86/include/asm/suspend_32.h10
-rw-r--r--arch/x86/include/asm/vsyscall.h10
-rw-r--r--arch/x86/include/asm/x86_init.h3
-rw-r--r--arch/x86/kernel/acpi/cppc.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c10
-rw-r--r--arch/x86/kernel/cpu/bugs.c92
-rw-r--r--arch/x86/kernel/cpu/common.c38
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c10
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h8
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c48
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c14
-rw-r--r--arch/x86/kernel/eisa.c3
-rw-r--r--arch/x86/kernel/fpu/xstate.c5
-rw-r--r--arch/x86/kernel/fpu/xstate.h14
-rw-r--r--arch/x86/kernel/kprobes/core.c11
-rw-r--r--arch/x86/kernel/mpparse.c10
-rw-r--r--arch/x86/kernel/nmi.c2
-rw-r--r--arch/x86/kernel/probe_roms.c10
-rw-r--r--arch/x86/kernel/setup.c3
-rw-r--r--arch/x86/kernel/sev-shared.c12
-rw-r--r--arch/x86/kernel/sev.c31
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kvm/cpuid.c21
-rw-r--r--arch/x86/kvm/hyperv.c2
-rw-r--r--arch/x86/kvm/lapic.c5
-rw-r--r--arch/x86/kvm/reverse_cpuid.h35
-rw-r--r--arch/x86/kvm/svm/sev.c23
-rw-r--r--arch/x86/kvm/x86.c15
-rw-r--r--arch/x86/kvm/xen.c4
-rw-r--r--arch/x86/kvm/xen.h18
-rw-r--r--arch/x86/lib/retpoline.S11
-rw-r--r--arch/x86/mm/fault.c9
-rw-r--r--arch/x86/mm/maccess.c10
-rw-r--r--arch/x86/mm/mem_encrypt_amd.c18
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c40
-rw-r--r--arch/x86/tools/relocs.c8
-rw-r--r--arch/x86/xen/smp.c12
-rw-r--r--block/bio.c7
-rw-r--r--block/blk-mq.c9
-rw-r--r--block/blk-settings.c4
-rw-r--r--block/holder.c12
-rw-r--r--block/mq-deadline.c3
-rw-r--r--block/opal_proto.h1
-rw-r--r--block/sed-opal.c6
-rw-r--r--crypto/Kconfig5
-rw-r--r--crypto/asymmetric_keys/mscode_parser.c3
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c4
-rw-r--r--crypto/asymmetric_keys/public_key.c3
-rw-r--r--crypto/asymmetric_keys/signature.c2
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c8
-rw-r--r--crypto/testmgr.h80
-rw-r--r--drivers/accel/habanalabs/common/device.c2
-rw-r--r--drivers/accessibility/speakup/synth.c4
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/resource.c33
-rw-r--r--drivers/acpi/scan.c8
-rw-r--r--drivers/ata/ahci.c5
-rw-r--r--drivers/ata/libata-eh.c5
-rw-r--r--drivers/ata/libata-scsi.c9
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/power/wakeirq.c4
-rw-r--r--drivers/base/regmap/regmap-kunit.c54
-rw-r--r--drivers/block/aoe/aoecmd.c12
-rw-r--r--drivers/block/aoe/aoenet.c1
-rw-r--r--drivers/block/nbd.c6
-rw-r--r--drivers/bluetooth/btmtk.c4
-rw-r--r--drivers/bluetooth/btnxpuart.c3
-rw-r--r--drivers/bluetooth/btusb.c10
-rw-r--r--drivers/bluetooth/hci_h5.c5
-rw-r--r--drivers/bluetooth/hci_qca.c6
-rw-r--r--drivers/bluetooth/hci_serdev.c9
-rw-r--r--drivers/bluetooth/hci_uart.h12
-rw-r--r--drivers/bus/Kconfig5
-rw-r--r--drivers/bus/mhi/ep/main.c2
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c4
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/hisilicon/clk-hi3519.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3559a.c1
-rw-r--r--drivers/clk/imx/clk-imx8mp-audiomix.c11
-rw-r--r--drivers/clk/mediatek/clk-mt7622-apmixedsys.c1
-rw-r--r--drivers/clk/mediatek/clk-mt7981-topckgen.c5
-rw-r--r--drivers/clk/mediatek/clk-mt8135-apmixedsys.c4
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c2
-rw-r--r--drivers/clk/meson/axg.c2
-rw-r--r--drivers/clk/qcom/dispcc-sdm845.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq5018.c9
-rw-r--r--drivers/clk/qcom/gcc-ipq6018.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq8074.c2
-rw-r--r--drivers/clk/qcom/gcc-ipq9574.c1
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c2
-rw-r--r--drivers/clk/qcom/reset.c27
-rw-r--r--drivers/clk/renesas/r8a779f0-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r8a779g0-cpg-mssr.c11
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c2
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c2
-rw-r--r--drivers/clk/samsung/clk-exynos850.c33
-rw-r--r--drivers/clk/zynq/clkc.c8
-rw-r--r--drivers/clocksource/arm_global_timer.c2
-rw-r--r--drivers/clocksource/timer-riscv.c3
-rw-r--r--drivers/comedi/drivers/comedi_8255.c1
-rw-r--r--drivers/comedi/drivers/comedi_test.c30
-rw-r--r--drivers/cpufreq/Kconfig.arm1
-rw-r--r--drivers/cpufreq/amd-pstate.c2
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c18
-rw-r--r--drivers/cpufreq/freq_table.c2
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c19
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c34
-rw-r--r--drivers/crypto/ccp/platform-access.c11
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c37
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c22
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.c27
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cfg_services.h4
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_clock.c3
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c6
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c20
-rw-r--r--drivers/crypto/intel/qat/qat_common/qat_comp_algs.c9
-rw-r--r--drivers/crypto/rockchip/rk3288_crypto_ahash.c4
-rw-r--r--drivers/crypto/xilinx/zynqmp-aes-gcm.c3
-rw-r--r--drivers/cxl/core/region.c62
-rw-r--r--drivers/cxl/core/trace.h14
-rw-r--r--drivers/dma/Kconfig14
-rw-r--r--drivers/dma/fsl-edma-common.h5
-rw-r--r--drivers/dma/fsl-edma-main.c21
-rw-r--r--drivers/dpll/dpll_core.c8
-rw-r--r--drivers/firewire/core-card.c14
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firmware/arm_scmi/smc.c7
-rw-r--r--drivers/firmware/efi/efi.c2
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c8
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/gpio/Kconfig3
-rw-r--r--drivers/gpio/gpiolib-devres.c2
-rw-r--r--drivers/gpio/gpiolib.c14
-rw-r--r--drivers/gpio/gpiolib.h8
-rw-r--r--drivers/gpu/drm/Kconfig5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c45
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c76
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c48
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c13
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c12
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c22
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c24
-rw-r--r--drivers/gpu/drm/ci/test.yml5
-rw-r--r--drivers/gpu/drm/drm_bridge.c46
-rw-r--r--drivers/gpu/drm/drm_buddy.c6
-rw-r--r--drivers/gpu/drm/drm_panel.c17
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c7
-rw-r--r--drivers/gpu/drm/drm_syncobj.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c200
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c13
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c37
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c4
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c11
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c31
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c90
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/client.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/client.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/object.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c1
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c2
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c3
-rw-r--r--drivers/gpu/drm/radeon/ni.c2
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c3
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c12
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c14
-rw-r--r--drivers/gpu/drm/tegra/dsi.c59
-rw-r--r--drivers/gpu/drm/tegra/fb.c1
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c20
-rw-r--r--drivers/gpu/drm/tegra/output.c16
-rw-r--r--drivers/gpu/drm/tegra/rgb.c18
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c10
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c13
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c30
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h6
-rw-r--r--drivers/hid/hid-lenovo.c57
-rw-r--r--drivers/hid/hid-logitech-hidpp.c13
-rw-r--r--drivers/hid/hid-multitouch.c4
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hwmon/amc6821.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c30
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i3c/master/dw-i3c-master.c4
-rw-r--r--drivers/iio/accel/adxl367.c8
-rw-r--r--drivers/iio/accel/adxl367_i2c.c2
-rw-r--r--drivers/iio/adc/rockchip_saradc.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c5
-rw-r--r--drivers/iio/industrialio-gts-helper.c15
-rw-r--r--drivers/iio/pressure/mprls0025pa.c4
-rw-r--r--drivers/infiniband/core/device.c37
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c16
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c3
-rw-r--r--drivers/infiniband/hw/mana/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c4
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c3
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c10
-rw-r--r--drivers/input/misc/iqs7222.c112
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/amd/init.c3
-rw-r--r--drivers/iommu/dma-iommu.c9
-rw-r--r--drivers/iommu/intel/Makefile2
-rw-r--r--drivers/iommu/intel/pasid.c3
-rw-r--r--drivers/iommu/irq_remapping.c3
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c93
-rw-r--r--drivers/leds/flash/leds-sgm3140.c3
-rw-r--r--drivers/leds/leds-aw2013.c1
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c4
-rw-r--r--drivers/md/dm-bufio.c6
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-integrity.c30
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-kcopyd.c4
-rw-r--r--drivers/md/dm-log.c4
-rw-r--r--drivers/md/dm-raid.c97
-rw-r--r--drivers/md/dm-raid1.c6
-rw-r--r--drivers/md/dm-snap-persistent.c4
-rw-r--r--drivers/md/dm-snap.c4
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-verity.h4
-rw-r--r--drivers/md/dm-writecache.c8
-rw-r--r--drivers/md/dm.c26
-rw-r--r--drivers/md/md-bitmap.c9
-rw-r--r--drivers/md/md-multipath.c9
-rw-r--r--drivers/md/md.c134
-rw-r--r--drivers/md/md.h44
-rw-r--r--drivers/md/raid1.c199
-rw-r--r--drivers/md/raid1.h1
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c55
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c52
-rw-r--r--drivers/media/dvb-core/dvbdev.c5
-rw-r--r--drivers/media/dvb-frontends/stv0367.c34
-rw-r--r--drivers/media/i2c/imx290.c16
-rw-r--r--drivers/media/i2c/tc358743.c7
-rw-r--r--drivers/media/mc/mc-entity.c93
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c4
-rw-r--r--drivers/media/pci/ttpci/budget-av.c8
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c2
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c10
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.h2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c3
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c3
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c22
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c3
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c69
-rw-r--r--drivers/media/tuners/xc4000.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/go7007/go7007-driver.c8
-rw-r--r--drivers/media/usb/go7007/go7007-usb.c4
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-context.c10
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-dvb.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-cci.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c10
-rw-r--r--drivers/memory/tegra/tegra234.c16
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/altera-sysmgr.c4
-rw-r--r--drivers/mfd/cs42l43.c72
-rw-r--r--drivers/mfd/intel-lpss-pci.c28
-rw-r--r--drivers/mfd/intel-lpss.c9
-rw-r--r--drivers/mfd/intel-lpss.h14
-rw-r--r--drivers/mfd/syscon.c4
-rw-r--r--drivers/misc/fastrpc.c10
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d_i2c.c21
-rw-r--r--drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c8
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/ocxl/file.c2
-rw-r--r--drivers/mmc/core/block.c14
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c28
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c4
-rw-r--r--drivers/mtd/maps/physmap-core.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c5
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c2
-rw-r--r--drivers/mtd/nand/raw/nand_base.c85
-rw-r--r--drivers/mtd/nand/spi/esmt.c9
-rw-r--r--drivers/mtd/ubi/fastmap.c7
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/can/m_can/m_can.c23
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c4
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c10
-rw-r--r--drivers/net/dsa/mt7530.c60
-rw-r--r--drivers/net/dsa/mt7530.h22
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c17
-rw-r--r--drivers/net/ethernet/amd/pds_core/auxbus.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c33
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c9
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c25
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c119
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c71
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/geneve.c18
-rw-r--r--drivers/net/phy/dp83822.c37
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/usb/lan78xx.c3
-rw-r--r--drivers/net/usb/smsc95xx.c5
-rw-r--r--drivers/net/usb/sr9800.c4
-rw-r--r--drivers/net/veth.c18
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c6
-rw-r--r--drivers/net/wireguard/netlink.c10
-rw-r--r--drivers/net/wireguard/receive.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c17
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c10
-rw-r--r--drivers/net/wireless/broadcom/b43/b43.h16
-rw-r--r--drivers/net/wireless/broadcom/b43/dma.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c16
-rw-r--r--drivers/net/wireless/broadcom/b43/pio.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c63
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c81
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h28
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c176
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h92
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h6
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c12
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c40
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c38
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c3
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821cu.c40
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c40
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c15
-rw-r--r--drivers/ntb/core.c8
-rw-r--r--drivers/nvme/host/core.c6
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvmem/meson-efuse.c25
-rw-r--r--drivers/opp/debugfs.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c41
-rw-r--r--drivers/pci/controller/pci-hyperv.c3
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c6
-rw-r--r--drivers/pci/p2pdma.c2
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/dpc.c2
-rw-r--r--drivers/pci/pcie/err.c20
-rw-r--r--drivers/pci/quirks.c3
-rw-r--r--drivers/pci/switch/switchtec.c4
-rw-r--r--drivers/perf/arm-cmn.c11
-rw-r--r--drivers/perf/cxl_pmu.c10
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--drivers/phy/tegra/xusb.c13
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8186.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8192.c1
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c6
-rw-r--r--drivers/pinctrl/renesas/core.c4
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c14
-rw-r--r--drivers/platform/x86/intel/tpmi.c9
-rw-r--r--drivers/platform/x86/p2sb.c25
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c4
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c1
-rw-r--r--drivers/power/supply/mm8013.c13
-rw-r--r--drivers/powercap/dtpm_cpu.c2
-rw-r--r--drivers/powercap/intel_rapl_common.c34
-rw-r--r--drivers/powercap/intel_rapl_msr.c8
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c15
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c2
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/pwm/pwm-sti.c11
-rw-r--r--drivers/regulator/max5970-regulator.c8
-rw-r--r--drivers/regulator/userspace-consumer.c1
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c6
-rw-r--r--drivers/remoteproc/stm32_rproc.c6
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/lib_test.c2
-rw-r--r--drivers/s390/block/dasd.c55
-rw-r--r--drivers/s390/cio/vfio_ccw_chp.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c6
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c2
-rw-r--r--drivers/scsi/bfa/bfa.h9
-rw-r--r--drivers/scsi/bfa/bfa_core.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.h8
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c11
-rw-r--r--drivers/scsi/csiostor/csio_defs.h18
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c8
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c12
-rw-r--r--drivers/scsi/hosts.c7
-rw-r--r--drivers/scsi/libsas/sas_expander.c51
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c128
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/scsi/scsi_scan.c34
-rw-r--r--drivers/scsi/sd.c23
-rw-r--r--drivers/slimbus/core.c4
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c25
-rw-r--r--drivers/soc/microchip/Kconfig2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c2
-rw-r--r--drivers/soc/qcom/pmic_glink_altmode.c16
-rw-r--r--drivers/soc/qcom/socinfo.c2
-rw-r--r--drivers/spi/spi-cadence-quadspi.c24
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-mt65xx.c22
-rw-r--r--drivers/staging/greybus/light.c8
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c1
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c16
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c10
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c5
-rw-r--r--drivers/tee/optee/device.c3
-rw-r--r--drivers/thermal/devfreq_cooling.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c8
-rw-r--r--drivers/thermal/intel/intel_tcc.c12
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c8
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c4
-rw-r--r--drivers/thermal/qoriq_thermal.c12
-rw-r--r--drivers/thunderbolt/switch.c3
-rw-r--r--drivers/tty/serial/8250/8250_exar.c5
-rw-r--r--drivers/tty/serial/8250/8250_port.c6
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/imx.c22
-rw-r--r--drivers/tty/serial/max310x.c9
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c10
-rw-r--r--drivers/tty/serial/samsung_tty.c5
-rw-r--r--drivers/tty/serial/serial_core.c12
-rw-r--r--drivers/tty/serial/serial_port.c25
-rw-r--r--drivers/tty/vt/vt.c4
-rw-r--r--drivers/ufs/host/ufs-qcom.c6
-rw-r--r--drivers/usb/class/cdc-wdm.c6
-rw-r--r--drivers/usb/core/hub.c23
-rw-r--r--drivers/usb/core/hub.h2
-rw-r--r--drivers/usb/core/port.c43
-rw-r--r--drivers/usb/core/sysfs.c16
-rw-r--r--drivers/usb/dwc2/core.h14
-rw-r--r--drivers/usb/dwc2/core_intr.c72
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc2/hcd.c49
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c17
-rw-r--r--drivers/usb/dwc2/hw.h2
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c13
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/gadget.c10
-rw-r--r--drivers/usb/dwc3/host.c11
-rw-r--r--drivers/usb/gadget/function/f_fs.c4
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/udc/core.c4
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c39
-rw-r--r--drivers/usb/host/xhci-ring.c8
-rw-r--r--drivers/usb/host/xhci.c2
-rw-r--r--drivers/usb/misc/usb-ljca.c22
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/storage/isd200.c23
-rw-r--r--drivers/usb/storage/uas.c28
-rw-r--r--drivers/usb/typec/altmodes/displayport.c18
-rw-r--r--drivers/usb/typec/class.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c46
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h4
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c75
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c14
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c13
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c3
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c6
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c9
-rw-r--r--drivers/vfio/pci/pds/lm.c13
-rw-r--r--drivers/vfio/pci/pds/lm.h1
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c186
-rw-r--r--drivers/vfio/platform/vfio_platform_irq.c105
-rw-r--r--drivers/vfio/virqfd.c21
-rw-r--r--drivers/vhost/vdpa.c4
-rw-r--r--drivers/vhost/vhost.c10
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/ktz8866.c6
-rw-r--r--drivers/video/backlight/lm3630a_bl.c15
-rw-r--r--drivers/video/backlight/lm3639_bl.c1
-rw-r--r--drivers/video/backlight/lp8788_bl.c1
-rw-r--r--drivers/virt/acrn/ioeventfd.c2
-rw-r--r--drivers/virtio/virtio.c10
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/watchdog/starfive-wdt.c9
-rw-r--r--drivers/watchdog/stm32_iwdg.c3
-rw-r--r--drivers/xen/events/events_base.c22
-rw-r--r--drivers/xen/evtchn.c6
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--fs/afs/dir.c10
-rw-r--r--fs/aio.c10
-rw-r--r--fs/bcachefs/btree_iter.c4
-rw-r--r--fs/bcachefs/chardev.c3
-rw-r--r--fs/bcachefs/errcode.h1
-rw-r--r--fs/bcachefs/mean_and_variance.h2
-rw-r--r--fs/bcachefs/super-io.c27
-rw-r--r--fs/btrfs/block-group.c3
-rw-r--r--fs/btrfs/block-rsv.c2
-rw-r--r--fs/btrfs/block-rsv.h32
-rw-r--r--fs/btrfs/extent_io.c75
-rw-r--r--fs/btrfs/qgroup.c10
-rw-r--r--fs/btrfs/scrub.c12
-rw-r--r--fs/btrfs/space-info.c26
-rw-r--r--fs/btrfs/volumes.c69
-rw-r--r--fs/btrfs/zoned.c9
-rw-r--r--fs/ceph/caps.c65
-rw-r--r--fs/ceph/file.c23
-rw-r--r--fs/ceph/mds_client.c48
-rw-r--r--fs/ceph/mds_client.h5
-rw-r--r--fs/debugfs/inode.c25
-rw-r--r--fs/dlm/user.c10
-rw-r--r--fs/erofs/data.c1
-rw-r--r--fs/erofs/fscache.c20
-rw-r--r--fs/erofs/internal.h1
-rw-r--r--fs/erofs/super.c30
-rw-r--r--fs/eventfd.c11
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext2/ext2.h2
-rw-r--r--fs/ext2/super.c2
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/resize.c3
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/f2fs/checkpoint.c5
-rw-r--r--fs/f2fs/compress.c43
-rw-r--r--fs/f2fs/data.c73
-rw-r--r--fs/f2fs/dir.c5
-rw-r--r--fs/f2fs/f2fs.h110
-rw-r--r--fs/f2fs/file.c94
-rw-r--r--fs/f2fs/gc.c7
-rw-r--r--fs/f2fs/inode.c57
-rw-r--r--fs/f2fs/namei.c25
-rw-r--r--fs/f2fs/node.c20
-rw-r--r--fs/f2fs/recovery.c33
-rw-r--r--fs/f2fs/segment.c29
-rw-r--r--fs/f2fs/segment.h17
-rw-r--r--fs/f2fs/super.c17
-rw-r--r--fs/fat/nfs.c6
-rw-r--r--fs/fcntl.c12
-rw-r--r--fs/fhandle.c2
-rw-r--r--fs/fuse/dir.c6
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/fuse/fuse_i.h1
-rw-r--r--fs/fuse/inode.c7
-rw-r--r--fs/iomap/buffered-io.c18
-rw-r--r--fs/jfs/jfs_incore.h2
-rw-r--r--fs/jfs/super.c2
-rw-r--r--fs/nfs/direct.c11
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c2
-rw-r--r--fs/nfs/fscache.c9
-rw-r--r--fs/nfs/nfs42.h7
-rw-r--r--fs/nfs/nfs4proc.c16
-rw-r--r--fs/nfs/nfsroot.c4
-rw-r--r--fs/nfs/pnfs_nfs.c44
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/trace.h2
-rw-r--r--fs/nilfs2/btree.c9
-rw-r--r--fs/nilfs2/direct.c9
-rw-r--r--fs/nilfs2/inode.c2
-rw-r--r--fs/ocfs2/inode.h2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/overlayfs/params.c14
-rw-r--r--fs/pstore/inode.c76
-rw-r--r--fs/quota/dquot.c159
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/super.c2
-rw-r--r--fs/select.c2
-rw-r--r--fs/smb/client/cached_dir.c3
-rw-r--r--fs/smb/client/cifs_debug.c2
-rw-r--r--fs/smb/client/cifsglob.h5
-rw-r--r--fs/smb/client/cifsproto.h8
-rw-r--r--fs/smb/client/connect.c6
-rw-r--r--fs/smb/client/file.c303
-rw-r--r--fs/smb/client/fs_context.c27
-rw-r--r--fs/smb/client/inode.c17
-rw-r--r--fs/smb/client/readdir.c133
-rw-r--r--fs/smb/client/sess.c45
-rw-r--r--fs/smb/client/smb2ops.c2
-rw-r--r--fs/smb/client/smb2pdu.c10
-rw-r--r--fs/smb/server/smb2misc.c26
-rw-r--r--fs/smb/server/smb2pdu.c228
-rw-r--r--fs/smb/server/smb_common.c11
-rw-r--r--fs/smb/server/vfs.c12
-rw-r--r--fs/ubifs/file.c13
-rw-r--r--include/drm/drm_bridge.h33
-rw-r--r--include/drm/drm_fixed.h5
-rw-r--r--include/drm/drm_kunit_helpers.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h3
-rw-r--r--include/drm/ttm/ttm_tt.h9
-rw-r--r--include/dt-bindings/clock/r8a779g0-cpg-mssr.h1
-rw-r--r--include/dt-bindings/dma/fsl-edma.h21
-rw-r--r--include/linux/cpu.h2
-rw-r--r--include/linux/cpufreq.h15
-rw-r--r--include/linux/dm-io.h3
-rw-r--r--include/linux/eventfd.h4
-rw-r--r--include/linux/f2fs_fs.h1
-rw-r--r--include/linux/filter.h21
-rw-r--r--include/linux/fs.h11
-rw-r--r--include/linux/gfp.h9
-rw-r--r--include/linux/hyperv.h22
-rw-r--r--include/linux/intel_rapl.h6
-rw-r--r--include/linux/intel_tcc.h2
-rw-r--r--include/linux/io_uring.h9
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mman.h8
-rw-r--r--include/linux/moduleloader.h8
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/oid_registry.h4
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/phy/tegra/xusb.h1
-rw-r--r--include/linux/poll.h4
-rw-r--r--include/linux/rcupdate.h31
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/serial_core.h3
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h10
-rw-r--r--include/linux/vfio.h2
-rw-r--r--include/linux/workqueue.h35
-rw-r--r--include/media/media-entity.h2
-rw-r--r--include/net/bluetooth/hci.h2
-rw-r--r--include/net/bluetooth/hci_core.h1
-rw-r--r--include/net/bluetooth/hci_sync.h2
-rw-r--r--include/net/bluetooth/l2cap.h42
-rw-r--r--include/net/cfg80211.h2
-rw-r--r--include/net/cfg802154.h1
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/soc/qcom/qcom-spmi-pmic.h2
-rw-r--r--include/sound/tas2781.h5
-rw-r--r--include/trace/events/qdisc.h20
-rw-r--r--include/uapi/linux/virtio_config.h8
-rw-r--r--init/Kconfig6
-rw-r--r--init/initramfs.c2
-rw-r--r--init/main.c5
-rw-r--r--io_uring/filetable.c11
-rw-r--r--io_uring/futex.c1
-rw-r--r--io_uring/io_uring.c166
-rw-r--r--io_uring/io_uring.h1
-rw-r--r--io_uring/net.c265
-rw-r--r--io_uring/poll.c23
-rw-r--r--io_uring/rsrc.c169
-rw-r--r--io_uring/rsrc.h15
-rw-r--r--io_uring/rw.c4
-rw-r--r--io_uring/waitid.c7
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/bpf/core.c7
-rw-r--r--kernel/bpf/cpumap.c5
-rw-r--r--kernel/bpf/devmap.c11
-rw-r--r--kernel/bpf/hashtab.c14
-rw-r--r--kernel/bpf/helpers.c4
-rw-r--r--kernel/bpf/stackmap.c9
-rw-r--r--kernel/bpf/verifier.c5
-rw-r--r--kernel/cgroup/cpuset.c8
-rw-r--r--kernel/crash_core.c8
-rw-r--r--kernel/dma/swiotlb.c35
-rw-r--r--kernel/entry/common.c8
-rw-r--r--kernel/module/Kconfig5
-rw-r--r--kernel/module/main.c9
-rw-r--r--kernel/power/suspend.c1
-rw-r--r--kernel/printk/internal.h1
-rw-r--r--kernel/printk/nbcon.c41
-rw-r--r--kernel/printk/printk.c101
-rw-r--r--kernel/printk/printk_ringbuffer.c315
-rw-r--r--kernel/printk/printk_ringbuffer.h38
-rw-r--r--kernel/rcu/tree.c3
-rw-r--r--kernel/rcu/tree_exp.h25
-rw-r--r--kernel/sched/fair.c16
-rw-r--r--kernel/sys.c7
-rw-r--r--kernel/time/posix-clock.c16
-rw-r--r--kernel/time/time_test.c2
-rw-r--r--kernel/time/timekeeping.c24
-rw-r--r--kernel/trace/ring_buffer.c233
-rw-r--r--kernel/trace/trace.c21
-rw-r--r--kernel/workqueue.c757
-rw-r--r--lib/cmdline_kunit.c2
-rw-r--r--lib/kunit/executor_test.c2
-rw-r--r--lib/memcpy_kunit.c4
-rw-r--r--lib/pci_iomap.c2
-rw-r--r--lib/test_blackhole_dev.c3
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/filemap.c16
-rw-r--r--mm/kasan/kasan_test.c3
-rw-r--r--mm/memcontrol.c10
-rw-r--r--mm/memtest.c4
-rw-r--r--mm/mmap.c10
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/readahead.c4
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/shmem_quota.c10
-rw-r--r--mm/swapfile.c13
-rw-r--r--mm/vmpressure.c2
-rw-r--r--mm/vmscan.c5
-rw-r--r--net/bluetooth/Kconfig8
-rw-r--r--net/bluetooth/Makefile1
-rw-r--r--net/bluetooth/a2mp.c1054
-rw-r--r--net/bluetooth/a2mp.h154
-rw-r--r--net/bluetooth/af_bluetooth.c10
-rw-r--r--net/bluetooth/amp.c590
-rw-r--r--net/bluetooth/amp.h60
-rw-r--r--net/bluetooth/eir.c29
-rw-r--r--net/bluetooth/hci_conn.c6
-rw-r--r--net/bluetooth/hci_core.c126
-rw-r--r--net/bluetooth/hci_event.c43
-rw-r--r--net/bluetooth/hci_request.c2
-rw-r--r--net/bluetooth/hci_sync.c46
-rw-r--r--net/bluetooth/l2cap_core.c1069
-rw-r--r--net/bluetooth/l2cap_sock.c18
-rw-r--r--net/bluetooth/mgmt.c101
-rw-r--r--net/bluetooth/msft.c3
-rw-r--r--net/bluetooth/rfcomm/core.c2
-rw-r--r--net/core/dev.c5
-rw-r--r--net/core/gso_test.c2
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock_diag.c10
-rw-r--r--net/devlink/core.c4
-rw-r--r--net/devlink/devl_internal.h21
-rw-r--r--net/devlink/health.c3
-rw-r--r--net/devlink/netlink.c41
-rw-r--r--net/devlink/netlink_gen.c2
-rw-r--r--net/devlink/port.c2
-rw-r--r--net/devlink/region.c3
-rw-r--r--net/hsr/hsr_framereg.c4
-rw-r--r--net/hsr/hsr_main.c15
-rw-r--r--net/ipv4/esp4.c8
-rw-r--r--net/ipv4/inet_diag.c6
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c41
-rw-r--r--net/ipv4/ip_tunnel.c15
-rw-r--r--net/ipv4/ipmr.c4
-rw-r--r--net/ipv4/raw.c1
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/udp.c4
-rw-r--r--net/ipv6/esp6.c8
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/mcast.c1
-rw-r--r--net/ipv6/route.c21
-rw-r--r--net/iucv/iucv.c4
-rw-r--r--net/kcm/kcmsock.c3
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/cfg.c7
-rw-r--r--net/mac80211/ieee80211_i.h2
-rw-r--r--net/mac80211/mlme.c4
-rw-r--r--net/mac80211/rate.c5
-rw-r--r--net/mac80211/sta_info.h6
-rw-r--r--net/mac80211/vht.c46
-rw-r--r--net/mac802154/llsec.c18
-rw-r--r--net/mctp/route.c3
-rw-r--r--net/netfilter/nf_conntrack_h323_asn1.c4
-rw-r--r--net/netfilter/nf_tables_api.c36
-rw-r--r--net/netfilter/nft_ct.c11
-rw-r--r--net/netfilter/nft_set_pipapo.c5
-rw-r--r--net/netrom/af_netrom.c14
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/nr_in.c6
-rw-r--r--net/netrom/nr_out.c2
-rw-r--r--net/netrom/nr_route.c8
-rw-r--r--net/netrom/nr_subr.c5
-rw-r--r--net/packet/af_packet.c4
-rw-r--r--net/rds/rdma.c3
-rw-r--r--net/rds/send.c11
-rw-r--r--net/sched/sch_taprio.c3
-rw-r--r--net/sunrpc/addr.c4
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c11
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c27
-rw-r--r--net/unix/garbage.c2
-rw-r--r--net/unix/scm.c4
-rw-r--r--net/wireless/wext-core.c7
-rw-r--r--net/x25/af_x25.c4
-rw-r--r--net/xfrm/xfrm_device.c3
-rw-r--r--net/xfrm/xfrm_output.c6
-rw-r--r--net/xfrm/xfrm_policy.c6
-rw-r--r--net/xfrm/xfrm_user.c3
-rw-r--r--samples/vfio-mdev/mtty.c4
-rw-r--r--scripts/Makefile.extrawarn2
-rwxr-xr-xscripts/clang-tools/gen_compile_commands.py2
-rw-r--r--scripts/kconfig/lexer.l7
-rw-r--r--security/landlock/syscalls.c18
-rw-r--r--security/smack/smack_lsm.c12
-rw-r--r--sound/core/seq/seq_midi.c8
-rw-r--r--sound/core/seq/seq_virmidi.c9
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c20
-rw-r--r--sound/pci/hda/patch_realtek.c79
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c145
-rw-r--r--sound/sh/aica.c17
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c14
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c21
-rw-r--r--sound/soc/codecs/cs42l43.c5
-rw-r--r--sound/soc/codecs/rt5645.c10
-rw-r--r--sound/soc/codecs/tas2781-comlib.c15
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c4
-rw-r--r--sound/soc/codecs/wm8962.c29
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c12
-rw-r--r--sound/soc/meson/aiu.c19
-rw-r--r--sound/soc/meson/aiu.h1
-rw-r--r--sound/soc/meson/axg-tdm-interface.c29
-rw-r--r--sound/soc/meson/t9015.c20
-rw-r--r--sound/soc/rockchip/rockchip_i2s_tdm.c352
-rw-r--r--sound/soc/sh/rz-ssi.c2
-rw-r--r--sound/soc/sof/amd/acp.c28
-rw-r--r--sound/soc/sof/ipc3-loader.c2
-rw-r--r--sound/soc/sof/ipc4-pcm.c13
-rw-r--r--sound/usb/stream.c5
-rw-r--r--tools/bpf/bpftool/prog.c2
-rw-r--r--tools/bpf/resolve_btfids/main.c70
-rw-r--r--tools/include/linux/btf_ids.h11
-rw-r--r--tools/lib/bpf/bpf.h2
-rw-r--r--tools/lib/bpf/libbpf.c4
-rw-r--r--tools/lib/bpf/libbpf_internal.h14
-rw-r--r--tools/lib/bpf/netlink.c4
-rw-r--r--tools/objtool/check.c12
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/builtin-record.c35
-rw-r--r--tools/perf/builtin-top.c5
-rw-r--r--tools/perf/util/data.c2
-rw-r--r--tools/perf/util/evlist.c25
-rw-r--r--tools/perf/util/evlist.h1
-rw-r--r--tools/perf/util/evsel.c1
-rw-r--r--tools/perf/util/expr.c20
-rw-r--r--tools/perf/util/pmu.c19
-rw-r--r--tools/perf/util/print-events.c27
-rw-r--r--tools/perf/util/srcline.c2
-rw-r--r--tools/perf/util/stat-display.c2
-rw-r--r--tools/perf/util/stat-shadow.c7
-rw-r--r--tools/perf/util/thread_map.c2
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lwt_redirect.c1
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bonding.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c26
-rw-r--r--tools/testing/selftests/bpf/test_maps.c6
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c2
-rw-r--r--tools/testing/selftests/mm/gup_test.c67
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c2
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c2
-rw-r--r--tools/testing/selftests/mm/uffd-common.c3
-rw-r--r--tools/testing/selftests/mm/uffd-common.h2
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c13
-rw-r--r--tools/testing/selftests/mqueue/setting1
-rw-r--r--tools/testing/selftests/net/forwarding/config35
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh4
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh6
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh8
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh13
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py71
-rw-r--r--tools/testing/selftests/net/tls.c8
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv32.config1
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv64.config1
-rw-r--r--virt/kvm/async_pf.c31
-rw-r--r--virt/kvm/eventfd.c4
1308 files changed, 12905 insertions, 10659 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index a1db6db475..710d47be11 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -516,6 +516,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/mds
/sys/devices/system/cpu/vulnerabilities/meltdown
/sys/devices/system/cpu/vulnerabilities/mmio_stale_data
+ /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
/sys/devices/system/cpu/vulnerabilities/retbleed
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
/sys/devices/system/cpu/vulnerabilities/spectre_v1
diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst
index de99caabf6..ff0b440ef2 100644
--- a/Documentation/admin-guide/hw-vuln/index.rst
+++ b/Documentation/admin-guide/hw-vuln/index.rst
@@ -21,3 +21,4 @@ are configurable at compile, boot or run time.
cross-thread-rsb
srso
gather_data_sampling
+ reg-file-data-sampling
diff --git a/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
new file mode 100644
index 0000000000..0585d02b9a
--- /dev/null
+++ b/Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
@@ -0,0 +1,104 @@
+==================================
+Register File Data Sampling (RFDS)
+==================================
+
+Register File Data Sampling (RFDS) is a microarchitectural vulnerability that
+only affects Intel Atom parts (also branded as E-cores). RFDS may allow
+a malicious actor to infer data values previously used in floating point
+registers, vector registers, or integer registers. RFDS does not provide the
+ability to choose which data is inferred. CVE-2023-28746 is assigned to RFDS.
+
+Affected Processors
+===================
+Below is the list of affected Intel processors [#f1]_:
+
+ =================== ============
+ Common name Family_Model
+ =================== ============
+ ATOM_GOLDMONT 06_5CH
+ ATOM_GOLDMONT_D 06_5FH
+ ATOM_GOLDMONT_PLUS 06_7AH
+ ATOM_TREMONT_D 06_86H
+ ATOM_TREMONT 06_96H
+ ALDERLAKE 06_97H
+ ALDERLAKE_L 06_9AH
+ ATOM_TREMONT_L 06_9CH
+ RAPTORLAKE 06_B7H
+ RAPTORLAKE_P 06_BAH
+ ATOM_GRACEMONT 06_BEH
+ RAPTORLAKE_S 06_BFH
+ =================== ============
+
+As an exception to this table, the Intel Xeon E family parts ALDERLAKE (06_97H)
+and RAPTORLAKE (06_B7H), codenamed Catlow, are not affected. They are reported as
+vulnerable in Linux because they share the same family/model with an affected
+part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
+CPUID.HYBRID. This information could be used to distinguish between the
+affected and unaffected parts, but it is deemed not worth adding complexity as
+the reporting is fixed automatically when these parts enumerate RFDS_NO.
+
+Mitigation
+==========
+Intel released a microcode update that enables software to clear sensitive
+information using the VERW instruction. RFDS uses the same mitigation
+strategy as MDS: force the CPU to clear the affected buffers before an
+attacker can extract the secrets. This is achieved by using the otherwise
+unused and obsolete VERW instruction in combination with a microcode update.
+The microcode clears the affected CPU buffers when the VERW instruction is
+executed.
+
+Mitigation points
+-----------------
+VERW is executed by the kernel before returning to user space, and by KVM
+before VMentry. None of the affected cores support SMT, so VERW is not required
+at C-state transitions.
+
+New bits in IA32_ARCH_CAPABILITIES
+----------------------------------
+Newer processors, and microcode updates on existing affected processors, add new
+bits to the IA32_ARCH_CAPABILITIES MSR. These bits can be used to enumerate the
+vulnerability and the mitigation capability:
+
+- Bit 27 - RFDS_NO - When set, processor is not affected by RFDS.
+- Bit 28 - RFDS_CLEAR - When set, processor is affected by RFDS, and has the
+ microcode that clears the affected buffers on VERW execution.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+The RFDS mitigation can be controlled at boot time with the kernel command line
+parameter "reg_file_data_sampling=". The valid arguments are:
+
+ ========== =================================================================
+ on If the CPU is vulnerable, enable mitigation; CPU buffer clearing
+ on exit to userspace and before entering a VM.
+ off Disables mitigation.
+ ========== =================================================================
+
+The default mitigation is selected by CONFIG_MITIGATION_RFDS.
+
+Mitigation status information
+-----------------------------
+The Linux kernel provides a sysfs interface to enumerate the current
+vulnerability status of the system: whether the system is vulnerable, and
+which mitigations are active. The relevant sysfs file is:
+
+ /sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling
+
+The possible values in this file are:
+
+ .. list-table::
+
+ * - 'Not affected'
+ - The processor is not vulnerable
+ * - 'Vulnerable'
+ - The processor is vulnerable, but no mitigation is enabled
+ * - 'Vulnerable: No microcode'
+ - The processor is vulnerable, but the microcode has not been updated.
+ * - 'Mitigation: Clear Register File'
+ - The processor is vulnerable and the CPU buffer clearing mitigation is
+ enabled.
+
+References
+----------
+.. [#f1] Affected Processors
+ https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
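As a quick illustration of the interfaces documented above, here is a minimal userspace sketch (an editorial example, not part of the patch): it prints the reg_file_data_sampling sysfs file and, if the msr driver is loaded and the program runs as root, tests the RFDS_NO and RFDS_CLEAR bits. The MSR index 0x10a for IA32_ARCH_CAPABILITIES is an assumption taken from convention; it is not spelled out in the patch text.

/*
 * Illustrative only -- not part of the patch. Reads the documented sysfs
 * file and, if /dev/cpu/0/msr is usable (msr driver loaded, running as
 * root), checks the RFDS_NO (bit 27) and RFDS_CLEAR (bit 28) bits
 * described above. MSR index 0x10a is assumed for IA32_ARCH_CAPABILITIES.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char status[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/reg_file_data_sampling", "r");

	if (f) {
		if (fgets(status, sizeof(status), f))
			printf("reg_file_data_sampling: %s", status);
		fclose(f);
	} else {
		printf("sysfs file not present (older kernel?)\n");
	}

	int fd = open("/dev/cpu/0/msr", O_RDONLY);
	if (fd >= 0) {
		uint64_t caps = 0;

		/* IA32_ARCH_CAPABILITIES: assumed MSR index 0x10a */
		if (pread(fd, &caps, sizeof(caps), 0x10a) == sizeof(caps)) {
			printf("RFDS_NO (bit 27):    %s\n", (caps >> 27) & 1 ? "set" : "clear");
			printf("RFDS_CLEAR (bit 28): %s\n", (caps >> 28) & 1 ? "set" : "clear");
		}
		close(fd);
	}
	return 0;
}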
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b72e2049c4..7120c4e169 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1137,6 +1137,26 @@
The filter can be disabled or changed to another
driver later using sysfs.
+ reg_file_data_sampling=
+ [X86] Controls mitigation for Register File Data
+ Sampling (RFDS) vulnerability. RFDS is a CPU
+ vulnerability which may allow userspace to infer
+ kernel data values previously stored in floating point
+ registers, vector registers, or integer registers.
+ RFDS only affects Intel Atom processors.
+
+ on: Turns ON the mitigation.
+ off: Turns OFF the mitigation.
+
+ This parameter overrides the compile time default set
+ by CONFIG_MITIGATION_RFDS. The mitigation cannot be
+ disabled while other VERW based mitigations (like MDS)
+ are enabled; to disable the RFDS mitigation, all
+ VERW based mitigations need to be disabled.
+
+ For details see:
+ Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst
+
driver_async_probe= [KNL]
List of driver names to be probed asynchronously. *
matches with all driver names. If * is specified, the
@@ -3307,9 +3327,7 @@
mem_encrypt= [X86-64] AMD Secure Memory Encryption (SME) control
Valid arguments: on, off
- Default (depends on kernel configuration option):
- on (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y)
- off (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=n)
+ Default: off
mem_encrypt=on: Activate SME
mem_encrypt=off: Do not activate SME
@@ -3385,6 +3403,7 @@
nospectre_bhb [ARM64]
nospectre_v1 [X86,PPC]
nospectre_v2 [X86,PPC,S390,ARM64]
+ reg_file_data_sampling=off [X86]
retbleed=off [X86]
spec_store_bypass_disable=off [X86,PPC]
spectre_v2_user=off [X86]
diff --git a/Documentation/arch/x86/amd-memory-encryption.rst b/Documentation/arch/x86/amd-memory-encryption.rst
index 07caa8fff8..414bc7402a 100644
--- a/Documentation/arch/x86/amd-memory-encryption.rst
+++ b/Documentation/arch/x86/amd-memory-encryption.rst
@@ -87,14 +87,14 @@ The state of SME in the Linux kernel can be documented as follows:
kernel is non-zero).
SME can also be enabled and activated in the BIOS. If SME is enabled and
-activated in the BIOS, then all memory accesses will be encrypted and it will
-not be necessary to activate the Linux memory encryption support. If the BIOS
-merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
-memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
-by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
-not enable SME, then Linux will not be able to activate memory encryption, even
-if configured to do so by default or the mem_encrypt=on command line parameter
-is specified.
+activated in the BIOS, then all memory accesses will be encrypted and it
+will not be necessary to activate the Linux memory encryption support.
+
+If the BIOS merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG),
+then memory encryption can be enabled by supplying mem_encrypt=on on the
+kernel command line. However, if the BIOS does not enable SME, then Linux
+will not be able to activate memory encryption, even if it is configured to
+do so by default or mem_encrypt=on is specified on the command line.
Secure Nested Paging (SNP)
==========================
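To make the reworded SME text above concrete, here is a hedged userspace sketch (an editorial example, not part of the patch): it reads MSR_AMD64_SYSCFG, assuming the conventional MSR index 0xc0010010 which the patch does not state, to see whether the BIOS has set the SME-enable bit 23, and checks whether mem_encrypt=on is present on the running kernel's command line. It needs the msr driver and root privileges.

/*
 * Illustrative only -- not part of the patch. Checks bit 23 of
 * MSR_AMD64_SYSCFG (MSR index 0xc0010010 is assumed; it is not given in
 * the patch text) and whether mem_encrypt=on appears on the running
 * kernel's command line.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd >= 0) {
		uint64_t syscfg = 0;

		/* MSR_AMD64_SYSCFG: assumed MSR index 0xc0010010 */
		if (pread(fd, &syscfg, sizeof(syscfg), 0xc0010010) == sizeof(syscfg))
			printf("BIOS SME enable (SYSCFG bit 23): %s\n",
			       (syscfg >> 23) & 1 ? "set" : "clear");
		close(fd);
	}

	char cmdline[4096];
	FILE *f = fopen("/proc/cmdline", "r");

	if (f) {
		if (fgets(cmdline, sizeof(cmdline), f))
			printf("mem_encrypt=on on cmdline: %s\n",
			       strstr(cmdline, "mem_encrypt=on") ? "yes" : "no");
		fclose(f);
	}
	return 0;
}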
diff --git a/Documentation/conf.py b/Documentation/conf.py
index dfc19c915d..e385e24fe9 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -345,9 +345,9 @@ sys.stderr.write("Using %s theme\n" % html_theme)
html_static_path = ['sphinx-static']
# If true, Docutils "smart quotes" will be used to convert quotes and dashes
-# to typographically correct entities. This will convert "--" to "—",
-# which is not always what we want, so disable it.
-smartquotes = False
+# to typographically correct entities. However, conversion of "--" to "—"
+# is not always what we want, so enable only quotes.
+smartquotes_action = 'q'
# Custom sidebar templates, maps document names to template names.
# Note that the RTD theme ignores this
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
index 0999ea07f4..e4576546bf 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,mdss.yaml
@@ -127,6 +127,7 @@ patternProperties:
- qcom,dsi-phy-20nm
- qcom,dsi-phy-28nm-8226
- qcom,dsi-phy-28nm-hpm
+ - qcom,dsi-phy-28nm-hpm-fam-b
- qcom,dsi-phy-28nm-lp
- qcom,hdmi-phy-8084
- qcom,hdmi-phy-8660
diff --git a/Documentation/netlink/specs/devlink.yaml b/Documentation/netlink/specs/devlink.yaml
index 572d83a414..42a9d77803 100644
--- a/Documentation/netlink/specs/devlink.yaml
+++ b/Documentation/netlink/specs/devlink.yaml
@@ -244,7 +244,7 @@ attribute-sets:
-
name: eswitch-inline-mode
- type: u16
+ type: u8
enum: eswitch-inline-mode
-
name: dpipe-tables
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index 2b4c4bcd83..5b25ad589e 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -274,6 +274,7 @@ attribute-sets:
-
name: capabilities
type: u32
+ enum: pin-capabilities
-
name: parent-device
type: nest
diff --git a/Documentation/userspace-api/media/mediactl/media-types.rst b/Documentation/userspace-api/media/mediactl/media-types.rst
index 0ffeece1e0..6332e83952 100644
--- a/Documentation/userspace-api/media/mediactl/media-types.rst
+++ b/Documentation/userspace-api/media/mediactl/media-types.rst
@@ -375,12 +375,11 @@ Types and flags used to represent the media graph elements
are origins of links.
* - ``MEDIA_PAD_FL_MUST_CONNECT``
- - If this flag is set and the pad is linked to any other pad, then
- at least one of those links must be enabled for the entity to be
- able to stream. There could be temporary reasons (e.g. device
- configuration dependent) for the pad to need enabled links even
- when this flag isn't set; the absence of the flag doesn't imply
- there is none.
+ - If this flag is set, then for this pad to be able to stream, it must
+ be connected by at least one enabled link. There could be temporary
+ reasons (e.g. device configuration dependent) for the pad to need
+ enabled links even when this flag isn't set; the absence of the flag
+ doesn't imply there are none.
One and only one of ``MEDIA_PAD_FL_SINK`` and ``MEDIA_PAD_FL_SOURCE``
diff --git a/Makefile b/Makefile
index f1a592b7c7..190ed2b2e0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 7
-SUBLEVEL = 9
+SUBLEVEL = 12
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f8567e95f9..f53832383a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -35,6 +35,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
+ select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
@@ -590,8 +591,8 @@ source "arch/arm/mm/Kconfig"
config IWMMXT
bool "Enable iWMMXt support"
- depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
- default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 || CPU_PJ4B
+ depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
+ default y if PXA27x || PXA3xx || ARCH_MMP
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
diff --git a/arch/arm/boot/dts/amazon/alpine.dtsi b/arch/arm/boot/dts/amazon/alpine.dtsi
index ff68dfb4eb..90bd12feac 100644
--- a/arch/arm/boot/dts/amazon/alpine.dtsi
+++ b/arch/arm/boot/dts/amazon/alpine.dtsi
@@ -167,7 +167,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <96>;
al,msi-num-spis = <64>;
diff --git a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
index efed325af8..d99bac0223 100644
--- a/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm/arm-realview-pb1176.dts
@@ -451,7 +451,7 @@
/* Direct-mapped development chip ROM */
pb1176_rom@10200000 {
- compatible = "direct-mapped";
+ compatible = "mtd-rom";
reg = <0x10200000 0x4000>;
bank-width = <1>;
};
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
index 530491ae5e..857cb26ed6 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g4.dtsi
@@ -466,7 +466,6 @@
i2c0: i2c-bus@40 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x40 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -482,7 +481,6 @@
i2c1: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -498,7 +496,6 @@
i2c2: i2c-bus@c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0xc0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -515,7 +512,6 @@
i2c3: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -532,7 +528,6 @@
i2c4: i2c-bus@140 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x140 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -549,7 +544,6 @@
i2c5: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -566,7 +560,6 @@
i2c6: i2c-bus@1c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x1c0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -583,7 +576,6 @@
i2c7: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -600,7 +592,6 @@
i2c8: i2c-bus@340 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x340 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -617,7 +608,6 @@
i2c9: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -634,7 +624,6 @@
i2c10: i2c-bus@3c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x3c0 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -651,7 +640,6 @@
i2c11: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -668,7 +656,6 @@
i2c12: i2c-bus@440 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x440 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
@@ -685,7 +672,6 @@
i2c13: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x40>;
compatible = "aspeed,ast2400-i2c-bus";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
index 04f98d1dbb..e6f3cf3c72 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g5.dtsi
@@ -363,6 +363,7 @@
interrupts = <40>;
reg = <0x1e780200 0x0100>;
clocks = <&syscon ASPEED_CLK_APB>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -594,7 +595,6 @@
i2c0: i2c-bus@40 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x40 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -610,7 +610,6 @@
i2c1: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -626,7 +625,6 @@
i2c2: i2c-bus@c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0xc0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -643,7 +641,6 @@
i2c3: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -660,7 +657,6 @@
i2c4: i2c-bus@140 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x140 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -677,7 +673,6 @@
i2c5: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -694,7 +689,6 @@
i2c6: i2c-bus@1c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x1c0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -711,7 +705,6 @@
i2c7: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -728,7 +721,6 @@
i2c8: i2c-bus@340 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x340 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -745,7 +737,6 @@
i2c9: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -762,7 +753,6 @@
i2c10: i2c-bus@3c0 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x3c0 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -779,7 +769,6 @@
i2c11: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -796,7 +785,6 @@
i2c12: i2c-bus@440 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x440 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
@@ -813,7 +801,6 @@
i2c13: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x40>;
compatible = "aspeed,ast2500-i2c-bus";
diff --git a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
index c4d1faade8..29f94696d8 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed/aspeed-g6.dtsi
@@ -474,6 +474,7 @@
reg = <0x1e780500 0x100>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_APB2>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -488,6 +489,7 @@
reg = <0x1e780600 0x100>;
interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&syscon ASPEED_CLK_APB2>;
+ #interrupt-cells = <2>;
interrupt-controller;
bus-frequency = <12000000>;
pinctrl-names = "default";
@@ -902,7 +904,6 @@
i2c0: i2c-bus@80 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x80 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -917,7 +918,6 @@
i2c1: i2c-bus@100 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x100 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -932,7 +932,6 @@
i2c2: i2c-bus@180 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x180 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -947,7 +946,6 @@
i2c3: i2c-bus@200 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x200 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -962,7 +960,6 @@
i2c4: i2c-bus@280 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x280 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -977,7 +974,6 @@
i2c5: i2c-bus@300 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x300 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -992,7 +988,6 @@
i2c6: i2c-bus@380 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x380 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1007,7 +1002,6 @@
i2c7: i2c-bus@400 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x400 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1022,7 +1016,6 @@
i2c8: i2c-bus@480 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x480 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1037,7 +1030,6 @@
i2c9: i2c-bus@500 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x500 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1052,7 +1044,6 @@
i2c10: i2c-bus@580 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x580 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1067,7 +1058,6 @@
i2c11: i2c-bus@600 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x600 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1082,7 +1072,6 @@
i2c12: i2c-bus@680 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x680 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1097,7 +1086,6 @@
i2c13: i2c-bus@700 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x700 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1112,7 +1100,6 @@
i2c14: i2c-bus@780 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x780 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
@@ -1127,7 +1114,6 @@
i2c15: i2c-bus@800 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
reg = <0x800 0x80>;
compatible = "aspeed,ast2600-i2c-bus";
clocks = <&syscon ASPEED_CLK_APB2>;
diff --git a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
index f9f79ed825..07ca0d993c 100644
--- a/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-cygnus.dtsi
@@ -167,6 +167,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&mailbox>;
interrupts = <0>;
};
@@ -247,6 +248,7 @@
gpio-controller;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
+ #interrupt-cells = <2>;
};
i2c1: i2c@1800b000 {
@@ -518,6 +520,7 @@
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 42 1>,
<&pinctrl 1 44 3>,
diff --git a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
index 788a680619..75545b10ef 100644
--- a/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-hr2.dtsi
@@ -200,6 +200,7 @@
gpio-controller;
ngpios = <4>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
index 9d20ba3b1f..6a4482c931 100644
--- a/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm-nsp.dtsi
@@ -180,6 +180,7 @@
gpio-controller;
ngpios = <32>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinctrl 0 0 32>;
};
@@ -352,6 +353,7 @@
gpio-controller;
ngpios = <4>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
index 4d70f6afd1..6d5e69035f 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-gateway-7001.dts
@@ -60,6 +60,8 @@
* We have slots (IDSEL) 1 and 2 with one assigned IRQ
* each handling all IRQs.
*/
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map =
/* IDSEL 1 */
<0x0800 0 0 1 &gpio0 11 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 1 is irq 11 */
diff --git a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
index 9ec0169bac..5f4c849915 100644
--- a/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
+++ b/arch/arm/boot/dts/intel/ixp/intel-ixp42x-goramo-multilink.dts
@@ -89,6 +89,8 @@
* The slots have Ethernet, Ethernet, NEC and MPCI.
* The IDSELs are 11, 12, 13, 14.
*/
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xf800 0 0 7>;
interrupt-map =
/* IDSEL 11 - Ethernet A */
<0x5800 0 0 1 &gpio0 4 IRQ_TYPE_LEVEL_LOW>, /* INT A on slot 11 is irq 4 */
diff --git a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
index dffb9f84e6..c841eb8e7f 100644
--- a/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
+++ b/arch/arm/boot/dts/marvell/kirkwood-l-50.dts
@@ -65,6 +65,7 @@
gpio2: gpio-expander@20 {
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
compatible = "semtech,sx1505q";
reg = <0x20>;
@@ -79,6 +80,7 @@
gpio3: gpio-expander@21 {
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
compatible = "semtech,sx1505q";
reg = <0x21>;
diff --git a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
index 04f1ae1382..bc64348b82 100644
--- a/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/marvell/mmp2-brownstone.dts
@@ -28,7 +28,7 @@
&twsi1 {
status = "okay";
pmic: max8925@3c {
- compatible = "maxium,max8925";
+ compatible = "maxim,max8925";
reg = <0x3c>;
interrupts = <1>;
interrupt-parent = <&intcmux4>;
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
index fd671c7a1e..6e1f0f164c 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-wpcm450.dtsi
@@ -120,6 +120,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_HIGH>,
<3 IRQ_TYPE_LEVEL_HIGH>,
<4 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
interrupt-controller;
};
@@ -128,6 +129,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <2>;
interrupt-controller;
};
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
index 1640763fd4..ff0d684622 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis-v1.1.dtsi
@@ -997,7 +997,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
index 3b6fad273c..d38f1dd38a 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-apalis.dtsi
@@ -980,7 +980,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
index 4eb526fe9c..81c8a5fd92 100644
--- a/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/nvidia/tegra30-colibri.dtsi
@@ -861,7 +861,6 @@
compatible = "st,stmpe811";
reg = <0x41>;
irq-gpio = <&gpio TEGRA_GPIO(V, 0) GPIO_ACTIVE_LOW>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
index 3be38a3c4b..c32ea040fe 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6dl-yapp4-common.dtsi
@@ -117,17 +117,9 @@
#address-cells = <1>;
#size-cells = <0>;
- phy_port2: phy@1 {
- reg = <1>;
- };
-
- phy_port3: phy@2 {
- reg = <2>;
- };
-
switch@10 {
compatible = "qca,qca8334";
- reg = <10>;
+ reg = <0x10>;
reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
switch_ports: ports {
@@ -149,15 +141,30 @@
eth2: port@2 {
reg = <2>;
label = "eth2";
+ phy-mode = "internal";
phy-handle = <&phy_port2>;
};
eth1: port@3 {
reg = <3>;
label = "eth1";
+ phy-mode = "internal";
phy-handle = <&phy_port3>;
};
};
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy_port2: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ phy_port3: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
index db8c332df6..cad112e054 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-b850v3.dts
@@ -227,7 +227,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
bridge@2,1 {
compatible = "pci10b5,8605";
@@ -235,7 +234,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
/* Intel Corporation I210 Gigabit Network Connection */
ethernet@3,0 {
@@ -250,7 +248,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
/* Intel Corporation I210 Gigabit Network Connection */
switch_nic: ethernet@4,0 {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
index 99f4f6ac71..c1ae7c47b4 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6q-bx50v3.dtsi
@@ -245,6 +245,7 @@
reg = <0x74>;
gpio-controller;
#gpio-cells = <2>;
+ #interrupt-cells = <2>;
interrupt-controller;
interrupt-parent = <&gpio2>;
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
@@ -390,7 +391,6 @@
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <1>;
};
};
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
index 4cc965277c..dcb4f6a32f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-apalis.dtsi
@@ -619,7 +619,6 @@
blocks = <0x5>;
id = <0>;
interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
interrupt-parent = <&gpio4>;
irq-trigger = <0x1>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
index 11d9c7a2da..6cc4d6fd5f 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-colibri.dtsi
@@ -543,7 +543,6 @@
blocks = <0x5>;
interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio6>;
- interrupt-controller;
id = <0>;
irq-trigger = <0x1>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
index a63e73adc1..42b2ba23ae 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-emcon.dtsi
@@ -225,7 +225,6 @@
pinctrl-0 = <&pinctrl_pmic>;
interrupt-parent = <&gpio2>;
interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
index 113974520d..c0c47adc58 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-pfla02.dtsi
@@ -124,6 +124,7 @@
reg = <0x58>;
interrupt-parent = <&gpio2>;
interrupts = <9 IRQ_TYPE_LEVEL_LOW>; /* active-low GPIO2_9 */
+ #interrupt-cells = <2>;
interrupt-controller;
regulators {
diff --git a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
index 86b4269e0e..85e278eb20 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6qdl-phytec-phycore-som.dtsi
@@ -100,6 +100,7 @@
interrupt-parent = <&gpio1>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
index 12361fcbe2..1b96565229 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-pico-dwarf.dts
@@ -63,6 +63,7 @@
gpio-controller;
#gpio-cells = <2>;
#interrupt-cells = <2>;
+ interrupt-controller;
reg = <0x25>;
};
diff --git a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
index 16b4e06c4e..a248b8a453 100644
--- a/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
+++ b/arch/arm/boot/dts/nxp/vf/vf610-zii-dev-rev-b.dts
@@ -338,6 +338,7 @@
reg = <0x22>;
gpio-controller;
#gpio-cells = <2>;
+ #interrupt-cells = <2>;
interrupt-controller;
interrupt-parent = <&gpio3>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
diff --git a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
index 0bc2e66d15..f41cc4b4b5 100644
--- a/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-msm8974.dtsi
@@ -1226,7 +1226,7 @@
qfprom: qfprom@fc4bc000 {
compatible = "qcom,msm8974-qfprom", "qcom,qfprom";
- reg = <0xfc4bc000 0x1000>;
+ reg = <0xfc4bc000 0x2100>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
index a976370264..fb6764b2dc 100644
--- a/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom/qcom-sdx55.dtsi
@@ -345,10 +345,10 @@
"msi8";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 0 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 0 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 0 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 0 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interrupt-map = <0 0 0 1 &intc 0 141 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 142 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 143 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 144 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_PCIE_PIPE_CLK>,
<&gcc GCC_PCIE_AUX_CLK>,
diff --git a/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts b/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
index ed75c01dbe..3d02f065f7 100644
--- a/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
+++ b/arch/arm/boot/dts/renesas/r8a73a4-ape6evm.dts
@@ -209,6 +209,18 @@
status = "okay";
};
+&extal1_clk {
+ clock-frequency = <26000000>;
+};
+
+&extal2_clk {
+ clock-frequency = <48000000>;
+};
+
+&extalr_clk {
+ clock-frequency = <32768>;
+};
+
&pfc {
scifa0_pins: scifa0 {
groups = "scifa0_data";
diff --git a/arch/arm/boot/dts/renesas/r8a73a4.dtsi b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
index c390669670..d1f4cbd099 100644
--- a/arch/arm/boot/dts/renesas/r8a73a4.dtsi
+++ b/arch/arm/boot/dts/renesas/r8a73a4.dtsi
@@ -450,17 +450,20 @@
extalr_clk: extalr {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <32768>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
extal1_clk: extal1 {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <25000000>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
extal2_clk: extal2 {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <48000000>;
+ /* This value must be overridden by the board. */
+ clock-frequency = <0>;
};
fsiack_clk: fsiack {
compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/renesas/r8a7790-lager.dts b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
index 4d666ad8b1..b17a9f9307 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-lager.dts
@@ -432,6 +432,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7790-stout.dts b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
index fe14727eef..25956661a8 100644
--- a/arch/arm/boot/dts/renesas/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/renesas/r8a7790-stout.dts
@@ -332,6 +332,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
index 545515b41e..ec01cc8595 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-koelsch.dts
@@ -795,6 +795,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7791-porter.dts b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
index ec0a20d513..fcc9a2313e 100644
--- a/arch/arm/boot/dts/renesas/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/renesas/r8a7791-porter.dts
@@ -389,6 +389,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
watchdog {
compatible = "dlg,da9063-watchdog";
diff --git a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
index e793134f32..af16f25184 100644
--- a/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/renesas/r8a7792-blanche.dts
@@ -332,6 +332,7 @@
interrupt-parent = <&irqc>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7793-gose.dts b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
index 79b537b246..9358fc7d0e 100644
--- a/arch/arm/boot/dts/renesas/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/renesas/r8a7793-gose.dts
@@ -735,6 +735,7 @@
interrupt-parent = <&irqc0>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7794-alt.dts b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
index 08df031bc2..73ec4d3541 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-alt.dts
@@ -453,6 +453,7 @@
interrupt-parent = <&gpio3>;
interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
rtc {
compatible = "dlg,da9063-rtc";
diff --git a/arch/arm/boot/dts/renesas/r8a7794-silk.dts b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
index b7af1befa1..b825f2e25d 100644
--- a/arch/arm/boot/dts/renesas/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/renesas/r8a7794-silk.dts
@@ -424,6 +424,7 @@
interrupt-parent = <&gpio3>;
interrupts = <31 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/arm/boot/dts/rockchip/rv1108.dtsi b/arch/arm/boot/dts/rockchip/rv1108.dtsi
index abf3006f0a..f3291f3bbc 100644
--- a/arch/arm/boot/dts/rockchip/rv1108.dtsi
+++ b/arch/arm/boot/dts/rockchip/rv1108.dtsi
@@ -196,7 +196,6 @@
pwm4: pwm@10280000 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280000 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -208,7 +207,6 @@
pwm5: pwm@10280010 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280010 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -220,7 +218,6 @@
pwm6: pwm@10280020 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280020 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -232,7 +229,6 @@
pwm7: pwm@10280030 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x10280030 0x10>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM>, <&cru PCLK_PWM>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -386,7 +382,6 @@
pwm0: pwm@20040000 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040000 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -398,7 +393,6 @@
pwm1: pwm@20040010 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040010 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -410,7 +404,6 @@
pwm2: pwm@20040020 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040020 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
@@ -422,7 +415,6 @@
pwm3: pwm@20040030 {
compatible = "rockchip,rv1108-pwm", "rockchip,rk3288-pwm";
reg = <0x20040030 0x10>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru SCLK_PWM0_PMU>, <&cru PCLK_PWM0_PMU>;
clock-names = "pwm", "pclk";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/st/stm32429i-eval.dts b/arch/arm/boot/dts/st/stm32429i-eval.dts
index 576235ec3c..afa417b34b 100644
--- a/arch/arm/boot/dts/st/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/st/stm32429i-eval.dts
@@ -222,7 +222,6 @@
reg = <0x42>;
interrupts = <8 3>;
interrupt-parent = <&gpioi>;
- interrupt-controller;
wakeup-source;
stmpegpio: stmpe_gpio {
diff --git a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
index 510cca5acb..7a701f7ef0 100644
--- a/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
+++ b/arch/arm/boot/dts/st/stm32mp157c-dk2.dts
@@ -64,7 +64,6 @@
reg = <0x38>;
interrupts = <2 2>;
interrupt-parent = <&gpiof>;
- interrupt-controller;
touchscreen-size-x = <480>;
touchscreen-size-y = <800>;
status = "okay";
diff --git a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
index 9a234dc143..5b240769d3 100644
--- a/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
+++ b/arch/arm/boot/dts/ti/omap/am5729-beagleboneai.dts
@@ -415,7 +415,6 @@
reg = <0x41>;
interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
interrupt-parent = <&gpio2>;
- interrupt-controller;
id = <0>;
blocks = <0x5>;
irq-trigger = <0x1>;
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 0a90583f9f..8f9dbe8d90 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -297,6 +297,7 @@ CONFIG_FB_MODE_HELPERS=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_GPIO=y
CONFIG_FRAMEBUFFER_CONSOLE=y
diff --git a/arch/arm/crypto/sha256_glue.c b/arch/arm/crypto/sha256_glue.c
index 433ee4ddce..f85933fdec 100644
--- a/arch/arm/crypto/sha256_glue.c
+++ b/arch/arm/crypto/sha256_glue.c
@@ -24,8 +24,8 @@
#include "sha256_glue.h"
-asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
- unsigned int num_blks);
+asmlinkage void sha256_block_data_order(struct sha256_state *state,
+ const u8 *data, int num_blks);
int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -33,23 +33,20 @@ int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
/* make sure casting to sha256_block_fn() is safe */
BUILD_BUG_ON(offsetof(struct sha256_state, state) != 0);
- return sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ return sha256_base_do_update(desc, data, len, sha256_block_data_order);
}
EXPORT_SYMBOL(crypto_sha256_arm_update);
static int crypto_sha256_arm_final(struct shash_desc *desc, u8 *out)
{
- sha256_base_do_finalize(desc,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_finalize(desc, sha256_block_data_order);
return sha256_base_finish(desc, out);
}
int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha256_base_do_update(desc, data, len,
- (sha256_block_fn *)sha256_block_data_order);
+ sha256_base_do_update(desc, data, len, sha256_block_data_order);
return crypto_sha256_arm_final(desc, out);
}
EXPORT_SYMBOL(crypto_sha256_arm_finup);
diff --git a/arch/arm/crypto/sha512-glue.c b/arch/arm/crypto/sha512-glue.c
index 0635a65aa4..1be5bd498a 100644
--- a/arch/arm/crypto/sha512-glue.c
+++ b/arch/arm/crypto/sha512-glue.c
@@ -25,27 +25,25 @@ MODULE_ALIAS_CRYPTO("sha512");
MODULE_ALIAS_CRYPTO("sha384-arm");
MODULE_ALIAS_CRYPTO("sha512-arm");
-asmlinkage void sha512_block_data_order(u64 *state, u8 const *src, int blocks);
+asmlinkage void sha512_block_data_order(struct sha512_state *state,
+ u8 const *src, int blocks);
int sha512_arm_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
- return sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ return sha512_base_do_update(desc, data, len, sha512_block_data_order);
}
static int sha512_arm_final(struct shash_desc *desc, u8 *out)
{
- sha512_base_do_finalize(desc,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_finalize(desc, sha512_block_data_order);
return sha512_base_finish(desc, out);
}
int sha512_arm_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
- sha512_base_do_update(desc, data, len,
- (sha512_block_fn *)sha512_block_data_order);
+ sha512_base_do_update(desc, data, len, sha512_block_data_order);
return sha512_arm_final(desc, out);
}
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
new file mode 100644
index 0000000000..2189e507c8
--- /dev/null
+++ b/arch/arm/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <asm/system_info.h>
+#include <uapi/asm/mman.h>
+
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return cpu_architecture() >= CPU_ARCH_ARMv6;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 771264d472..ae2f2b2b4e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -75,8 +75,6 @@ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
-obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
-obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a0218c4867..4a335d3c59 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -18,18 +18,6 @@
#include <asm/assembler.h>
#include "iwmmxt.h"
-#if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
-#define PJ4(code...) code
-#define XSC(code...)
-#elif defined(CONFIG_CPU_MOHAWK) || \
- defined(CONFIG_CPU_XSC3) || \
- defined(CONFIG_CPU_XSCALE)
-#define PJ4(code...)
-#define XSC(code...) code
-#else
-#error "Unsupported iWMMXt architecture"
-#endif
-
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
@@ -81,17 +69,13 @@ ENDPROC(iwmmxt_undef_handler)
ENTRY(iwmmxt_task_enable)
inc_preempt_count r10, r3
- XSC(mrc p15, 0, r2, c15, c1, 0)
- PJ4(mrc p15, 0, r2, c1, c0, 2)
+ mrc p15, 0, r2, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r2, #0x3)
- PJ4(tst r2, #0xf)
+ tst r2, #0x3
bne 4f @ if so no business here
@ enable access to CP0 and CP1
- XSC(orr r2, r2, #0x3)
- XSC(mcr p15, 0, r2, c15, c1, 0)
- PJ4(orr r2, r2, #0xf)
- PJ4(mcr p15, 0, r2, c1, c0, 2)
+ orr r2, r2, #0x3
+ mcr p15, 0, r2, c15, c1, 0
ldr r3, =concan_owner
ldr r2, [r0, #S_PC] @ current task pc value
@@ -218,12 +202,9 @@ ENTRY(iwmmxt_task_disable)
bne 1f @ no: quit
@ enable access to CP0 and CP1
- XSC(mrc p15, 0, r4, c15, c1, 0)
- XSC(orr r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(mrc p15, 0, r4, c1, c0, 2)
- PJ4(orr r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ mrc p15, 0, r4, c15, c1, 0
+ orr r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
@@ -232,10 +213,8 @@ ENTRY(iwmmxt_task_disable)
bl concan_save
@ disable access to CP0 and CP1
- XSC(bic r4, r4, #0x3)
- XSC(mcr p15, 0, r4, c15, c1, 0)
- PJ4(bic r4, r4, #0xf)
- PJ4(mcr p15, 0, r4, c1, c0, 2)
+ bic r4, r4, #0x3
+ mcr p15, 0, r4, c15, c1, 0
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
@@ -330,11 +309,9 @@ ENDPROC(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
- XSC(mrc p15, 0, r1, c15, c1, 0)
- PJ4(mrc p15, 0, r1, c1, c0, 2)
+ mrc p15, 0, r1, c15, c1, 0
@ CP0 and CP1 accessible?
- XSC(tst r1, #0x3)
- PJ4(tst r1, #0xf)
+ tst r1, #0x3
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
@@ -344,10 +321,8 @@ ENTRY(iwmmxt_task_switch)
retne lr @ no: leave Concan disabled
1: @ flip Concan access
- XSC(eor r1, r1, #0x3)
- XSC(mcr p15, 0, r1, c15, c1, 0)
- PJ4(eor r1, r1, #0xf)
- PJ4(mcr p15, 0, r1, c1, c0, 2)
+ eor r1, r1, #0x3
+ mcr p15, 0, r1, c15, c1, 0
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
deleted file mode 100644
index 4bca8098c4..0000000000
--- a/arch/arm/kernel/pj4-cp0.c
+++ /dev/null
@@ -1,135 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/arch/arm/kernel/pj4-cp0.c
- *
- * PJ4 iWMMXt coprocessor context switching and handling
- *
- * Copyright (c) 2010 Marvell International Inc.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <asm/thread_notify.h>
-#include <asm/cputype.h>
-
-static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = t;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- /*
- * flush_thread() zeroes thread->fpstate, so no need
- * to do anything here.
- *
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_EXIT:
- iwmmxt_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- iwmmxt_task_switch(thread);
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block __maybe_unused iwmmxt_notifier_block = {
- .notifier_call = iwmmxt_do,
-};
-
-
-static u32 __init pj4_cp_access_read(void)
-{
- u32 value;
-
- __asm__ __volatile__ (
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- : "=r" (value));
- return value;
-}
-
-static void __init pj4_cp_access_write(u32 value)
-{
- u32 temp;
-
- __asm__ __volatile__ (
- "mcr p15, 0, %1, c1, c0, 2\n\t"
-#ifdef CONFIG_THUMB2_KERNEL
- "isb\n\t"
-#else
- "mrc p15, 0, %0, c1, c0, 2\n\t"
- "mov %0, %0\n\t"
- "sub pc, pc, #4\n\t"
-#endif
- : "=r" (temp) : "r" (value));
-}
-
-static int __init pj4_get_iwmmxt_version(void)
-{
- u32 cp_access, wcid;
-
- cp_access = pj4_cp_access_read();
- pj4_cp_access_write(cp_access | 0xf);
-
- /* check if coprocessor 0 and 1 are available */
- if ((pj4_cp_access_read() & 0xf) != 0xf) {
- pj4_cp_access_write(cp_access);
- return -ENODEV;
- }
-
- /* read iWMMXt coprocessor id register p1, c0 */
- __asm__ __volatile__ ("mrc p1, 0, %0, c0, c0, 0\n" : "=r" (wcid));
-
- pj4_cp_access_write(cp_access);
-
- /* iWMMXt v1 */
- if ((wcid & 0xffffff00) == 0x56051000)
- return 1;
- /* iWMMXt v2 */
- if ((wcid & 0xffffff00) == 0x56052000)
- return 2;
-
- return -EINVAL;
-}
-
-/*
- * Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
- * switch code handle iWMMXt context switching.
- */
-static int __init pj4_cp0_init(void)
-{
- u32 __maybe_unused cp_access;
- int vers;
-
- if (!cpu_is_pj4())
- return 0;
-
- vers = pj4_get_iwmmxt_version();
- if (vers < 0)
- return 0;
-
-#ifndef CONFIG_IWMMXT
- pr_info("PJ4 iWMMXt coprocessor detected, but kernel support is missing.\n");
-#else
- cp_access = pj4_cp_access_read() & ~0xf;
- pj4_cp_access_write(cp_access);
-
- pr_info("PJ4 iWMMXt v%d coprocessor enabled.\n", vers);
- elf_hwcap |= HWCAP_IWMMXT;
- thread_register_notifier(&iwmmxt_notifier_block);
- register_iwmmxt_undef_handler();
-#endif
-
- return 0;
-}
-
-late_initcall(pj4_cp0_init);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index fef62e4a9e..07565b593e 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -278,6 +278,37 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+ if (!(flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, addr);
+ if (!vma)
+ goto lock_mmap;
+
+ if (!(vma->vm_flags & vm_flags)) {
+ vma_end_read(vma);
+ goto lock_mmap;
+ }
+ fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
+ return 0;
+ }
+lock_mmap:
+
retry:
vma = lock_mm_and_find_vma(mm, addr, regs);
if (unlikely(!vma)) {
@@ -316,6 +347,7 @@ retry:
}
mmap_read_unlock(mm);
+done:
/*
* Handle the "normal" case first - VM_FAULT_MAJOR
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d19d140a10..0749cf8a66 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -296,6 +296,9 @@ void __sync_icache_dcache(pte_t pteval)
return;
folio = page_folio(pfn_to_page(pfn));
+ if (folio_test_reserved(folio))
+ return;
+
if (cache_is_vipt_aliasing())
mapping = folio_flush_mapping(folio);
else
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 456e8680e1..77dfbc8ab2 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -196,7 +196,7 @@ config ARM64
if DYNAMIC_FTRACE_WITH_ARGS && DYNAMIC_FTRACE_WITH_CALL_OPS
select HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS \
if (DYNAMIC_FTRACE_WITH_ARGS && !CFI_CLANG && \
- !CC_OPTIMIZE_FOR_SIZE)
+ (CC_IS_CLANG || !CC_OPTIMIZE_FOR_SIZE))
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_SAMPLE_FTRACE_DIRECT
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 9ec49ac2f6..381d58cea0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -291,6 +291,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx_pin>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
index 4903d63581..855b7d43bc 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-tanix.dtsi
@@ -166,6 +166,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spdif_tx_pin>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index ca1d287a0a..d11e5041ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -406,6 +406,7 @@
function = "spi1";
};
+ /omit-if-no-ref/
spdif_tx_pin: spdif-tx-pin {
pins = "PH7";
function = "spdif";
@@ -655,10 +656,8 @@
clocks = <&ccu CLK_BUS_SPDIF>, <&ccu CLK_SPDIF>;
clock-names = "apb", "spdif";
resets = <&ccu RST_BUS_SPDIF>;
- dmas = <&dma 2>;
- dma-names = "tx";
- pinctrl-names = "default";
- pinctrl-0 = <&spdif_tx_pin>;
+ dmas = <&dma 2>, <&dma 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
index dccbba6e7f..dbf2dce8d1 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v2.dtsi
@@ -145,7 +145,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <160>;
al,msi-num-spis = <160>;
diff --git a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
index 39481d7fd7..3ea178acdd 100644
--- a/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
+++ b/arch/arm64/boot/dts/amazon/alpine-v3.dtsi
@@ -355,7 +355,6 @@
msix: msix@fbe00000 {
compatible = "al,alpine-msix";
reg = <0x0 0xfbe00000 0x0 0x100000>;
- interrupt-controller;
msi-controller;
al,msi-base-spi = <336>;
al,msi-num-spis = <959>;
diff --git a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
index 2f124b027b..aadfa0ae05 100644
--- a/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcmbca/bcm4908.dtsi
@@ -227,9 +227,6 @@
brcm,num-gphy = <5>;
brcm,num-rgmii-ports = <2>;
- #address-cells = <1>;
- #size-cells = <0>;
-
ports: ports {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 9dcd25ec2c..896d1f33b5 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -586,6 +586,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index f049687d6b..d8516ec0da 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -450,6 +450,7 @@
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
gpio-ranges = <&pinmux 0 0 16>,
<&pinmux 16 71 2>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
index 8b16bd6857..d9fa0deea7 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl-osm-s.dts
@@ -294,8 +294,8 @@
pinctrl_i2c4: i2c4grp {
fsl,pins = <
- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
>;
};
@@ -313,19 +313,19 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
+ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
+ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
+ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
+ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
+ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
+ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
>;
};
@@ -337,40 +337,40 @@
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
index dcec57c203..aab8e24216 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-bl.dts
@@ -279,8 +279,8 @@
pinctrl_i2c4: i2c4grp {
fsl,pins = <
- MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
- MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x40000083
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x40000083
>;
};
@@ -292,19 +292,19 @@
pinctrl_uart1: uart1grp {
fsl,pins = <
- MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x140
- MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x140
- MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x140
- MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_SAI2_RXC_UART1_DCE_RX 0x0
+ MX8MM_IOMUXC_SAI2_RXFS_UART1_DCE_TX 0x0
+ MX8MM_IOMUXC_SAI2_RXD0_UART1_DCE_RTS_B 0x0
+ MX8MM_IOMUXC_SAI2_TXFS_UART1_DCE_CTS_B 0x0
>;
};
pinctrl_uart2: uart2grp {
fsl,pins = <
- MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x140
- MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x140
- MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x140
- MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_SAI3_TXFS_UART2_DCE_RX 0x0
+ MX8MM_IOMUXC_SAI3_TXC_UART2_DCE_TX 0x0
+ MX8MM_IOMUXC_SAI3_RXD_UART2_DCE_RTS_B 0x0
+ MX8MM_IOMUXC_SAI3_RXC_UART2_DCE_CTS_B 0x0
>;
};
@@ -316,40 +316,40 @@
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x190
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d0
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d0
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d0
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d0
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d0
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x194
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = <
- MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x196
+ MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96
MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d6
MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d6
MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d6
MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d6
MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d6
- MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x019
- MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x1d0
+ MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x19
+ MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xd0
>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
index 6e75ab879b..60abcb636c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-osm-s.dtsi
@@ -210,7 +210,7 @@
reg = <0x52>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rtc>;
- interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&gpio4 1 IRQ_TYPE_LEVEL_LOW>;
trickle-diode-disable;
};
};
@@ -252,8 +252,8 @@
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
index 1f8326613e..2076148e08 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-kontron-sl.dtsi
@@ -237,8 +237,8 @@
pinctrl_i2c1: i2c1grp {
fsl,pins = <
- MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
- MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x40000083
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x40000083
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
index 6425773f68..bbbaf2165e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw71xx.dtsi
@@ -47,17 +47,6 @@
gpios = <&gpio1 15 GPIO_ACTIVE_HIGH>;
status = "okay";
};
-
- reg_usb_otg1_vbus: regulator-usb-otg1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_reg_usb1_en>;
- compatible = "regulator-fixed";
- regulator-name = "usb_otg1_vbus";
- gpio = <&gpio1 10 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
};
/* off-board header */
@@ -144,9 +133,10 @@
};
&usbotg1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
dr_mode = "otg";
over-current-active-low;
- vbus-supply = <&reg_usb_otg1_vbus>;
status = "okay";
};
@@ -204,14 +194,6 @@
>;
};
- pinctrl_reg_usb1_en: regusb1grp {
- fsl,pins = <
- MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x41
- MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
- MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
- >;
- };
-
pinctrl_spi2: spi2grp {
fsl,pins = <
MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0xd6
@@ -234,4 +216,11 @@
MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x140
>;
};
+
+ pinctrl_usbotg1: usbotg1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_GPIO1_IO12 0x141
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x41
+ >;
+ };
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
index 5828c9d782..b5ce7b14b5 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-data-modul-edm-sbc.dts
@@ -121,7 +121,7 @@
flash@0 { /* W25Q128JVEI */
compatible = "jedec,spi-nor";
reg = <0>;
- spi-max-frequency = <100000000>; /* Up to 133 MHz */
+ spi-max-frequency = <40000000>;
spi-tx-bus-width = <1>;
spi-rx-bus-width = <1>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index cc9d468b43..92f8cc05fe 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -23,7 +23,7 @@
port {
hdmi_connector_in: endpoint {
- remote-endpoint = <&adv7533_out>;
+ remote-endpoint = <&adv7535_out>;
};
};
};
@@ -107,6 +107,13 @@
enable-active-high;
};
+ reg_vext_3v3: regulator-vext-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VEXT_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
sound {
compatible = "simple-audio-card";
simple-audio-card,name = "wm8960-audio";
@@ -342,7 +349,7 @@
regulator-always-on;
};
- BUCK5 {
+ reg_buck5: BUCK5 {
regulator-name = "BUCK5";
regulator-min-microvolt = <1650000>;
regulator-max-microvolt = <1950000>;
@@ -393,14 +400,16 @@
hdmi@3d {
compatible = "adi,adv7535";
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
+ avdd-supply = <&reg_buck5>;
+ dvdd-supply = <&reg_buck5>;
+ pvdd-supply = <&reg_buck5>;
+ a2vdd-supply = <&reg_buck5>;
+ v3p3-supply = <&reg_vext_3v3>;
+ v1p2-supply = <&reg_buck5>;
ports {
#address-cells = <1>;
@@ -409,7 +418,7 @@
port@0 {
reg = <0>;
- adv7533_in: endpoint {
+ adv7535_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
@@ -417,7 +426,7 @@
port@1 {
reg = <1>;
- adv7533_out: endpoint {
+ adv7535_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
@@ -502,7 +511,7 @@
reg = <1>;
dsi_out: endpoint {
- remote-endpoint = <&adv7533_in>;
+ remote-endpoint = <&adv7535_in>;
data-lanes = <1 2 3 4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
index 8439dd6b39..459ba2b9b7 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
@@ -96,15 +96,30 @@
status = "okay";
};
+/* It is eDMA1 in the 8QM RM, but on 8QXP it is eDMA3 */
&edma3 {
+ reg = <0x5a9f0000 0x210000>;
+ dma-channels = <10>;
+ interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 432 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 433 IRQ_TYPE_LEVEL_HIGH>;
power-domains = <&pd IMX_SC_R_DMA_1_CH0>,
- <&pd IMX_SC_R_DMA_1_CH1>,
- <&pd IMX_SC_R_DMA_1_CH2>,
- <&pd IMX_SC_R_DMA_1_CH3>,
- <&pd IMX_SC_R_DMA_1_CH4>,
- <&pd IMX_SC_R_DMA_1_CH5>,
- <&pd IMX_SC_R_DMA_1_CH6>,
- <&pd IMX_SC_R_DMA_1_CH7>;
+ <&pd IMX_SC_R_DMA_1_CH1>,
+ <&pd IMX_SC_R_DMA_1_CH2>,
+ <&pd IMX_SC_R_DMA_1_CH3>,
+ <&pd IMX_SC_R_DMA_1_CH4>,
+ <&pd IMX_SC_R_DMA_1_CH5>,
+ <&pd IMX_SC_R_DMA_1_CH6>,
+ <&pd IMX_SC_R_DMA_1_CH7>,
+ <&pd IMX_SC_R_DMA_1_CH8>,
+ <&pd IMX_SC_R_DMA_1_CH9>;
};
&flexcan1 {
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi
index 48ec4ebec0..b864ffa74e 100644
--- a/arch/arm64/boot/dts/lg/lg1312.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1312.dtsi
@@ -126,7 +126,6 @@
amba {
#address-cells = <2>;
#size-cells = <1>;
- #interrupt-cells = <3>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/lg/lg1313.dtsi b/arch/arm64/boot/dts/lg/lg1313.dtsi
index 3869460aa5..996fb39bb5 100644
--- a/arch/arm64/boot/dts/lg/lg1313.dtsi
+++ b/arch/arm64/boot/dts/lg/lg1313.dtsi
@@ -126,7 +126,6 @@
amba {
#address-cells = <2>;
#size-cells = <1>;
- #interrupt-cells = <3>;
compatible = "simple-bus";
interrupt-parent = <&gic>;
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index e300145ad1..1cc3fa1c35 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -431,14 +431,14 @@
crypto: crypto@90000 {
compatible = "inside-secure,safexcel-eip97ies";
reg = <0x90000 0x20000>;
- interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "mem", "ring0", "ring1",
- "ring2", "ring3", "eip";
+ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ring0", "ring1", "ring2",
+ "ring3", "eip", "mem";
clocks = <&nb_periph_clk 15>;
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
index 2c920e22ce..7ec7c789d8 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap80x.dtsi
@@ -138,7 +138,6 @@
odmi: odmi@300000 {
compatible = "marvell,odmi-controller";
- interrupt-controller;
msi-controller;
marvell,odmi-frames = <4>;
reg = <0x300000 0x4000>,
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 4ec1aae0a3..7e595ac800 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -511,14 +511,14 @@
CP11X_LABEL(crypto): crypto@800000 {
compatible = "inside-secure,safexcel-eip197b";
reg = <0x800000 0x200000>;
- interrupts = <87 IRQ_TYPE_LEVEL_HIGH>,
- <88 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <88 IRQ_TYPE_LEVEL_HIGH>,
<89 IRQ_TYPE_LEVEL_HIGH>,
<90 IRQ_TYPE_LEVEL_HIGH>,
<91 IRQ_TYPE_LEVEL_HIGH>,
- <92 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "mem", "ring0", "ring1",
- "ring2", "ring3", "eip";
+ <92 IRQ_TYPE_LEVEL_HIGH>,
+ <87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ring0", "ring1", "ring2", "ring3",
+ "eip", "mem";
clock-names = "core", "reg";
clocks = <&CP11X_LABEL(clk) 1 26>,
<&CP11X_LABEL(clk) 1 17>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index c46682150e..26cb3268cc 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -75,6 +75,7 @@
memory@40000000 {
reg = <0 0x40000000 0 0x40000000>;
+ device_type = "memory";
};
reg_1p8v: regulator-1p8v {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 2dc1bdc74e..5c26021fc4 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -57,6 +57,7 @@
memory@40000000 {
reg = <0 0x40000000 0 0x20000000>;
+ device_type = "memory";
};
reg_1p8v: regulator-1p8v {
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
index b876e50121..e1ec2cccf4 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-bananapi-bpi-r3.dts
@@ -43,7 +43,7 @@
#cooling-cells = <2>;
/* cooling level (0, 1, 2) - pwm inverted */
cooling-levels = <255 96 0>;
- pwms = <&pwm 0 10000 0>;
+ pwms = <&pwm 0 10000>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
index 3ef371ca25..2f884c24f1 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986a-rfb.dts
@@ -237,12 +237,13 @@
pinctrl-0 = <&spi_flash_pins>;
cs-gpios = <0>, <0>;
status = "okay";
- spi_nand: spi_nand@0 {
+
+ spi_nand: flash@0 {
compatible = "spi-nand";
reg = <0>;
spi-max-frequency = <10000000>;
- spi-tx-buswidth = <4>;
- spi-rx-buswidth = <4>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index fc751e0499..d974739eae 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -153,6 +153,7 @@
compatible = "mediatek,mt7986-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
wed_pcie: wed-pcie@10003000 {
@@ -234,7 +235,6 @@
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "ring0", "ring1", "ring2", "ring3";
clocks = <&infracfg CLK_INFRA_EIP97_CK>;
- clock-names = "infra_eip97_ck";
assigned-clocks = <&topckgen CLK_TOP_EIP_B_SEL>;
assigned-clock-parents = <&apmixedsys CLK_APMIXED_NET2PLL>;
status = "disabled";
@@ -243,7 +243,6 @@
pwm: pwm@10048000 {
compatible = "mediatek,mt7986-pwm";
reg = <0 0x10048000 0 0x1000>;
- #clock-cells = <1>;
#pwm-cells = <2>;
interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&topckgen CLK_TOP_PWM_SEL>,
diff --git a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
index dde190442e..57dcaeef31 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7986b-rfb.dts
@@ -152,12 +152,13 @@
pinctrl-0 = <&spi_flash_pins>;
cs-gpios = <0>, <0>;
status = "okay";
- spi_nand: spi_nand@0 {
+
+ spi_nand: flash@0 {
compatible = "spi-nand";
reg = <0>;
spi-max-frequency = <10000000>;
- spi-tx-buswidth = <4>;
- spi-rx-buswidth = <4>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
index a11adeb29b..0d3c7b8162 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
@@ -373,6 +373,10 @@
};
&cros_ec {
+ cbas {
+ compatible = "google,cros-cbas";
+ };
+
keyboard-controller {
compatible = "google,cros-ec-keyb-switches";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
index 4864c39e53..e73113cb51 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
@@ -340,6 +340,10 @@
};
&cros_ec {
+ cbas {
+ compatible = "google,cros-cbas";
+ };
+
keyboard-controller {
compatible = "google,cros-ec-keyb-switches";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
index d5f41c6c98..181da69d18 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi
@@ -344,6 +344,10 @@
};
&cros_ec {
+ cbas {
+ compatible = "google,cros-cbas";
+ };
+
keyboard-controller {
compatible = "google,cros-ec-keyb-switches";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 7881a27be0..49a1c3ccb0 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
@@ -935,10 +935,6 @@
google,usb-port-id = <0>;
};
- cbas {
- compatible = "google,cros-cbas";
- };
-
typec {
compatible = "google,cros-ec-typec";
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8186.dtsi b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
index 2fec6fd1c1..84ec6c1aa1 100644
--- a/arch/arm64/boot/dts/mediatek/mt8186.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8186.dtsi
@@ -931,11 +931,17 @@
power-domain@MT8186_POWER_DOMAIN_SSUSB {
reg = <MT8186_POWER_DOMAIN_SSUSB>;
+ clocks = <&topckgen CLK_TOP_USB_TOP>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_REF>;
+ clock-names = "sys_ck", "ref_ck";
#power-domain-cells = <0>;
};
power-domain@MT8186_POWER_DOMAIN_SSUSB_P1 {
reg = <MT8186_POWER_DOMAIN_SSUSB_P1>;
+ clocks = <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_SYS>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_REF>;
+ clock-names = "sys_ck", "ref_ck";
#power-domain-cells = <0>;
};
@@ -1061,7 +1067,7 @@
reg = <MT8186_POWER_DOMAIN_VENC>;
clocks = <&topckgen CLK_TOP_VENC>,
<&vencsys CLK_VENC_CKE1_VENC>;
- clock-names = "venc0", "larb";
+ clock-names = "venc0", "subsys-larb";
mediatek,infracfg = <&infracfg_ao>;
#power-domain-cells = <0>;
};
@@ -1530,8 +1536,9 @@
clocks = <&topckgen CLK_TOP_USB_TOP>,
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_REF>,
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_HCLK>,
- <&infracfg_ao CLK_INFRA_AO_ICUSB>;
- clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck";
+ <&infracfg_ao CLK_INFRA_AO_ICUSB>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_XHCI>;
+ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
interrupts = <GIC_SPI 303 IRQ_TYPE_LEVEL_HIGH 0>;
phys = <&u2port0 PHY_TYPE_USB2>;
power-domains = <&spm MT8186_POWER_DOMAIN_SSUSB>;
@@ -1595,8 +1602,9 @@
clocks = <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_SYS>,
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_REF>,
<&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_HCLK>,
- <&clk26m>;
- clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck";
+ <&clk26m>,
+ <&infracfg_ao CLK_INFRA_AO_SSUSB_TOP_P1_XHCI>;
+ clock-names = "sys_ck", "ref_ck", "mcu_ck", "dma_ck", "xhci_ck";
interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH 0>;
phys = <&u2port1 PHY_TYPE_USB2>, <&u3port1 PHY_TYPE_USB3>;
power-domains = <&spm MT8186_POWER_DOMAIN_SSUSB_P1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index f2281250ac..02ce05bc15 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -1336,10 +1336,6 @@
#address-cells = <1>;
#size-cells = <0>;
- base_detection: cbas {
- compatible = "google,cros-cbas";
- };
-
cros_ec_pwm: pwm {
compatible = "google,cros-ec-pwm";
#pwm-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 69f4cded5d..f1fc14e53f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -1770,7 +1770,7 @@
mediatek,scp = <&scp>;
power-domains = <&spm MT8192_POWER_DOMAIN_VENC>;
clocks = <&vencsys CLK_VENC_SET1_VENC>;
- clock-names = "venc-set1";
+ clock-names = "venc_sel";
assigned-clocks = <&topckgen CLK_TOP_VENC_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL_D4>;
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
index 2d5e8f371b..a82d716f10 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r1.dts
@@ -23,3 +23,7 @@
&ts_10 {
status = "okay";
};
+
+&watchdog {
+ /delete-property/ mediatek,disable-extrst;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
index 2586c32ce6..2fe20e0dad 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r2.dts
@@ -43,3 +43,7 @@
&ts_10 {
status = "okay";
};
+
+&watchdog {
+ /delete-property/ mediatek,disable-extrst;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
index f54f9477b9..dd294ca981 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry-tomato-r3.dts
@@ -44,3 +44,7 @@
&ts_10 {
status = "okay";
};
+
+&watchdog {
+ /delete-property/ mediatek,disable-extrst;
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
index 69c7f3954a..4127cb84eb 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts
@@ -128,6 +128,7 @@
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-controller;
+ #interrupt-cells = <1>;
interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
diff --git a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
index ea13c4a702..81a82933e3 100644
--- a/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra234-p3737-0000+p3701-0000.dts
@@ -175,7 +175,7 @@
status = "okay";
phy-handle = <&mgbe0_phy>;
- phy-mode = "usxgmii";
+ phy-mode = "10gbase-r";
mdio {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/ipq6018.dtsi b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
index 0b1330b521..cf4e501c84 100644
--- a/arch/arm64/boot/dts/qcom/ipq6018.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq6018.dtsi
@@ -759,10 +759,10 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
+ interrupt-map = <0 0 0 1 &intc 0 0 0 75 IRQ_TYPE_LEVEL_HIGH>, /* int_a */
+ <0 0 0 2 &intc 0 0 0 78 IRQ_TYPE_LEVEL_HIGH>, /* int_b */
+ <0 0 0 3 &intc 0 0 0 79 IRQ_TYPE_LEVEL_HIGH>, /* int_c */
+ <0 0 0 4 &intc 0 0 0 83 IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
<&gcc GCC_PCIE0_AXI_M_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index 2f275c84e5..b33145b756 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -806,13 +806,13 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 142
+ interrupt-map = <0 0 0 1 &intc 0 0 142
IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 143
+ <0 0 0 2 &intc 0 0 143
IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 144
+ <0 0 0 3 &intc 0 0 144
IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 145
+ <0 0 0 4 &intc 0 0 145
IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE1_AXI_CLK>,
@@ -868,13 +868,13 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc 0 75
+ interrupt-map = <0 0 0 1 &intc 0 0 75
IRQ_TYPE_LEVEL_HIGH>, /* int_a */
- <0 0 0 2 &intc 0 78
+ <0 0 0 2 &intc 0 0 78
IRQ_TYPE_LEVEL_HIGH>, /* int_b */
- <0 0 0 3 &intc 0 79
+ <0 0 0 3 &intc 0 0 79
IRQ_TYPE_LEVEL_HIGH>, /* int_c */
- <0 0 0 4 &intc 0 83
+ <0 0 0 4 &intc 0 0 83
IRQ_TYPE_LEVEL_HIGH>; /* int_d */
clocks = <&gcc GCC_SYS_NOC_PCIE0_AXI_CLK>,
diff --git a/arch/arm64/boot/dts/qcom/qcm2290.dtsi b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
index d46e591e72..40a8506553 100644
--- a/arch/arm64/boot/dts/qcom/qcm2290.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcm2290.dtsi
@@ -418,6 +418,11 @@
#hwlock-cells = <1>;
};
+ tcsr_regs: syscon@3c0000 {
+ compatible = "qcom,qcm2290-tcsr", "syscon";
+ reg = <0x0 0x003c0000 0x0 0x40000>;
+ };
+
tlmm: pinctrl@500000 {
compatible = "qcom,qcm2290-tlmm";
reg = <0x0 0x00500000 0x0 0x300000>;
@@ -665,6 +670,8 @@
#phy-cells = <0>;
+ qcom,tcsr-reg = <&tcsr_regs 0xb244>;
+
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sa8540p.dtsi b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
index 96b2c59ad0..23888029cc 100644
--- a/arch/arm64/boot/dts/qcom/sa8540p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8540p.dtsi
@@ -168,6 +168,9 @@
};
&gpucc {
+ /* SA8295P and SA8540P don't provide gfx.lvl */
+ /delete-property/ power-domains;
+
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
index 84de20c88a..e456c9512f 100644
--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -2147,8 +2147,16 @@
ranges = <0x01000000 0x0 0x00000000 0x0 0x40200000 0x0 0x100000>,
<0x02000000 0x0 0x40300000 0x0 0x40300000 0x0 0x1fd00000>;
- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi";
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0", "msi1", "msi2", "msi3",
+ "msi4", "msi5", "msi6", "msi7";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
interrupt-map = <0 0 0 1 &intc 0 0 0 434 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
index b389d49d3e..6760f6a340 100644
--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
@@ -289,7 +289,7 @@
BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
compatible = "arm,idle-state";
arm,psci-suspend-param = <0x40000004>;
- entry-latency-us = <241>;
+ entry-latency-us = <2411>;
exit-latency-us = <1461>;
min-residency-us = <4488>;
local-timer-stop;
@@ -297,7 +297,15 @@
};
domain-idle-states {
- CLUSTER_SLEEP_0: cluster-sleep-0 {
+ CLUSTER_SLEEP_APSS_OFF: cluster-sleep-0 {
+ compatible = "domain-idle-state";
+ arm,psci-suspend-param = <0x41000044>;
+ entry-latency-us = <3300>;
+ exit-latency-us = <3300>;
+ min-residency-us = <6000>;
+ };
+
+ CLUSTER_SLEEP_AOSS_SLEEP: cluster-sleep-1 {
compatible = "domain-idle-state";
arm,psci-suspend-param = <0x4100a344>;
entry-latency-us = <3263>;
@@ -581,7 +589,7 @@
CLUSTER_PD: power-domain-cpu-cluster0 {
#power-domain-cells = <0>;
- domain-idle-states = <&CLUSTER_SLEEP_0>;
+ domain-idle-states = <&CLUSTER_SLEEP_APSS_OFF &CLUSTER_SLEEP_AOSS_SLEEP>;
};
};
@@ -781,6 +789,7 @@
clock-names = "bi_tcxo",
"bi_tcxo_ao",
"sleep_clk";
+ power-domains = <&rpmhpd SC8180X_CX>;
};
qupv3_id_0: geniqup@8c0000 {
@@ -2714,10 +2723,8 @@
"core",
"vsync";
- assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>,
- <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
- assigned-clock-rates = <460000000>,
- <19200000>;
+ assigned-clocks = <&dispcc DISP_CC_MDSS_VSYNC_CLK>;
+ assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd SC8180X_MMCX>;
@@ -3177,7 +3184,7 @@
<&dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "aux", "cfg_ahb";
- power-domains = <&dispcc MDSS_GDSC>;
+ power-domains = <&rpmhpd SC8180X_MX>;
#clock-cells = <1>;
#phy-cells = <0>;
@@ -3203,6 +3210,7 @@
"edp_phy_pll_link_clk",
"edp_phy_pll_vco_div_clk";
power-domains = <&rpmhpd SC8180X_MMCX>;
+ required-opps = <&rpmhpd_opp_low_svs>;
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -3241,7 +3249,7 @@
aoss_qmp: power-controller@c300000 {
compatible = "qcom,sc8180x-aoss-qmp", "qcom,aoss-qmp";
- reg = <0x0 0x0c300000 0x0 0x100000>;
+ reg = <0x0 0x0c300000 0x0 0x400>;
interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
mboxes = <&apss_shared 0>;
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
index f2055899ae..a993ad15ea 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
@@ -721,6 +721,8 @@
};
&pcie4 {
+ max-link-speed = <2>;
+
perst-gpios = <&tlmm 141 GPIO_ACTIVE_LOW>;
wake-gpios = <&tlmm 139 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 7e7bf3fb3b..0a891a0122 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -580,7 +580,7 @@
&pcie0 {
status = "okay";
perst-gpios = <&tlmm 35 GPIO_ACTIVE_LOW>;
- enable-gpio = <&tlmm 134 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&tlmm 134 GPIO_ACTIVE_HIGH>;
vddpe-3v3-supply = <&pcie0_3p3v_dual>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
index b523b5fff7..13d7e088aa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
@@ -485,13 +485,13 @@
};
&q6afedai {
- qi2s@22 {
- reg = <22>;
+ dai@22 {
+ reg = <QUATERNARY_MI2S_RX>;
qcom,sd-lines = <1>;
};
- qi2s@23 {
- reg = <23>;
+ dai@23 {
+ reg = <QUATERNARY_MI2S_TX>;
qcom,sd-lines = <0>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 9e594d21ec..88ed543de8 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -3353,8 +3353,8 @@
qcom,qmp = <&aoss_qmp>;
- power-domains = <&rpmhpd SDM845_CX>,
- <&rpmhpd SDM845_MX>;
+ power-domains = <&rpmhpd SDM845_LCX>,
+ <&rpmhpd SDM845_LMX>;
power-domain-names = "lcx", "lmx";
memory-region = <&slpi_mem>;
diff --git a/arch/arm64/boot/dts/qcom/sm6115.dtsi b/arch/arm64/boot/dts/qcom/sm6115.dtsi
index 839c603512..87cbc4e8b1 100644
--- a/arch/arm64/boot/dts/qcom/sm6115.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm6115.dtsi
@@ -591,6 +591,11 @@
#hwlock-cells = <1>;
};
+ tcsr_regs: syscon@3c0000 {
+ compatible = "qcom,sm6115-tcsr", "syscon";
+ reg = <0x0 0x003c0000 0x0 0x40000>;
+ };
+
tlmm: pinctrl@500000 {
compatible = "qcom,sm6115-tlmm";
reg = <0x0 0x00500000 0x0 0x400000>,
@@ -856,6 +861,8 @@
#phy-cells = <0>;
+ qcom,tcsr-reg = <&tcsr_regs 0xb244>;
+
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index 3221478663..2678eac1ec 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -1878,8 +1878,8 @@
phys = <&pcie0_phy>;
phy-names = "pciephy";
- perst-gpio = <&tlmm 35 GPIO_ACTIVE_HIGH>;
- enable-gpio = <&tlmm 37 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&tlmm 37 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
pinctrl-0 = <&pcie0_default_state>;
@@ -1972,7 +1972,7 @@
phys = <&pcie1_phy>;
phy-names = "pciephy";
- perst-gpio = <&tlmm 102 GPIO_ACTIVE_HIGH>;
+ perst-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;
enable-gpio = <&tlmm 104 GPIO_ACTIVE_HIGH>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
index 20153d08ed..ff346be5c9 100644
--- a/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
+++ b/arch/arm64/boot/dts/qcom/sm8450-hdk.dts
@@ -930,8 +930,8 @@
"TX DMIC3", "MIC BIAS1",
"TX SWR_INPUT0", "ADC1_OUTPUT",
"TX SWR_INPUT1", "ADC2_OUTPUT",
- "TX SWR_INPUT2", "ADC3_OUTPUT",
- "TX SWR_INPUT3", "ADC4_OUTPUT";
+ "TX SWR_INPUT0", "ADC3_OUTPUT",
+ "TX SWR_INPUT1", "ADC4_OUTPUT";
wcd-playback-dai-link {
link-name = "WCD Playback";
diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi
index f82480570d..9d06fb0d4d 100644
--- a/arch/arm64/boot/dts/qcom/sm8450.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi
@@ -1026,6 +1026,12 @@
pinctrl-names = "default";
pinctrl-0 = <&qup_uart20_default>;
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
status = "disabled";
};
@@ -1418,6 +1424,12 @@
pinctrl-names = "default";
pinctrl-0 = <&qup_uart7_tx>, <&qup_uart7_rx>;
interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ interconnects = <&clk_virt MASTER_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS
+ &clk_virt SLAVE_QUP_CORE_2 QCOM_ICC_TAG_ALWAYS>,
+ <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ALWAYS
+ &config_noc SLAVE_QUP_2 QCOM_ICC_TAG_ALWAYS>;
+ interconnect-names = "qup-core",
+ "qup-config";
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
index 9a70875028..3098bb6b93 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-mtp.dts
@@ -745,7 +745,7 @@
wcd_tx: codec@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
- qcom,tx-port-mapping = <1 1 2 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
index eef811def3..ad3d7ac29c 100644
--- a/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
+++ b/arch/arm64/boot/dts/qcom/sm8550-qrd.dts
@@ -842,7 +842,7 @@
wcd_tx: codec@0,3 {
compatible = "sdw20217010d00";
reg = <0 3>;
- qcom,tx-port-mapping = <1 1 2 3>;
+ qcom,tx-port-mapping = <2 2 3 4>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sm8550.dtsi b/arch/arm64/boot/dts/qcom/sm8550.dtsi
index e15564ed54..d7e68e0f57 100644
--- a/arch/arm64/boot/dts/qcom/sm8550.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8550.dtsi
@@ -3047,7 +3047,7 @@
spmi_bus: spmi@c400000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0 0x0c400000 0 0x3000>,
- <0 0x0c500000 0 0x4000000>,
+ <0 0x0c500000 0 0x400000>,
<0 0x0c440000 0 0x80000>,
<0 0x0c4c0000 0 0x20000>,
<0 0x0c42d000 0 0x4000>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index 4e67a03564..504ac8c93f 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -658,7 +658,7 @@
avb0: ethernet@e6800000 {
compatible = "renesas,etheravb-r8a779a0",
"renesas,etheravb-rcar-gen4";
- reg = <0 0xe6800000 0 0x800>;
+ reg = <0 0xe6800000 0 0x1000>;
interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
@@ -706,7 +706,7 @@
avb1: ethernet@e6810000 {
compatible = "renesas,etheravb-r8a779a0",
"renesas,etheravb-rcar-gen4";
- reg = <0 0xe6810000 0 0x800>;
+ reg = <0 0xe6810000 0 0x1000>;
interrupts = <GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 283 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index d3d25e077c..d767759520 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -161,11 +161,6 @@
};
};
- psci {
- compatible = "arm,psci-1.0", "arm,psci-0.2";
- method = "smc";
- };
-
extal_clk: extal {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -185,13 +180,24 @@
interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
- /* External SCIF clock - to be overridden by boards that provide it */
+ psci {
+ compatible = "arm,psci-1.0", "arm,psci-0.2";
+ method = "smc";
+ };
+
+ /* External SCIF clocks - to be overridden by boards that provide them */
scif_clk: scif {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <0>;
};
+ scif_clk2: scif2 {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <0>;
+ };
+
soc: soc {
compatible = "simple-bus";
interrupt-parent = <&gic>;
@@ -681,7 +687,7 @@
interrupts = <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 516>,
<&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
- <&scif_clk>;
+ <&scif_clk2>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac0 0x35>, <&dmac0 0x34>,
<&dmac1 0x35>, <&dmac1 0x34>;
@@ -761,7 +767,7 @@
avb0: ethernet@e6800000 {
compatible = "renesas,etheravb-r8a779g0",
"renesas,etheravb-rcar-gen4";
- reg = <0 0xe6800000 0 0x800>;
+ reg = <0 0xe6800000 0 0x1000>;
interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
@@ -808,7 +814,7 @@
avb1: ethernet@e6810000 {
compatible = "renesas,etheravb-r8a779g0",
"renesas,etheravb-rcar-gen4";
- reg = <0 0xe6810000 0 0x800>;
+ reg = <0 0xe6810000 0 0x1000>;
interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 362 IRQ_TYPE_LEVEL_HIGH>,
@@ -1057,7 +1063,7 @@
interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 705>,
<&cpg CPG_CORE R8A779G0_CLK_SASYNCPERD1>,
- <&scif_clk>;
+ <&scif_clk2>;
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac0 0x59>, <&dmac0 0x58>,
<&dmac1 0x59>, <&dmac1 0x58>;
@@ -1777,6 +1783,37 @@
};
};
+ mmc0: mmc@ee140000 {
+ compatible = "renesas,sdhi-r8a779g0",
+ "renesas,rcar-gen4-sdhi";
+ reg = <0 0xee140000 0 0x2000>;
+ interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 706>,
+ <&cpg CPG_CORE R8A779G0_CLK_SD0H>;
+ clock-names = "core", "clkh";
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 706>;
+ max-frequency = <200000000>;
+ iommus = <&ipmmu_ds0 32>;
+ status = "disabled";
+ };
+
+ rpc: spi@ee200000 {
+ compatible = "renesas,r8a779g0-rpc-if",
+ "renesas,rcar-gen4-rpc-if";
+ reg = <0 0xee200000 0 0x200>,
+ <0 0x08000000 0 0x04000000>,
+ <0 0xee208000 0 0x100>;
+ reg-names = "regs", "dirmap", "wbuf";
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 629>;
+ power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
+ resets = <&cpg 629>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
ipmmu_rt0: iommu@ee480000 {
compatible = "renesas,ipmmu-r8a779g0",
"renesas,rcar-gen4-ipmmu-vmsa";
@@ -1886,37 +1923,6 @@
#iommu-cells = <1>;
};
- mmc0: mmc@ee140000 {
- compatible = "renesas,sdhi-r8a779g0",
- "renesas,rcar-gen4-sdhi";
- reg = <0 0xee140000 0 0x2000>;
- interrupts = <GIC_SPI 440 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 706>,
- <&cpg CPG_CORE R8A779G0_CLK_SD0H>;
- clock-names = "core", "clkh";
- power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
- resets = <&cpg 706>;
- max-frequency = <200000000>;
- iommus = <&ipmmu_ds0 32>;
- status = "disabled";
- };
-
- rpc: spi@ee200000 {
- compatible = "renesas,r8a779g0-rpc-if",
- "renesas,rcar-gen4-rpc-if";
- reg = <0 0xee200000 0 0x200>,
- <0 0x08000000 0 0x04000000>,
- <0 0xee208000 0 0x100>;
- reg-names = "regs", "dirmap", "wbuf";
- interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cpg CPG_MOD 629>;
- power-domains = <&sysc R8A779G0_PD_ALWAYS_ON>;
- resets = <&cpg 629>;
- #address-cells = <1>;
- #size-cells = <0>;
- status = "disabled";
- };
-
gic: interrupt-controller@f1000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
index 2ab231572d..b3f83d0ebc 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g043u.dtsi
@@ -109,7 +109,13 @@
<SOC_PERIPHERAL_IRQ(473) IRQ_TYPE_LEVEL_HIGH>,
<SOC_PERIPHERAL_IRQ(474) IRQ_TYPE_LEVEL_HIGH>,
<SOC_PERIPHERAL_IRQ(475) IRQ_TYPE_LEVEL_HIGH>,
- <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>;
+ <SOC_PERIPHERAL_IRQ(25) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(34) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(35) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(36) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(37) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(38) IRQ_TYPE_EDGE_RISING>,
+ <SOC_PERIPHERAL_IRQ(39) IRQ_TYPE_EDGE_RISING>;
interrupt-names = "nmi",
"irq0", "irq1", "irq2", "irq3",
"irq4", "irq5", "irq6", "irq7",
@@ -121,7 +127,9 @@
"tint20", "tint21", "tint22", "tint23",
"tint24", "tint25", "tint26", "tint27",
"tint28", "tint29", "tint30", "tint31",
- "bus-err";
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
+ "ec7tiovf-1";
clocks = <&cpg CPG_MOD R9A07G043_IA55_CLK>,
<&cpg CPG_MOD R9A07G043_IA55_PCLK>;
clock-names = "clk", "pclk";
diff --git a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
index 66f68fc2b2..081d8f49db 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g044.dtsi
@@ -905,7 +905,27 @@
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
+ "irq4", "irq5", "irq6", "irq7",
+ "tint0", "tint1", "tint2", "tint3",
+ "tint4", "tint5", "tint6", "tint7",
+ "tint8", "tint9", "tint10", "tint11",
+ "tint12", "tint13", "tint14", "tint15",
+ "tint16", "tint17", "tint18", "tint19",
+ "tint20", "tint21", "tint22", "tint23",
+ "tint24", "tint25", "tint26", "tint27",
+ "tint28", "tint29", "tint30", "tint31",
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
+ "ec7tiovf-1";
clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
<&cpg CPG_MOD R9A07G044_IA55_PCLK>;
clock-names = "clk", "pclk";
diff --git a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
index 1f1d481dc7..0d327464d2 100644
--- a/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a07g054.dtsi
@@ -912,7 +912,27 @@
<GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 39 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "nmi", "irq0", "irq1", "irq2", "irq3",
+ "irq4", "irq5", "irq6", "irq7",
+ "tint0", "tint1", "tint2", "tint3",
+ "tint4", "tint5", "tint6", "tint7",
+ "tint8", "tint9", "tint10", "tint11",
+ "tint12", "tint13", "tint14", "tint15",
+ "tint16", "tint17", "tint18", "tint19",
+ "tint20", "tint21", "tint22", "tint23",
+ "tint24", "tint25", "tint26", "tint27",
+ "tint28", "tint29", "tint30", "tint31",
+ "bus-err", "ec7tie1-0", "ec7tie2-0",
+ "ec7tiovf-0", "ec7tie1-1", "ec7tie2-1",
+ "ec7tiovf-1";
clocks = <&cpg CPG_MOD R9A07G054_IA55_CLK>,
<&cpg CPG_MOD R9A07G054_IA55_PCLK>;
clock-names = "clk", "pclk";
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index 3885ef3454..50de17e4fb 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -234,6 +234,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio6>;
interrupts = <8 IRQ_TYPE_EDGE_FALLING>;
@@ -294,6 +295,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio6>;
interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
};
@@ -314,6 +316,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio7>;
interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
};
@@ -324,6 +327,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
+ #interrupt-cells = <2>;
interrupt-parent = <&gpio5>;
interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk356x.dtsi b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
index c19c0f1b37..92f96ec013 100644
--- a/arch/arm64/boot/dts/rockchip/rk356x.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk356x.dtsi
@@ -597,6 +597,7 @@
compatible = "rockchip,rk3568-vpu";
reg = <0x0 0xfdea0000 0x0 0x800>;
interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vdpu";
clocks = <&cru ACLK_VPU>, <&cru HCLK_VPU>;
clock-names = "aclk", "hclk";
iommus = <&vdpu_mmu>;
@@ -1123,7 +1124,7 @@
dmas = <&dmac1 4>, <&dmac1 5>;
dma-names = "tx", "rx";
resets = <&cru SRST_M_I2S2_2CH>;
- reset-names = "m";
+ reset-names = "tx-m";
rockchip,grf = <&grf>;
pinctrl-names = "default";
pinctrl-0 = <&i2s2m0_sclktx
diff --git a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
index b9d789d578..bbbe00bcd1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3588-evb1-v10.dts
@@ -351,6 +351,7 @@
<&rk806_dvs2_null>, <&rk806_dvs3_null>;
pinctrl-names = "default";
spi-max-frequency = <1000000>;
+ system-power-controller;
vcc1-supply = <&vcc5v0_sys>;
vcc2-supply = <&vcc5v0_sys>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
index f9f9749848..1d262cd54c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
@@ -1589,7 +1589,6 @@
dmas = <&dmac1 0>, <&dmac1 1>;
dma-names = "tx", "rx";
power-domains = <&power RK3588_PD_AUDIO>;
- rockchip,trcm-sync-tx-only;
pinctrl-names = "default";
pinctrl-0 = <&i2s2m1_lrck
&i2s2m1_sclk
@@ -1610,7 +1609,6 @@
dmas = <&dmac1 2>, <&dmac1 3>;
dma-names = "tx", "rx";
power-domains = <&power RK3588_PD_AUDIO>;
- rockchip,trcm-sync-tx-only;
pinctrl-names = "default";
pinctrl-0 = <&i2s3_lrck
&i2s3_sclk
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 77a347f9f4..c1513f0fa4 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -57,6 +57,7 @@ dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am654-gp-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am654-evm.dtb
dtb-$(CONFIG_ARCH_K3) += k3-am654-idk.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board-rocktech-rk101-panel.dtbo
# Boards with J7200 SoC
k3-j7200-evm-dtbs := k3-j7200-common-proc-board.dtb k3-j7200-evm-quad-port-eth-exp.dtbo
diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
index e5c64c86d1..2f318c5287 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi
@@ -623,6 +623,8 @@
interrupt-names = "host", "peripheral";
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
};
};
@@ -646,6 +648,8 @@
interrupt-names = "host", "peripheral";
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,usb2-gadget-lpm-disable;
+ snps,usb2-lpm-disable;
};
};
@@ -753,9 +757,10 @@
<0x00 0x30207000 0x00 0x1000>, /* ovr1 */
<0x00 0x30208000 0x00 0x1000>, /* ovr2 */
<0x00 0x3020a000 0x00 0x1000>, /* vp1: Used for OLDI */
- <0x00 0x3020b000 0x00 0x1000>; /* vp2: Used as DPI Out */
+ <0x00 0x3020b000 0x00 0x1000>, /* vp2: Used as DPI Out */
+ <0x00 0x30201000 0x00 0x1000>; /* common1 */
reg-names = "common", "vidl1", "vid",
- "ovr1", "ovr2", "vp1", "vp2";
+ "ovr1", "ovr2", "vp1", "vp2", "common1";
power-domains = <&k3_pds 186 TI_SCI_PD_EXCLUSIVE>;
clocks = <&k3_clks 186 6>,
<&dss_vp1_clk>,
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi
index c4b0b91d70..14eb9ba836 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-mcu.dtsi
@@ -187,6 +187,8 @@
ranges = <0x79000000 0x00 0x79000000 0x8000>,
<0x79020000 0x00 0x79020000 0x8000>;
power-domains = <&k3_pds 7 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
+
mcu_r5fss0_core0: r5f@79000000 {
compatible = "ti,am62-r5f";
reg = <0x79000000 0x00008000>,
diff --git a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
index 19f42b3939..10a7059b2d 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p-wakeup.dtsi
@@ -78,6 +78,7 @@
ranges = <0x78000000 0x00 0x78000000 0x8000>,
<0x78100000 0x00 0x78100000 0x8000>;
power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+ status = "disabled";
wkup_r5fss0_core0: r5f@78000000 {
compatible = "ti,am62-r5f";
diff --git a/arch/arm64/boot/dts/ti/k3-am62p.dtsi b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
index 84ffe7b9dc..4f22b5d9fb 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62p.dtsi
@@ -71,7 +71,7 @@
<0x00 0x43600000 0x00 0x43600000 0x00 0x00010000>, /* SA3 sproxy data */
<0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */
<0x00 0x44860000 0x00 0x44860000 0x00 0x00040000>, /* SA3 sproxy config */
- <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMSS */
+ <0x00 0x48000000 0x00 0x48000000 0x00 0x06408000>, /* DMSS */
<0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 DAT1 */
<0x00 0x70000000 0x00 0x70000000 0x00 0x00010000>, /* OCSRAM */
<0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */
diff --git a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
index f377eadef0..04e51934d2 100644
--- a/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am62p5-sk.dts
@@ -445,6 +445,10 @@
};
&cpsw3g_mdio {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mdio1_pins_default>;
+ status = "okay";
+
cpsw3g_phy0: ethernet-phy@0 {
reg = <0>;
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
index 0be642bc1b..45042216e5 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -623,6 +623,10 @@
ti,otap-del-sel-mmc-hs = <0x0>;
ti,otap-del-sel-ddr52 = <0x6>;
ti,otap-del-sel-hs200 = <0x7>;
+ ti,itap-del-sel-legacy = <0x10>;
+ ti,itap-del-sel-mmc-hs = <0xa>;
+ ti,itap-del-sel-ddr52 = <0x3>;
+ status = "disabled";
};
sdhci1: mmc@fa00000 {
@@ -634,13 +638,18 @@
clock-names = "clk_ahb", "clk_xin";
ti,trm-icp = <0x2>;
ti,otap-del-sel-legacy = <0x0>;
- ti,otap-del-sel-sd-hs = <0xf>;
+ ti,otap-del-sel-sd-hs = <0x0>;
ti,otap-del-sel-sdr12 = <0xf>;
ti,otap-del-sel-sdr25 = <0xf>;
ti,otap-del-sel-sdr50 = <0xc>;
ti,otap-del-sel-sdr104 = <0x6>;
ti,otap-del-sel-ddr50 = <0x9>;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
ti,clkbuf-sel = <0x7>;
+ status = "disabled";
};
cpsw3g: ethernet@8000000 {
diff --git a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
index f87f09d83c..b8f844f667 100644
--- a/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am64-phycore-som.dtsi
@@ -211,6 +211,7 @@
};
&sdhci0 {
+ status = "okay";
bus-width = <8>;
non-removable;
ti,driver-strength-ohm = <50>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
index 4dba189410..256606be56 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -487,17 +487,19 @@
};
};
+/* eMMC */
&sdhci0 {
- /* emmc */
+ status = "okay";
bus-width = <8>;
non-removable;
ti,driver-strength-ohm = <50>;
disable-wp;
};
+/* SD/MMC */
&sdhci1 {
- /* SD/MMC */
bootph-all;
+ status = "okay";
vmmc-supply = <&vdd_mmc1>;
pinctrl-names = "default";
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
index 9175e96842..53b64e5541 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-phyboard-electra-rdk.dts
@@ -264,6 +264,7 @@
};
&sdhci1 {
+ status = "okay";
vmmc-supply = <&vcc_3v3_mmc>;
pinctrl-names = "default";
pinctrl-0 = <&main_mmc1_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
index f29c8a9b59..bffbd234f7 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -439,6 +439,7 @@
};
&sdhci0 {
+ status = "okay";
vmmc-supply = <&wlan_en>;
bus-width = <4>;
non-removable;
@@ -458,9 +459,10 @@
};
};
+/* SD/MMC */
&sdhci1 {
- /* SD/MMC */
bootph-all;
+ status = "okay";
vmmc-supply = <&vdd_mmc1>;
pinctrl-names = "default";
bus-width = <4>;
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
index d95d80076a..55102d35ce 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl-mbax4xxl.dts
@@ -425,7 +425,6 @@
ti,driver-strength-ohm = <50>;
ti,fails-without-test-cd;
/* Enabled by overlay */
- status = "disabled";
};
&tscadc0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
index d82d4a9830..6c785eff7d 100644
--- a/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am642-tqma64xxl.dtsi
@@ -219,6 +219,7 @@
};
&sdhci0 {
+ status = "okay";
non-removable;
disable-wp;
no-sdio;
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index 29048d6577..fa2304a7cb 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -1013,9 +1013,10 @@
<0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
<0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
<0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
- <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+ <0x0 0x04a0b000 0x0 0x1000>, /* vp2 */
+ <0x0 0x04a01000 0x0 0x1000>; /* common1 */
reg-names = "common", "vidl1", "vid",
- "ovr1", "ovr2", "vp1", "vp2";
+ "ovr1", "ovr2", "vp1", "vp2", "common1";
ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
diff --git a/arch/arm64/boot/dts/ti/k3-am69-sk.dts b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
index 9868c7049b..fafa09d6dc 100644
--- a/arch/arm64/boot/dts/ti/k3-am69-sk.dts
+++ b/arch/arm64/boot/dts/ti/k3-am69-sk.dts
@@ -824,13 +824,9 @@
pinctrl-names = "default";
pinctrl-0 = <&dss_vout0_pins_default>;
assigned-clocks = <&k3_clks 218 2>,
- <&k3_clks 218 5>,
- <&k3_clks 218 14>,
- <&k3_clks 218 18>;
+ <&k3_clks 218 5>;
assigned-clock-parents = <&k3_clks 218 3>,
- <&k3_clks 218 7>,
- <&k3_clks 218 16>,
- <&k3_clks 218 22>;
+ <&k3_clks 218 7>;
};
&serdes_wiz4 {
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
index cee2b4b0eb..7a0c599f2b 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
@@ -91,24 +91,25 @@
};
&wkup_pmx0 {
+};
+
+&wkup_pmx2 {
mcu_uart0_pins_default: mcu-uart0-default-pins {
pinctrl-single,pins = <
- J721E_WKUP_IOPAD(0xf4, PIN_INPUT, 0) /* (D20) MCU_UART0_RXD */
- J721E_WKUP_IOPAD(0xf0, PIN_OUTPUT, 0) /* (D19) MCU_UART0_TXD */
- J721E_WKUP_IOPAD(0xf8, PIN_INPUT, 0) /* (E20) MCU_UART0_CTSn */
- J721E_WKUP_IOPAD(0xfc, PIN_OUTPUT, 0) /* (E21) MCU_UART0_RTSn */
+ J721E_WKUP_IOPAD(0x90, PIN_INPUT, 0) /* (E20) MCU_UART0_CTSn */
+ J721E_WKUP_IOPAD(0x94, PIN_OUTPUT, 0) /* (E21) MCU_UART0_RTSn */
+ J721E_WKUP_IOPAD(0x8c, PIN_INPUT, 0) /* (D20) MCU_UART0_RXD */
+ J721E_WKUP_IOPAD(0x88, PIN_OUTPUT, 0) /* (D19) MCU_UART0_TXD */
>;
};
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
- J721E_WKUP_IOPAD(0xb0, PIN_INPUT, 0) /* (B14) WKUP_UART0_RXD */
- J721E_WKUP_IOPAD(0xb4, PIN_OUTPUT, 0) /* (A14) WKUP_UART0_TXD */
+ J721E_WKUP_IOPAD(0x48, PIN_INPUT, 0) /* (B14) WKUP_UART0_RXD */
+ J721E_WKUP_IOPAD(0x4c, PIN_OUTPUT, 0) /* (A14) WKUP_UART0_TXD */
>;
};
-};
-&wkup_pmx2 {
mcu_cpsw_pins_default: mcu-cpsw-default-pins {
pinctrl-single,pins = <
J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_RGMII1_TX_CTL */
@@ -210,7 +211,6 @@
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&mcu_uart0_pins_default>;
- clock-frequency = <96000000>;
};
&main_uart0 {
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
index c6b85bbf9a..1ba1f53c72 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
@@ -190,8 +190,6 @@
&wkup_pmx2 {
wkup_uart0_pins_default: wkup-uart0-default-pins {
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (E25) WKUP_GPIO0_6.WKUP_UART0_CTSn */
- J721S2_WKUP_IOPAD(0x074, PIN_OUTPUT, 0) /* (F28) WKUP_GPIO0_7.WKUP_UART0_RTSn */
J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (D28) WKUP_UART0_RXD */
J721S2_WKUP_IOPAD(0x04c, PIN_OUTPUT, 0) /* (D27) WKUP_UART0_TXD */
>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
index 7254f3bd36..c9cf1dc04f 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-mcu-wakeup.dtsi
@@ -652,7 +652,7 @@
compatible = "ti,j7200-vtm";
reg = <0x00 0x42040000 0x0 0x350>,
<0x00 0x42050000 0x0 0x350>;
- power-domains = <&k3_pds 154 TI_SCI_PD_SHARED>;
+ power-domains = <&k3_pds 180 TI_SCI_PD_SHARED>;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
index f1f4c8634a..50210700fd 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-evm.dts
@@ -331,8 +331,6 @@
wkup_uart0_pins_default: wkup-uart0-default-pins {
bootph-all;
pinctrl-single,pins = <
- J721S2_WKUP_IOPAD(0x070, PIN_INPUT, 0) /* (L37) WKUP_GPIO0_6.WKUP_UART0_CTSn */
- J721S2_WKUP_IOPAD(0x074, PIN_INPUT, 0) /* (L36) WKUP_GPIO0_7.WKUP_UART0_RTSn */
J721S2_WKUP_IOPAD(0x048, PIN_INPUT, 0) /* (K35) WKUP_UART0_RXD */
J721S2_WKUP_IOPAD(0x04c, PIN_INPUT, 0) /* (K34) WKUP_UART0_TXD */
>;
diff --git a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
index adb5ea6b97..37fc48aae6 100644
--- a/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j784s4-mcu-wakeup.dtsi
@@ -616,7 +616,7 @@
compatible = "ti,j7200-vtm";
reg = <0x00 0x42040000 0x00 0x350>,
<0x00 0x42050000 0x00 0x350>;
- power-domains = <&k3_pds 154 TI_SCI_PD_SHARED>;
+ power-domains = <&k3_pds 243 TI_SCI_PD_SHARED>;
#thermal-sensor-cells = <1>;
};
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 7780d343ef..b67b89c54e 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -62,13 +62,13 @@ static inline void cpacr_restore(unsigned long cpacr)
* When we defined the maximum SVE vector length we defined the ABI so
* that the maximum vector length included all the reserved for future
* expansion bits in ZCR rather than those just currently defined by
- * the architecture. While SME follows a similar pattern the fact that
- * it includes a square matrix means that any allocations that attempt
- * to cover the maximum potential vector length (such as happen with
- * the regset used for ptrace) end up being extremely large. Define
- * the much lower actual limit for use in such situations.
+ * the architecture. Using this length to allocate worst size buffers
+ * results in excessively large allocations, and this effect is even
+ * more pronounced for SME due to ZA. Define more suitable VLs for
+ * these situations.
*/
-#define SME_VQ_MAX 16
+#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
+#define SME_VQ_MAX ((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
struct task_struct;
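
The new definitions derive the limits from the ZCR/SMCR LEN field layout instead of the ABI-wide constant. A small worked check of that arithmetic, as a sketch assuming the 4-bit LEN field at bit 0 that the masks describe (for comparison, the uapi SVE_VQ_MAX covering all reserved encodings is 512, so the worst-case regset sizing shrinks by roughly a factor of 32):

#include <stdio.h>

/* Assumed stand-ins for the ZCR_ELx/SMCR_ELx LEN field: 4 bits at bit 0. */
#define LEN_MASK	0xfUL
#define LEN_SHIFT	0

int main(void)
{
	unsigned long vq_max = (LEN_MASK >> LEN_SHIFT) + 1;	/* 16 vector quanta */

	/* One vector quantum is 128 bits, so this is a 2048-bit maximum VL. */
	printf("architectural VQ max = %lu (%lu-bit vectors)\n",
	       vq_max, vq_max * 128);
	return 0;
}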
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index b3f64144b5..c94c0f8c9a 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1499,7 +1499,8 @@ static const struct user_regset aarch64_regsets[] = {
#ifdef CONFIG_ARM64_SVE
[REGSET_SVE] = { /* Scalable Vector Extension */
.core_note_type = NT_ARM_SVE,
- .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
+ .n = DIV_ROUND_UP(SVE_PT_SIZE(ARCH_SVE_VQ_MAX,
+ SVE_PT_REGS_SVE),
SVE_VQ_BYTES),
.size = SVE_VQ_BYTES,
.align = SVE_VQ_BYTES,
diff --git a/arch/hexagon/kernel/vmlinux.lds.S b/arch/hexagon/kernel/vmlinux.lds.S
index 1140051a0c..1150b77fa2 100644
--- a/arch/hexagon/kernel/vmlinux.lds.S
+++ b/arch/hexagon/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@ SECTIONS
STABS_DEBUG
DWARF_DEBUG
ELF_DETAILS
+ .hexagon.attributes 0 : { *(.hexagon.attributes) }
DISCARDS
}
diff --git a/arch/loongarch/crypto/crc32-loongarch.c b/arch/loongarch/crypto/crc32-loongarch.c
index a49e507af3..3eebea3a7b 100644
--- a/arch/loongarch/crypto/crc32-loongarch.c
+++ b/arch/loongarch/crypto/crc32-loongarch.c
@@ -44,7 +44,6 @@ static u32 crc32_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32(crc, value, w);
p += sizeof(u32);
- len -= sizeof(u32);
}
if (len & sizeof(u16)) {
@@ -80,7 +79,6 @@ static u32 crc32c_loongarch_hw(u32 crc_, const u8 *p, unsigned int len)
CRC32C(crc, value, w);
p += sizeof(u32);
- len -= sizeof(u32);
}
if (len & sizeof(u16)) {
diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild
index 93783fa24f..dede0b422c 100644
--- a/arch/loongarch/include/asm/Kbuild
+++ b/arch/loongarch/include/asm/Kbuild
@@ -4,6 +4,7 @@ generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += rwsem.h
generic-y += segment.h
generic-y += user.h
diff --git a/arch/loongarch/include/asm/io.h b/arch/loongarch/include/asm/io.h
index c486c2341b..4a8adcca32 100644
--- a/arch/loongarch/include/asm/io.h
+++ b/arch/loongarch/include/asm/io.h
@@ -71,6 +71,8 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l) __memcpy_toio((c), (a), (l))
+#define __io_aw() mmiowb()
+
#include <asm-generic/io.h>
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h
index 9b36ac003f..8f290e5546 100644
--- a/arch/loongarch/include/asm/percpu.h
+++ b/arch/loongarch/include/asm/percpu.h
@@ -29,7 +29,12 @@ static inline void set_my_cpu_offset(unsigned long off)
__my_cpu_offset = off;
csr_write64(off, PERCPU_BASE_KS);
}
-#define __my_cpu_offset __my_cpu_offset
+
+#define __my_cpu_offset \
+({ \
+ __asm__ __volatile__("":"+r"(__my_cpu_offset)); \
+ __my_cpu_offset; \
+})
#define PERCPU_OP(op, asm_op, c_op) \
static __always_inline unsigned long __percpu_##op(void *ptr, \
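
The reworked __my_cpu_offset relies on an empty asm with a "+r" operand so the compiler has to re-read the per-CPU offset at each use instead of reusing a copy cached across a possible migration point. A minimal userspace sketch of that constraint-only barrier (illustrative names, not the kernel macro):

#include <stdio.h>

static unsigned long cpu_offset = 42;

/* The empty asm claims to read and modify 'off', so the compiler must
 * materialise the value here and cannot hoist or reuse an earlier load.
 */
static inline unsigned long read_offset(void)
{
	unsigned long off = cpu_offset;

	__asm__ __volatile__("" : "+r"(off));
	return off;
}

int main(void)
{
	printf("%lu\n", read_offset());
	return 0;
}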
diff --git a/arch/loongarch/include/asm/qspinlock.h b/arch/loongarch/include/asm/qspinlock.h
deleted file mode 100644
index 34f43f8ad5..0000000000
--- a/arch/loongarch/include/asm/qspinlock.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_QSPINLOCK_H
-#define _ASM_QSPINLOCK_H
-
-#include <asm-generic/qspinlock_types.h>
-
-#define queued_spin_unlock queued_spin_unlock
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
- compiletime_assert_atomic_type(lock->locked);
- c_sync();
- WRITE_ONCE(lock->locked, 0);
-}
-
-#include <asm-generic/qspinlock.h>
-
-#endif /* _ASM_QSPINLOCK_H */
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 701a233583..d14d0e37ad 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -60,6 +60,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->cp0_epc = val;
+ regs->cp0_cause &= ~CAUSEF_BD;
}
/* Query offset/name of register from its name/offset */
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 5937d5edab..000a28e1c5 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -97,26 +97,28 @@
* version takes two arguments: a src and destination register.
* However, the source and destination registers can not be
* the same register.
+ *
+ * We use add,l to avoid clobbering the C/B bits in the PSW.
*/
.macro tophys grvirt, grphys
- ldil L%(__PAGE_OFFSET), \grphys
- sub \grvirt, \grphys, \grphys
+ ldil L%(-__PAGE_OFFSET), \grphys
+ addl \grvirt, \grphys, \grphys
.endm
-
+
.macro tovirt grphys, grvirt
ldil L%(__PAGE_OFFSET), \grvirt
- add \grphys, \grvirt, \grvirt
+ addl \grphys, \grvirt, \grvirt
.endm
.macro tophys_r1 gr
- ldil L%(__PAGE_OFFSET), %r1
- sub \gr, %r1, \gr
+ ldil L%(-__PAGE_OFFSET), %r1
+ addl \gr, %r1, \gr
.endm
-
+
.macro tovirt_r1 gr
ldil L%(__PAGE_OFFSET), %r1
- add \gr, %r1, \gr
+ addl \gr, %r1, \gr
.endm
.macro delay value
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index 3c43baca7b..2aceebcd69 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -40,7 +40,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
" addc %0, %5, %0\n"
" addc %0, %3, %0\n"
"1: ldws,ma 4(%1), %3\n"
-" addib,< 0, %2, 1b\n"
+" addib,> -1, %2, 1b\n"
" addc %0, %3, %0\n"
"\n"
" extru %0, 31, 16, %4\n"
@@ -126,6 +126,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
+" depdi 0, 31, 32, %0\n"/* clear upper half of incoming checksum */
" ldd,ma 8(%1), %4\n" /* get 1st saddr word */
" ldd,ma 8(%2), %5\n" /* get 1st daddr word */
" add %4, %0, %0\n"
@@ -137,8 +138,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %4\n"/* copy upper half down */
" depdi 0, 31, 32, %0\n"/* clear upper half */
-" add %4, %0, %0\n" /* fold into 32-bits */
-" addc 0, %0, %0\n" /* add carry */
+" add,dc %4, %0, %0\n" /* fold into 32-bits, plus carry */
+" addc 0, %0, %0\n" /* add final carry */
#else
@@ -163,7 +164,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
" ldw,ma 4(%2), %7\n" /* 4th daddr */
" addc %6, %0, %0\n"
" addc %7, %0, %0\n"
-" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
+" addc %3, %0, %0\n" /* fold in proto+len */
+" addc 0, %0, %0\n" /* add carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len),
diff --git a/arch/parisc/include/asm/mman.h b/arch/parisc/include/asm/mman.h
new file mode 100644
index 0000000000..47c5a1991d
--- /dev/null
+++ b/arch/parisc/include/asm/mman.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMAN_H__
+#define __ASM_MMAN_H__
+
+#include <uapi/asm/mman.h>
+
+/* PARISC cannot allow mdwe as it needs writable stacks */
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return false;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+
+#endif /* __ASM_MMAN_H__ */
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index d1defb9ede..621a4b386a 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -78,7 +78,7 @@ asmlinkage void notrace __hot ftrace_function_trampoline(unsigned long parent,
#endif
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_FUNCTION_GRAPH_TRACER)
int ftrace_enable_ftrace_graph_caller(void)
{
static_key_enable(&ftrace_graph_enable.key);
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index c520e551a1..a8e75e5b88 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -169,6 +169,7 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop)
static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
{
unsigned long saddr = regs->ior;
+ unsigned long shift, temp1;
__u64 val = 0;
ASM_EXCEPTIONTABLE_VAR(ret);
@@ -180,25 +181,22 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
#ifdef CONFIG_64BIT
__asm__ __volatile__ (
-" depd,z %3,60,3,%%r19\n" /* r19=(ofs&7)*8 */
-" mtsp %4, %%sr1\n"
-" depd %%r0,63,3,%3\n"
-"1: ldd 0(%%sr1,%3),%0\n"
-"2: ldd 8(%%sr1,%3),%%r20\n"
-" subi 64,%%r19,%%r19\n"
-" mtsar %%r19\n"
-" shrpd %0,%%r20,%%sar,%0\n"
+" depd,z %2,60,3,%3\n" /* shift=(ofs&7)*8 */
+" mtsp %5, %%sr1\n"
+" depd %%r0,63,3,%2\n"
+"1: ldd 0(%%sr1,%2),%0\n"
+"2: ldd 8(%%sr1,%2),%4\n"
+" subi 64,%3,%3\n"
+" mtsar %3\n"
+" shrpd %0,%4,%%sar,%0\n"
"3: \n"
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1")
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1")
- : "=r" (val), "+r" (ret)
- : "0" (val), "r" (saddr), "r" (regs->isr)
- : "r19", "r20" );
+ : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
+ : "r" (regs->isr) );
#else
- {
- unsigned long shift, temp1;
__asm__ __volatile__ (
-" zdep %2,29,2,%3\n" /* r19=(ofs&3)*8 */
+" zdep %2,29,2,%3\n" /* shift=(ofs&3)*8 */
" mtsp %5, %%sr1\n"
" dep %%r0,31,2,%2\n"
"1: ldw 0(%%sr1,%2),%0\n"
@@ -214,7 +212,6 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop)
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1")
: "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1)
: "r" (regs->isr) );
- }
#endif
DPRINTF("val = 0x%llx\n", val);
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index a21f529c43..8359c06d92 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -12,9 +12,16 @@
#ifndef __ASSEMBLY__
/* Performance Monitor Registers */
#define mfpmr(rn) ({unsigned int rval; \
- asm volatile("mfpmr %0," __stringify(rn) \
+ asm volatile(".machine push; " \
+ ".machine e300; " \
+ "mfpmr %0," __stringify(rn) ";" \
+ ".machine pop; " \
: "=r" (rval)); rval;})
-#define mtpmr(rn, v) asm volatile("mtpmr " __stringify(rn) ",%0" : : "r" (v))
+#define mtpmr(rn, v) asm volatile(".machine push; " \
+ ".machine e300; " \
+ "mtpmr " __stringify(rn) ",%0; " \
+ ".machine pop; " \
+ : : "r" (v))
#endif /* __ASSEMBLY__ */
/* Freescale Book E Performance Monitor APU Registers */
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
index 4c69ece52a..59ed89890c 100644
--- a/arch/powerpc/include/asm/vmalloc.h
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -7,14 +7,14 @@
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#define arch_vmap_pud_supported arch_vmap_pud_supported
-static inline bool arch_vmap_pud_supported(pgprot_t prot)
+static __always_inline bool arch_vmap_pud_supported(pgprot_t prot)
{
/* HPT does not cope with large pages in the vmalloc area */
return radix_enabled();
}
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
-static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+static __always_inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
return radix_enabled();
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 0b5878c312..77364729a1 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -375,6 +375,18 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
if (IS_ENABLED(CONFIG_PPC64))
boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
+ if (nr_cpu_ids % nthreads != 0) {
+ set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
+ pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
+ nr_cpu_ids);
+ }
+
+ if (boot_cpuid >= nr_cpu_ids) {
+ set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
+ pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
+ boot_cpuid, nr_cpu_ids);
+ }
+
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
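
The added checks round nr_cpu_ids up to a whole number of SMT threads and, if needed, up past the boot CPU. With a power-of-two nthreads (which ALIGN() assumes) this is the usual mask rounding; a sketch with made-up numbers, leaving out the CONFIG_NR_CPUS clamp the kernel also applies:

#include <stdio.h>

#define ALIGN_POW2(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int nthreads = 8, nr_cpu_ids = 30, boot_cpuid = 33;

	if (nr_cpu_ids % nthreads)
		nr_cpu_ids = ALIGN_POW2(nr_cpu_ids, nthreads);		/* 30 -> 32 */
	if (boot_cpuid >= nr_cpu_ids)
		nr_cpu_ids = ALIGN_POW2(boot_cpuid + 1, nthreads);	/* 34 -> 40 */
	printf("nr_cpu_ids = %u\n", nr_cpu_ids);
	return 0;
}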
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 6eac63e79a..0ab65eeb93 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -76,7 +76,7 @@ obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
-CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
+CFLAGS_xor_vmx.o += -mhard-float -maltivec $(call cc-option,-mabi=altivec)
# Enable <altivec.h>
CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 27f18119fd..241551d128 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -695,6 +695,20 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO,
virt_to_phys(arg), HGPCI_REQ_BUFFER_SIZE);
+
+ /*
+ * ret value as 'H_PARAMETER' with detail_rc as 'GEN_BUF_TOO_SMALL',
+ * specifies that the current buffer size cannot accommodate
+ * all the information and a partial buffer returned.
+ * Since in this function we are only accessing data for a given starting index,
+ * we don't need to accommodate whole data and can get required count by
+ * accessing first entry data.
+ * Hence hcall fails only incase the ret value is other than H_SUCCESS or
+ * H_PARAMETER with detail_rc value as GEN_BUF_TOO_SMALL(0x1B).
+ */
+ if (ret == H_PARAMETER && be32_to_cpu(arg->params.detail_rc) == 0x1B)
+ ret = 0;
+
if (ret) {
pr_devel("hcall failed: 0x%lx\n", ret);
goto out;
@@ -759,6 +773,7 @@ static int h_gpci_event_init(struct perf_event *event)
{
u64 count;
u8 length;
+ unsigned long ret;
/* Not our event */
if (event->attr.type != event->pmu->type)
@@ -789,13 +804,23 @@ static int h_gpci_event_init(struct perf_event *event)
}
/* check if the request works... */
- if (single_gpci_request(event_get_request(event),
+ ret = single_gpci_request(event_get_request(event),
event_get_starting_index(event),
event_get_secondary_index(event),
event_get_counter_info_version(event),
event_get_offset(event),
length,
- &count)) {
+ &count);
+
+ /*
+ * ret value as H_AUTHORITY implies that partition is not permitted to retrieve
+ * performance information, and required to set
+ * "Enable Performance Information Collection" option.
+ */
+ if (ret == H_AUTHORITY)
+ return -EPERM;
+
+ if (ret) {
pr_devel("gpci hcall failed\n");
return -EINVAL;
}
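
Event init only needs the first counter entry, so a partial buffer (H_PARAMETER with detail_rc GEN_BUF_TOO_SMALL) is treated as success, while H_AUTHORITY is surfaced as a permission problem rather than a generic failure. A hedged sketch of that classification, with arbitrary stand-in return codes:

#include <errno.h>
#include <stdio.h>

enum { H_SUCCESS = 0, H_PARAMETER = 1, H_AUTHORITY = 2 };	/* stand-ins */
#define GEN_BUF_TOO_SMALL	0x1B

static int classify(int ret, unsigned int detail_rc)
{
	if (ret == H_PARAMETER && detail_rc == GEN_BUF_TOO_SMALL)
		return 0;	/* partial buffer: the first entry is still valid */
	if (ret == H_AUTHORITY)
		return -EPERM;	/* performance data collection not permitted */
	return ret ? -EINVAL : 0;
}

int main(void)
{
	printf("%d %d %d\n", classify(H_PARAMETER, GEN_BUF_TOO_SMALL),
	       classify(H_AUTHORITY, 0), classify(H_SUCCESS, 0));
	return 0;
}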
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 9c10aac40c..e265f026ee 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -99,9 +99,6 @@ static void __init linkstation_init_IRQ(void)
mpic_init(mpic);
}
-extern void avr_uart_configure(void);
-extern void avr_uart_send(const char);
-
static void __noreturn linkstation_restart(char *cmd)
{
local_irq_disable();
diff --git a/arch/powerpc/platforms/embedded6xx/mpc10x.h b/arch/powerpc/platforms/embedded6xx/mpc10x.h
index 5ad12023e5..ebc258fa48 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc10x.h
+++ b/arch/powerpc/platforms/embedded6xx/mpc10x.h
@@ -156,4 +156,7 @@ int mpc10x_disable_store_gathering(struct pci_controller *hose);
/* For MPC107 boards that use the built-in openpic */
void mpc10x_set_openpic(void);
+void avr_uart_configure(void);
+void avr_uart_send(const char c);
+
#endif /* __PPC_KERNEL_MPC10X_H */
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index 8bdae0caf2..84f101ec53 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -2,7 +2,7 @@
config PPC_PMAC
bool "Apple PowerMac based machines"
depends on PPC_BOOK3S && CPU_BIG_ENDIAN
- select ADB_CUDA if POWER_RESET && PPC32
+ select ADB_CUDA if POWER_RESET && ADB
select MPIC
select FORCE_PCI
select PPC_INDIRECT_PCI if PPC32
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index a44869e5ea..1bd1b0b49b 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -67,6 +67,7 @@ config PS3_VUART
config PS3_PS3AV
depends on PPC_PS3
tristate "PS3 AV settings driver" if PS3_ADVANCED
+ select VIDEO
select PS3_VUART
default y
help
diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
index 526c621b09..eea2041b27 100644
--- a/arch/powerpc/platforms/pseries/papr_platform_attributes.c
+++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
@@ -101,10 +101,12 @@ retry:
esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
- if (temp_buf)
+ if (temp_buf) {
buf = temp_buf;
- else
- return -ENOMEM;
+ } else {
+ ret = -ENOMEM;
+ goto out_buf;
+ }
goto retry;
}
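
The reshuffled error handling keeps the old pointer alive until krealloc() succeeds and routes the failure through the function's cleanup label, so the original buffer is no longer leaked by an early return. The userspace shape of the same idiom, as a sketch:

#include <stdlib.h>

/* Grow 'buf' without losing it: only overwrite the pointer after the
 * resize succeeds, and release the old allocation on the error path.
 */
static char *grow_or_free(char *buf, size_t new_size)
{
	char *tmp = realloc(buf, new_size);

	if (!tmp) {
		free(buf);
		return NULL;
	}
	return tmp;
}

int main(void)
{
	char *p = grow_or_free(malloc(16), 1024);

	free(p);
	return p ? 0 : 1;
}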
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 07387f9c13..72b87b08ab 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -123,6 +123,7 @@
interrupt-parent = <&gpio>;
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
+ #interrupt-cells = <2>;
onkey {
compatible = "dlg,da9063-onkey";
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 76b131e7bb..a66845e31a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -439,9 +439,11 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
#define pte_leaf_size(pte) (pte_napot(pte) ? \
napot_cont_size(napot_cont_order(pte)) :\
PAGE_SIZE)
+#endif
#ifdef CONFIG_NUMA_BALANCING
/*
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 5255f8134a..1ed769c87a 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -632,7 +632,7 @@ void unaligned_emulation_finish(void)
* accesses emulated since tasks requesting such control can run on any
* CPU.
*/
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
if (per_cpu(misaligned_access_speed, cpu) !=
RISCV_HWPROBE_MISALIGNED_EMULATED) {
return;
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 56254fa06f..4f26690302 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -166,5 +166,6 @@ int populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
}
}
+ this_cpu_ci->cpu_map_populated = true;
return 0;
}
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 39a91b0043..fc2dff1e4f 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -715,7 +715,7 @@ static int __init attr_event_init(void)
for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
ret = attr_event_init_one(attrs, i);
if (ret) {
- attr_event_free(attrs, i - 1);
+ attr_event_free(attrs, i);
return ret;
}
}
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index e7013a2e89..32b467c300 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -604,7 +604,7 @@ static int __init attr_event_init(void)
for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
ret = attr_event_init_one(attrs, i);
if (ret) {
- attr_event_free(attrs, i - 1);
+ attr_event_free(attrs, i);
return ret;
}
}
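
Both PAI fixes correct the same unwind bound: when creating entry i fails, entries 0..i-1 were allocated, so the teardown has to cover i entries; passing i - 1 left one of them allocated (assuming the helper frees the first n entries, which is what the new bound implies). A generic sketch of the pattern, not the s390 helpers themselves:

#include <stdlib.h>

static void free_first_n(void **tab, int n)
{
	for (int i = 0; i < n; i++)
		free(tab[i]);
}

int main(void)
{
	void *tab[8] = { 0 };

	for (int i = 0; i < 8; i++) {
		tab[i] = malloc(32);
		if (!tab[i]) {
			free_first_n(tab, i);	/* i entries exist, not i - 1 */
			return 1;
		}
	}
	free_first_n(tab, 8);
	return 0;
}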
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index caec7db6f9..b12a274cbb 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -22,7 +22,7 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
-LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
+LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
--hash-style=both --build-id=sha1 -melf_s390 -T
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index e3c9085f8f..caa4ebff8a 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -26,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
-ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
+ldflags-y := -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e0a88dcaf5..24a18e5ef6 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -210,13 +210,13 @@ void vtime_flush(struct task_struct *tsk)
virt_timer_expire();
steal = S390_lowcore.steal_timer;
- avg_steal = S390_lowcore.avg_steal_timer / 2;
+ avg_steal = S390_lowcore.avg_steal_timer;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
account_steal_time(cputime_to_nsecs(steal));
avg_steal += steal;
}
- S390_lowcore.avg_steal_timer = avg_steal;
+ S390_lowcore.avg_steal_timer = avg_steal / 2;
}
static u64 vtime_delta(void)
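
Moving the halving after the addition turns the update into avg' = (avg + steal) / 2 instead of avg' = avg / 2 + steal. For a constant steal value s the old recurrence settles at 2s while the new one settles at s, so the exported average now matches the samples. A quick numeric check of both fixed points:

#include <stdio.h>

int main(void)
{
	double old_avg = 0.0, new_avg = 0.0, s = 100.0;

	for (int i = 0; i < 64; i++) {
		old_avg = old_avg / 2.0 + s;	/* halve history, then add sample */
		new_avg = (new_avg + s) / 2.0;	/* add sample, then halve */
	}
	printf("old ~= %.1f, new ~= %.1f\n", old_avg, new_avg);	/* ~200 vs ~100 */
	return 0;
}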
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index 0a7ffcfd59..e2eed8f976 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -1,256 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* parport.h: sparc64 specific parport initialization and dma.
- *
- * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
- */
+#ifndef ___ASM_SPARC_PARPORT_H
+#define ___ASM_SPARC_PARPORT_H
-#ifndef _ASM_SPARC64_PARPORT_H
-#define _ASM_SPARC64_PARPORT_H 1
-
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include <asm/ebus_dma.h>
-#include <asm/ns87303.h>
-#include <asm/prom.h>
-
-#define PARPORT_PC_MAX_PORTS PARPORT_MAX
-
-/*
- * While sparc64 doesn't have an ISA DMA API, we provide something that looks
- * close enough to make parport_pc happy
- */
-#define HAS_DMA
-
-#ifdef CONFIG_PARPORT_PC_FIFO
-static DEFINE_SPINLOCK(dma_spin_lock);
-
-#define claim_dma_lock() \
-({ unsigned long flags; \
- spin_lock_irqsave(&dma_spin_lock, flags); \
- flags; \
-})
-
-#define release_dma_lock(__flags) \
- spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#if defined(__sparc__) && defined(__arch64__)
+#include <asm/parport_64.h>
+#else
+#include <asm-generic/parport.h>
+#endif
#endif
-static struct sparc_ebus_info {
- struct ebus_dma_info info;
- unsigned int addr;
- unsigned int count;
- int lock;
-
- struct parport *port;
-} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
-
-static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
-
-static inline int request_dma(unsigned int dmanr, const char *device_id)
-{
- if (dmanr >= PARPORT_PC_MAX_PORTS)
- return -EINVAL;
- if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
- return -EBUSY;
- return 0;
-}
-
-static inline void free_dma(unsigned int dmanr)
-{
- if (dmanr >= PARPORT_PC_MAX_PORTS) {
- printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
- return;
- }
- if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
- printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
- return;
- }
-}
-
-static inline void enable_dma(unsigned int dmanr)
-{
- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
-
- if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
- sparc_ebus_dmas[dmanr].addr,
- sparc_ebus_dmas[dmanr].count))
- BUG();
-}
-
-static inline void disable_dma(unsigned int dmanr)
-{
- ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
-}
-
-static inline void clear_dma_ff(unsigned int dmanr)
-{
- /* nothing */
-}
-
-static inline void set_dma_mode(unsigned int dmanr, char mode)
-{
- ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
-}
-
-static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
-{
- sparc_ebus_dmas[dmanr].addr = addr;
-}
-
-static inline void set_dma_count(unsigned int dmanr, unsigned int count)
-{
- sparc_ebus_dmas[dmanr].count = count;
-}
-
-static inline unsigned int get_dma_residue(unsigned int dmanr)
-{
- return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
-}
-
-static int ecpp_probe(struct platform_device *op)
-{
- unsigned long base = op->resource[0].start;
- unsigned long config = op->resource[1].start;
- unsigned long d_base = op->resource[2].start;
- unsigned long d_len;
- struct device_node *parent;
- struct parport *p;
- int slot, err;
-
- parent = op->dev.of_node->parent;
- if (of_node_name_eq(parent, "dma")) {
- p = parport_pc_probe_port(base, base + 0x400,
- op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
- op->dev.parent->parent, 0);
- if (!p)
- return -ENOMEM;
- dev_set_drvdata(&op->dev, p);
- return 0;
- }
-
- for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
- if (!test_and_set_bit(slot, dma_slot_map))
- break;
- }
- err = -ENODEV;
- if (slot >= PARPORT_PC_MAX_PORTS)
- goto out_err;
-
- spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
-
- d_len = (op->resource[2].end - d_base) + 1UL;
- sparc_ebus_dmas[slot].info.regs =
- of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
-
- if (!sparc_ebus_dmas[slot].info.regs)
- goto out_clear_map;
-
- sparc_ebus_dmas[slot].info.flags = 0;
- sparc_ebus_dmas[slot].info.callback = NULL;
- sparc_ebus_dmas[slot].info.client_cookie = NULL;
- sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
- strcpy(sparc_ebus_dmas[slot].info.name, "parport");
- if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
- goto out_unmap_regs;
-
- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
-
- /* Configure IRQ to Push Pull, Level Low */
- /* Enable ECP, set bit 2 of the CTR first */
- outb(0x04, base + 0x02);
- ns87303_modify(config, PCR,
- PCR_EPP_ENABLE |
- PCR_IRQ_ODRAIN,
- PCR_ECP_ENABLE |
- PCR_ECP_CLK_ENA |
- PCR_IRQ_POLAR);
-
- /* CTR bit 5 controls direction of port */
- ns87303_modify(config, PTR,
- 0, PTR_LPT_REG_DIR);
-
- p = parport_pc_probe_port(base, base + 0x400,
- op->archdata.irqs[0],
- slot,
- op->dev.parent,
- 0);
- err = -ENOMEM;
- if (!p)
- goto out_disable_irq;
-
- dev_set_drvdata(&op->dev, p);
-
- return 0;
-
-out_disable_irq:
- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
-
-out_unmap_regs:
- of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
-
-out_clear_map:
- clear_bit(slot, dma_slot_map);
-
-out_err:
- return err;
-}
-
-static int ecpp_remove(struct platform_device *op)
-{
- struct parport *p = dev_get_drvdata(&op->dev);
- int slot = p->dma;
-
- parport_pc_unregister_port(p);
-
- if (slot != PARPORT_DMA_NOFIFO) {
- unsigned long d_base = op->resource[2].start;
- unsigned long d_len;
-
- d_len = (op->resource[2].end - d_base) + 1UL;
-
- ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
- ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
- of_iounmap(&op->resource[2],
- sparc_ebus_dmas[slot].info.regs,
- d_len);
- clear_bit(slot, dma_slot_map);
- }
-
- return 0;
-}
-
-static const struct of_device_id ecpp_match[] = {
- {
- .name = "ecpp",
- },
- {
- .name = "parallel",
- .compatible = "ecpp",
- },
- {
- .name = "parallel",
- .compatible = "ns87317-ecpp",
- },
- {
- .name = "parallel",
- .compatible = "pnpALI,1533,3",
- },
- {},
-};
-
-static struct platform_driver ecpp_driver = {
- .driver = {
- .name = "ecpp",
- .of_match_table = ecpp_match,
- },
- .probe = ecpp_probe,
- .remove = ecpp_remove,
-};
-
-static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
-{
- return platform_driver_register(&ecpp_driver);
-}
-
-#endif /* !(_ASM_SPARC64_PARPORT_H */
diff --git a/arch/sparc/include/asm/parport_64.h b/arch/sparc/include/asm/parport_64.h
new file mode 100644
index 0000000000..0a7ffcfd59
--- /dev/null
+++ b/arch/sparc/include/asm/parport_64.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* parport.h: sparc64 specific parport initialization and dma.
+ *
+ * Copyright (C) 1999 Eddie C. Dost (ecd@skynet.be)
+ */
+
+#ifndef _ASM_SPARC64_PARPORT_H
+#define _ASM_SPARC64_PARPORT_H 1
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include <asm/ebus_dma.h>
+#include <asm/ns87303.h>
+#include <asm/prom.h>
+
+#define PARPORT_PC_MAX_PORTS PARPORT_MAX
+
+/*
+ * While sparc64 doesn't have an ISA DMA API, we provide something that looks
+ * close enough to make parport_pc happy
+ */
+#define HAS_DMA
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+static DEFINE_SPINLOCK(dma_spin_lock);
+
+#define claim_dma_lock() \
+({ unsigned long flags; \
+ spin_lock_irqsave(&dma_spin_lock, flags); \
+ flags; \
+})
+
+#define release_dma_lock(__flags) \
+ spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#endif
+
+static struct sparc_ebus_info {
+ struct ebus_dma_info info;
+ unsigned int addr;
+ unsigned int count;
+ int lock;
+
+ struct parport *port;
+} sparc_ebus_dmas[PARPORT_PC_MAX_PORTS];
+
+static DECLARE_BITMAP(dma_slot_map, PARPORT_PC_MAX_PORTS);
+
+static inline int request_dma(unsigned int dmanr, const char *device_id)
+{
+ if (dmanr >= PARPORT_PC_MAX_PORTS)
+ return -EINVAL;
+ if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
+ return -EBUSY;
+ return 0;
+}
+
+static inline void free_dma(unsigned int dmanr)
+{
+ if (dmanr >= PARPORT_PC_MAX_PORTS) {
+ printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
+ return;
+ }
+ if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
+ printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
+ return;
+ }
+}
+
+static inline void enable_dma(unsigned int dmanr)
+{
+ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 1);
+
+ if (ebus_dma_request(&sparc_ebus_dmas[dmanr].info,
+ sparc_ebus_dmas[dmanr].addr,
+ sparc_ebus_dmas[dmanr].count))
+ BUG();
+}
+
+static inline void disable_dma(unsigned int dmanr)
+{
+ ebus_dma_enable(&sparc_ebus_dmas[dmanr].info, 0);
+}
+
+static inline void clear_dma_ff(unsigned int dmanr)
+{
+ /* nothing */
+}
+
+static inline void set_dma_mode(unsigned int dmanr, char mode)
+{
+ ebus_dma_prepare(&sparc_ebus_dmas[dmanr].info, (mode != DMA_MODE_WRITE));
+}
+
+static inline void set_dma_addr(unsigned int dmanr, unsigned int addr)
+{
+ sparc_ebus_dmas[dmanr].addr = addr;
+}
+
+static inline void set_dma_count(unsigned int dmanr, unsigned int count)
+{
+ sparc_ebus_dmas[dmanr].count = count;
+}
+
+static inline unsigned int get_dma_residue(unsigned int dmanr)
+{
+ return ebus_dma_residue(&sparc_ebus_dmas[dmanr].info);
+}
+
+static int ecpp_probe(struct platform_device *op)
+{
+ unsigned long base = op->resource[0].start;
+ unsigned long config = op->resource[1].start;
+ unsigned long d_base = op->resource[2].start;
+ unsigned long d_len;
+ struct device_node *parent;
+ struct parport *p;
+ int slot, err;
+
+ parent = op->dev.of_node->parent;
+ if (of_node_name_eq(parent, "dma")) {
+ p = parport_pc_probe_port(base, base + 0x400,
+ op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
+ op->dev.parent->parent, 0);
+ if (!p)
+ return -ENOMEM;
+ dev_set_drvdata(&op->dev, p);
+ return 0;
+ }
+
+ for (slot = 0; slot < PARPORT_PC_MAX_PORTS; slot++) {
+ if (!test_and_set_bit(slot, dma_slot_map))
+ break;
+ }
+ err = -ENODEV;
+ if (slot >= PARPORT_PC_MAX_PORTS)
+ goto out_err;
+
+ spin_lock_init(&sparc_ebus_dmas[slot].info.lock);
+
+ d_len = (op->resource[2].end - d_base) + 1UL;
+ sparc_ebus_dmas[slot].info.regs =
+ of_ioremap(&op->resource[2], 0, d_len, "ECPP DMA");
+
+ if (!sparc_ebus_dmas[slot].info.regs)
+ goto out_clear_map;
+
+ sparc_ebus_dmas[slot].info.flags = 0;
+ sparc_ebus_dmas[slot].info.callback = NULL;
+ sparc_ebus_dmas[slot].info.client_cookie = NULL;
+ sparc_ebus_dmas[slot].info.irq = 0xdeadbeef;
+ strcpy(sparc_ebus_dmas[slot].info.name, "parport");
+ if (ebus_dma_register(&sparc_ebus_dmas[slot].info))
+ goto out_unmap_regs;
+
+ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 1);
+
+ /* Configure IRQ to Push Pull, Level Low */
+ /* Enable ECP, set bit 2 of the CTR first */
+ outb(0x04, base + 0x02);
+ ns87303_modify(config, PCR,
+ PCR_EPP_ENABLE |
+ PCR_IRQ_ODRAIN,
+ PCR_ECP_ENABLE |
+ PCR_ECP_CLK_ENA |
+ PCR_IRQ_POLAR);
+
+ /* CTR bit 5 controls direction of port */
+ ns87303_modify(config, PTR,
+ 0, PTR_LPT_REG_DIR);
+
+ p = parport_pc_probe_port(base, base + 0x400,
+ op->archdata.irqs[0],
+ slot,
+ op->dev.parent,
+ 0);
+ err = -ENOMEM;
+ if (!p)
+ goto out_disable_irq;
+
+ dev_set_drvdata(&op->dev, p);
+
+ return 0;
+
+out_disable_irq:
+ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+
+out_unmap_regs:
+ of_iounmap(&op->resource[2], sparc_ebus_dmas[slot].info.regs, d_len);
+
+out_clear_map:
+ clear_bit(slot, dma_slot_map);
+
+out_err:
+ return err;
+}
+
+static int ecpp_remove(struct platform_device *op)
+{
+ struct parport *p = dev_get_drvdata(&op->dev);
+ int slot = p->dma;
+
+ parport_pc_unregister_port(p);
+
+ if (slot != PARPORT_DMA_NOFIFO) {
+ unsigned long d_base = op->resource[2].start;
+ unsigned long d_len;
+
+ d_len = (op->resource[2].end - d_base) + 1UL;
+
+ ebus_dma_irq_enable(&sparc_ebus_dmas[slot].info, 0);
+ ebus_dma_unregister(&sparc_ebus_dmas[slot].info);
+ of_iounmap(&op->resource[2],
+ sparc_ebus_dmas[slot].info.regs,
+ d_len);
+ clear_bit(slot, dma_slot_map);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ecpp_match[] = {
+ {
+ .name = "ecpp",
+ },
+ {
+ .name = "parallel",
+ .compatible = "ecpp",
+ },
+ {
+ .name = "parallel",
+ .compatible = "ns87317-ecpp",
+ },
+ {
+ .name = "parallel",
+ .compatible = "pnpALI,1533,3",
+ },
+ {},
+};
+
+static struct platform_driver ecpp_driver = {
+ .driver = {
+ .name = "ecpp",
+ .of_match_table = ecpp_match,
+ },
+ .probe = ecpp_probe,
+ .remove = ecpp_remove,
+};
+
+static int parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+ return platform_driver_register(&ecpp_driver);
+}
+
+#endif /* !(_ASM_SPARC64_PARPORT_H */
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 8700a0e3b0..b2b639bee0 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -697,7 +697,7 @@ err1:
return err;
}
-static const struct of_device_id grpci1_of_match[] __initconst = {
+static const struct of_device_id grpci1_of_match[] = {
{
.name = "GAISLER_PCIFBRG",
},
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
index 60b6bdf776..ac2acd62a2 100644
--- a/arch/sparc/kernel/leon_pci_grpci2.c
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -889,7 +889,7 @@ err1:
return err;
}
-static const struct of_device_id grpci2_of_match[] __initconst = {
+static const struct of_device_id grpci2_of_match[] = {
{
.name = "GAISLER_GRPCI2",
},
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 17cdfdbf1f..149adc0947 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -279,7 +279,7 @@ static int __init setup_nmi_watchdog(char *str)
if (!strncmp(str, "panic", 5))
panic_on_timeout = 1;
- return 0;
+ return 1;
}
__setup("nmi_watchdog=", setup_nmi_watchdog);
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 136c78f28f..1bbf4335de 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -449,9 +449,8 @@ static __init int vdso_setup(char *s)
unsigned long val;
err = kstrtoul(s, 10, &val);
- if (err)
- return err;
- vdso_enabled = val;
- return 0;
+ if (!err)
+ vdso_enabled = val;
+ return 1;
}
__setup("vdso=", vdso_setup);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1566748f16..9241274858 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1539,19 +1539,6 @@ config AMD_MEM_ENCRYPT
This requires an AMD processor that supports Secure Memory
Encryption (SME).
-config AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT
- bool "Activate AMD Secure Memory Encryption (SME) by default"
- depends on AMD_MEM_ENCRYPT
- help
- Say yes to have system memory encrypted by default if running on
- an AMD processor that supports Secure Memory Encryption (SME).
-
- If set to Y, then the encryption of system memory can be
- deactivated with the mem_encrypt=off command line option.
-
- If set to N, then the encryption of system memory can be
- activated with the mem_encrypt=on command line option.
-
# Common NUMA Features
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
@@ -2609,6 +2596,17 @@ config GDS_FORCE_MITIGATION
If in doubt, say N.
+config MITIGATION_RFDS
+ bool "RFDS Mitigation"
+ depends on CPU_SUP_INTEL
+ default y
+ help
+ Enable mitigation for Register File Data Sampling (RFDS) by default.
+ RFDS is a hardware vulnerability which affects Intel Atom CPUs. It
+ allows unprivileged speculative access to stale data previously
+ stored in floating point, vector and integer registers.
+ See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
+
endif
config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/boot/compressed/efi_mixed.S b/arch/x86/boot/compressed/efi_mixed.S
index f4e22ef774..876fc6d46a 100644
--- a/arch/x86/boot/compressed/efi_mixed.S
+++ b/arch/x86/boot/compressed/efi_mixed.S
@@ -15,10 +15,12 @@
*/
#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
+#include <asm/setup.h>
.code64
.text
@@ -49,6 +51,11 @@ SYM_FUNC_START(startup_64_mixed_mode)
lea efi32_boot_args(%rip), %rdx
mov 0(%rdx), %edi
mov 4(%rdx), %esi
+
+ /* Switch to the firmware's stack */
+ movl efi32_boot_sp(%rip), %esp
+ andl $~7, %esp
+
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
mov 8(%rdx), %edx // saved bootparams pointer
test %edx, %edx
@@ -144,6 +151,7 @@ SYM_FUNC_END(__efi64_thunk)
SYM_FUNC_START(efi32_stub_entry)
call 1f
1: popl %ecx
+ leal (efi32_boot_args - 1b)(%ecx), %ebx
/* Clear BSS */
xorl %eax, %eax
@@ -158,6 +166,7 @@ SYM_FUNC_START(efi32_stub_entry)
popl %ecx
popl %edx
popl %esi
+ movl %esi, 8(%ebx)
jmp efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif
@@ -234,8 +243,6 @@ SYM_FUNC_END(efi_enter32)
*
* Arguments: %ecx image handle
* %edx EFI system table pointer
- * %esi struct bootparams pointer (or NULL when not using
- * the EFI handover protocol)
*
* Since this is the point of no return for ordinary execution, no registers
* are considered live except for the function parameters. [Note that the EFI
@@ -254,13 +261,25 @@ SYM_FUNC_START_LOCAL(efi32_entry)
/* Store firmware IDT descriptor */
sidtl (efi32_boot_idt - 1b)(%ebx)
+ /* Store firmware stack pointer */
+ movl %esp, (efi32_boot_sp - 1b)(%ebx)
+
/* Store boot arguments */
leal (efi32_boot_args - 1b)(%ebx), %ebx
movl %ecx, 0(%ebx)
movl %edx, 4(%ebx)
- movl %esi, 8(%ebx)
movb $0x0, 12(%ebx) // efi_is64
+ /*
+ * Allocate some memory for a temporary struct boot_params, which only
+ * needs the minimal pieces that startup_32() relies on.
+ */
+ subl $PARAM_SIZE, %esp
+ movl %esp, %esi
+ movl $PAGE_SIZE, BP_kernel_alignment(%esi)
+ movl $_end - 1b, BP_init_size(%esi)
+ subl $startup_32 - 1b, BP_init_size(%esi)
+
/* Disable paging */
movl %cr0, %eax
btrl $X86_CR0_PG_BIT, %eax
@@ -286,8 +305,7 @@ SYM_FUNC_START(efi32_pe_entry)
movl 8(%ebp), %ecx // image_handle
movl 12(%ebp), %edx // sys_table
- xorl %esi, %esi
- jmp efi32_entry // pass %ecx, %edx, %esi
+ jmp efi32_entry // pass %ecx, %edx
// no other registers remain live
2: popl %edi // restore callee-save registers
@@ -318,5 +336,6 @@ SYM_DATA_END(efi32_boot_idt)
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
+SYM_DATA_LOCAL(efi32_boot_sp, .long 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index eeec998657..d07be9d05c 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -14,7 +14,7 @@
#include <asm/processor.h>
enum cc_vendor cc_vendor __ro_after_init = CC_VENDOR_NONE;
-static u64 cc_mask __ro_after_init;
+u64 cc_mask __ro_after_init;
static bool noinstr intel_cc_platform_has(enum cc_attr attr)
{
@@ -148,8 +148,3 @@ u64 cc_mkdec(u64 val)
}
}
EXPORT_SYMBOL_GPL(cc_mkdec);
-
-__init void cc_set_mask(u64 mask)
-{
- cc_mask = mask;
-}
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index e24976593a..5365d6acbf 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -604,7 +604,6 @@ static void amd_pmu_cpu_dead(int cpu)
kfree(cpuhw->lbr_sel);
cpuhw->lbr_sel = NULL;
- amd_pmu_cpu_reset(cpu);
if (!x86_pmu.amd_nb_constraints)
return;
diff --git a/arch/x86/hyperv/hv_vtl.c b/arch/x86/hyperv/hv_vtl.c
index 96e6c51515..c8062975a5 100644
--- a/arch/x86/hyperv/hv_vtl.c
+++ b/arch/x86/hyperv/hv_vtl.c
@@ -12,10 +12,16 @@
#include <asm/i8259.h>
#include <asm/mshyperv.h>
#include <asm/realmode.h>
+#include <../kernel/smpboot.h>
extern struct boot_params boot_params;
static struct real_mode_header hv_vtl_real_mode_header;
+static bool __init hv_vtl_msi_ext_dest_id(void)
+{
+ return true;
+}
+
void __init hv_vtl_init_platform(void)
{
pr_info("Linux runs in Hyper-V Virtual Trust Level\n");
@@ -38,6 +44,8 @@ void __init hv_vtl_init_platform(void)
x86_platform.legacy.warm_reset = 0;
x86_platform.legacy.reserve_bios_regions = 0;
x86_platform.legacy.devices.pnpbios = 0;
+
+ x86_init.hyper.msi_ext_dest_id = hv_vtl_msi_ext_dest_id;
}
static inline u64 hv_vtl_system_desc_base(struct ldttss_desc *desc)
@@ -57,7 +65,7 @@ static void hv_vtl_ap_entry(void)
((secondary_startup_64_fn)secondary_startup_64)(&boot_params, &boot_params);
}
-static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
+static int hv_vtl_bringup_vcpu(u32 target_vp_index, int cpu, u64 eip_ignored)
{
u64 status;
int ret = 0;
@@ -71,7 +79,9 @@ static int hv_vtl_bringup_vcpu(u32 target_vp_index, u64 eip_ignored)
struct ldttss_desc *ldt;
struct desc_struct *gdt;
- u64 rsp = current->thread.sp;
+ struct task_struct *idle = idle_thread_get(cpu);
+ u64 rsp = (unsigned long)idle->thread.sp;
+
u64 rip = (u64)&hv_vtl_ap_entry;
native_store_gdt(&gdt_ptr);
@@ -198,7 +208,15 @@ static int hv_vtl_apicid_to_vp_id(u32 apic_id)
static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
{
- int vp_id;
+ int vp_id, cpu;
+
+ /* Find the logical CPU for the APIC ID */
+ for_each_present_cpu(cpu) {
+ if (arch_match_cpu_phys_id(cpu, apicid))
+ break;
+ }
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
pr_debug("Bringing up CPU with APIC ID %d in VTL2...\n", apicid);
vp_id = hv_vtl_apicid_to_vp_id(apicid);
@@ -212,7 +230,7 @@ static int hv_vtl_wakeup_secondary_cpu(u32 apicid, unsigned long start_eip)
return -EINVAL;
}
- return hv_vtl_bringup_vcpu(vp_id, start_eip);
+ return hv_vtl_bringup_vcpu(vp_id, cpu, start_eip);
}
int __init hv_vtl_early_init(void)
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index b1a98fa388..0e82074517 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -13,6 +13,7 @@
#include <asm/preempt.h>
#include <asm/asm.h>
#include <asm/gsseg.h>
+#include <asm/nospec-branch.h>
#ifndef CONFIG_X86_CMPXCHG64
extern void cmpxchg8b_emu(void);
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index fbcfec4dc4..ca8eed1d49 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -113,6 +113,20 @@
#endif
+#ifndef __ASSEMBLY__
+#ifndef __pic__
+static __always_inline __pure void *rip_rel_ptr(void *p)
+{
+ asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
+
+ return p;
+}
+#define RIP_REL_REF(var) (*(typeof(&(var)))rip_rel_ptr(&(var)))
+#else
+#define RIP_REL_REF(var) (var)
+#endif
+#endif
+
/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index 6ae2d16a76..21940ef8d2 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_COCO_H
#define _ASM_X86_COCO_H
+#include <asm/asm.h>
#include <asm/types.h>
enum cc_vendor {
@@ -11,9 +12,14 @@ enum cc_vendor {
};
extern enum cc_vendor cc_vendor;
+extern u64 cc_mask;
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
-void cc_set_mask(u64 mask);
+static inline void cc_set_mask(u64 mask)
+{
+ RIP_REL_REF(cc_mask) = mask;
+}
+
u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
#else
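
cc_set_mask() now assigns through RIP_REL_REF() so the store uses a RIP-relative address, which keeps it correct when called from early code not yet running at the kernel's link-time addresses. A userspace sketch of the same addressing trick (build with gcc -O2 -fno-pie -no-pie, mirroring the macro's #ifndef __pic__ guard; names here are illustrative):

#include <stdio.h>

static unsigned long demo_mask = 0x8000000000000000UL;

/* Always inlined with a constant address argument, so the "i" constraint
 * lets the address be formed by a RIP-relative lea rather than through an
 * absolute relocation.
 */
static __attribute__((always_inline)) inline void *rip_rel_ptr(void *p)
{
	asm("leaq %c1(%%rip), %0" : "=r"(p) : "i"(p));
	return p;
}

int main(void)
{
	printf("0x%lx\n", *(unsigned long *)rip_rel_ptr(&demo_mask));
	return 0;
}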
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index caf4cf2e10..0e4f2da9f6 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -499,4 +499,5 @@
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/crash_core.h b/arch/x86/include/asm/crash_core.h
index 76af98f4e8..041020da8d 100644
--- a/arch/x86/include/asm/crash_core.h
+++ b/arch/x86/include/asm/crash_core.h
@@ -39,4 +39,6 @@ static inline unsigned long crash_low_size_default(void)
#endif
}
+#define HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+
#endif /* _X86_CRASH_CORE_H */
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 359ada486f..b31eb9fd59 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -15,7 +15,8 @@
#include <linux/init.h>
#include <linux/cc_platform.h>
-#include <asm/bootparam.h>
+#include <asm/asm.h>
+struct boot_params;
#ifdef CONFIG_X86_MEM_ENCRYPT
void __init mem_encrypt_init(void);
@@ -58,6 +59,11 @@ void __init mem_encrypt_free_decrypted_mem(void);
void __init sev_es_init_vc_handling(void);
+static inline u64 sme_get_me_mask(void)
+{
+ return RIP_REL_REF(sme_me_mask);
+}
+
#define __bss_decrypted __section(".bss..decrypted")
#else /* !CONFIG_AMD_MEM_ENCRYPT */
@@ -89,6 +95,8 @@ early_set_mem_enc_dec_hypercall(unsigned long vaddr, unsigned long size, bool en
static inline void mem_encrypt_free_decrypted_mem(void) { }
+static inline u64 sme_get_me_mask(void) { return 0; }
+
#define __bss_decrypted
#endif /* CONFIG_AMD_MEM_ENCRYPT */
@@ -106,11 +114,6 @@ void add_encrypt_protection_map(void);
extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
-static inline u64 sme_get_me_mask(void)
-{
- return sme_me_mask;
-}
-
#endif /* __ASSEMBLY__ */
#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1d51e1850e..857839df66 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -165,6 +165,14 @@
* CPU is not vulnerable to Gather
* Data Sampling (GDS).
*/
+#define ARCH_CAP_RFDS_NO BIT(27) /*
+ * Not susceptible to Register
+ * File Data Sampling.
+ */
+#define ARCH_CAP_RFDS_CLEAR BIT(28) /*
+ * VERW clears CPU Register
+ * File.
+ */
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d15b35815e..4e33cc834b 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -271,11 +271,20 @@
.Lskip_rsb_\@:
.endm
+/*
+ * The CALL to srso_alias_untrain_ret() must be patched in directly at
+ * the spot where untraining must be done, i.e., srso_alias_untrain_ret()
+ * must be the target of a CALL instruction instead of indirectly
+ * jumping to a wrapper which then calls it. Therefore, this macro is
+ * called outside of __UNTRAIN_RET below, for the time being, until the
+ * kernel can support nested alternatives with arbitrary nesting.
+ */
+.macro CALL_UNTRAIN_RET
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
-#define CALL_UNTRAIN_RET "call entry_untrain_ret"
-#else
-#define CALL_UNTRAIN_RET ""
+ ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
+ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
#endif
+.endm
/*
* Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
@@ -291,8 +300,8 @@
.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
VALIDATE_UNRET_END
- ALTERNATIVE_3 "", \
- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ CALL_UNTRAIN_RET
+ ALTERNATIVE_2 "", \
"call entry_ibpb", \ibpb_feature, \
__stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
#endif
@@ -351,6 +360,8 @@ extern void retbleed_return_thunk(void);
static inline void retbleed_return_thunk(void) {}
#endif
+extern void srso_alias_untrain_ret(void);
+
#ifdef CONFIG_CPU_SRSO
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index d18e5c332c..1b93ff80b4 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -66,10 +66,14 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
* virt_addr_valid(kaddr) returns true.
*/
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
extern bool __virt_addr_valid(unsigned long kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((unsigned long) (kaddr))
+static __always_inline void *pfn_to_kaddr(unsigned long pfn)
+{
+ return __va(pfn << PAGE_SHIFT);
+}
+
static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
{
return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 5b4a1ce3d3..36f9057970 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -203,12 +203,12 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
unsigned long npages);
void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
unsigned long npages);
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
+void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 snp_get_unsupported_features(u64 status);
@@ -227,12 +227,12 @@ static inline void __init
early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
static inline void __init
early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned long npages) { }
-static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
static inline void snp_set_memory_shared(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_memory_private(unsigned long vaddr, unsigned long npages) { }
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
+static inline void snp_dmi_setup(void) { }
static inline int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
{
return -ENOTTY;
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index a800abb1a9..d8416b3bf8 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,11 +12,6 @@
/* image of the saved processor state */
struct saved_context {
- /*
- * On x86_32, all segment registers except gs are saved at kernel
- * entry in pt_regs.
- */
- u16 gs;
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
struct saved_msrs saved_msrs;
@@ -27,6 +22,11 @@ struct saved_context {
unsigned long tr;
unsigned long safety;
unsigned long return_address;
+ /*
+ * On x86_32, all segment registers except gs are saved at kernel
+ * entry in pt_regs.
+ */
+ u16 gs;
bool misc_enable_saved;
} __attribute__((packed));
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index ab60a71a8d..472f0263db 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -4,6 +4,7 @@
#include <linux/seqlock.h>
#include <uapi/asm/vsyscall.h>
+#include <asm/page_types.h>
#ifdef CONFIG_X86_VSYSCALL_EMULATION
extern void map_vsyscall(void);
@@ -24,4 +25,13 @@ static inline bool emulate_vsyscall(unsigned long error_code,
}
#endif
+/*
+ * The (legacy) vsyscall page is the long page in the kernel portion
+ * of the address space that has user-accessible permissions.
+ */
+static inline bool is_vsyscall_vaddr(unsigned long vaddr)
+{
+ return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
+}
+
#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index c878616a18..550dcbbbb1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -30,12 +30,13 @@ struct x86_init_mpparse {
* @reserve_resources: reserve the standard resources for the
* platform
* @memory_setup: platform specific memory setup
- *
+ * @dmi_setup: platform specific DMI setup
*/
struct x86_init_resources {
void (*probe_roms)(void);
void (*reserve_resources)(void);
char *(*memory_setup)(void);
+ void (*dmi_setup)(void);
};
/**
diff --git a/arch/x86/kernel/acpi/cppc.c b/arch/x86/kernel/acpi/cppc.c
index 8d8752b44f..ff8f25faca 100644
--- a/arch/x86/kernel/acpi/cppc.c
+++ b/arch/x86/kernel/acpi/cppc.c
@@ -20,7 +20,7 @@ bool cpc_supported_by_cpu(void)
(boot_cpu_data.x86_model >= 0x20 && boot_cpu_data.x86_model <= 0x2f)))
return true;
else if (boot_cpu_data.x86 == 0x17 &&
- boot_cpu_data.x86_model >= 0x70 && boot_cpu_data.x86_model <= 0x7f)
+ boot_cpu_data.x86_model >= 0x30 && boot_cpu_data.x86_model <= 0x7f)
return true;
return boot_cpu_has(X86_FEATURE_CPPC);
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 2055fb308f..77a1ceb717 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1002,11 +1002,11 @@ static bool cpu_has_zenbleed_microcode(void)
u32 good_rev = 0;
switch (boot_cpu_data.x86_model) {
- case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
- case 0x60 ... 0x67: good_rev = 0x0860010b; break;
- case 0x68 ... 0x6f: good_rev = 0x08608105; break;
- case 0x70 ... 0x7f: good_rev = 0x08701032; break;
- case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
+ case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
+ case 0x60 ... 0x67: good_rev = 0x0860010c; break;
+ case 0x68 ... 0x6f: good_rev = 0x08608107; break;
+ case 0x70 ... 0x7f: good_rev = 0x08701033; break;
+ case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;
default:
return false;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 48d049cd74..01ac18f561 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -422,6 +422,13 @@ static void __init mmio_select_mitigation(void)
if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
boot_cpu_has(X86_FEATURE_RTM)))
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
+ /*
+ * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
+ * mitigations; disable the KVM-only mitigation in that case.
+ */
+ if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
+ static_branch_disable(&mmio_stale_data_clear);
else
static_branch_enable(&mmio_stale_data_clear);
@@ -474,6 +481,57 @@ static int __init mmio_stale_data_parse_cmdline(char *str)
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "Register File Data Sampling: " fmt
+
+enum rfds_mitigations {
+ RFDS_MITIGATION_OFF,
+ RFDS_MITIGATION_VERW,
+ RFDS_MITIGATION_UCODE_NEEDED,
+};
+
+/* Default mitigation for Register File Data Sampling */
+static enum rfds_mitigations rfds_mitigation __ro_after_init =
+ IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
+
+static const char * const rfds_strings[] = {
+ [RFDS_MITIGATION_OFF] = "Vulnerable",
+ [RFDS_MITIGATION_VERW] = "Mitigation: Clear Register File",
+ [RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
+};
+
+static void __init rfds_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
+ rfds_mitigation = RFDS_MITIGATION_OFF;
+ return;
+ }
+ if (rfds_mitigation == RFDS_MITIGATION_OFF)
+ return;
+
+ if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+ else
+ rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+}
+
+static __init int rfds_parse_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!boot_cpu_has_bug(X86_BUG_RFDS))
+ return 0;
+
+ if (!strcmp(str, "off"))
+ rfds_mitigation = RFDS_MITIGATION_OFF;
+ else if (!strcmp(str, "on"))
+ rfds_mitigation = RFDS_MITIGATION_VERW;
+
+ return 0;
+}
+early_param("reg_file_data_sampling", rfds_parse_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "" fmt
static void __init md_clear_update_mitigation(void)
@@ -498,11 +556,19 @@ static void __init md_clear_update_mitigation(void)
taa_mitigation = TAA_MITIGATION_VERW;
taa_select_mitigation();
}
- if (mmio_mitigation == MMIO_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
+ /*
+ * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
+ * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
+ */
+ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
mmio_mitigation = MMIO_MITIGATION_VERW;
mmio_select_mitigation();
}
+ if (rfds_mitigation == RFDS_MITIGATION_OFF &&
+ boot_cpu_has_bug(X86_BUG_RFDS)) {
+ rfds_mitigation = RFDS_MITIGATION_VERW;
+ rfds_select_mitigation();
+ }
out:
if (boot_cpu_has_bug(X86_BUG_MDS))
pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
@@ -512,6 +578,8 @@ out:
pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+ if (boot_cpu_has_bug(X86_BUG_RFDS))
+ pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
}
static void __init md_clear_select_mitigation(void)
@@ -519,11 +587,12 @@ static void __init md_clear_select_mitigation(void)
mds_select_mitigation();
taa_select_mitigation();
mmio_select_mitigation();
+ rfds_select_mitigation();
/*
- * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
- * and print their mitigation after MDS, TAA and MMIO Stale Data
- * mitigation selection is done.
+ * As these mitigations are inter-related and rely on the VERW instruction
+ * to clear the microarchitectural buffers, update and print their status
+ * after mitigation selection is done for each of these vulnerabilities.
*/
md_clear_update_mitigation();
}
@@ -2612,6 +2681,11 @@ static ssize_t mmio_stale_data_show_state(char *buf)
sched_smt_active() ? "vulnerable" : "disabled");
}
+static ssize_t rfds_show_state(char *buf)
+{
+ return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
+}
+
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2771,6 +2845,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_GDS:
return gds_show_state(buf);
+ case X86_BUG_RFDS:
+ return rfds_show_state(buf);
+
default:
break;
}
@@ -2845,4 +2922,9 @@ ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *bu
{
return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
}
+
+ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
+}
#endif
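
Illustration only: the new Register File Data Sampling handling above amounts to a small state machine. The mitigation stays off when the CPU is unaffected or mitigations are globally disabled, degrades to "microcode needed" when VERW-based clearing is not enumerated, and can be forced on or off from the command line. A standalone C sketch of that decision flow, with the MSR bit value and the "off" string hard-coded as stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ARCH_CAP_RFDS_CLEAR (1u << 28)

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF]          = "Vulnerable",
	[RFDS_MITIGATION_VERW]         = "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
};

static enum rfds_mitigations select_rfds(bool bug_rfds, bool mitigations_off,
					 unsigned int arch_cap, const char *cmdline)
{
	enum rfds_mitigations m = RFDS_MITIGATION_VERW;	/* default */

	if (cmdline && !strcmp(cmdline, "off"))
		m = RFDS_MITIGATION_OFF;

	if (!bug_rfds || mitigations_off)
		return RFDS_MITIGATION_OFF;
	if (m == RFDS_MITIGATION_OFF)
		return m;
	/* VERW only helps once updated microcode advertises RFDS_CLEAR. */
	return (arch_cap & ARCH_CAP_RFDS_CLEAR) ? RFDS_MITIGATION_VERW
						: RFDS_MITIGATION_UCODE_NEEDED;
}

int main(void)
{
	printf("%s\n", rfds_strings[select_rfds(true, false, ARCH_CAP_RFDS_CLEAR, NULL)]);
	printf("%s\n", rfds_strings[select_rfds(true, false, 0, NULL)]);
	printf("%s\n", rfds_strings[select_rfds(true, false, ARCH_CAP_RFDS_CLEAR, "off")]);
	return 0;
}
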
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 34cac9ea19..97ea52a4e8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1274,6 +1274,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define SRSO BIT(5)
/* CPU is affected by GDS */
#define GDS BIT(6)
+/* CPU is affected by Register File Data Sampling */
+#define RFDS BIT(7)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1301,9 +1303,18 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO),
- VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS),
+ VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO | RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D, X86_STEPPING_ANY, RFDS),
+ VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY, RFDS),
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
@@ -1337,6 +1348,24 @@ static bool arch_cap_mmio_immune(u64 ia32_cap)
ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
}
+static bool __init vulnerable_to_rfds(u64 ia32_cap)
+{
+ /* The "immunity" bit trumps everything else: */
+ if (ia32_cap & ARCH_CAP_RFDS_NO)
+ return false;
+
+ /*
+ * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the blacklist to
+ * indicate that mitigation is needed because the guest is running on
+ * vulnerable hardware or may migrate to such hardware:
+ */
+ if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ return true;
+
+ /* Only consult the blacklist when there is no enumeration: */
+ return cpu_matches(cpu_vuln_blacklist, RFDS);
+}
+
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 ia32_cap = x86_read_arch_cap_msr();
@@ -1448,6 +1477,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
boot_cpu_has(X86_FEATURE_AVX))
setup_force_cpu_bug(X86_BUG_GDS);
+ if (vulnerable_to_rfds(ia32_cap))
+ setup_force_cpu_bug(X86_BUG_RFDS);
+
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
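
Illustration only: vulnerable_to_rfds() above checks the enumeration bits in a fixed order. RFDS_NO wins outright, RFDS_CLEAR (typically injected by a VMM) forces the bug bit on, and only when neither is present does the model blacklist matter. A minimal sketch with the blacklist reduced to a boolean parameter:

#include <stdbool.h>
#include <stdio.h>

#define ARCH_CAP_RFDS_NO    (1ull << 27)
#define ARCH_CAP_RFDS_CLEAR (1ull << 28)

static bool vulnerable_to_rfds(unsigned long long ia32_cap, bool on_blacklist)
{
	if (ia32_cap & ARCH_CAP_RFDS_NO)	/* the "immunity" bit trumps everything */
		return false;
	if (ia32_cap & ARCH_CAP_RFDS_CLEAR)	/* host/VMM says mitigation is needed */
		return true;
	return on_blacklist;			/* fall back to the model list */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       vulnerable_to_rfds(ARCH_CAP_RFDS_NO, true),
	       vulnerable_to_rfds(ARCH_CAP_RFDS_CLEAR, false),
	       vulnerable_to_rfds(0, true),
	       vulnerable_to_rfds(0, false));
	return 0;
}
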
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 19e0681f04..d04371e851 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -231,9 +231,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r)
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
- union cpuid_0x10_3_eax eax;
- union cpuid_0x10_x_edx edx;
- u32 ebx, ecx, subleaf;
+ u32 eax, ebx, ecx, edx, subleaf;
/*
* Query CPUID_Fn80000020_EDX_x01 for MBA and
@@ -241,9 +239,9 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
*/
subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;
- cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full);
- hw_res->num_closid = edx.split.cos_max + 1;
- r->default_ctrl = MAX_MBA_BW_AMD;
+ cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx);
+ hw_res->num_closid = edx + 1;
+ r->default_ctrl = 1 << eax;
/* AMD does not use delay */
r->membw.delay_linear = false;
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index a4f1aa15f0..52e7e7deee 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -18,7 +18,6 @@
#define MBM_OVERFLOW_INTERVAL 1000
#define MAX_MBA_BW 100u
#define MBA_IS_LINEAR 0x4
-#define MAX_MBA_BW_AMD 0x800
#define MBM_CNTR_WIDTH_OFFSET_AMD 20
#define RMID_VAL_ERROR BIT_ULL(63)
@@ -296,14 +295,10 @@ struct rftype {
* struct mbm_state - status for each MBM counter in each domain
* @prev_bw_bytes: Previous bytes value read for bandwidth calculation
* @prev_bw: The most recent bandwidth in MBps
- * @delta_bw: Difference between the current and previous bandwidth
- * @delta_comp: Indicates whether to compute the delta_bw
*/
struct mbm_state {
u64 prev_bw_bytes;
u32 prev_bw;
- u32 delta_bw;
- bool delta_comp;
};
/**
@@ -395,6 +390,8 @@ struct rdt_parse_data {
* @msr_update: Function pointer to update QOS MSRs
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
* @mbm_width: Monitor width, to detect and correct for overflow.
+ * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth
+ * Monitoring Event Configuration (BMEC) is supported.
* @cdp_enabled: CDP state of this resource
*
* Members of this structure are either private to the architecture
@@ -409,6 +406,7 @@ struct rdt_hw_resource {
struct rdt_resource *r);
unsigned int mon_scale;
unsigned int mbm_width;
+ unsigned int mbm_cfg_mask;
bool cdp_enabled;
};
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index f136ac0468..3a6c069614 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -440,9 +440,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
cur_bw = bytes / SZ_1M;
- if (m->delta_comp)
- m->delta_bw = abs(cur_bw - m->prev_bw);
- m->delta_comp = false;
m->prev_bw = cur_bw;
}
@@ -520,11 +517,11 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
u32 closid, rmid, cur_msr_val, new_msr_val;
struct mbm_state *pmbm_data, *cmbm_data;
- u32 cur_bw, delta_bw, user_bw;
struct rdt_resource *r_mba;
struct rdt_domain *dom_mba;
struct list_head *head;
struct rdtgroup *entry;
+ u32 cur_bw, user_bw;
if (!is_mbm_local_enabled())
return;
@@ -543,7 +540,6 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
cur_bw = pmbm_data->prev_bw;
user_bw = dom_mba->mbps_val[closid];
- delta_bw = pmbm_data->delta_bw;
/* MBA resource doesn't support CDP */
cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
@@ -555,49 +551,31 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
list_for_each_entry(entry, head, mon.crdtgrp_list) {
cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
cur_bw += cmbm_data->prev_bw;
- delta_bw += cmbm_data->delta_bw;
}
/*
* Scale up/down the bandwidth linearly for the ctrl group. The
* bandwidth step is the bandwidth granularity specified by the
* hardware.
- *
- * The delta_bw is used when increasing the bandwidth so that we
- * dont alternately increase and decrease the control values
- * continuously.
- *
- * For ex: consider cur_bw = 90MBps, user_bw = 100MBps and if
- * bandwidth step is 20MBps(> user_bw - cur_bw), we would keep
- * switching between 90 and 110 continuously if we only check
- * cur_bw < user_bw.
+ * Always increase throttling if the current bandwidth is above the
+ * target set by the user.
+ * But avoid thrashing up and down on every poll by checking
+ * whether a decrease in throttling is likely to push the group
+ * back over the target. E.g., if currently throttling to 30% of bandwidth
+ * on a system with 10% granularity steps, check whether moving to
+ * 40% would go past the limit by multiplying current bandwidth by
+ * "(30 + 10) / 30".
*/
if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
} else if (cur_msr_val < MAX_MBA_BW &&
- (user_bw > (cur_bw + delta_bw))) {
+ (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
} else {
return;
}
resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
-
- /*
- * Delta values are updated dynamically package wise for each
- * rdtgrp every time the throttle MSR changes value.
- *
- * This is because (1)the increase in bandwidth is not perfectly
- * linear and only "approximately" linear even when the hardware
- * says it is linear.(2)Also since MBA is a core specific
- * mechanism, the delta values vary based on number of cores used
- * by the rdtgrp.
- */
- pmbm_data->delta_comp = true;
- list_for_each_entry(entry, head, mon.crdtgrp_list) {
- cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
- cmbm_data->delta_comp = true;
- }
}
static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid)
@@ -813,6 +791,12 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
return ret;
if (rdt_cpu_has(X86_FEATURE_BMEC)) {
+ u32 eax, ebx, ecx, edx;
+
+ /* Detect list of bandwidth sources that can be tracked */
+ cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx);
+ hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
+
if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
mbm_total_event.configurable = true;
mbm_config_rftype_init("mbm_total_bytes_config");
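
Illustration only: the update_mba_bw() rewrite above drops the stored delta_bw in favour of an on-the-fly estimate. Throttling is tightened whenever the measured bandwidth exceeds the user target, and is relaxed only when scaling the measured bandwidth by (cur_msr_val + min_bw) / cur_msr_val still stays under the target. A small numeric sketch of that rule; the 10% granularity, 10% minimum and 100% cap are assumptions chosen to match the example in the comment:

#include <stdio.h>

#define MAX_MBA_BW 100u		/* percent */

/* Return the new throttle value, or the old one if nothing should change. */
static unsigned int mba_step(unsigned int cur_msr_val, unsigned int bw_gran,
			     unsigned int min_bw, unsigned int cur_bw,
			     unsigned int user_bw)
{
	if (cur_msr_val > min_bw && user_bw < cur_bw)
		return cur_msr_val - bw_gran;		/* throttle harder */
	if (cur_msr_val < MAX_MBA_BW &&
	    user_bw > cur_bw * (cur_msr_val + min_bw) / cur_msr_val)
		return cur_msr_val + bw_gran;		/* safe to relax */
	return cur_msr_val;				/* leave it alone */
}

int main(void)
{
	/* Throttled to 30%, measuring 90 MBps against a 100 MBps target:
	 * 90 * (30 + 10) / 30 = 120 > 100, so do not relax (avoids thrashing). */
	printf("%u\n", mba_step(30, 10, 10, 90, 100));
	/* Same throttle, measuring 60 MBps: 60 * 40 / 30 = 80 < 100, relax to 40%. */
	printf("%u\n", mba_step(30, 10, 10, 60, 100));
	return 0;
}
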
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 69a1de9238..2b69e560b0 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1620,12 +1620,6 @@ static int mbm_config_write_domain(struct rdt_resource *r,
struct mon_config_info mon_info = {0};
int ret = 0;
- /* mon_config cannot be more than the supported set of events */
- if (val > MAX_EVT_CONFIG_BITS) {
- rdt_last_cmd_puts("Invalid event configuration\n");
- return -EINVAL;
- }
-
/*
* Read the current config value first. If both are the same then
* no need to write it again.
@@ -1663,6 +1657,7 @@ out:
static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{
+ struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
char *dom_str = NULL, *id_str;
unsigned long dom_id, val;
struct rdt_domain *d;
@@ -1686,6 +1681,13 @@ next:
return -EINVAL;
}
+ /* Value from user cannot be more than the supported set of events */
+ if ((val & hw_res->mbm_cfg_mask) != val) {
+ rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
+ hw_res->mbm_cfg_mask);
+ return -EINVAL;
+ }
+
list_for_each_entry(d, &r->domains, list) {
if (d->id == dom_id) {
ret = mbm_config_write_domain(r, d, evtid, val);
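
Illustration only: the rdtgroup change above replaces the fixed MAX_EVT_CONFIG_BITS bound with a per-resource mask of bandwidth sources reported by CPUID, rejecting any configuration value that sets a bit outside that mask. The check itself is just (val & mask) != val; a tiny sketch with an assumed six-bit mask:

#include <stdbool.h>
#include <stdio.h>

/* Accept only event-configuration bits that the hardware reported. */
static bool mbm_config_valid(unsigned int val, unsigned int mbm_cfg_mask)
{
	return (val & mbm_cfg_mask) == val;
}

int main(void)
{
	unsigned int mask = 0x3f;	/* e.g. six supported bandwidth sources */

	printf("0x15 valid: %d\n", mbm_config_valid(0x15, mask));
	printf("0x55 valid: %d\n", mbm_config_valid(0x55, mask));	/* bit 6 unsupported */
	return 0;
}
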
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index e963344b04..53935b4d62 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -2,6 +2,7 @@
/*
* EISA specific code
*/
+#include <linux/cc_platform.h>
#include <linux/ioport.h>
#include <linux/eisa.h>
#include <linux/io.h>
@@ -12,7 +13,7 @@ static __init int eisa_bus_probe(void)
{
void __iomem *p;
- if (xen_pv_domain() && !xen_initial_domain())
+ if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
return 0;
p = ioremap(0x0FFFD9, 4);
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 117e74c44e..33a214b1a4 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -178,10 +178,11 @@ void fpu__init_cpu_xstate(void)
* Must happen after CR4 setup and before xsetbv() to allow KVM
* lazy passthrough. Write independent of the dynamic state static
* key as that does not work on the boot CPU. This also ensures
- * that any stale state is wiped out from XFD.
+ * that any stale state is wiped out from XFD. Reset the per-CPU
+ * xfd cache too.
*/
if (cpu_feature_enabled(X86_FEATURE_XFD))
- wrmsrl(MSR_IA32_XFD, init_fpstate.xfd);
+ xfd_set_state(init_fpstate.xfd);
/*
* XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 3518fb26d0..19ca623ffa 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -148,20 +148,26 @@ static inline void xfd_validate_state(struct fpstate *fpstate, u64 mask, bool rs
#endif
#ifdef CONFIG_X86_64
+static inline void xfd_set_state(u64 xfd)
+{
+ wrmsrl(MSR_IA32_XFD, xfd);
+ __this_cpu_write(xfd_state, xfd);
+}
+
static inline void xfd_update_state(struct fpstate *fpstate)
{
if (fpu_state_size_dynamic()) {
u64 xfd = fpstate->xfd;
- if (__this_cpu_read(xfd_state) != xfd) {
- wrmsrl(MSR_IA32_XFD, xfd);
- __this_cpu_write(xfd_state, xfd);
- }
+ if (__this_cpu_read(xfd_state) != xfd)
+ xfd_set_state(xfd);
}
}
extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
+static inline void xfd_set_state(u64 xfd) { }
+
static inline void xfd_update_state(struct fpstate *fpstate) { }
static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a0ce46c0a2..a6a3475e1d 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -335,7 +335,16 @@ out:
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
bool *on_func_entry)
{
- if (is_endbr(*(u32 *)addr)) {
+ u32 insn;
+
+ /*
+ * Since 'addr' is not guaranteed to be safe to access, use
+ * copy_from_kernel_nofault() to read the instruction:
+ */
+ if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(u32)))
+ return NULL;
+
+ if (is_endbr(insn)) {
*on_func_entry = !offset || offset == 4;
if (*on_func_entry)
offset = 4;
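
Illustration only: the kprobes fix above reads the probed instruction through copy_from_kernel_nofault() instead of dereferencing the address directly, because the address is not guaranteed to be mapped. As a userspace analogue, the safe_copy() helper below is a stand-in for that fault-tolerant copy (here it merely validates a bound), and the constant mirrors what is_endbr() looks for at a function entry:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for copy_from_kernel_nofault(): copy only if the source lies inside
 * a known-good buffer. The real helper survives faults on unmapped addresses.
 */
static int safe_copy(void *dst, const void *src, size_t len,
		     const void *buf, size_t buf_len)
{
	const char *s = src, *b = buf;

	if (s < b || s + len > b + buf_len)
		return -1;
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	/* endbr64 followed by a ret, as it would appear at a function entry. */
	const uint8_t text[] = { 0xf3, 0x0f, 0x1e, 0xfa, 0xc3 };
	uint32_t insn;

	if (safe_copy(&insn, text, sizeof(insn), text, sizeof(text)))
		return 1;
	/* 0xfa1e0ff3 is endbr64 read as a little-endian 32-bit value. */
	printf("starts with endbr64: %s\n", insn == 0xfa1e0ff3 ? "yes" : "no");
	return 0;
}
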
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b223922248..15c700d358 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -196,12 +196,12 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
if (!smp_check_mpc(mpc, oem, str))
return 0;
- /* Initialize the lapic mapping */
- if (!acpi_lapic)
- register_lapic_address(mpc->lapic);
-
- if (early)
+ if (early) {
+ /* Initialize the lapic mapping */
+ if (!acpi_lapic)
+ register_lapic_address(mpc->lapic);
return 1;
+ }
/* Now process the configuration blocks. */
while (count < mpc->length) {
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 3082cf24b6..6da2cfa23c 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -636,7 +636,7 @@ void nmi_backtrace_stall_check(const struct cpumask *btp)
msgp = nmi_check_stall_msg[idx];
if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))
modp = ", but OK because ignore_nmis was set";
- if (nmi_seq & ~0x1)
+ if (nmi_seq & 0x1)
msghp = " (CPU currently in NMI handler function)";
else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)
msghp = " (CPU exited one NMI handler function)";
diff --git a/arch/x86/kernel/probe_roms.c b/arch/x86/kernel/probe_roms.c
index 319fef37d9..cc2c34ba72 100644
--- a/arch/x86/kernel/probe_roms.c
+++ b/arch/x86/kernel/probe_roms.c
@@ -203,16 +203,6 @@ void __init probe_roms(void)
unsigned char c;
int i;
- /*
- * The ROM memory range is not part of the e820 table and is therefore not
- * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted
- * memory, and SNP requires encrypted memory to be validated before access.
- * Do that here.
- */
- snp_prep_memory(video_rom_resource.start,
- ((system_rom_resource.end + 1) - video_rom_resource.start),
- SNP_PAGE_STATE_PRIVATE);
-
/* video rom */
upper = adapter_rom_resources[0].start;
for (start = video_rom_resource.start; start < upper; start += 2048) {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 1526747bed..b002ebf024 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -9,7 +9,6 @@
#include <linux/console.h>
#include <linux/crash_dump.h>
#include <linux/dma-map-ops.h>
-#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/ima.h>
#include <linux/init_ohci1394_dma.h>
@@ -904,7 +903,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
reserve_ibft_region();
- dmi_setup();
+ x86_init.resources.dmi_setup();
/*
* VMware detection requires dmi to be available, so this
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index ccb0915e84..466fe09898 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -556,9 +556,9 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;
/* Skip post-processing for out-of-range zero leafs. */
- if (!(leaf->fn <= cpuid_std_range_max ||
- (leaf->fn >= 0x40000000 && leaf->fn <= cpuid_hyp_range_max) ||
- (leaf->fn >= 0x80000000 && leaf->fn <= cpuid_ext_range_max)))
+ if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
+ (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
+ (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
return 0;
}
@@ -1063,11 +1063,11 @@ static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
if (fn->eax_in == 0x0)
- cpuid_std_range_max = fn->eax;
+ RIP_REL_REF(cpuid_std_range_max) = fn->eax;
else if (fn->eax_in == 0x40000000)
- cpuid_hyp_range_max = fn->eax;
+ RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
else if (fn->eax_in == 0x80000000)
- cpuid_ext_range_max = fn->eax;
+ RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
}
}
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index c67285824e..0f58242b54 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/psp-sev.h>
+#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>
#include <asm/cpu_entry_area.h>
@@ -748,7 +749,7 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
return;
/*
@@ -767,28 +768,13 @@ void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr
* This eliminates worries about jump tables or checking boot_cpu_data
* in the cc_platform_has() function.
*/
- if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
return;
/* Ask hypervisor to mark the memory pages shared in the RMP table. */
early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);
}
-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)
-{
- unsigned long vaddr, npages;
-
- vaddr = (unsigned long)__va(paddr);
- npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
-
- if (op == SNP_PAGE_STATE_PRIVATE)
- early_snp_set_memory_private(vaddr, paddr, npages);
- else if (op == SNP_PAGE_STATE_SHARED)
- early_snp_set_memory_shared(vaddr, paddr, npages);
- else
- WARN(1, "invalid memory op %d\n", op);
-}
-
static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
unsigned long vaddr_end, int op)
{
@@ -2112,6 +2098,17 @@ void __init __noreturn snp_abort(void)
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
+/*
+ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES is
+ * enabled, as the alternative (fallback) logic for DMI probing in the legacy
+ * ROM region can cause a crash since this region is not pre-validated.
+ */
+void __init snp_dmi_setup(void)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ dmi_setup();
+}
+
static void dump_cpuid_table(void)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index a37ebd3b47..3f0718b4a7 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -3,6 +3,7 @@
*
* For licencing details see kernel-base/COPYING
*/
+#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
@@ -66,6 +67,7 @@ struct x86_init_ops x86_init __initdata = {
.probe_roms = probe_roms,
.reserve_resources = reserve_standard_io_resources,
.memory_setup = e820__memory_setup_default,
+ .dmi_setup = dmi_setup,
},
.mpparse = {
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index dda6fc4cfa..1811a9ddfe 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -679,6 +679,11 @@ void kvm_set_cpu_caps(void)
F(AMX_COMPLEX)
);
+ kvm_cpu_cap_init_kvm_defined(CPUID_7_2_EDX,
+ F(INTEL_PSFD) | F(IPRED_CTRL) | F(RRSBA_CTRL) | F(DDPD_U) |
+ F(BHI_CTRL) | F(MCDT_NO)
+ );
+
kvm_cpu_cap_mask(CPUID_D_1_EAX,
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
);
@@ -960,13 +965,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* function 7 has additional index. */
case 7:
- entry->eax = min(entry->eax, 1u);
+ max_idx = entry->eax = min(entry->eax, 2u);
cpuid_entry_override(entry, CPUID_7_0_EBX);
cpuid_entry_override(entry, CPUID_7_ECX);
cpuid_entry_override(entry, CPUID_7_EDX);
- /* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
- if (entry->eax == 1) {
+ /* KVM only supports up to 0x7.2, capped above via min(). */
+ if (max_idx >= 1) {
entry = do_host_cpuid(array, function, 1);
if (!entry)
goto out;
@@ -976,6 +981,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->ebx = 0;
entry->ecx = 0;
}
+ if (max_idx >= 2) {
+ entry = do_host_cpuid(array, function, 2);
+ if (!entry)
+ goto out;
+
+ cpuid_entry_override(entry, CPUID_7_2_EDX);
+ entry->ecx = 0;
+ entry->ebx = 0;
+ entry->eax = 0;
+ }
break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 238afd7335..4943f6b2bb 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -2388,7 +2388,7 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *h
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
- eventfd_signal(eventfd, 1);
+ eventfd_signal(eventfd);
return HV_STATUS_SUCCESS;
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 245b20973c..23fab75993 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -41,6 +41,7 @@
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
+#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"
@@ -499,8 +500,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
}
/* Check if there are APF page ready requests pending */
- if (enabled)
+ if (enabled) {
kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
+ kvm_xen_sw_enable_lapic(apic->vcpu);
+ }
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index b816506783..aadefcaa95 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -16,6 +16,7 @@ enum kvm_only_cpuid_leafs {
CPUID_7_1_EDX,
CPUID_8000_0007_EDX,
CPUID_8000_0022_EAX,
+ CPUID_7_2_EDX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -46,6 +47,14 @@ enum kvm_only_cpuid_leafs {
#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
+/* Intel-defined sub-features, CPUID level 0x00000007:2 (EDX) */
+#define X86_FEATURE_INTEL_PSFD KVM_X86_FEATURE(CPUID_7_2_EDX, 0)
+#define X86_FEATURE_IPRED_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 1)
+#define KVM_X86_FEATURE_RRSBA_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 2)
+#define X86_FEATURE_DDPD_U KVM_X86_FEATURE(CPUID_7_2_EDX, 3)
+#define X86_FEATURE_BHI_CTRL KVM_X86_FEATURE(CPUID_7_2_EDX, 4)
+#define X86_FEATURE_MCDT_NO KVM_X86_FEATURE(CPUID_7_2_EDX, 5)
+
/* CPUID level 0x80000007 (EDX). */
#define KVM_X86_FEATURE_CONSTANT_TSC KVM_X86_FEATURE(CPUID_8000_0007_EDX, 8)
@@ -80,6 +89,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX},
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
+ [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
};
/*
@@ -106,18 +116,19 @@ static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
*/
static __always_inline u32 __feature_translate(int x86_feature)
{
- if (x86_feature == X86_FEATURE_SGX1)
- return KVM_X86_FEATURE_SGX1;
- else if (x86_feature == X86_FEATURE_SGX2)
- return KVM_X86_FEATURE_SGX2;
- else if (x86_feature == X86_FEATURE_SGX_EDECCSSA)
- return KVM_X86_FEATURE_SGX_EDECCSSA;
- else if (x86_feature == X86_FEATURE_CONSTANT_TSC)
- return KVM_X86_FEATURE_CONSTANT_TSC;
- else if (x86_feature == X86_FEATURE_PERFMON_V2)
- return KVM_X86_FEATURE_PERFMON_V2;
-
- return x86_feature;
+#define KVM_X86_TRANSLATE_FEATURE(f) \
+ case X86_FEATURE_##f: return KVM_X86_FEATURE_##f
+
+ switch (x86_feature) {
+ KVM_X86_TRANSLATE_FEATURE(SGX1);
+ KVM_X86_TRANSLATE_FEATURE(SGX2);
+ KVM_X86_TRANSLATE_FEATURE(SGX_EDECCSSA);
+ KVM_X86_TRANSLATE_FEATURE(CONSTANT_TSC);
+ KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
+ KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
+ default:
+ return x86_feature;
+ }
}
static __always_inline u32 __feature_leaf(int x86_feature)
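
Illustration only: the reverse_cpuid change above collapses the if/else chain into a switch driven by a small helper macro, so adding a KVM-only alias (such as RRSBA_CTRL here) becomes a one-line change. A reduced sketch of that macro pattern, with made-up feature numbers:

#include <stdio.h>

/* Made-up feature numbers purely for the sketch. */
#define X86_FEATURE_SGX1		10
#define X86_FEATURE_RRSBA_CTRL		11
#define KVM_X86_FEATURE_SGX1		100
#define KVM_X86_FEATURE_RRSBA_CTRL	101

static unsigned int feature_translate(int x86_feature)
{
#define TRANSLATE_FEATURE(f) \
	case X86_FEATURE_##f: return KVM_X86_FEATURE_##f

	switch (x86_feature) {
	TRANSLATE_FEATURE(SGX1);
	TRANSLATE_FEATURE(RRSBA_CTRL);
	default:
		return x86_feature;
	}
#undef TRANSLATE_FEATURE
}

int main(void)
{
	printf("%u %u %u\n", feature_translate(X86_FEATURE_SGX1),
	       feature_translate(X86_FEATURE_RRSBA_CTRL), feature_translate(42));
	return 0;
}
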
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 6ee925d666..1226bb2151 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -57,7 +57,7 @@ static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
/* enable/disable SEV-ES DebugSwap support */
-static bool sev_es_debug_swap_enabled = true;
+static bool sev_es_debug_swap_enabled = false;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
#else
#define sev_enabled false
@@ -612,8 +612,11 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
save->xss = svm->vcpu.arch.ia32_xss;
save->dr6 = svm->vcpu.arch.dr6;
- if (sev_es_debug_swap_enabled)
+ if (sev_es_debug_swap_enabled) {
save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+ pr_warn_once("Enabling DebugSwap with KVM_SEV_ES_INIT. "
+ "This will not work starting with Linux 6.10\n");
+ }
pr_debug("Virtual Machine Save Area (VMSA):\n");
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
@@ -1975,20 +1978,22 @@ int sev_mem_enc_register_region(struct kvm *kvm,
goto e_free;
}
- region->uaddr = range->addr;
- region->size = range->size;
-
- list_add_tail(&region->list, &sev->regions_list);
- mutex_unlock(&kvm->lock);
-
/*
* The guest may change the memory encryption attribute from C=0 -> C=1
* or vice versa for this memory range. Lets make sure caches are
* flushed to ensure that guest data gets written into memory with
- * correct C-bit.
+ * correct C-bit. Note, this must be done before dropping kvm->lock,
+ * as region and its array of pages can be freed by a different task
+ * once kvm->lock is released.
*/
sev_clflush_pages(region->pages, region->npages);
+ region->uaddr = range->addr;
+ region->size = range->size;
+
+ list_add_tail(&region->list, &sev->regions_list);
+ mutex_unlock(&kvm->lock);
+
return ret;
e_free:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 468870450b..365caf7328 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1620,7 +1620,8 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
- ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
+ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
+ ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR)
static u64 kvm_get_arch_capabilities(void)
{
@@ -1652,6 +1653,8 @@ static u64 kvm_get_arch_capabilities(void)
data |= ARCH_CAP_SSB_NO;
if (!boot_cpu_has_bug(X86_BUG_MDS))
data |= ARCH_CAP_MDS_NO;
+ if (!boot_cpu_has_bug(X86_BUG_RFDS))
+ data |= ARCH_CAP_RFDS_NO;
if (!boot_cpu_has(X86_FEATURE_RTM)) {
/*
@@ -7948,6 +7951,16 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
if (r < 0)
return X86EMUL_UNHANDLEABLE;
+
+ /*
+ * Mark the page dirty _before_ checking whether or not the CMPXCHG was
+ * successful, as the old value is written back on failure. Note, for
+ * live migration, this is unnecessarily conservative as CMPXCHG writes
+ * back the original value and the access is atomic, but KVM's ABI is
+ * that all writes are dirty logged, regardless of the value written.
+ */
+ kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa));
+
if (r)
return X86EMUL_CMPXCHG_FAILED;
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index e53fad915a..c069521f24 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -493,7 +493,7 @@ void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
kvm_xen_update_runstate_guest(v, state == RUNSTATE_runnable);
}
-static void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
+void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{
struct kvm_lapic_irq irq = { };
int r;
@@ -2088,7 +2088,7 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
if (ret < 0 && ret != -ENOTCONN)
return false;
} else {
- eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
+ eventfd_signal(evtchnfd->deliver.eventfd.ctx);
}
*r = 0;
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index f8f1fe22d0..f5841d9000 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -18,6 +18,7 @@ extern struct static_key_false_deferred kvm_xen_enabled;
int __kvm_xen_has_interrupt(struct kvm_vcpu *vcpu);
void kvm_xen_inject_pending_events(struct kvm_vcpu *vcpu);
+void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *vcpu);
int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data);
int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
@@ -36,6 +37,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
const struct kvm_irq_routing_entry *ue);
void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu);
+static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
+{
+ /*
+ * The local APIC is being enabled. If the per-vCPU upcall vector is
+ * set and the vCPU's evtchn_upcall_pending flag is set, inject the
+ * interrupt.
+ */
+ if (static_branch_unlikely(&kvm_xen_enabled.key) &&
+ vcpu->arch.xen.vcpu_info_cache.active &&
+ vcpu->arch.xen.upcall_vector && __kvm_xen_has_interrupt(vcpu))
+ kvm_xen_inject_vcpu_vector(vcpu);
+}
+
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
return static_branch_unlikely(&kvm_xen_enabled.key) &&
@@ -101,6 +115,10 @@ static inline void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{
}
+static inline void kvm_xen_sw_enable_lapic(struct kvm_vcpu *vcpu)
+{
+}
+
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
return false;
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 7b2589877d..1e59367b46 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -163,6 +163,7 @@ SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
lfence
jmp srso_alias_return_thunk
SYM_FUNC_END(srso_alias_untrain_ret)
+__EXPORT_THUNK(srso_alias_untrain_ret)
.popsection
.pushsection .text..__x86.rethunk_safe
@@ -224,10 +225,12 @@ SYM_CODE_START(srso_return_thunk)
SYM_CODE_END(srso_return_thunk)
#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
#else /* !CONFIG_CPU_SRSO */
#define JMP_SRSO_UNTRAIN_RET "ud2"
-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
+/* Dummy for the alternative in CALL_UNTRAIN_RET. */
+SYM_CODE_START(srso_alias_untrain_ret)
+ RET
+SYM_FUNC_END(srso_alias_untrain_ret)
#endif /* CONFIG_CPU_SRSO */
#ifdef CONFIG_CPU_UNRET_ENTRY
@@ -319,9 +322,7 @@ SYM_FUNC_END(retbleed_untrain_ret)
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
SYM_FUNC_START(entry_untrain_ret)
- ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
- JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
- JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
+ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
SYM_FUNC_END(entry_untrain_ret)
__EXPORT_THUNK(entry_untrain_ret)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 679b09cfe2..d6375b3c63 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -798,15 +798,6 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
show_opcodes(regs, loglvl);
}
-/*
- * The (legacy) vsyscall page is the long page in the kernel portion
- * of the address space that has user-accessible permissions.
- */
-static bool is_vsyscall_vaddr(unsigned long vaddr)
-{
- return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
-}
-
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 pkey, int si_code)
diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c
index 6993f026ad..42115ac079 100644
--- a/arch/x86/mm/maccess.c
+++ b/arch/x86/mm/maccess.c
@@ -3,6 +3,8 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <asm/vsyscall.h>
+
#ifdef CONFIG_X86_64
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
@@ -16,6 +18,14 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
return false;
/*
+ * Reading from the vsyscall page may cause an unhandled fault in
+ * certain cases. Though it is at an address above TASK_SIZE_MAX, it is
+ * usually considered a user space address.
+ */
+ if (is_vsyscall_vaddr(vaddr))
+ return false;
+
+ /*
* Allow everything during early boot before 'x86_virt_bits'
* is initialized. Needed for instruction decoding in early
* exception handlers.
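
Illustration only: the maccess.c hunk above rejects vsyscall-page addresses in copy_from_kernel_nofault_allowed() using the is_vsyscall_vaddr() helper this series moves into asm/vsyscall.h: the address is masked down to its page base and compared with the fixed VSYSCALL_ADDR. A tiny sketch of that comparison, with constants copied for 4 KiB pages and the legacy x86-64 vsyscall address:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define VSYSCALL_ADDR	0xffffffffff600000UL	/* legacy fixed vsyscall page */

static bool is_vsyscall_vaddr(unsigned long vaddr)
{
	return (vaddr & PAGE_MASK) == VSYSCALL_ADDR;
}

int main(void)
{
	printf("%d %d\n",
	       is_vsyscall_vaddr(VSYSCALL_ADDR + 0x400),	/* inside the page */
	       is_vsyscall_vaddr(0xffffffffff601000UL));	/* next page */
	return 0;
}
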
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 70b91de2e0..94cd06d4b0 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -492,6 +492,24 @@ void __init sme_early_init(void)
*/
if (sev_status & MSR_AMD64_SEV_ENABLED)
ia32_disable();
+
+ /*
+ * Override init functions that scan the ROM region in SEV-SNP guests,
+ * as this memory is not pre-validated and would thus cause a crash.
+ */
+ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.pci.init_irq = x86_init_noop;
+ x86_init.resources.probe_roms = x86_init_noop;
+
+ /*
+ * DMI setup behavior for SEV-SNP guests depends on
+ * efi_enabled(EFI_CONFIG_TABLES), which hasn't been
+ * parsed yet. snp_dmi_setup() will run after that
+ * parsing has happened.
+ */
+ x86_init.resources.dmi_setup = snp_dmi_setup;
+ }
}
void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index d73aeb1641..0166ab1780 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -97,7 +97,6 @@ static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
-static char sme_cmdline_off[] __initdata = "off";
static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
@@ -305,7 +304,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
* instrumentation or checking boot_cpu_data in the cc_platform_has()
* function.
*/
- if (!sme_get_me_mask() || sev_status & MSR_AMD64_SEV_ENABLED)
+ if (!sme_get_me_mask() ||
+ RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
return;
/*
@@ -504,10 +504,9 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
void __init sme_enable(struct boot_params *bp)
{
- const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
unsigned int eax, ebx, ecx, edx;
unsigned long feature_mask;
- bool active_by_default;
unsigned long me_mask;
char buffer[16];
bool snp;
@@ -543,11 +542,11 @@ void __init sme_enable(struct boot_params *bp)
me_mask = 1UL << (ebx & 0x3f);
/* Check the SEV MSR whether SEV or SME is enabled */
- sev_status = __rdmsr(MSR_AMD64_SEV);
- feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+ RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
+ feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
- if (snp && !(sev_status & MSR_AMD64_SEV_SNP_ENABLED))
+ if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
snp_abort();
/* Check if memory encryption is enabled */
@@ -573,7 +572,6 @@ void __init sme_enable(struct boot_params *bp)
return;
} else {
/* SEV state cannot be controlled by a command line option */
- sme_me_mask = me_mask;
goto out;
}
@@ -588,31 +586,17 @@ void __init sme_enable(struct boot_params *bp)
asm ("lea sme_cmdline_on(%%rip), %0"
: "=r" (cmdline_on)
: "p" (sme_cmdline_on));
- asm ("lea sme_cmdline_off(%%rip), %0"
- : "=r" (cmdline_off)
- : "p" (sme_cmdline_off));
-
- if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
- active_by_default = true;
- else
- active_by_default = false;
cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
((u64)bp->ext_cmd_line_ptr << 32));
- if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+ if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
+ strncmp(buffer, cmdline_on, sizeof(buffer)))
return;
- if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
- sme_me_mask = me_mask;
- else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
- sme_me_mask = 0;
- else
- sme_me_mask = active_by_default ? me_mask : 0;
out:
- if (sme_me_mask) {
- physical_mask &= ~sme_me_mask;
- cc_vendor = CC_VENDOR_AMD;
- cc_set_mask(sme_me_mask);
- }
+ RIP_REL_REF(sme_me_mask) = me_mask;
+ physical_mask &= ~me_mask;
+ cc_vendor = CC_VENDOR_AMD;
+ cc_set_mask(me_mask);
}
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index d30949e25e..e701328364 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -653,6 +653,14 @@ static void print_absolute_relocs(void)
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
+ /*
+ * Do not perform relocations in the .notes section; any
+ * values there are meant for pre-boot consumption (e.g.
+ * startup_xen).
+ */
+ if (sec_applies->shdr.sh_type == SHT_NOTE) {
+ continue;
+ }
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 4b0d6fff88..1fb9a1644d 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,6 +65,8 @@ int xen_smp_intr_init(unsigned int cpu)
char *resched_name, *callfunc_name, *debug_name;
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
+ if (!resched_name)
+ goto fail_mem;
per_cpu(xen_resched_irq, cpu).name = resched_name;
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
cpu,
@@ -77,6 +79,8 @@ int xen_smp_intr_init(unsigned int cpu)
per_cpu(xen_resched_irq, cpu).irq = rc;
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
cpu,
@@ -90,6 +94,9 @@ int xen_smp_intr_init(unsigned int cpu)
if (!xen_fifo_events) {
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+ if (!debug_name)
+ goto fail_mem;
+
per_cpu(xen_debug_irq, cpu).name = debug_name;
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
xen_debug_interrupt,
@@ -101,6 +108,9 @@ int xen_smp_intr_init(unsigned int cpu)
}
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
+ if (!callfunc_name)
+ goto fail_mem;
+
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
cpu,
@@ -114,6 +124,8 @@ int xen_smp_intr_init(unsigned int cpu)
return 0;
+ fail_mem:
+ rc = -ENOMEM;
fail:
xen_smp_intr_free(cpu);
return rc;
diff --git a/block/bio.c b/block/bio.c
index 270f6b9992..62419aa09d 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1149,7 +1149,7 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
bio_for_each_folio_all(fi, bio) {
struct page *page;
- size_t done = 0;
+ size_t nr_pages;
if (mark_dirty) {
folio_lock(fi.folio);
@@ -1157,10 +1157,11 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty)
folio_unlock(fi.folio);
}
page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
+ nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
+ fi.offset / PAGE_SIZE + 1;
do {
bio_release_page(bio, page++);
- done += PAGE_SIZE;
- } while (done < fi.length);
+ } while (--nr_pages != 0);
}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);
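
Illustration only: the __bio_release_pages() fix above counts pages from the folio offset and length rather than assuming the segment starts on a page boundary: the index of the last touched page minus the index of the first, plus one. A standalone check of that arithmetic:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Number of pages spanned by [offset, offset + length) within a folio. */
static unsigned long span_pages(unsigned long offset, unsigned long length)
{
	return (offset + length - 1) / PAGE_SIZE - offset / PAGE_SIZE + 1;
}

int main(void)
{
	printf("%lu\n", span_pages(0, 4096));		/* exactly one page */
	printf("%lu\n", span_pages(512, 4096));		/* crosses into a second page */
	printf("%lu\n", span_pages(4096, 8192));	/* two aligned pages */
	return 0;
}
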
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a02d3d922c..a71974a5e5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -772,16 +772,11 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
/*
* Partial zone append completions cannot be supported as the
* BIO fragments may end up not being written sequentially.
- * For such case, force the completed nbytes to be equal to
- * the BIO size so that bio_advance() sets the BIO remaining
- * size to 0 and we end up calling bio_endio() before returning.
*/
- if (bio->bi_iter.bi_size != nbytes) {
+ if (bio->bi_iter.bi_size != nbytes)
bio->bi_status = BLK_STS_IOERR;
- nbytes = bio->bi_iter.bi_size;
- } else {
+ else
bio->bi_iter.bi_sector = rq->__sector;
- }
}
bio_advance(bio, nbytes);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 0046b44726..7019b8e204 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -686,6 +686,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->zone_write_granularity = max(t->zone_write_granularity,
b->zone_write_granularity);
t->zoned = max(t->zoned, b->zoned);
+ if (!t->zoned) {
+ t->zone_write_granularity = 0;
+ t->max_zone_append_sectors = 0;
+ }
return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
diff --git a/block/holder.c b/block/holder.c
index 37d18c13d9..791091a7ea 100644
--- a/block/holder.c
+++ b/block/holder.c
@@ -8,6 +8,8 @@ struct bd_holder_disk {
int refcnt;
};
+static DEFINE_MUTEX(blk_holder_mutex);
+
static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
struct gendisk *disk)
{
@@ -80,7 +82,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
kobject_get(bdev->bd_holder_dir);
mutex_unlock(&bdev->bd_disk->open_mutex);
- mutex_lock(&disk->open_mutex);
+ mutex_lock(&blk_holder_mutex);
WARN_ON_ONCE(!bdev->bd_holder);
holder = bd_find_holder_disk(bdev, disk);
@@ -108,7 +110,7 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
goto out_del_symlink;
list_add(&holder->list, &disk->slave_bdevs);
- mutex_unlock(&disk->open_mutex);
+ mutex_unlock(&blk_holder_mutex);
return 0;
out_del_symlink:
@@ -116,7 +118,7 @@ out_del_symlink:
out_free_holder:
kfree(holder);
out_unlock:
- mutex_unlock(&disk->open_mutex);
+ mutex_unlock(&blk_holder_mutex);
if (ret)
kobject_put(bdev->bd_holder_dir);
return ret;
@@ -140,7 +142,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
if (WARN_ON_ONCE(!disk->slave_dir))
return;
- mutex_lock(&disk->open_mutex);
+ mutex_lock(&blk_holder_mutex);
holder = bd_find_holder_disk(bdev, disk);
if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
del_symlink(disk->slave_dir, bdev_kobj(bdev));
@@ -149,6 +151,6 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
list_del_init(&holder->list);
kfree(holder);
}
- mutex_unlock(&disk->open_mutex);
+ mutex_unlock(&blk_holder_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
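
[Editor's note, not part of the patch] Moving the holder bookkeeping from disk->open_mutex to a dedicated, file-scope blk_holder_mutex decouples the slave-bdev list from the open/close path's locking and its ordering constraints. A rough userspace analogue of the "one static lock guards one private list" pattern, with every name invented for the sketch:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct holder {
	struct holder *next;
	int refcnt;
};

/* File-scope lock dedicated to this list, independent of whatever
 * per-object locks the rest of the subsystem takes. */
static pthread_mutex_t holder_lock = PTHREAD_MUTEX_INITIALIZER;
static struct holder *holders;

static void link_holder(void)
{
	struct holder *h = calloc(1, sizeof(*h));

	if (!h)
		return;
	h->refcnt = 1;

	pthread_mutex_lock(&holder_lock);
	h->next = holders;
	holders = h;
	pthread_mutex_unlock(&holder_lock);
}

int main(void)
{
	link_holder();
	printf("list head: %p\n", (void *)holders);
	return 0;
}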
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index f958e79277..02a916ba62 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -646,9 +646,8 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
- unsigned int shift = tags->bitmap_tags.sb.shift;
- dd->async_depth = max(1U, 3 * (1U << shift) / 4);
+ dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
}
diff --git a/block/opal_proto.h b/block/opal_proto.h
index dec7ce3a3e..d247a457bf 100644
--- a/block/opal_proto.h
+++ b/block/opal_proto.h
@@ -71,6 +71,7 @@ enum opal_response_token {
#define SHORT_ATOM_BYTE 0xBF
#define MEDIUM_ATOM_BYTE 0xDF
#define LONG_ATOM_BYTE 0xE3
+#define EMPTY_ATOM_BYTE 0xFF
#define OPAL_INVAL_PARAM 12
#define OPAL_MANUFACTURED_INACTIVE 0x08
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 3d9e9cd250..fa4dba5d85 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -1056,16 +1056,20 @@ static int response_parse(const u8 *buf, size_t length,
token_length = response_parse_medium(iter, pos);
else if (pos[0] <= LONG_ATOM_BYTE) /* long atom */
token_length = response_parse_long(iter, pos);
+ else if (pos[0] == EMPTY_ATOM_BYTE) /* empty atom */
+ token_length = 1;
else /* TOKEN */
token_length = response_parse_token(iter, pos);
if (token_length < 0)
return token_length;
+ if (pos[0] != EMPTY_ATOM_BYTE)
+ num_entries++;
+
pos += token_length;
total -= token_length;
iter++;
- num_entries++;
}
resp->num = num_entries;
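
[Editor's note, not part of the patch] The sed-opal parser change treats the 0xFF empty atom as one-byte filler: it is consumed so the scan keeps moving, but it no longer bumps num_entries, so padding cannot inflate the response count. A toy tokenizer showing the same skip-but-don't-count behaviour; the byte values and names here are illustrative, not the TCG Opal encoding:

#include <stddef.h>
#include <stdio.h>

#define EMPTY_BYTE 0xFF  /* illustrative filler marker */

/* Count non-filler tokens in a buffer of single-byte tokens. */
static size_t count_tokens(const unsigned char *buf, size_t len)
{
	size_t entries = 0;

	for (size_t i = 0; i < len; i++) {
		if (buf[i] == EMPTY_BYTE)
			continue;   /* consume the filler, record nothing */
		entries++;          /* real token: record it */
	}
	return entries;
}

int main(void)
{
	const unsigned char buf[] = { 0x01, 0xFF, 0xFF, 0x02, 0x03, 0xFF };

	printf("%zu entries\n", count_tokens(buf, sizeof(buf))); /* prints 3 */
	return 0;
}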
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 70661f58ee..dd5353c5eb 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1292,10 +1292,11 @@ config CRYPTO_JITTERENTROPY
A non-physical non-deterministic ("true") RNG (e.g., an entropy source
compliant with NIST SP800-90B) intended to provide a seed to a
- deterministic RNG (e.g. per NIST SP800-90C).
+ deterministic RNG (e.g., per NIST SP800-90C).
This RNG does not perform any cryptographic whitening of the generated
+ random numbers.
- See https://www.chronox.de/jent.html
+ See https://www.chronox.de/jent/
if CRYPTO_JITTERENTROPY
if CRYPTO_FIPS && EXPERT
diff --git a/crypto/asymmetric_keys/mscode_parser.c b/crypto/asymmetric_keys/mscode_parser.c
index 05402ef896..8aecbe4637 100644
--- a/crypto/asymmetric_keys/mscode_parser.c
+++ b/crypto/asymmetric_keys/mscode_parser.c
@@ -75,6 +75,9 @@ int mscode_note_digest_algo(void *context, size_t hdrlen,
oid = look_up_OID(value, vlen);
switch (oid) {
+ case OID_sha1:
+ ctx->digest_algo = "sha1";
+ break;
case OID_sha256:
ctx->digest_algo = "sha256";
break;
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index 5b08c50722..231ad7b378 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -227,6 +227,9 @@ int pkcs7_sig_note_digest_algo(void *context, size_t hdrlen,
struct pkcs7_parse_context *ctx = context;
switch (ctx->last_oid) {
+ case OID_sha1:
+ ctx->sinfo->sig->hash_algo = "sha1";
+ break;
case OID_sha256:
ctx->sinfo->sig->hash_algo = "sha256";
break;
@@ -278,6 +281,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
ctx->sinfo->sig->pkey_algo = "rsa";
ctx->sinfo->sig->encoding = "pkcs1";
break;
+ case OID_id_ecdsa_with_sha1:
case OID_id_ecdsa_with_sha224:
case OID_id_ecdsa_with_sha256:
case OID_id_ecdsa_with_sha384:
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index e5f22691fe..e314fd57e6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -115,7 +115,8 @@ software_key_determine_akcipher(const struct public_key *pkey,
*/
if (!hash_algo)
return -EINVAL;
- if (strcmp(hash_algo, "sha224") != 0 &&
+ if (strcmp(hash_algo, "sha1") != 0 &&
+ strcmp(hash_algo, "sha224") != 0 &&
strcmp(hash_algo, "sha256") != 0 &&
strcmp(hash_algo, "sha384") != 0 &&
strcmp(hash_algo, "sha512") != 0 &&
diff --git a/crypto/asymmetric_keys/signature.c b/crypto/asymmetric_keys/signature.c
index 398983be77..2deff81f8a 100644
--- a/crypto/asymmetric_keys/signature.c
+++ b/crypto/asymmetric_keys/signature.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(decrypt_blob);
* Sign the specified data blob using the private key specified by params->key.
* The signature is wrapped in an encoding if params->encoding is specified
* (eg. "pkcs1"). If the encoding needs to know the digest type, this can be
- * passed through params->hash_algo (eg. "sha512").
+ * passed through params->hash_algo (eg. "sha1").
*
* Returns the length of the data placed in the signature buffer or an error.
*/
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 487204d394..bb0bffa271 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -198,6 +198,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
default:
return -ENOPKG; /* Unsupported combination */
+ case OID_sha1WithRSAEncryption:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto rsa_pkcs1;
+
case OID_sha256WithRSAEncryption:
ctx->cert->sig->hash_algo = "sha256";
goto rsa_pkcs1;
@@ -214,6 +218,10 @@ int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
ctx->cert->sig->hash_algo = "sha224";
goto rsa_pkcs1;
+ case OID_id_ecdsa_with_sha1:
+ ctx->cert->sig->hash_algo = "sha1";
+ goto ecdsa;
+
case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
ctx->cert->sig->hash_algo = "sha3-256";
goto rsa_pkcs1;
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index d7e9839754..0cd6e06002 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -653,6 +653,30 @@ static const struct akcipher_testvec rsa_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
{
.key =
+ "\x04\xf7\x46\xf8\x2f\x15\xf6\x22\x8e\xd7\x57\x4f\xcc\xe7\xbb\xc1"
+ "\xd4\x09\x73\xcf\xea\xd0\x15\x07\x3d\xa5\x8a\x8a\x95\x43\xe4\x68"
+ "\xea\xc6\x25\xc1\xc1\x01\x25\x4c\x7e\xc3\x3c\xa6\x04\x0a\xe7\x08"
+ "\x98",
+ .key_len = 49,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x01",
+ .param_len = 21,
+ .m =
+ "\xcd\xb9\xd2\x1c\xb7\x6f\xcd\x44\xb3\xfd\x63\xea\xa3\x66\x7f\xae"
+ "\x63\x85\xe7\x82",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x35\x02\x19\x00\xba\xe5\x93\x83\x6e\xb6\x3b\x63\xa0\x27\x91"
+ "\xc6\xf6\x7f\xc3\x09\xad\x59\xad\x88\x27\xd6\x92\x6b\x02\x18\x10"
+ "\x68\x01\x9d\xba\xce\x83\x08\xef\x95\x52\x7b\xa0\x0f\xe4\x18\x86"
+ "\x80\x6f\xa5\x79\x77\xda\xd0",
+ .c_size = 55,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\xb6\x4b\xb1\xd1\xac\xba\x24\x8f\x65\xb2\x60\x00\x90\xbf\xbd"
"\x78\x05\x73\xe9\x79\x1d\x6f\x7c\x0b\xd2\xc3\x93\xa7\x28\xe1\x75"
"\xf7\xd5\x95\x1d\x28\x10\xc0\x75\x50\x5c\x1a\x4f\x3f\x8f\xa5\xee"
@@ -756,6 +780,32 @@ static const struct akcipher_testvec ecdsa_nist_p192_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
{
.key =
+ "\x04\xb9\x7b\xbb\xd7\x17\x64\xd2\x7e\xfc\x81\x5d\x87\x06\x83\x41"
+ "\x22\xd6\x9a\xaa\x87\x17\xec\x4f\x63\x55\x2f\x94\xba\xdd\x83\xe9"
+ "\x34\x4b\xf3\xe9\x91\x13\x50\xb6\xcb\xca\x62\x08\xe7\x3b\x09\xdc"
+ "\xc3\x63\x4b\x2d\xb9\x73\x53\xe4\x45\xe6\x7c\xad\xe7\x6b\xb0\xe8"
+ "\xaf",
+ .key_len = 65,
+ .params =
+ "\x30\x13\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x08\x2a\x86\x48"
+ "\xce\x3d\x03\x01\x07",
+ .param_len = 21,
+ .m =
+ "\xc2\x2b\x5f\x91\x78\x34\x26\x09\x42\x8d\x6f\x51\xb2\xc5\xaf\x4c"
+ "\x0b\xde\x6a\x42",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x46\x02\x21\x00\xf9\x25\xce\x9f\x3a\xa6\x35\x81\xcf\xd4\xe7"
+ "\xb7\xf0\x82\x56\x41\xf7\xd4\xad\x8d\x94\x5a\x69\x89\xee\xca\x6a"
+ "\x52\x0e\x48\x4d\xcc\x02\x21\x00\xd7\xe4\xef\x52\x66\xd3\x5b\x9d"
+ "\x8a\xfa\x54\x93\x29\xa7\x70\x86\xf1\x03\x03\xf3\x3b\xe2\x73\xf7"
+ "\xfb\x9d\x8b\xde\xd4\x8d\x6f\xad",
+ .c_size = 72,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
+ .key =
"\x04\x8b\x6d\xc0\x33\x8e\x2d\x8b\x67\xf5\xeb\xc4\x7f\xa0\xf5\xd9"
"\x7b\x03\xa5\x78\x9a\xb5\xea\x14\xe4\x23\xd0\xaf\xd7\x0e\x2e\xa0"
"\xc9\x8b\xdb\x95\xf8\xb3\xaf\xac\x00\x2c\x2c\x1f\x7a\xfd\x95\x88"
@@ -866,6 +916,36 @@ static const struct akcipher_testvec ecdsa_nist_p256_tv_template[] = {
static const struct akcipher_testvec ecdsa_nist_p384_tv_template[] = {
{
+ .key = /* secp384r1(sha1) */
+ "\x04\x89\x25\xf3\x97\x88\xcb\xb0\x78\xc5\x72\x9a\x14\x6e\x7a\xb1"
+ "\x5a\xa5\x24\xf1\x95\x06\x9e\x28\xfb\xc4\xb9\xbe\x5a\x0d\xd9\x9f"
+ "\xf3\xd1\x4d\x2d\x07\x99\xbd\xda\xa7\x66\xec\xbb\xea\xba\x79\x42"
+ "\xc9\x34\x89\x6a\xe7\x0b\xc3\xf2\xfe\x32\x30\xbe\xba\xf9\xdf\x7e"
+ "\x4b\x6a\x07\x8e\x26\x66\x3f\x1d\xec\xa2\x57\x91\x51\xdd\x17\x0e"
+ "\x0b\x25\xd6\x80\x5c\x3b\xe6\x1a\x98\x48\x91\x45\x7a\x73\xb0\xc3"
+ "\xf1",
+ .key_len = 97,
+ .params =
+ "\x30\x10\x06\x07\x2a\x86\x48\xce\x3d\x02\x01\x06\x05\x2b\x81\x04"
+ "\x00\x22",
+ .param_len = 18,
+ .m =
+ "\x12\x55\x28\xf0\x77\xd5\xb6\x21\x71\x32\x48\xcd\x28\xa8\x25\x22"
+ "\x3a\x69\xc1\x93",
+ .m_size = 20,
+ .algo = OID_id_ecdsa_with_sha1,
+ .c =
+ "\x30\x66\x02\x31\x00\xf5\x0f\x24\x4c\x07\x93\x6f\x21\x57\x55\x07"
+ "\x20\x43\x30\xde\xa0\x8d\x26\x8e\xae\x63\x3f\xbc\x20\x3a\xc6\xf1"
+ "\x32\x3c\xce\x70\x2b\x78\xf1\x4c\x26\xe6\x5b\x86\xcf\xec\x7c\x7e"
+ "\xd0\x87\xd7\xd7\x6e\x02\x31\x00\xcd\xbb\x7e\x81\x5d\x8f\x63\xc0"
+ "\x5f\x63\xb1\xbe\x5e\x4c\x0e\xa1\xdf\x28\x8c\x1b\xfa\xf9\x95\x88"
+ "\x74\xa0\x0f\xbf\xaf\xc3\x36\x76\x4a\xa1\x59\xf1\x1c\xa4\x58\x26"
+ "\x79\x12\x2a\xb7\xc5\x15\x92\xc5",
+ .c_size = 104,
+ .public_key_vec = true,
+ .siggen_sigver_test = true,
+ }, {
.key = /* secp384r1(sha224) */
"\x04\x69\x6c\xcf\x62\xee\xd0\x0d\xe5\xb5\x2f\x70\x54\xcf\x26\xa0"
"\xd9\x98\x8d\x92\x2a\xab\x9b\x11\xcb\x48\x18\xa1\xa9\x0d\xd5\x18"
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 9290d43745..83821e1757 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -2047,7 +2047,7 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64
notifier_event->events_mask |= event_mask;
if (notifier_event->eventfd)
- eventfd_signal(notifier_event->eventfd, 1);
+ eventfd_signal(notifier_event->eventfd);
mutex_unlock(&notifier_event->lock);
}
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index eea2a2fa4f..45f9061031 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -208,8 +208,10 @@ void spk_do_flush(void)
wake_up_process(speakup_task);
}
-void synth_write(const char *buf, size_t count)
+void synth_write(const char *_buf, size_t count)
{
+ const unsigned char *buf = (const unsigned char *) _buf;
+
while (count--)
synth_buffer_add(*buf++);
synth_start();
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 55437f5e0c..bd6a7857ce 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1430,6 +1430,8 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
acpi_processor_registered--;
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ kfree(dev);
}
pr->flags.power_setup_done = 0;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 03b52d31a3..c843feb02e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -576,6 +576,39 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
},
},
+ {
+ /* Infinity E15-5A165-BM */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RG1E0009COM"),
+ },
+ },
+ {
+ /* Infinity E15-5A305-1M */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GM5RGEE0016COM"),
+ },
+ },
+ {
+ /* Lunnen Ground 15 / AMD Ryzen 5 5500U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
+ DMI_MATCH(DMI_BOARD_NAME, "LLL5DAW"),
+ },
+ },
+ {
+ /* Lunnen Ground 16 / AMD Ryzen 7 5800U */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Lunnen"),
+ DMI_MATCH(DMI_BOARD_NAME, "LL6FA"),
+ },
+ },
+ {
+ /* MAIBENBEN X577 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MAIBENBEN"),
+ DMI_MATCH(DMI_BOARD_NAME, "X577"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 02bb2cce42..35ad5781f0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -314,18 +314,14 @@ static int acpi_scan_device_check(struct acpi_device *adev)
* again).
*/
if (adev->handler) {
- dev_warn(&adev->dev, "Already enumerated\n");
- return -EALREADY;
+ dev_dbg(&adev->dev, "Already enumerated\n");
+ return 0;
}
error = acpi_bus_scan(adev->handle);
if (error) {
dev_warn(&adev->dev, "Namespace scan failure\n");
return error;
}
- if (!adev->handler) {
- dev_warn(&adev->dev, "Enumeration failure\n");
- error = -ENODEV;
- }
} else {
error = acpi_scan_device_not_enumerated(adev);
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index da2e74fce2..df3fd6474b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -671,11 +671,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
- hpriv->saved_port_map = 0x3f;
- }
-
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index b0d6e69c4a..214b935c2c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -712,8 +712,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_ncq_enabled |= 1 << devno;
/* If we are resuming, wake up the device */
- if (ap->pflags & ATA_PFLAG_RESUMING)
+ if (ap->pflags & ATA_PFLAG_RESUMING) {
+ dev->flags |= ATA_DFLAG_RESUMING;
ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
+ }
}
}
@@ -3169,6 +3171,7 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
return 0;
err:
+ dev->flags &= ~ATA_DFLAG_RESUMING;
*r_failed_dev = dev;
return rc;
}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 0a0f483124..2f4c588376 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4730,6 +4730,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
+ bool do_resume;
int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
@@ -4751,7 +4752,15 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (scsi_device_get(sdev))
continue;
+ do_resume = dev->flags & ATA_DFLAG_RESUMING;
+
spin_unlock_irqrestore(ap->lock, flags);
+ if (do_resume) {
+ ret = scsi_resume_device(sdev);
+ if (ret == -EWOULDBLOCK)
+ goto unlock;
+ dev->flags &= ~ATA_DFLAG_RESUMING;
+ }
ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 548491de81..ef427ee787 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -565,6 +565,7 @@ CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
+CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -579,6 +580,7 @@ static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
+static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -594,6 +596,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_retbleed.attr,
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
+ &dev_attr_reg_file_data_sampling.attr,
NULL
};
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index 42171f766d..5a5a9e978e 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -313,8 +313,10 @@ void dev_pm_enable_wake_irq_complete(struct device *dev)
return;
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
- wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE) {
enable_irq(wirq->irq);
+ wirq->status |= WAKE_IRQ_DEDICATED_ENABLED;
+ }
}
/**
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index e14cc03a17..4c9e38ead5 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -9,6 +9,23 @@
#define BLOCK_TEST_SIZE 12
+static void get_changed_bytes(void *orig, void *new, size_t size)
+{
+ char *o = orig;
+ char *n = new;
+ int i;
+
+ get_random_bytes(new, size);
+
+ /*
+ * This could be nicer and more efficient but we shouldn't
+ * super care.
+ */
+ for (i = 0; i < size; i++)
+ while (n[i] == o[i])
+ get_random_bytes(&n[i], 1);
+}
+
static const struct regmap_config test_regmap_config = {
.max_register = BLOCK_TEST_SIZE,
.reg_stride = 1,
@@ -1192,7 +1209,7 @@ static void raw_sync(struct kunit *test)
struct regmap *map;
struct regmap_config config;
struct regmap_ram_data *data;
- u16 val[2];
+ u16 val[3];
u16 *hw_buf;
unsigned int rval;
int i;
@@ -1206,17 +1223,13 @@ static void raw_sync(struct kunit *test)
hw_buf = (u16 *)data->vals;
- get_random_bytes(&val, sizeof(val));
+ get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));
/* Do a regular write and a raw write in cache only mode */
regcache_cache_only(map, true);
- KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
- if (config.val_format_endian == REGMAP_ENDIAN_BIG)
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- be16_to_cpu(val[0])));
- else
- KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
- le16_to_cpu(val[0])));
+ KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
+ sizeof(u16) * 2));
+ KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));
/* We should read back the new values, and defaults for the rest */
for (i = 0; i < config.max_register + 1; i++) {
@@ -1225,24 +1238,34 @@ static void raw_sync(struct kunit *test)
switch (i) {
case 2:
case 3:
- case 6:
if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
KUNIT_EXPECT_EQ(test, rval,
- be16_to_cpu(val[i % 2]));
+ be16_to_cpu(val[i - 2]));
} else {
KUNIT_EXPECT_EQ(test, rval,
- le16_to_cpu(val[i % 2]));
+ le16_to_cpu(val[i - 2]));
}
break;
+ case 4:
+ KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
+ break;
default:
KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
break;
}
}
+
+ /*
+ * The value written via _write() was translated by the core,
+ * translate the original copy for comparison purposes.
+ */
+ if (config.val_format_endian == REGMAP_ENDIAN_BIG)
+ val[2] = cpu_to_be16(val[2]);
+ else
+ val[2] = cpu_to_le16(val[2]);
/* The values should not appear in the "hardware" */
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));
for (i = 0; i < config.max_register + 1; i++)
data->written[i] = false;
@@ -1253,8 +1276,7 @@ static void raw_sync(struct kunit *test)
KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
/* The values should now appear in the "hardware" */
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
- KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
+ KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
regmap_exit(map);
}
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d7317425be..cc9077b588 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -419,13 +419,16 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
rcu_read_lock();
for_each_netdev_rcu(&init_net, ifp) {
dev_hold(ifp);
- if (!is_aoe_netif(ifp))
- goto cont;
+ if (!is_aoe_netif(ifp)) {
+ dev_put(ifp);
+ continue;
+ }
skb = new_skb(sizeof *h + sizeof *ch);
if (skb == NULL) {
printk(KERN_INFO "aoe: skb alloc failure\n");
- goto cont;
+ dev_put(ifp);
+ continue;
}
skb_put(skb, sizeof *h + sizeof *ch);
skb->dev = ifp;
@@ -440,9 +443,6 @@ aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *qu
h->major = cpu_to_be16(aoemajor);
h->minor = aoeminor;
h->cmd = AOECMD_CFG;
-
-cont:
- dev_put(ifp);
}
rcu_read_unlock();
}
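
[Editor's note, not part of the patch] Both early exits in aoecmd_cfg_pkts() now drop the reference taken by dev_hold() before continuing, while the success path keeps it because the queued skb hands the device reference off to the transmit path (which, per the aoenet.c hunk below, releases it). A generic sketch of keeping a get/put balanced across skip paths, using a hypothetical refcounted object:

#include <stdio.h>

struct obj {
	int refcnt;
	int usable;
};

static void obj_get(struct obj *o) { o->refcnt++; }
static void obj_put(struct obj *o) { o->refcnt--; }

/* Take a reference per object; drop it on every skip path, keep it
 * (hand it off to a consumer) only when the object is actually used. */
static void scan(struct obj *objs, int n)
{
	for (int i = 0; i < n; i++) {
		struct obj *o = &objs[i];

		obj_get(o);
		if (!o->usable) {
			obj_put(o);     /* balanced: skip path releases it */
			continue;
		}
		/* reference stays held; the consumer releases it later */
	}
}

int main(void)
{
	struct obj objs[] = { { 0, 1 }, { 0, 0 }, { 0, 1 } };

	scan(objs, 3);
	for (int i = 0; i < 3; i++)
		printf("obj %d refcnt %d\n", i, objs[i].refcnt);
	return 0;
}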
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
index c51ea95bc2..923a134fd7 100644
--- a/drivers/block/aoe/aoenet.c
+++ b/drivers/block/aoe/aoenet.c
@@ -63,6 +63,7 @@ tx(int id) __must_hold(&txlock)
pr_warn("aoe: packet could not be sent on %s. %s\n",
ifp ? ifp->name : "netif",
"consider increasing tx_queue_len");
+ dev_put(ifp);
spin_lock_irq(&txlock);
}
return 0;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index aa65313aab..df738eab02 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -2437,6 +2437,12 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
}
dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
+ if (!dev_list) {
+ nlmsg_free(reply);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index aaabb73208..285418dbb4 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -372,8 +372,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
struct btmediatek_data *data = hci_get_priv(hdev);
int err;
- if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ kfree_skb(skb);
return 0;
+ }
switch (data->cd_info.state) {
case HCI_DEVCOREDUMP_IDLE:
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 951fe3014a..abccd571cf 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1234,6 +1234,9 @@ static int btnxpuart_close(struct hci_dev *hdev)
ps_wakeup(nxpdev);
serdev_device_close(nxpdev->serdev);
+ skb_queue_purge(&nxpdev->txq);
+ kfree_skb(nxpdev->rx_skb);
+ nxpdev->rx_skb = NULL;
clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state);
return 0;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index b8e9de887b..81ccb88c90 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -3271,7 +3271,6 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
{
struct btusb_data *data = hci_get_drvdata(hdev);
u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle);
- struct sk_buff *skb_cd;
switch (handle) {
case 0xfc6f: /* Firmware dump from device */
@@ -3284,9 +3283,12 @@ static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb)
* for backward compatibility, so we have to clone the packet
* extraly for the in-kernel coredump support.
*/
- skb_cd = skb_clone(skb, GFP_ATOMIC);
- if (skb_cd)
- btmtk_process_coredump(hdev, skb_cd);
+ if (IS_ENABLED(CONFIG_DEV_COREDUMP)) {
+ struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC);
+
+ if (skb_cd)
+ btmtk_process_coredump(hdev, skb_cd);
+ }
fallthrough;
case 0x05ff: /* Firmware debug logging 1 */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 71e748a947..c0436881a5 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -113,6 +113,7 @@ struct h5_vnd {
int (*suspend)(struct h5 *h5);
int (*resume)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
+ int sizeof_priv;
};
struct h5_device_data {
@@ -863,7 +864,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- return hci_uart_register_device(&h5->serdev_hu, &h5p);
+ return hci_uart_register_device_priv(&h5->serdev_hu, &h5p,
+ h5->vnd->sizeof_priv);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -1070,6 +1072,7 @@ static struct h5_vnd rtl_vnd = {
.suspend = h5_btrtl_suspend,
.resume = h5_btrtl_resume,
.acpi_gpio_map = acpi_btrtl_gpios,
+ .sizeof_priv = sizeof(struct btrealtek_data),
};
static const struct h5_device_data h5_data_rtl8822cs = {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index a65de7309d..bb69b7ea81 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -2304,7 +2304,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en) &&
+ if (IS_ERR(qcadev->bt_en) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
@@ -2313,7 +2313,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
- if (IS_ERR_OR_NULL(qcadev->sw_ctrl) &&
+ if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
data->soc_type == QCA_WCN7850))
@@ -2335,7 +2335,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
default:
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
- if (IS_ERR_OR_NULL(qcadev->bt_en)) {
+ if (IS_ERR(qcadev->bt_en)) {
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
power_ctrl_enabled = false;
}
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index f16fd79bc0..611a11fbb2 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -300,8 +300,9 @@ static const struct serdev_device_ops hci_serdev_client_ops = {
.write_wakeup = hci_uart_write_wakeup,
};
-int hci_uart_register_device(struct hci_uart *hu,
- const struct hci_uart_proto *p)
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv)
{
int err;
struct hci_dev *hdev;
@@ -325,7 +326,7 @@ int hci_uart_register_device(struct hci_uart *hu,
set_bit(HCI_UART_PROTO_READY, &hu->flags);
/* Initialize and register HCI device */
- hdev = hci_alloc_dev();
+ hdev = hci_alloc_dev_priv(sizeof_priv);
if (!hdev) {
BT_ERR("Can't allocate HCI device");
err = -ENOMEM;
@@ -394,7 +395,7 @@ err_rwsem:
percpu_free_rwsem(&hu->proto_lock);
return err;
}
-EXPORT_SYMBOL_GPL(hci_uart_register_device);
+EXPORT_SYMBOL_GPL(hci_uart_register_device_priv);
void hci_uart_unregister_device(struct hci_uart *hu)
{
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fb4a2d0d8c..68c8c7e95d 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -97,7 +97,17 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
-int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+
+int hci_uart_register_device_priv(struct hci_uart *hu,
+ const struct hci_uart_proto *p,
+ int sizeof_priv);
+
+static inline int hci_uart_register_device(struct hci_uart *hu,
+ const struct hci_uart_proto *p)
+{
+ return hci_uart_register_device_priv(hu, p, 0);
+}
+
void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index e6742998f3..d5e7fa9173 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -186,11 +186,12 @@ config SUNXI_RSB
config TEGRA_ACONNECT
tristate "Tegra ACONNECT Bus Driver"
- depends on ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA
depends on OF && PM
help
Driver for the Tegra ACONNECT bus which is used to interface with
- the devices inside the Audio Processing Engine (APE) for Tegra210.
+ the devices inside the Audio Processing Engine (APE) for
+ Tegra210 and later.
config TEGRA_GMI
tristate "Tegra Generic Memory Interface bus driver"
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 582d5c166a..934cdbca08 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1427,7 +1427,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
sizeof(struct mhi_ep_ring_item), 0,
0, NULL);
- if (!mhi_cntrl->ev_ring_el_cache) {
+ if (!mhi_cntrl->ring_item_cache) {
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 1b350412d8..64c8756576 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -919,8 +919,6 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
int rc;
u32 int_status;
- INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
-
rc = devm_request_threaded_irq(chip->dev.parent, irq, NULL,
tis_int_handler, IRQF_ONESHOT | flags,
dev_name(&chip->dev), chip);
@@ -1132,6 +1130,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
priv->phy_ops = phy_ops;
priv->locality_count = 0;
mutex_init(&priv->locality_count_mutex);
+ INIT_WORK(&priv->free_irq_work, tpm_tis_free_irq_func);
dev_set_drvdata(&chip->dev, priv);
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 019cf6079c..6d2eadefd9 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -639,8 +639,8 @@ static int hwicap_setup(struct platform_device *pdev, int id,
dev_set_drvdata(dev, (void *)drvdata);
drvdata->base_address = devm_platform_ioremap_resource(pdev, 0);
- if (!drvdata->base_address) {
- retval = -ENODEV;
+ if (IS_ERR(drvdata->base_address)) {
+ retval = PTR_ERR(drvdata->base_address);
goto failed;
}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 2253c154a8..20c4b28fed 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -418,6 +418,9 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
if (IS_ERR(hw))
return ERR_CAST(hw);
+ if (!hw)
+ return NULL;
+
return hw->core;
}
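
[Editor's note, not part of the patch] The clk_core_get() fix distinguishes three lookup outcomes: an encoded error pointer (propagate), NULL (no hardware clock behind the index, so return NULL rather than dereference), and a valid pointer (use it). A compact userspace imitation of the ERR_PTR/IS_ERR convention, entirely illustrative:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR() encoding. */
#define MAX_ERRNO 4095

static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

struct widget { int id; };

static struct widget *lookup(int key)
{
	static struct widget w = { .id = 42 };

	if (key < 0)
		return err_ptr(-EINVAL);  /* hard failure */
	if (key == 0)
		return NULL;              /* "nothing registered", not an error */
	return &w;
}

static int use(int key)
{
	struct widget *w = lookup(key);

	if (is_err(w))
		return (int)ptr_err(w);   /* propagate the error */
	if (!w)
		return 0;                 /* valid "not found", no dereference */
	return w->id;
}

int main(void)
{
	printf("%d %d %d\n", use(-1), use(0), use(7)); /* -22 0 42 */
	return 0;
}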
diff --git a/drivers/clk/hisilicon/clk-hi3519.c b/drivers/clk/hisilicon/clk-hi3519.c
index b871872d99..141b727ff6 100644
--- a/drivers/clk/hisilicon/clk-hi3519.c
+++ b/drivers/clk/hisilicon/clk-hi3519.c
@@ -130,7 +130,7 @@ static void hi3519_clk_unregister(struct platform_device *pdev)
of_clk_del_provider(pdev->dev.of_node);
hisi_clk_unregister_gate(hi3519_gate_clks,
- ARRAY_SIZE(hi3519_mux_clks),
+ ARRAY_SIZE(hi3519_gate_clks),
crg->clk_data);
hisi_clk_unregister_mux(hi3519_mux_clks,
ARRAY_SIZE(hi3519_mux_clks),
diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c
index ff4ca0edce..4623befafa 100644
--- a/drivers/clk/hisilicon/clk-hi3559a.c
+++ b/drivers/clk/hisilicon/clk-hi3559a.c
@@ -491,7 +491,6 @@ static void hisi_clk_register_pll(struct hi3559av100_pll_clock *clks,
clk = clk_register(NULL, &p_clk->hw);
if (IS_ERR(clk)) {
- devm_kfree(dev, p_clk);
dev_err(dev, "%s: failed to register clock %s\n",
__func__, clks[i].name);
continue;
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index e4300df88f..55ed211a5e 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -18,7 +18,12 @@
#define CLKEN0 0x000
#define CLKEN1 0x004
-#define SAI_MCLK_SEL(n) (0x300 + 4 * (n)) /* n in 0..5 */
+#define SAI1_MCLK_SEL 0x300
+#define SAI2_MCLK_SEL 0x304
+#define SAI3_MCLK_SEL 0x308
+#define SAI5_MCLK_SEL 0x30C
+#define SAI6_MCLK_SEL 0x310
+#define SAI7_MCLK_SEL 0x314
#define PDM_SEL 0x318
#define SAI_PLL_GNRL_CTL 0x400
@@ -95,13 +100,13 @@ static const struct clk_parent_data clk_imx8mp_audiomix_pll_bypass_sels[] = {
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK1_SEL, {}, \
clk_imx8mp_audiomix_sai##n##_mclk1_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai##n##_mclk1_parents), \
- SAI_MCLK_SEL(n), 1, 0 \
+ SAI##n##_MCLK_SEL, 1, 0 \
}, { \
"sai"__stringify(n)"_mclk2_sel", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_MCLK2_SEL, {}, \
clk_imx8mp_audiomix_sai_mclk2_parents, \
ARRAY_SIZE(clk_imx8mp_audiomix_sai_mclk2_parents), \
- SAI_MCLK_SEL(n), 4, 1 \
+ SAI##n##_MCLK_SEL, 4, 1 \
}, { \
"sai"__stringify(n)"_ipg_cg", \
IMX8MP_CLK_AUDIOMIX_SAI##n##_IPG, \
diff --git a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
index 9cffd278e9..1b8f859b6b 100644
--- a/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt7622-apmixedsys.c
@@ -127,7 +127,6 @@ static void clk_mt7622_apmixed_remove(struct platform_device *pdev)
of_clk_del_provider(node);
mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
- mtk_free_clk_data(clk_data);
}
static const struct of_device_id of_match_clk_mt7622_apmixed[] = {
diff --git a/drivers/clk/mediatek/clk-mt7981-topckgen.c b/drivers/clk/mediatek/clk-mt7981-topckgen.c
index 682f4ca9e8..493aa11d3a 100644
--- a/drivers/clk/mediatek/clk-mt7981-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7981-topckgen.c
@@ -357,8 +357,9 @@ static const struct mtk_mux top_muxes[] = {
MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
0x1C0, 21),
- MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
- 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel", sgm_reg_parents,
+ 0x050, 0x054, 0x058, 16, 1, 23, 0x1C0, 22,
+ CLK_IS_CRITICAL | CLK_SET_RATE_PARENT),
MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP97B_SEL, "eip97b_sel", eip97b_parents,
0x050, 0x054, 0x058, 24, 3, 31, 0x1C0, 23),
/* CLK_CFG_6 */
diff --git a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
index d1239b4b3d..41bb2d2e2e 100644
--- a/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8135-apmixedsys.c
@@ -59,7 +59,7 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
ret = mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
if (ret)
- return ret;
+ goto free_clk_data;
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (ret)
@@ -69,6 +69,8 @@ static int clk_mt8135_apmixed_probe(struct platform_device *pdev)
unregister_plls:
mtk_clk_unregister_plls(plls, ARRAY_SIZE(plls), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
return ret;
}
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 6e23461a04..934d5a15ac 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -790,7 +790,7 @@ static const struct mtk_gate infra_clks[] = {
/* infra_sspm_26m_self is main clock in co-processor, should not be closed in Linux. */
GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_26M_SELF, "infra_sspm_26m_self", "f_f26m_ck", 3, CLK_IS_CRITICAL),
/* infra_sspm_32k_self is main clock in co-processor, should not be closed in Linux. */
- GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "f_f26m_ck", 4, CLK_IS_CRITICAL),
+ GATE_INFRA3_FLAGS(CLK_INFRA_SSPM_32K_SELF, "infra_sspm_32k_self", "clk32k", 4, CLK_IS_CRITICAL),
GATE_INFRA3(CLK_INFRA_UFS_AXI, "infra_ufs_axi", "axi_sel", 5),
GATE_INFRA3(CLK_INFRA_I2C6, "infra_i2c6", "i2c_sel", 6),
GATE_INFRA3(CLK_INFRA_AP_MSDC0, "infra_ap_msdc0", "msdc50_hclk_sel", 7),
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index c12f81dfa6..5f60f2bcca 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -2142,7 +2142,9 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_vclk_input,
&axg_vclk2_input,
&axg_vclk_div,
+ &axg_vclk_div1,
&axg_vclk2_div,
+ &axg_vclk2_div1,
&axg_vclk_div2_en,
&axg_vclk_div4_en,
&axg_vclk_div6_en,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 735adfefc3..e792e0b130 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -759,6 +759,8 @@ static struct clk_branch disp_cc_mdss_vsync_clk = {
static struct gdsc mdss_gdsc = {
.gdscr = 0x3000,
+ .en_few_wait_val = 0x6,
+ .en_rest_wait_val = 0x5,
.pd = {
.name = "mdss_gdsc",
},
diff --git a/drivers/clk/qcom/gcc-ipq5018.c b/drivers/clk/qcom/gcc-ipq5018.c
index 4aba47e870..c1732d70e3 100644
--- a/drivers/clk/qcom/gcc-ipq5018.c
+++ b/drivers/clk/qcom/gcc-ipq5018.c
@@ -857,6 +857,7 @@ static struct clk_rcg2 lpass_sway_clk_src = {
static const struct freq_tbl ftbl_pcie0_aux_clk_src[] = {
F(2000000, P_XO, 12, 0, 0),
+ { }
};
static struct clk_rcg2 pcie0_aux_clk_src = {
@@ -1099,6 +1100,7 @@ static const struct freq_tbl ftbl_qpic_io_macro_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(200000000, P_GPLL0, 4, 0, 0),
F(320000000, P_GPLL0, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 qpic_io_macro_clk_src = {
@@ -1194,6 +1196,7 @@ static struct clk_rcg2 ubi0_axi_clk_src = {
static const struct freq_tbl ftbl_ubi0_core_clk_src[] = {
F(850000000, P_UBI32_PLL, 1, 0, 0),
F(1000000000, P_UBI32_PLL, 1, 0, 0),
+ { }
};
static struct clk_rcg2 ubi0_core_clk_src = {
@@ -1754,7 +1757,7 @@ static struct clk_branch gcc_gmac0_sys_clk = {
.halt_check = BRANCH_HALT_DELAY,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x683190,
+ .enable_reg = 0x68190,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data) {
.name = "gcc_gmac0_sys_clk",
@@ -2180,7 +2183,7 @@ static struct clk_branch gcc_pcie1_axi_s_clk = {
};
static struct clk_branch gcc_pcie1_pipe_clk = {
- .halt_reg = 8,
+ .halt_reg = 0x76018,
.halt_check = BRANCH_HALT_DELAY,
.halt_bit = 31,
.clkr = {
@@ -3632,7 +3635,7 @@ static const struct qcom_reset_map gcc_ipq5018_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x26000, 0 },
[GCC_TCSR_BCR] = { 0x28000, 0 },
[GCC_TLMM_BCR] = { 0x34000, 0 },
- [GCC_UBI0_AXI_ARES] = { 0x680},
+ [GCC_UBI0_AXI_ARES] = { 0x68010, 0 },
[GCC_UBI0_AHB_ARES] = { 0x68010, 1 },
[GCC_UBI0_NC_AXI_ARES] = { 0x68010, 2 },
[GCC_UBI0_DBG_ARES] = { 0x68010, 3 },
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index b366912cd6..ef1e2ce480 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -1554,6 +1554,7 @@ static struct clk_regmap_div nss_ubi0_div_clk_src = {
static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
F(24000000, P_XO, 1, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_core_pi_sleep_clk[] = {
@@ -1734,6 +1735,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(160000000, P_GPLL0, 5, 0, 0),
F(216000000, P_GPLL6, 5, 0, 0),
F(308570000, P_GPLL6, 3.5, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c
index b7faf12a51..7bc679871f 100644
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -644,6 +644,7 @@ static struct clk_rcg2 pcie0_axi_clk_src = {
static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_sleep_clk[] = {
@@ -795,6 +796,7 @@ static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
F(308570000, P_GPLL6, 3.5, 0, 0),
+ { }
};
static const struct clk_parent_data gcc_xo_gpll0_gpll6_gpll0_div2[] = {
diff --git a/drivers/clk/qcom/gcc-ipq9574.c b/drivers/clk/qcom/gcc-ipq9574.c
index e8190108e1..0a3f846695 100644
--- a/drivers/clk/qcom/gcc-ipq9574.c
+++ b/drivers/clk/qcom/gcc-ipq9574.c
@@ -2082,6 +2082,7 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
static const struct freq_tbl ftbl_sdcc_ice_core_clk_src[] = {
F(150000000, P_GPLL4, 8, 0, 0),
F(300000000, P_GPLL4, 4, 0, 0),
+ { }
};
static struct clk_rcg2 sdcc1_ice_core_clk_src = {
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 725cd52d23..ea4c3bf4fb 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -4037,3 +4037,4 @@ module_exit(gcc_sdm845_exit);
MODULE_DESCRIPTION("QTI GCC SDM845 Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gcc-sdm845");
+MODULE_SOFTDEP("pre: rpmhpd");
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 02fc21208d..c89700ab93 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -348,6 +348,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(333430000, P_MMPLL1, 3.5, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@@ -372,6 +373,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(228570000, P_MMPLL0, 3.5, 0, 0),
F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 ocmemnoc_clk_src = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index a31f6cf0c4..36f460b78b 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -290,6 +290,7 @@ static struct freq_tbl ftbl_mmss_axi_clk[] = {
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
F(466800000, P_MMPLL1, 2.5, 0, 0),
+ { }
};
static struct clk_rcg2 mmss_axi_clk_src = {
@@ -314,6 +315,7 @@ static struct freq_tbl ftbl_ocmemnoc_clk[] = {
F(150000000, P_GPLL0, 4, 0, 0),
F(291750000, P_MMPLL1, 4, 0, 0),
F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
};
static struct clk_rcg2 ocmemnoc_clk_src = {
diff --git a/drivers/clk/qcom/reset.c b/drivers/clk/qcom/reset.c
index e45e32804d..d96c96a908 100644
--- a/drivers/clk/qcom/reset.c
+++ b/drivers/clk/qcom/reset.c
@@ -22,8 +22,8 @@ static int qcom_reset(struct reset_controller_dev *rcdev, unsigned long id)
return 0;
}
-static int
-qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_set_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct qcom_reset_controller *rst;
const struct qcom_reset_map *map;
@@ -33,21 +33,22 @@ qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
map = &rst->reset_map[id];
mask = map->bitmask ? map->bitmask : BIT(map->bit);
- return regmap_update_bits(rst->regmap, map->reg, mask, mask);
+ regmap_update_bits(rst->regmap, map->reg, mask, assert ? mask : 0);
+
+ /* Read back the register to ensure write completion, ignore the value */
+ regmap_read(rst->regmap, map->reg, &mask);
+
+ return 0;
}
-static int
-qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+static int qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
- struct qcom_reset_controller *rst;
- const struct qcom_reset_map *map;
- u32 mask;
-
- rst = to_qcom_reset_controller(rcdev);
- map = &rst->reset_map[id];
- mask = map->bitmask ? map->bitmask : BIT(map->bit);
+ return qcom_reset_set_assert(rcdev, id, true);
+}
- return regmap_update_bits(rst->regmap, map->reg, mask, 0);
+static int qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return qcom_reset_set_assert(rcdev, id, false);
}
const struct reset_control_ops qcom_reset_ops = {
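
[Editor's note, not part of the patch] Collapsing assert and deassert into one helper removes the duplicated register lookup, and the extra read-back after the write helps ensure the update has been posted before the reset is considered toggled. A schematic version over a fake register file; the types and names are invented for the sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];   /* stand-in register file */

static void reg_update_bits(unsigned int reg, uint32_t mask, uint32_t val)
{
	regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

static uint32_t reg_read(unsigned int reg)
{
	return regs[reg];
}

/* One path for both directions; 'assert' selects set vs. clear. */
static int reset_set_assert(unsigned int reg, uint32_t mask, bool assert)
{
	reg_update_bits(reg, mask, assert ? mask : 0);

	/* Read back to make sure the write is posted; value is ignored. */
	(void)reg_read(reg);

	return 0;
}

static int reset_assert(unsigned int reg, uint32_t mask)
{
	return reset_set_assert(reg, mask, true);
}

static int reset_deassert(unsigned int reg, uint32_t mask)
{
	return reset_set_assert(reg, mask, false);
}

int main(void)
{
	reset_assert(3, 1u << 4);
	printf("reg3=0x%x\n", regs[3]);   /* 0x10 */
	reset_deassert(3, 1u << 4);
	printf("reg3=0x%x\n", regs[3]);   /* 0x0 */
	return 0;
}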
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
index f721835c7e..cc06127406 100644
--- a/drivers/clk/renesas/r8a779f0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -161,7 +161,7 @@ static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
DEF_MOD("cmt1", 911, R8A779F0_CLK_R),
DEF_MOD("cmt2", 912, R8A779F0_CLK_R),
DEF_MOD("cmt3", 913, R8A779F0_CLK_R),
- DEF_MOD("pfc0", 915, R8A779F0_CLK_CL16M),
+ DEF_MOD("pfc0", 915, R8A779F0_CLK_CPEX),
DEF_MOD("tsc", 919, R8A779F0_CLK_CL16M),
DEF_MOD("rswitch2", 1505, R8A779F0_CLK_RSW2),
DEF_MOD("ether-serdes", 1506, R8A779F0_CLK_S0D2_HSC),
diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
index 7cc580d673..7999faa9a9 100644
--- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c
@@ -22,7 +22,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A779G0_CLK_R,
+ LAST_DT_CORE_CLK = R8A779G0_CLK_CP,
/* External Input Clocks */
CLK_EXTAL,
@@ -141,6 +141,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = {
DEF_FIXED("svd2_vip", R8A779G0_CLK_SVD2_VIP, CLK_SV_VIP, 2, 1),
DEF_FIXED("cbfusa", R8A779G0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
DEF_FIXED("cpex", R8A779G0_CLK_CPEX, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cp", R8A779G0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("viobus", R8A779G0_CLK_VIOBUS, CLK_VIO, 1, 1),
DEF_FIXED("viobusd2", R8A779G0_CLK_VIOBUSD2, CLK_VIO, 2, 1),
DEF_FIXED("vcbus", R8A779G0_CLK_VCBUS, CLK_VC, 1, 1),
@@ -230,10 +231,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = {
DEF_MOD("cmt1", 911, R8A779G0_CLK_R),
DEF_MOD("cmt2", 912, R8A779G0_CLK_R),
DEF_MOD("cmt3", 913, R8A779G0_CLK_R),
- DEF_MOD("pfc0", 915, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc1", 916, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc2", 917, R8A779G0_CLK_CL16M),
- DEF_MOD("pfc3", 918, R8A779G0_CLK_CL16M),
+ DEF_MOD("pfc0", 915, R8A779G0_CLK_CP),
+ DEF_MOD("pfc1", 916, R8A779G0_CLK_CP),
+ DEF_MOD("pfc2", 917, R8A779G0_CLK_CP),
+ DEF_MOD("pfc3", 918, R8A779G0_CLK_CP),
DEF_MOD("tsc", 919, R8A779G0_CLK_CL16M),
DEF_MOD("ssiu", 2926, R8A779G0_CLK_S0D6_PER),
DEF_MOD("ssi", 2927, R8A779G0_CLK_S0D6_PER),
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index b70bb378ab..075ade0925 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -138,7 +138,7 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
- DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_shdi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4),
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 1047278c90..bc822b9fd7 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -178,7 +178,7 @@ static const struct {
DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_shdi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
- DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI0_STS, sel_shdi,
+ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_shdi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index bdc1eef7d6..c7b0b97513 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -605,7 +605,7 @@ static const struct samsung_div_clock apm_div_clks[] __initconst = {
static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
- CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
+ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
"mout_clkcmu_chub_bus",
CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
@@ -974,19 +974,19 @@ static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
- MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
- MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
- CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
+ MUX_F(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1, CLK_SET_RATE_PARENT, 0),
+ MUX_F(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
- DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
- DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
- CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
+ DIV_F(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
@@ -1001,12 +1001,12 @@ static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
- CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
"gout_clkcmu_cmgp_bus",
CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
@@ -1557,8 +1557,9 @@ static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
- MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
- PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
+ MUX_F(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user",
+ mout_peri_spi_user_p, PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_div_clock peri_div_clks[] __initconst = {
@@ -1568,8 +1569,8 @@ static const struct samsung_div_clock peri_div_clks[] __initconst = {
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
- DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
- CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
+ DIV_F(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5, CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
@@ -1611,7 +1612,7 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
"mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
- CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 7bdeaff2bf..c28d3dacf0 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -42,6 +42,7 @@ static void __iomem *zynq_clkc_base;
#define SLCR_SWDT_CLK_SEL (zynq_clkc_base + 0x204)
#define NUM_MIO_PINS 54
+#define CLK_NAME_LEN 16
#define DBG_CLK_CTRL_CLKACT_TRC BIT(0)
#define DBG_CLK_CTRL_CPU_1XCLKACT BIT(1)
@@ -215,7 +216,7 @@ static void __init zynq_clk_setup(struct device_node *np)
int i;
u32 tmp;
int ret;
- char *clk_name;
+ char clk_name[CLK_NAME_LEN];
unsigned int fclk_enable = 0;
const char *clk_output_name[clk_max];
const char *cpu_parents[4];
@@ -426,12 +427,10 @@ static void __init zynq_clk_setup(struct device_node *np)
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
- tmp = strlen("mio_clk_00x");
- clk_name = kmalloc(tmp, GFP_KERNEL);
for (i = 0; i < NUM_MIO_PINS; i++) {
int idx;
- snprintf(clk_name, tmp, "mio_clk_%2.2d", i);
+ snprintf(clk_name, CLK_NAME_LEN, "mio_clk_%2.2d", i);
idx = of_property_match_string(np, "clock-names", clk_name);
if (idx >= 0)
can_mio_mux_parents[i] = of_clk_get_parent_name(np,
@@ -439,7 +438,6 @@ static void __init zynq_clk_setup(struct device_node *np)
else
can_mio_mux_parents[i] = dummy_nm;
}
- kfree(clk_name);
clk_register_mux(NULL, "can_mux", periph_parents, 4,
CLK_SET_RATE_NO_REPARENT, SLCR_CAN_CLK_CTRL, 4, 2, 0,
&canclk_lock);
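
[Editor's note, not part of the patch] The zynq change swaps a small, unchecked heap allocation (and its matching kfree) for a fixed-size stack buffer, so early clock setup no longer depends on an allocation that could fail. A minimal sketch of the same pattern, with the 16-byte bound taken from the patch's CLK_NAME_LEN:

#include <stdio.h>

#define CLK_NAME_LEN 16   /* mirrors the constant added by the patch */

int main(void)
{
	char clk_name[CLK_NAME_LEN];   /* no allocation, nothing to free */

	for (int i = 0; i < 4; i++) {
		/* snprintf() always NUL-terminates within the bound. */
		snprintf(clk_name, sizeof(clk_name), "mio_clk_%2.2d", i);
		printf("%s\n", clk_name);
	}
	return 0;
}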
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 44a61dc6f9..e1c773bb55 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -32,7 +32,7 @@
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
#define GT_CONTROL_PRESCALER_SHIFT 8
-#define GT_CONTROL_PRESCALER_MAX 0xF
+#define GT_CONTROL_PRESCALER_MAX 0xFF
#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
GT_CONTROL_PRESCALER_SHIFT)
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index 57857c0dfb..1c732479a2 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -101,6 +101,9 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+ /* Clear timer interrupt */
+ riscv_clock_event_stop();
+
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index e4974b5083..a933ef5384 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -159,6 +159,7 @@ static int __subdev_8255_init(struct comedi_device *dev,
return -ENOMEM;
spriv->context = context;
+ spriv->io = io;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index 30ea8b53eb..05ae912282 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -87,6 +87,8 @@ struct waveform_private {
struct comedi_device *dev; /* parent comedi device */
u64 ao_last_scan_time; /* time of previous AO scan in usec */
unsigned int ao_scan_period; /* AO scan period in usec */
+ bool ai_timer_enable:1; /* should AI timer be running? */
+ bool ao_timer_enable:1; /* should AO timer be running? */
unsigned short ao_loopbacks[N_CHANS];
};
@@ -236,8 +238,12 @@ static void waveform_ai_timer(struct timer_list *t)
time_increment = devpriv->ai_convert_time - now;
else
time_increment = 1;
- mod_timer(&devpriv->ai_timer,
- jiffies + usecs_to_jiffies(time_increment));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ai_timer_enable) {
+ mod_timer(&devpriv->ai_timer,
+ jiffies + usecs_to_jiffies(time_increment));
+ }
+ spin_unlock(&dev->spinlock);
}
overrun:
@@ -393,9 +399,12 @@ static int waveform_ai_cmd(struct comedi_device *dev,
* Seem to need an extra jiffy here, otherwise timer expires slightly
* early!
*/
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = true;
devpriv->ai_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ai_convert_period) + 1;
add_timer(&devpriv->ai_timer);
+ spin_unlock_bh(&dev->spinlock);
return 0;
}
@@ -404,6 +413,9 @@ static int waveform_ai_cancel(struct comedi_device *dev,
{
struct waveform_private *devpriv = dev->private;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ai_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ai_timer);
@@ -495,8 +507,12 @@ static void waveform_ao_timer(struct timer_list *t)
unsigned int time_inc = devpriv->ao_last_scan_time +
devpriv->ao_scan_period - now;
- mod_timer(&devpriv->ao_timer,
- jiffies + usecs_to_jiffies(time_inc));
+ spin_lock(&dev->spinlock);
+ if (devpriv->ao_timer_enable) {
+ mod_timer(&devpriv->ao_timer,
+ jiffies + usecs_to_jiffies(time_inc));
+ }
+ spin_unlock(&dev->spinlock);
}
underrun:
@@ -517,9 +533,12 @@ static int waveform_ao_inttrig_start(struct comedi_device *dev,
async->inttrig = NULL;
devpriv->ao_last_scan_time = ktime_to_us(ktime_get());
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = true;
devpriv->ao_timer.expires =
jiffies + usecs_to_jiffies(devpriv->ao_scan_period);
add_timer(&devpriv->ao_timer);
+ spin_unlock_bh(&dev->spinlock);
return 1;
}
@@ -604,6 +623,9 @@ static int waveform_ao_cancel(struct comedi_device *dev,
struct waveform_private *devpriv = dev->private;
s->async->inttrig = NULL;
+ spin_lock_bh(&dev->spinlock);
+ devpriv->ao_timer_enable = false;
+ spin_unlock_bh(&dev->spinlock);
if (in_softirq()) {
/* Assume we were called from the timer routine itself. */
del_timer(&devpriv->ao_timer);
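
The comedi_test hunks above are an instance of a general timer-teardown pattern: the callback may only re-arm itself while an "enable" flag, protected by the same spinlock the callback takes, is still set, and the cancel path clears that flag before deleting the timer. A minimal, self-contained sketch of the pattern follows; the demo_* names and the 10 ms period are illustrative, not taken from the driver:

    #include <linux/jiffies.h>
    #include <linux/spinlock.h>
    #include <linux/timer.h>

    struct demo_dev {
        spinlock_t lock;
        struct timer_list timer;
        bool timer_enabled;    /* may the callback re-arm itself? */
    };

    static void demo_timer_fn(struct timer_list *t)
    {
        struct demo_dev *dd = from_timer(dd, t, timer);

        /* ... periodic work ... */

        /* Re-arm only while the start path still wants us running. */
        spin_lock(&dd->lock);
        if (dd->timer_enabled)
            mod_timer(&dd->timer, jiffies + msecs_to_jiffies(10));
        spin_unlock(&dd->lock);
    }

    static void demo_init(struct demo_dev *dd)
    {
        spin_lock_init(&dd->lock);
        timer_setup(&dd->timer, demo_timer_fn, 0);
    }

    static void demo_start(struct demo_dev *dd)
    {
        spin_lock_bh(&dd->lock);
        dd->timer_enabled = true;
        dd->timer.expires = jiffies + msecs_to_jiffies(10);
        add_timer(&dd->timer);
        spin_unlock_bh(&dd->lock);
    }

    static void demo_stop(struct demo_dev *dd)
    {
        /* Clear the flag first so a concurrent callback cannot re-arm. */
        spin_lock_bh(&dd->lock);
        dd->timer_enabled = false;
        spin_unlock_bh(&dd->lock);
        del_timer_sync(&dd->timer);
    }

Unlike this sketch, the driver cannot use del_timer_sync() on the cancel path that may run from the timer softirq itself, which is exactly why the enable flag is needed there.
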
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f911606897..a0ebad7766 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -173,6 +173,7 @@ config ARM_QCOM_CPUFREQ_NVMEM
config ARM_QCOM_CPUFREQ_HW
tristate "QCOM CPUFreq HW driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on COMMON_CLK
help
Support for the CPUFreq HW driver.
Some QCOM chipsets have a HW engine to offload the steps
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 1791d37fbc..07f3419954 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -570,7 +570,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (target_perf < capacity)
des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);
- min_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
if (_min_perf < capacity)
min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 35fb3a559e..1a1857b0a6 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,6 +481,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return 0;
struct private_data *priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8bd6e5e8f1..2d83bbc65d 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!priv)
return -ENOMEM;
- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 934d35f570..5104f853a9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -644,14 +644,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (policy->boost_enabled == enable)
return count;
+ policy->boost_enabled = enable;
+
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
cpus_read_unlock();
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
return ret;
-
- policy->boost_enabled = enable;
+ }
return count;
}
@@ -1419,6 +1421,9 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -2755,11 +2760,12 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
goto err_reset_state;
-
- policy->boost_enabled = state;
+ }
}
cpus_read_unlock();
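
The store_local_boost() and cpufreq_boost_trigger_state() hunks above switch from "apply, then record on success" to "record, then roll back on failure", so the driver's set_boost() callback already observes the new policy->boost_enabled value and an error still leaves the flag consistent. The same update-then-undo shape, reduced to a standalone C sketch (struct policy and driver_set_boost() are stand-ins, not the cpufreq API):

    #include <stdbool.h>
    #include <stdio.h>

    struct policy {
        bool boost_enabled;
    };

    /* Stand-in for the driver callback; it may consult p->boost_enabled. */
    static int driver_set_boost(struct policy *p, bool enable)
    {
        return 0;    /* return nonzero to simulate a failure */
    }

    static int store_boost(struct policy *p, bool enable)
    {
        int ret;

        if (p->boost_enabled == enable)
            return 0;                   /* nothing to do */

        p->boost_enabled = enable;      /* publish the new state first */
        ret = driver_set_boost(p, enable);
        if (ret)
            p->boost_enabled = !p->boost_enabled;    /* roll back */

        return ret;
    }

    int main(void)
    {
        struct policy p = { .boost_enabled = false };

        printf("ret=%d boost=%d\n", store_boost(&p, true), p.boost_enabled);
        return 0;
    }
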
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c4d4643b6c..c17dc51a5a 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
- if (!cpufreq_boost_enabled()
+ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index d46afb3c00..8d097dcddd 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -13,6 +13,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#define LUT_MAX_ENTRIES 32U
@@ -300,7 +301,23 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
const void *data;
- int ret;
+ int ret, cpu;
+ struct device *cpu_dev;
+ struct regulator *cpu_reg;
+
+ /* Make sure that all CPU supplies are available before proceeding. */
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return dev_err_probe(&pdev->dev, -EPROBE_DEFER,
+ "Failed to get cpu%d device\n", cpu);
+
+ cpu_reg = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(cpu_reg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(cpu_reg),
+ "CPU%d regulator get failed\n", cpu);
+ }
+
data = of_device_get_match_data(&pdev->dev);
if (!data)
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 8d4c42863a..d2cf961901 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -299,22 +299,6 @@ theend:
return err;
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
void *async_req)
{
@@ -360,6 +344,23 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
+static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
+ struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = op->ce;
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
+ int flow, err;
+
+ flow = rctx->flow;
+ err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
+ sun8i_ce_cipher_unprepare(engine, areq);
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, breq, err);
+ local_bh_enable();
+}
+
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
int err = sun8i_ce_cipher_prepare(engine, areq);
@@ -368,7 +369,6 @@ int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
return err;
sun8i_ce_cipher_run(engine, areq);
- sun8i_ce_cipher_unprepare(engine, areq);
return 0;
}
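
The sun8i-ce reordering above (and the matching rk3288 and zynqmp changes further down) enforces one rule: per-request resources such as DMA mappings must be released before crypto_finalize_*_request(), because finalizing hands the request back to its owner, and the completion itself must run with bottom halves disabled. A hedged sketch of that ordering in a crypto-engine do_one() handler; my_hw_map/my_hw_run/my_hw_unmap are hypothetical stubs standing in for real driver code:

    #include <crypto/engine.h>
    #include <crypto/skcipher.h>
    #include <linux/bottom_half.h>
    #include <linux/container_of.h>

    /* Hypothetical hardware helpers, stubbed out for the sketch. */
    static int my_hw_map(struct skcipher_request *req) { return 0; }
    static int my_hw_run(struct skcipher_request *req) { return 0; }
    static void my_hw_unmap(struct skcipher_request *req) { }

    static int my_do_one(struct crypto_engine *engine, void *areq)
    {
        struct skcipher_request *req =
            container_of(areq, struct skcipher_request, base);
        int err;

        err = my_hw_map(req);        /* DMA-map buffers and key */
        if (err)
            return err;

        err = my_hw_run(req);        /* run the job on the engine */

        /*
         * Unmap while the request is still ours: after finalizing,
         * the owner may free or reuse the buffers we mapped.
         */
        my_hw_unmap(req);

        local_bh_disable();
        crypto_finalize_skcipher_request(engine, req, err);
        local_bh_enable();

        return 0;
    }
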
diff --git a/drivers/crypto/ccp/platform-access.c b/drivers/crypto/ccp/platform-access.c
index 94367bc49e..1b8ed33897 100644
--- a/drivers/crypto/ccp/platform-access.c
+++ b/drivers/crypto/ccp/platform-access.c
@@ -118,9 +118,16 @@ int psp_send_platform_access_msg(enum psp_platform_access_msg msg,
goto unlock;
}
- /* Store the status in request header for caller to investigate */
+ /*
+ * Read status from PSP. If status is non-zero, it indicates an error
+ * occurred during "processing" of the command.
+ * If status is zero, it indicates the command was "processed"
+ * successfully, but the result of the command is in the payload.
+ * Return both cases to the caller as -EIO to investigate.
+ */
cmd_reg = ioread32(cmd);
- req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
+ if (FIELD_GET(PSP_CMDRESP_STS, cmd_reg))
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg);
if (req->header.status) {
ret = -EIO;
goto unlock;
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 0faedb5b2e..b64aaecdd9 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -120,29 +120,6 @@ static struct adf_hw_device_class adf_4xxx_class = {
.instances = 0,
};
-static int get_service_enabled(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret) {
- dev_err(&GET_DEV(accel_dev),
- ADF_SERVICES_ENABLED " param not found\n");
- return ret;
- }
-
- ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
- services);
- if (ret < 0)
- dev_err(&GET_DEV(accel_dev),
- "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
- services);
-
- return ret;
-}
-
static u32 get_accel_mask(struct adf_hw_device_data *self)
{
return ADF_4XXX_ACCELERATORS_MASK;
@@ -275,7 +252,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
}
- switch (get_service_enabled(accel_dev)) {
+ switch (adf_get_service_enabled(accel_dev)) {
case SVC_CY:
case SVC_CY2:
return capabilities_sym | capabilities_asym;
@@ -311,7 +288,7 @@ static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- switch (get_service_enabled(accel_dev)) {
+ switch (adf_get_service_enabled(accel_dev)) {
case SVC_DC:
return thrd_to_arb_map_dc;
case SVC_DCC:
@@ -420,7 +397,7 @@ static u32 uof_get_num_objs(void)
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
- switch (get_service_enabled(accel_dev)) {
+ switch (adf_get_service_enabled(accel_dev)) {
case SVC_CY:
case SVC_CY2:
return adf_fw_cy_config;
@@ -460,6 +437,13 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
if (!fw_config)
return 0;
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
for (i = 0; i < RP_GROUP_COUNT; i++) {
switch (fw_config[i].ae_mask) {
case ADF_AE_GROUP_0:
@@ -488,6 +472,7 @@ static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
}
}
+set_mask:
ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index a39e70bd4b..621d14ea3b 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -92,7 +92,8 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_restart(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC)
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
@@ -100,11 +101,19 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /* The dev is back alive. Notify the caller if in sync mode */
- if (reset_data->mode == ADF_DEV_RESET_SYNC)
- complete(&reset_data->compl);
- else
+ /*
+ * The dev is back alive. Notify the caller if in sync mode
+ *
+ * If the device restart takes more time than expected,
+ * the schedule_reset() function can time out and exit. This can be
+ * detected by calling the completion_done() function. In this case
+ * the reset_data structure needs to be freed here.
+ */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
+ completion_done(&reset_data->compl))
kfree(reset_data);
+ else
+ complete(&reset_data->compl);
}
static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
@@ -137,8 +146,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
ret = -EFAULT;
+ } else {
+ kfree(reset_data);
}
- kfree(reset_data);
return ret;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
index 8e13fe9389..2680522944 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -2,6 +2,9 @@
/* Copyright(c) 2023 Intel Corporation */
#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include "adf_cfg.h"
#include "adf_cfg_services.h"
#include "adf_cfg_strings.h"
@@ -18,3 +21,27 @@ const char *const adf_cfg_services[] = {
[SVC_SYM_DC] = ADF_CFG_SYM_DC,
};
EXPORT_SYMBOL_GPL(adf_cfg_services);
+
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ ADF_SERVICES_ENABLED " param not found\n");
+ return ret;
+ }
+
+ ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services),
+ services);
+ if (ret < 0)
+ dev_err(&GET_DEV(accel_dev),
+ "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n",
+ services);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_get_service_enabled);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index f78fd697b4..c6b0328b0f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -5,6 +5,8 @@
#include "adf_cfg_strings.h"
+struct adf_accel_dev;
+
enum adf_services {
SVC_CY = 0,
SVC_CY2,
@@ -21,4 +23,6 @@ enum adf_services {
extern const char *const adf_cfg_services[SVC_COUNT];
+int adf_get_service_enabled(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
index 01e0a389e4..cf89f57de2 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_clock.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -83,6 +83,9 @@ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency)
}
delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1);
+ if (!delta_us)
+ return -EINVAL;
+
temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10;
temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us);
/*
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
index 07119c487d..627953a72d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
@@ -16,7 +16,6 @@
#define CNV_ERR_INFO_MASK GENMASK(11, 0)
#define CNV_ERR_TYPE_MASK GENMASK(15, 12)
-#define CNV_SLICE_ERR_MASK GENMASK(7, 0)
#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7
#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
index 048c246079..2dd3772bf5 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -1007,8 +1007,7 @@ static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev,
static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
- u32 bits_num = BITS_PER_REG(reg);
+ u32 reg, bits_num = BITS_PER_REG(reg);
bool reset_required = false;
unsigned long errs_bits;
u32 bit_iterator;
@@ -1106,8 +1105,7 @@ static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
void __iomem *csr, u32 iastatssm)
{
- u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
- u32 bits_num = BITS_PER_REG(reg);
+ u32 reg, bits_num = BITS_PER_REG(reg);
bool reset_required = false;
unsigned long errs_bits;
u32 bit_iterator;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index de1b214dba..d4f2db3c53 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -788,6 +788,24 @@ static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
sla_type_arr[node_id] = NULL;
}
+static void free_all_sla(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ int sla_id;
+
+ mutex_lock(&rl_data->rl_lock);
+
+ for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) {
+ if (!rl_data->sla[sla_id])
+ continue;
+
+ kfree(rl_data->sla[sla_id]);
+ rl_data->sla[sla_id] = NULL;
+ }
+
+ mutex_unlock(&rl_data->rl_lock);
+}
+
/**
* add_update_sla() - handles the creation and the update of an SLA
* @accel_dev: pointer to acceleration device structure
@@ -1155,7 +1173,7 @@ void adf_rl_stop(struct adf_accel_dev *accel_dev)
return;
adf_sysfs_rl_rm(accel_dev);
- adf_rl_remove_sla_all(accel_dev, true);
+ free_all_sla(accel_dev);
}
void adf_rl_exit(struct adf_accel_dev *accel_dev)
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index bf8c0ee629..2ba4aa22e0 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -13,15 +13,6 @@
#include "qat_compression.h"
#include "qat_algs_send.h"
-#define QAT_RFC_1950_HDR_SIZE 2
-#define QAT_RFC_1950_FOOTER_SIZE 4
-#define QAT_RFC_1950_CM_DEFLATE 8
-#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
-#define QAT_RFC_1950_CM_MASK 0x0f
-#define QAT_RFC_1950_CM_OFFSET 4
-#define QAT_RFC_1950_DICT_MASK 0x20
-#define QAT_RFC_1950_COMP_HDR 0x785e
-
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 1b13b4aa16..a235e6c300 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -332,12 +332,12 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
theend:
pm_runtime_put_autosuspend(rkc->dev);
+ rk_hash_unprepare(engine, breq);
+
local_bh_disable();
crypto_finalize_hash_request(engine, breq, err);
local_bh_enable();
- rk_hash_unprepare(engine, breq);
-
return 0;
}
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 3c205324b2..e614057188 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -231,7 +231,10 @@ static int zynqmp_handle_aes_req(struct crypto_engine *engine,
err = zynqmp_aes_aead_cipher(areq);
}
+ local_bh_disable();
crypto_finalize_aead_request(engine, areq, err);
+ local_bh_enable();
+
return 0;
}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 7bb656237f..8f0a2507dd 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -729,12 +729,17 @@ static int match_auto_decoder(struct device *dev, void *data)
return 0;
}
-static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
- struct cxl_region *cxlr)
+static struct cxl_decoder *
+cxl_region_find_decoder(struct cxl_port *port,
+ struct cxl_endpoint_decoder *cxled,
+ struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
+ if (port == cxled_to_port(cxled))
+ return &cxled->cxld;
+
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
dev = device_find_child(&port->dev, &cxlr->params,
match_auto_decoder);
@@ -752,8 +757,31 @@ static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
return to_cxl_decoder(dev);
}
-static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
- struct cxl_region *cxlr)
+static bool auto_order_ok(struct cxl_port *port, struct cxl_region *cxlr_iter,
+ struct cxl_decoder *cxld)
+{
+ struct cxl_region_ref *rr = cxl_rr_load(port, cxlr_iter);
+ struct cxl_decoder *cxld_iter = rr->decoder;
+
+ /*
+ * Allow the out of order assembly of auto-discovered regions.
+ * Per CXL Spec 3.1 8.2.4.20.12 software must commit decoders
+ * in HPA order. Confirm that the decoder with the lesser HPA
+ * starting address has the lesser id.
+ */
+ dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n",
+ dev_name(&cxld->dev), cxld->id,
+ dev_name(&cxld_iter->dev), cxld_iter->id);
+
+ if (cxld_iter->id > cxld->id)
+ return true;
+
+ return false;
+}
+
+static struct cxl_region_ref *
+alloc_region_ref(struct cxl_port *port, struct cxl_region *cxlr,
+ struct cxl_endpoint_decoder *cxled)
{
struct cxl_region_params *p = &cxlr->params;
struct cxl_region_ref *cxl_rr, *iter;
@@ -763,16 +791,21 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
xa_for_each(&port->regions, index, iter) {
struct cxl_region_params *ip = &iter->region->params;
- if (!ip->res)
+ if (!ip->res || ip->res->start < p->res->start)
continue;
- if (ip->res->start > p->res->start) {
- dev_dbg(&cxlr->dev,
- "%s: HPA order violation %s:%pr vs %pr\n",
- dev_name(&port->dev),
- dev_name(&iter->region->dev), ip->res, p->res);
- return ERR_PTR(-EBUSY);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
+ struct cxl_decoder *cxld;
+
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
+ if (auto_order_ok(port, iter->region, cxld))
+ continue;
}
+ dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n",
+ dev_name(&port->dev),
+ dev_name(&iter->region->dev), ip->res, p->res);
+
+ return ERR_PTR(-EBUSY);
}
cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
@@ -852,10 +885,7 @@ static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
{
struct cxl_decoder *cxld;
- if (port == cxled_to_port(cxled))
- cxld = &cxled->cxld;
- else
- cxld = cxl_region_find_decoder(port, cxlr);
+ cxld = cxl_region_find_decoder(port, cxled, cxlr);
if (!cxld) {
dev_dbg(&cxlr->dev, "%s: no decoder available\n",
dev_name(&port->dev));
@@ -952,7 +982,7 @@ static int cxl_port_attach_region(struct cxl_port *port,
nr_targets_inc = true;
}
} else {
- cxl_rr = alloc_region_ref(port, cxlr);
+ cxl_rr = alloc_region_ref(port, cxlr, cxled);
if (IS_ERR(cxl_rr)) {
dev_dbg(&cxlr->dev,
"%s: failed to allocate region reference\n",
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index a0b5819bc7..f01d0709c9 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -642,18 +642,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
TRACE_EVENT(cxl_poison,
- TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
+ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
const struct cxl_poison_record *record, u8 flags,
__le64 overflow_ts, enum cxl_poison_trace_type trace_type),
- TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
+ TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),
TP_STRUCT__entry(
__string(memdev, dev_name(&cxlmd->dev))
__string(host, dev_name(cxlmd->dev.parent))
__field(u64, serial)
__field(u8, trace_type)
- __string(region, region)
+ __string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
__field(u64, dpa)
@@ -673,10 +673,10 @@ TRACE_EVENT(cxl_poison,
__entry->source = cxl_poison_record_source(record);
__entry->trace_type = trace_type;
__entry->flags = flags;
- if (region) {
- __assign_str(region, dev_name(&region->dev));
- memcpy(__entry->uuid, &region->params.uuid, 16);
- __entry->hpa = cxl_trace_hpa(region, cxlmd,
+ if (cxlr) {
+ __assign_str(region, dev_name(&cxlr->dev));
+ memcpy(__entry->uuid, &cxlr->params.uuid, 16);
+ __entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
__entry->dpa);
} else {
__assign_str(region, "");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 70ba506dab..de6eb370d4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -629,16 +629,16 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
- depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
+ depends on (ARCH_TEGRA || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
- Support for the NVIDIA Tegra210 ADMA controller driver. The
- DMA controller has multiple DMA channels and is used to service
- various audio clients in the Tegra210 audio processing engine
- (APE). This DMA controller transfers data from memory to
- peripheral and vice versa. It does not support memory to
- memory data transfer.
+ Support for the NVIDIA Tegra210/Tegra186/Tegra194/Tegra234 ADMA
+ controller driver. The DMA controller has multiple DMA channels
+ and is used to service various audio clients in the Tegra210
+ audio processing engine (APE). This DMA controller transfers
+ data from memory to peripheral and vice versa. It does not
+ support memory to memory data transfer.
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index bb5221158a..f5e216b157 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -30,8 +30,9 @@
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
-#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_ITER_MASK GENMASK(14, 0)
+#define EDMA_TCD_CITER_CITER(x) ((x) & EDMA_TCD_ITER_MASK)
+#define EDMA_TCD_BITER_BITER(x) ((x) & EDMA_TCD_ITER_MASK)
#define EDMA_TCD_CSR_START BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 75cae7ccae..d36e28b9c7 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -9,6 +9,8 @@
* Vybrid and Layerscape SoCs.
*/
+#include <dt-bindings/dma/fsl-edma.h>
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
@@ -21,12 +23,6 @@
#include "fsl-edma-common.h"
-#define ARGS_RX BIT(0)
-#define ARGS_REMOTE BIT(1)
-#define ARGS_MULTI_FIFO BIT(2)
-#define ARGS_EVEN_CH BIT(3)
-#define ARGS_ODD_CH BIT(4)
-
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -155,14 +151,14 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
i = fsl_chan - fsl_edma->chans;
fsl_chan->priority = dma_spec->args[1];
- fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
- fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
- fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+ fsl_chan->is_rxchan = dma_spec->args[2] & FSL_EDMA_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & FSL_EDMA_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & FSL_EDMA_MULTI_FIFO;
- if ((dma_spec->args[2] & ARGS_EVEN_CH) && (i & 0x1))
+ if ((dma_spec->args[2] & FSL_EDMA_EVEN_CH) && (i & 0x1))
continue;
- if ((dma_spec->args[2] & ARGS_ODD_CH) && !(i & 0x1))
+ if ((dma_spec->args[2] & FSL_EDMA_ODD_CH) && !(i & 0x1))
continue;
if (!b_chmux && i == dma_spec->args[0]) {
@@ -587,7 +583,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
- dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ dma_set_max_seg_size(fsl_edma->dma_dev.dev,
+ FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK));
fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
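
The FIELD_GET(EDMA_TCD_ITER_MASK, EDMA_TCD_ITER_MASK) expression in the fsl-edma hunk above is a small idiom worth noting: applying FIELD_GET to the mask itself yields the all-ones value of the field, i.e. its maximum, so the segment-size limit now follows the 15-bit CITER/BITER definition (0x7fff) instead of the previously hard-coded 0x3fff. A tiny illustrative sketch with a hypothetical field of the same width:

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    /* Hypothetical 15-bit iteration-count field occupying bits 14:0. */
    #define DEMO_ITER_MASK    GENMASK(14, 0)

    static u32 demo_max_iter(void)
    {
        /*
         * FIELD_GET(mask, mask) right-shifts the mask by its own
         * lowest set bit, giving the largest value the field can
         * hold: 0x7fff here. Deriving the limit this way keeps it
         * in sync with the field definition.
         */
        return FIELD_GET(DEMO_ITER_MASK, DEMO_ITER_MASK);
    }
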
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index f8fbf03942..e72ebdf621 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -128,9 +128,9 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return -EINVAL;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_pins, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
@@ -208,9 +208,9 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
reg = dpll_pin_registration_find(ref, ops, priv);
if (WARN_ON(!reg))
return;
+ list_del(&reg->list);
+ kfree(reg);
if (refcount_dec_and_test(&ref->refcount)) {
- list_del(&reg->list);
- kfree(reg);
xa_erase(xa_dplls, i);
WARN_ON(!list_empty(&ref->registration_list));
kfree(ref);
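
Both dpll_core.c hunks above restore the same ownership rule: every del call removes and frees its own registration entry unconditionally, and only the final reference drop tears down the shared ref object. A compact sketch of that rule with hypothetical demo_* types (not the dpll structures):

    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct demo_ref {
        refcount_t refcount;
        struct list_head registrations;
    };

    struct demo_registration {
        struct list_head list;
        void *priv;
    };

    static void demo_ref_del(struct demo_ref *ref,
                             struct demo_registration *reg)
    {
        /* The caller's registration always goes away... */
        list_del(&reg->list);
        kfree(reg);

        /* ...the shared ref only when the last user is gone. */
        if (refcount_dec_and_test(&ref->refcount)) {
            WARN_ON(!list_empty(&ref->registrations));
            kfree(ref);
        }
    }
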
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 8aaa7fcb26..401a77e3b5 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -500,7 +500,19 @@ static void bm_work(struct work_struct *work)
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
- reset_bus(card, true);
+ /*
+ * Where possible, use a short bus reset to minimize
+ * disruption to isochronous transfers. But in the event
+ * of a gap count inconsistency, use a long bus reset.
+ *
+ * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
+ * may set different gap counts after a bus reset. On a mixed
+ * 1394/1394a bus, a short bus reset can get doubled. Some
+ * nodes may treat the double reset as one bus reset and others
+ * may treat it as two, causing a gap count inconsistency
+ * again. Using a long bus reset prevents this.
+ */
+ reset_bus(card, card->gap_count != 0);
/* Will allocate broadcast channel after the reset. */
goto out;
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9db9290c32..7bc71f4be6 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3773,6 +3773,7 @@ static int pci_probe(struct pci_dev *dev,
return 0;
fail_msi:
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
return err;
@@ -3800,6 +3801,7 @@ static void pci_remove(struct pci_dev *dev)
software_reset(ohci);
+ devm_free_irq(&dev->dev, dev->irq, ohci);
pci_disable_msi(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c
index 7611e96650..39936e1dd3 100644
--- a/drivers/firmware/arm_scmi/smc.c
+++ b/drivers/firmware/arm_scmi/smc.c
@@ -214,6 +214,13 @@ static int smc_chan_free(int id, void *p, void *data)
struct scmi_chan_info *cinfo = p;
struct scmi_smc *scmi_info = cinfo->transport_info;
+ /*
+ * Different protocols might share the same chan info, so a previous
+ * smc_chan_free call might have already freed the structure.
+ */
+ if (!scmi_info)
+ return 0;
+
/* Ignore any possible further reception on the IRQ path */
if (scmi_info->irq > 0)
free_irq(scmi_info->irq, scmi_info);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9d3910d1ab..abdfcb5aa4 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -199,6 +199,8 @@ static bool generic_ops_supported(void)
name_size = sizeof(name);
+ if (!efi.get_next_variable)
+ return false;
status = efi.get_next_variable(&name_size, &name, &guid);
if (status == EFI_UNSUPPORTED)
return false;
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index 4e96a855fd..c41e7b2091 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -120,7 +120,7 @@ efi_status_t efi_random_alloc(unsigned long size,
continue;
}
- target = round_up(md->phys_addr, align) + target_slot * align;
+ target = round_up(max_t(u64, md->phys_addr, alloc_min), align) + target_slot * align;
pages = size / EFI_PAGE_SIZE;
status = efi_bs_call(allocate_pages, EFI_ALLOCATE_ADDRESS,
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 99429bc4b0..dd80082aac 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -21,6 +21,8 @@
#include "efistub.h"
#include "x86-stub.h"
+extern char _bss[], _ebss[];
+
const efi_system_table_t *efi_system_table;
const efi_dxe_services_table_t *efi_dxe_table;
static efi_loaded_image_t *image = NULL;
@@ -465,6 +467,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_status_t status;
char *cmdline_ptr;
+ if (efi_is_native())
+ memset(_bss, 0, _ebss - _bss);
+
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
@@ -482,6 +487,7 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr->vid_mode = 0xffff;
hdr->type_of_loader = 0x21;
+ hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
@@ -958,8 +964,6 @@ fail:
void efi_handover_entry(efi_handle_t handle, efi_system_table_t *sys_table_arg,
struct boot_params *boot_params)
{
- extern char _bss[], _ebss[];
-
memset(_bss, 0, _ebss - _bss);
efi_stub_entry(handle, sys_table_arg, boot_params);
}
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index dd7a783d53..e73f88050f 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1872,7 +1872,7 @@ static irqreturn_t dfl_irq_handler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
- eventfd_signal(trigger, 1);
+ eventfd_signal(trigger);
return IRQ_HANDLED;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b3a133ed31..41b51536bd 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -691,7 +691,8 @@ config GPIO_UNIPHIER
Say yes here to support UniPhier GPIOs.
config GPIO_VF610
- def_bool y
+ bool "VF610 GPIO support"
+ default y if SOC_VF610
depends on ARCH_MXC
select GPIOLIB_IRQCHIP
help
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index fe9ce6b19f..4987e62dcb 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -158,7 +158,7 @@ struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
if (!dr)
return ERR_PTR(-ENOMEM);
- desc = fwnode_gpiod_get_index(fwnode, con_id, index, flags, label);
+ desc = gpiod_find_and_request(dev, fwnode, con_id, index, flags, label, false);
if (IS_ERR(desc)) {
devres_free(dr);
return desc;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 1d033106cf..2d8c07a442 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4127,13 +4127,13 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
-static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
- struct fwnode_handle *fwnode,
- const char *con_id,
- unsigned int idx,
- enum gpiod_flags flags,
- const char *label,
- bool platform_lookup_allowed)
+struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ const char *label,
+ bool platform_lookup_allowed)
{
unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
struct gpio_desc *desc;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 3ccacf3c12..87ec7ba5b6 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -207,6 +207,14 @@ static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
return ret;
}
+struct gpio_desc *gpiod_find_and_request(struct device *consumer,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags flags,
+ const char *label,
+ bool platform_lookup_allowed);
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3eee8636f8..9b079f3a1b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -198,7 +198,7 @@ config DRM_TTM
config DRM_TTM_KUNIT_TEST
tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS
default n
- depends on DRM && KUNIT && MMU
+ depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST)
select DRM_TTM
select DRM_EXPORT_FOR_TESTS if m
select DRM_KUNIT_TEST_HELPERS
@@ -206,7 +206,8 @@ config DRM_TTM_KUNIT_TEST
help
Enables unit tests for TTM, a GPU memory manager subsystem used
to manage memory buffers. This option is mostly useful for kernel
- developers.
+ developers. It depends on (UML || COMPILE_TEST) since no other driver
+ which uses TTM can be loaded while running the tests.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 31d4b5a2c5..13c62a26aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -198,6 +198,7 @@ extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index c7d60dd0fb..4f9900779e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1278,11 +1278,10 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
* 0b10 : encode is disabled
* 0b01 : decode is disabled
*/
- adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
- ip->revision & 0xc0;
- ip->revision &= ~0xc0;
if (adev->vcn.num_vcn_inst <
AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
+ ip->revision & 0xc0;
adev->vcn.num_vcn_inst++;
adev->vcn.inst_mask |=
(1U << ip->instance_number);
@@ -1293,6 +1292,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->vcn.num_vcn_inst + 1,
AMDGPU_MAX_VCN_INSTANCES);
}
+ ip->revision &= ~0xc0;
}
if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 10c4a8cfa1..855ab59632 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -209,6 +209,7 @@ int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
+int amdgpu_damage_clips = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -858,6 +859,18 @@ MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (defau
module_param_named(backlight, amdgpu_backlight, bint, 0444);
/**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+ "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
* DOC: tmz (int)
* Trusted Memory Zone (TMZ) is a method to protect data being written
* to or read from memory.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 081267161d..57516a8c5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
*/
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ int r;
+
if (bo->kfd_bo)
- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_hmm_hsa_ops);
- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
- amdgpu_bo_size(bo),
- &amdgpu_hmm_gfx_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 45424ebf96..4e526d7730 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
volatile u32 *mqd;
- int r;
+ u32 *kbuf;
+ int r, i;
uint32_t value, result;
if (*pos & 3 || size & 3)
return -EINVAL;
- result = 0;
+ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
r = amdgpu_bo_reserve(ring->mqd_obj, false);
if (unlikely(r != 0))
- return r;
+ goto err_free;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
- if (r) {
- amdgpu_bo_unreserve(ring->mqd_obj);
- return r;
- }
+ if (r)
+ goto err_unreserve;
+ /*
+ * Copy to local buffer to avoid put_user(), which might fault
+ * and acquire mmap_sem, under reservation_ww_class_mutex.
+ */
+ for (i = 0; i < ring->mqd_size/sizeof(u32); i++)
+ kbuf[i] = mqd[i];
+
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ amdgpu_bo_unreserve(ring->mqd_obj);
+
+ result = 0;
while (size) {
if (*pos >= ring->mqd_size)
- goto done;
+ break;
- value = mqd[*pos/4];
+ value = kbuf[*pos/4];
r = put_user(value, (uint32_t *)buf);
if (r)
- goto done;
+ goto err_free;
buf += 4;
result += 4;
size -= 4;
*pos += 4;
}
-done:
- amdgpu_bo_kunmap(ring->mqd_obj);
- mqd = NULL;
- amdgpu_bo_unreserve(ring->mqd_obj);
- if (r)
- return r;
-
+ kfree(kbuf);
return result;
+
+err_unreserve:
+ amdgpu_bo_unreserve(ring->mqd_obj);
+err_free:
+ kfree(kbuf);
+ return r;
}
static const struct file_operations amdgpu_debugfs_mqd_fops = {
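
The rewritten amdgpu_debugfs_mqd_read() above illustrates a locking rule rather than an MQD detail: put_user()/copy_to_user() can fault and take mm locks, so they must not run under the buffer-object reservation; the data is first snapshotted into a plain kmalloc() buffer, the reservation is dropped, and only then is the copy to userspace done. A hedged sketch of that shape, where src_lock and src stand in for the reservation and the kmap'ed MQD:

    #include <linux/minmax.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    static ssize_t demo_read_locked_buf(struct mutex *src_lock,
                                        const u32 *src, size_t words,
                                        char __user *ubuf, size_t len)
    {
        size_t bytes = min(len, words * sizeof(u32));
        u32 *kbuf;
        ssize_t ret;

        kbuf = kmalloc(words * sizeof(u32), GFP_KERNEL);
        if (!kbuf)
            return -ENOMEM;

        /* Snapshot while the source is stable, then drop the lock. */
        mutex_lock(src_lock);
        memcpy(kbuf, src, words * sizeof(u32));
        mutex_unlock(src_lock);

        /* The userspace copy may fault; do it with no locks held. */
        ret = copy_to_user(ubuf, kbuf, bytes) ? -EFAULT : bytes;

        kfree(kbuf);
        return ret;
    }
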
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 75c9fd2c6c..b0ed10f4de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -869,6 +869,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
}
+ gtt->bound = true;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 2c22100078..1b51be9b5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -313,7 +313,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
DEBUG("IMM 0x%02X\n", val);
return val;
}
- return 0;
+ break;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index c9c653cfc7..3f1692194b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -570,6 +570,7 @@ static void gmc_v11_0_set_mmhub_funcs(struct amdgpu_device *adev)
adev->mmhub.funcs = &mmhub_v3_0_2_funcs;
break;
case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
adev->mmhub.funcs = &mmhub_v3_3_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index dc4812ecc9..238ea40c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -98,16 +98,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
- mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+ case IP_VERSION(3, 3, 1):
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+ mmhub_client_ids_v3_3[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
}
- if (!mmhub_cid && cid == 0x140)
- mmhub_cid = "UMSCH";
-
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 0f24af6f28..8eed9a73d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -429,16 +429,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
+ int i;
for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -485,20 +480,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
uint32_t inst_mask)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
for_each_inst(i, inst_mask) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -948,13 +933,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
r = amdgpu_ring_test_helper(page);
if (r)
return r;
-
- if (adev->mman.buffer_funcs_ring == page)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9b5af3f138..f9ba180304 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -574,11 +574,34 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
return AMD_RESET_METHOD_MODE1;
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+ * 2) S3 suspend abort and TOS already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if ((adev->apu_flags & AMD_APU_IS_RAVEN) ||
- (adev->apu_flags & AMD_APU_IS_RAVEN2))
+ /* On the latest Raven, the GPU reset can be performed
+ * successfully. So now, temporarily enable it for the
+ * S3 suspend abort case.
+ */
+ if (((adev->apu_flags & AMD_APU_IS_RAVEN) ||
+ (adev->apu_flags & AMD_APU_IS_RAVEN2)) &&
+ !soc15_need_reset_on_resume(adev))
return 0;
switch (soc15_asic_reset_method(adev)) {
@@ -1297,24 +1320,6 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}
-static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
-{
- u32 sol_reg;
-
- sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
-
- /* Will reset for the following suspend abort cases.
- * 1) Only reset limit on APU side, dGPU hasn't checked yet.
- * 2) S3 suspend abort and TOS already launched.
- */
- if (adev->flags & AMD_IS_APU && adev->in_s3 &&
- !adev->suspend_complete &&
- sol_reg)
- return true;
-
- return false;
-}
-
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 71445ab63b..02d1d9afbf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1466,7 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
- return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
+ return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 272c27495e..dafe9562a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1896,17 +1896,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
adev->dm.hdcp_workqueue = NULL;
}
- if (adev->dm.dc)
+ if (adev->dm.dc) {
dc_deinit_callbacks(adev->dm.dc);
-
- if (adev->dm.dc)
dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- kfree(adev->dm.dmub_notify);
- adev->dm.dmub_notify = NULL;
- destroy_workqueue(adev->dm.delayed_hpd_wq);
- adev->dm.delayed_hpd_wq = NULL;
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
}
if (adev->dm.dmub_bo)
@@ -5131,6 +5129,10 @@ static inline void fill_dc_dirty_rect(struct drm_plane *plane,
* @new_plane_state: New state of @plane
* @crtc_state: New state of CRTC connected to the @plane
* @flip_addrs: DC flip tracking struct, which also tracts dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
* @dirty_regions_changed: dirty regions changed
*
* For PSR SU, DC informs the DMUB uController of dirty rectangle regions
@@ -5149,6 +5151,7 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
@@ -5173,6 +5176,10 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
num_clips = drm_plane_get_damage_clips_count(new_plane_state);
clips = drm_plane_get_damage_clips(new_plane_state);
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
if (!dm_crtc_state->mpo_requested) {
if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
goto ffu;
@@ -6165,9 +6172,8 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
- else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
- stream->signal == SIGNAL_TYPE_EDP) {
+
+ if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
//
// should decide stream support vsc sdp colorimetry capability
// before building vsc info packet
@@ -6183,9 +6189,8 @@ create_stream_for_sink(struct drm_connector *connector,
if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
tf = TRANSFER_FUNC_GAMMA_22;
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
- if (stream->link->psr_settings.psr_feature_enabled)
- aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
finish:
dc_sink_release(sink);
@@ -8220,6 +8225,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
&dirty_rects_changed);
/*
@@ -10841,18 +10848,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
@@ -10872,14 +10885,23 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (range->flags != 1)
continue;
- amdgpu_dm_connector->min_vfreq = range->min_vfreq;
- amdgpu_dm_connector->max_vfreq = range->max_vfreq;
- amdgpu_dm_connector->pixel_clock_mhz =
- range->pixel_clock_mhz * 10;
-
connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
break;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 45c972f263..417c9cb47b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1483,7 +1483,7 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
const uint32_t rd_buf_size = 10;
struct pipe_ctx *pipe_ctx;
ssize_t result = 0;
- int i, r, str_len = 30;
+ int i, r, str_len = 10;
rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index a496930b1f..289918ea72 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -217,6 +217,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
@@ -241,6 +251,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 54df6cac19..353d5fb9e3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -705,7 +705,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
clock_table->NumFclkLevelsEnabled;
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
- num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
clock_table->NumDcfClkLevelsEnabled;
for (i = 0; i < num_dcfclk; i++) {
int j;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index bbdeda4897..b51208f44c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2258,23 +2258,16 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_context *dml2 = NULL;
-#endif
if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
#ifdef CONFIG_DRM_AMD_DC_FP
- if (new_ctx->bw_ctx.dml2) {
- dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
- if (!dml2)
- return NULL;
-
- memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
- new_ctx->bw_ctx.dml2 = dml2;
- }
+ if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
+ dc_release_state(new_ctx);
+ return NULL;
+ }
#endif
for (i = 0; i < MAX_PIPES; i++) {
@@ -3340,6 +3333,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
@@ -4929,22 +4925,16 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
- uint32_t idle_state = 0;
-
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d1500b2238..2f9da981a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
+void mpc3_mpc_init(struct mpc *mpc)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ int opp_id;
+
+ mpc1_mpc_init(mpc);
+
+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+ if (REG(MUX[opp_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+ }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+ /* assuming mpc out mux is connected to opp with the same index at this
+ * point in time (e.g. transitioning from vbios to driver)
+ */
+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+ /* disable mpc out rate and flow control */
+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
MPC_DWB0_MUX, 0xf);
}
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control)
-{
- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_RATE_CONTROL_DISABLE, !enable,
- MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
- if (flow_control)
- REG_UPDATE_2(MUX[opp_id],
- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
{
/*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1386,8 +1397,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
- .mpc_init = mpc1_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init = mpc3_mpc_init,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1404,7 +1415,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc3_program_shaper,
.acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 5198f2167c..d3b904517a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
int num_mpcc,
int num_rmu);
+void mpc3_mpc_init(
+ struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+ struct mpc *mpc,
+ unsigned int mpcc_id);
+
bool mpc3_program_shaper(
struct mpc *mpc,
const struct pwl_params *params,
@@ -1074,13 +1081,6 @@ bool mpc3_is_dwb_idle(
struct mpc *mpc,
int dwb_id);
-void mpc3_set_out_rate_control(
- struct mpc *mpc,
- int opp_id,
- bool enable,
- bool rate_2x_mode,
- struct mpc_dwb_flow_control *flow_control);
-
void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 994b21ed27..3279b61022 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
int mpcc_id;
- mpc1_mpc_init(mpc);
+ mpc3_mpc_init(mpc);
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -990,7 +990,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.insert_plane = mpc1_insert_plane,
.remove_mpcc = mpc1_remove_mpcc,
.mpc_init = mpc32_mpc_init,
- .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
.cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1007,7 +1007,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.set_dwb_mux = mpc3_set_dwb_mux,
.disable_dwb_mux = mpc3_disable_dwb_mux,
.is_dwb_idle = mpc3_is_dwb_idle,
- .set_out_rate_control = mpc3_set_out_rate_control,
.set_gamut_remap = mpc3_set_gamut_remap,
.program_shaper = mpc32_program_shaper,
.program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e940dd0f92..f663de1cdc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -1866,6 +1866,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1924,7 +1925,21 @@ int dcn32_populate_dml_pipes_from_context(
dcn32_zero_pipe_dcc_fraction(pipes, pipe_cnt);
DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
- pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ if (dc->config.enable_windowed_mpo_odm &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ switch (resource_get_odm_slice_count(pipe)) {
+ case 2:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ break;
+ case 4:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+ break;
+ default:
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
+ } else {
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ }
pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19;
@@ -2011,6 +2026,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
{
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index b931008114..351c8a2843 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -41,6 +41,7 @@
#define SUBVP_HIGH_REFRESH_LIST_LEN 4
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -183,6 +184,10 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context);
bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int vlevel);
+void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index bc5f0db23d..1f89428499 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -778,3 +778,35 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
return result;
}
+
+void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe = NULL;
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ int odm_slice_count = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+ pipe = &res_ctx->pipe_ctx[i];
+ odm_slice_count = resource_get_odm_slice_count(pipe);
+
+ if (odm_slice_count == 1)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal;
+ else if (odm_slice_count == 2)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ else if (odm_slice_count == 4)
+ pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1;
+
+ pipe_cnt++;
+ }
+}
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
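As a worked example of the clamp above: with MIN_SUBVP_DCFCLK_KHZ defined as 400000 kHz (400 MHz), a SubVP configuration that validated at, say, 320000 kHz DCFCLK is now raised to 400000 kHz, while configurations already above 400 MHz are left untouched.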
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index 4156a8cc2b..3b7505b5f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -1579,6 +1579,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
{
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index fe2b67d745..b315ca6f1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1265,7 +1265,7 @@ static bool update_pipes_with_split_flags(struct dc *dc, struct dc_state *contex
return updated;
}
-static bool should_allow_odm_power_optimization(struct dc *dc,
+static bool should_apply_odm_power_optimization(struct dc *dc,
struct dc_state *context, struct vba_vars_st *v, int *split,
bool *merge)
{
@@ -1369,9 +1369,12 @@ static void try_odm_power_optimization_and_revalidate(
{
int i;
unsigned int new_vlevel;
+ unsigned int cur_policy[MAX_PIPES];
- for (i = 0; i < pipe_cnt; i++)
+ for (i = 0; i < pipe_cnt; i++) {
+ cur_policy[i] = pipes[i].pipe.dest.odm_combine_policy;
pipes[i].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1;
+ }
new_vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
@@ -1380,6 +1383,9 @@ static void try_odm_power_optimization_and_revalidate(
memset(merge, 0, MAX_PIPES * sizeof(bool));
*vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, new_vlevel, split, merge);
context->bw_ctx.dml.vba.VoltageLevel = *vlevel;
+ } else {
+ for (i = 0; i < pipe_cnt; i++)
+ pipes[i].pipe.dest.odm_combine_policy = cur_policy[i];
}
}
@@ -1550,7 +1556,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
- if (should_allow_odm_power_optimization(dc, context, vba, split, merge))
+ if (should_apply_odm_power_optimization(dc, context, vba, split, merge))
try_odm_power_optimization_and_revalidate(
dc, context, pipes, split, merge, vlevel, *pipe_cnt);
@@ -2178,6 +2184,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
int i;
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
+ if (!dc->config.enable_windowed_mpo_odm)
+ dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes);
/* repopulate_pipes = 1 means the pipes were either split or merged. In this case
* we have to re-calculate the DET allocation and run through DML once more to
@@ -2186,7 +2194,9 @@ bool dcn32_internal_validate_bw(struct dc *dc,
* */
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
dm_prefetch_support_uclk_fclk_and_stutter_if_possible;
+
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
+
if (vlevel == context->bw_ctx.dml.soc.num_states) {
/* failed after DET size changes */
goto validate_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 2c379be19a..b6744ad778 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -228,17 +228,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
break;
case dml_project_dcn35:
+ case dml_project_dcn351:
out->num_chans = 4;
out->round_trip_ping_latency_dcfclk_cycles = 106;
out->smn_latency_us = 2;
out->dispclk_dppclk_vco_speed_mhz = 3600;
break;
- case dml_project_dcn351:
- out->num_chans = 16;
- out->round_trip_ping_latency_dcfclk_cycles = 1100;
- out->smn_latency_us = 2;
- break;
}
/* ---Overrides if available--- */
if (dml2->config.bbox_overrides.dram_num_chan)
@@ -398,7 +394,6 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
/* Copy clocks tables entries, if available */
if (dml2->config.bbox_overrides.clks_table.num_states) {
p->in_states->num_states = dml2->config.bbox_overrides.clks_table.num_states;
-
for (i = 0; i < dml2->config.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels; i++) {
p->in_states->state_array[i].dcfclk_mhz = dml2->config.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz;
}
@@ -437,6 +432,14 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
}
dml2_policy_build_synthetic_soc_states(s, p);
+ if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 ||
+ dml2->v20.dml_core_ctx.project == dml_project_dcn351) {
+ // Override last out_state with data from last in_state
+ // This will ensure that out_state contains max fclk
+ memcpy(&p->out_states->state_array[p->out_states->num_states - 1],
+ &p->in_states->state_array[p->in_states->num_states - 1],
+ sizeof(struct soc_state_bounding_box_st));
+ }
}
void dml2_translate_ip_params(const struct dc *in, struct ip_params_st *out)
@@ -817,13 +820,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
@@ -842,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index c62b61ac45..269bfb14c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -696,13 +696,13 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_v
return out;
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static inline struct dml2_context *dml2_allocate_memory(void)
{
- // Allocate Mode Lib Ctx
- *dml2 = (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+}
- if (!(*dml2))
- return false;
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
// Store config options
(*dml2)->config = *config;
@@ -730,9 +730,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
@@ -750,3 +759,33 @@ void dml2_extract_dram_and_fclk_change_support(struct dml2_context *dml2,
*fclk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.FCLKChangeSupport[0];
*dram_clk_change_support = (unsigned int) dml2->v20.dml_core_ctx.ms.support.DRAMClockChangeSupport[0];
}
+
+void dml2_copy(struct dml2_context *dst_dml2,
+ struct dml2_context *src_dml2)
+{
+ /* copy Mode Lib Ctx */
+ memcpy(dst_dml2, src_dml2, sizeof(struct dml2_context));
+}
+
+bool dml2_create_copy(struct dml2_context **dst_dml2,
+ struct dml2_context *src_dml2)
+{
+ /* Allocate Mode Lib Ctx */
+ *dst_dml2 = dml2_allocate_memory();
+
+ if (!(*dst_dml2))
+ return false;
+
+ /* copy Mode Lib Ctx */
+ dml2_copy(*dst_dml2, src_dml2);
+
+ return true;
+}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+ dml2_init(in_dc, config, dml2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index fe15baa4bf..548504d7de 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -191,6 +191,13 @@ bool dml2_create(const struct dc *in_dc,
struct dml2_context **dml2);
void dml2_destroy(struct dml2_context *dml2);
+void dml2_copy(struct dml2_context *dst_dml2,
+ struct dml2_context *src_dml2);
+bool dml2_create_copy(struct dml2_context **dst_dml2,
+ struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index c1f1665e55..3642c069bd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1184,7 +1184,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
if (dccg) {
dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+			if (dccg->funcs->set_dtbclk_dto)
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
}
} else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 1fc8436c81..2b3ef5cdbd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -1834,6 +1834,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ if (!stream)
+ return false;
+
if (dpp == NULL)
return false;
@@ -1856,8 +1859,8 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
} else
dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
- if (stream != NULL && stream->ctx != NULL &&
- stream->out_transfer_func != NULL) {
+ if (stream->ctx &&
+ stream->out_transfer_func) {
log_tf(stream->ctx,
stream->out_transfer_func,
dpp->regamma_params.hw_points_num);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c966f38583..e3f547e061 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1385,6 +1385,11 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
return;
}
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1434,10 +1439,6 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1879,19 +1880,20 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -1965,12 +1967,22 @@ void dcn20_program_front_end_for_ctx(
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
@@ -2009,17 +2021,6 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 772dc0db91..c89149d153 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -656,10 +656,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+		/* Wait for two frames to make sure AV mute is sent out */
+ if (enable) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c..093f438755 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
#define FN(reg_name, field_name) \
hws->shifts->field_name, hws->masks->field_name
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index cb9d838932..580afb0088 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -969,29 +969,6 @@ void dcn32_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1106,10 +1083,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -1122,20 +1095,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
@@ -1159,6 +1118,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+		 * the blank pattern is generated by the OPP; reprogram it
+		 * because the OPP count changed
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index cf26d2ad40..325a711a14 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -338,29 +338,6 @@ void dcn35_init_hw(struct dc *dc)
}
}
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
- int opp_cnt)
-{
- bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
- int flow_ctrl_cnt;
-
- if (opp_cnt >= 2)
- hblank_halved = true;
-
- flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
- stream->timing.h_border_left -
- stream->timing.h_border_right;
-
- if (hblank_halved)
- flow_ctrl_cnt /= 2;
-
- /* ODM combine 4:1 case */
- if (opp_cnt == 4)
- flow_ctrl_cnt /= 2;
-
- return flow_ctrl_cnt;
-}
-
static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -454,10 +431,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
struct pipe_ctx *odm_pipe;
int opp_cnt = 0;
int opp_inst[MAX_PIPES] = {0};
- bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
- struct mpc_dwb_flow_control flow_control;
- struct mpc *mpc = dc->res_pool->mpc;
- int i;
opp_cnt = get_odm_config(pipe_ctx, opp_inst);
@@ -470,20 +443,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
- rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
- flow_control.flow_ctrl_mode = 0;
- flow_control.flow_ctrl_cnt0 = 0x80;
- flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
- if (mpc->funcs->set_out_rate_control) {
- for (i = 0; i < opp_cnt; ++i) {
- mpc->funcs->set_out_rate_control(
- mpc, opp_inst[i],
- true,
- rate_control_2x_pclk,
- &flow_control);
- }
- }
-
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 06ca8bfb91..3d72443938 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -427,22 +427,18 @@ struct pipe_ctx *resource_get_primary_dpp_pipe(const struct pipe_ctx *dpp_pipe);
int resource_get_mpc_slice_index(const struct pipe_ctx *dpp_pipe);
/*
- * Get number of MPC "cuts" of the plane associated with the pipe. MPC slice
- * count is equal to MPC splits + 1. For example if a plane is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for a plane with MPCC combine. otherwise
- * the number of MPC "cuts" for the plane.
+ * Get the number of MPC slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an MPC combine
+ * pipe topology.
*/
-int resource_get_mpc_slice_count(const struct pipe_ctx *opp_head);
+int resource_get_mpc_slice_count(const struct pipe_ctx *pipe);
/*
- * Get number of ODM "cuts" of the timing associated with the pipe. ODM slice
- * count is equal to ODM splits + 1. For example if a timing is cut 3 times, it
- * will have 4 pieces of slice.
- * return - 0 if pipe is not used for ODM combine. otherwise
- * the number of ODM "cuts" for the timing.
+ * Get the number of ODM slices associated with the pipe.
+ * The function returns 0 if the pipe is not associated with an ODM combine
+ * pipe topology.
*/
-int resource_get_odm_slice_count(const struct pipe_ctx *otg_master);
+int resource_get_odm_slice_count(const struct pipe_ctx *pipe);
/* Get the ODM slice index counting from 0 from left most slice */
int resource_get_odm_slice_index(const struct pipe_ctx *opp_head);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index bdae54c464..7c4a93d3cd 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -889,7 +889,8 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
- if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) {
+ if (replay != NULL && link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index ee67a35c2a..ff930a71e4 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 738ee763f2..84f9b412a4 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */
- if (stream->link->psr_settings.psr_feature_enabled) {
- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
- vsc_packet_revision = vsc_packet_rev4;
- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
- vsc_packet_revision = vsc_packet_rev2;
- }
-
- if (stream->link->replay_settings.config.replay_supported)
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
+ vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->replay_settings.config.replay_supported)
vsc_packet_revision = vsc_packet_rev4;
+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
if (stream->use_vsc_sdp_for_colorimetry)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 20c53eefd6..fee86be55c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2518,6 +2518,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err, ret;
+ u32 pwm_mode;
int value;
if (amdgpu_in_reset(adev))
@@ -2529,13 +2530,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
if (err)
return err;
+ if (value == 0)
+ pwm_mode = AMD_FAN_CTRL_NONE;
+ else if (value == 1)
+ pwm_mode = AMD_FAN_CTRL_MANUAL;
+ else if (value == 2)
+ pwm_mode = AMD_FAN_CTRL_AUTO;
+ else
+ return -EINVAL;
+
ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
}
- ret = amdgpu_dpm_set_fan_control_mode(adev, value);
+ ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
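Usage-wise, the mapping above keeps the standard hwmon pwm1_enable semantics: writing 1 (e.g. echo 1 > /sys/class/hwmon/hwmonX/pwm1_enable) selects manual fan control (AMD_FAN_CTRL_MANUAL), 2 selects automatic control (AMD_FAN_CTRL_AUTO), and 0 disables fan control (AMD_FAN_CTRL_NONE). Any other value is now rejected with -EINVAL instead of being passed straight to amdgpu_dpm_set_fan_control_mode(). (hwmonX is illustrative; the index depends on the system.)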
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index b9c5398cb9..ac04ed1e49 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1285,8 +1285,9 @@ static int arcturus_get_power_limit(struct smu_context *smu,
{
struct smu_11_0_powerplay_table *powerplay_table =
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -1303,12 +1304,16 @@ static int arcturus_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (od_settings->cap[SMU_11_0_ODCAP_POWER_LIMIT]) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -2272,8 +2277,8 @@ static uint16_t arcturus_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (uint16_t)(((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (uint16_t)(((esm_ctrl >> 8) & 0x7F) + 128);
return smu_v11_0_get_current_pcie_link_speed(smu);
}
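The same mask fix recurs below for aldebaran and SMU v13.0.6: bit 15 of PCIE_ESM_CTRL is now tested on its own rather than together with the speed bits, and the speed field is read as the full 7-bit value in bits 14:8. For example, esm_ctrl = 0xD000 gives ((0xD000 >> 8) & 0x7F) + 128 = 0x50 + 128 = 208, whereas the old 0x3F mask would have truncated the field and returned 144.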
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index aefe72c8ab..0821018459 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index fa953d4445..6f37ca7a06 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
return throttler_status;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0;
}
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
- enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
- return od_table->cap[cap];
-}
-
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
enum SMU_11_0_7_ODSETTING_ID setting,
uint32_t *min, uint32_t *max)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index f1440869d1..e3579a8fd7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1683,8 +1683,8 @@ static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (((esm_ctrl >> 8) & 0x7F) + 128);
return smu_v13_0_get_current_pcie_link_speed(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index ac3c9e0966..9ac6c408d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2351,7 +2351,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2364,12 +2364,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 8cd2b8cc3d..58f63b7a72 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -2016,8 +2016,8 @@ static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
/* TODO: confirm this on real target */
esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
- if ((esm_ctrl >> 15) & 0x1FFFF)
- return (((esm_ctrl >> 8) & 0x3F) + 128);
+ if ((esm_ctrl >> 15) & 0x1)
+ return (((esm_ctrl >> 8) & 0x7F) + 128);
speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index b296c5f9d9..402e0d184e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2315,7 +2315,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2328,12 +2328,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index d8f8ad0e71..01f2ab4567 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -229,8 +229,6 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
break;
case IP_VERSION(14, 0, 0):
- if ((smu->smc_fw_version < 0x5d3a00))
- dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
break;
default:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index 94ccdbfd70..24a43374a7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -261,7 +261,10 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->MpipuclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = metrics->GfxActivity / 100;
+ if ((smu->smc_fw_version > 0x5d4600))
+ *value = metrics->GfxActivity;
+ else
+ *value = metrics->GfxActivity / 100;
break;
case METRICS_AVERAGE_VCNACTIVITY:
*value = metrics->VcnActivity / 100;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 8be235144f..6fc292393c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1277,17 +1277,6 @@ static int adv7511_probe(struct i2c_client *i2c)
INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
- if (i2c->irq) {
- init_waitqueue_head(&adv7511->wq);
-
- ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
- adv7511_irq_handler,
- IRQF_ONESHOT, dev_name(dev),
- adv7511);
- if (ret)
- goto err_unregister_cec;
- }
-
adv7511_power_off(adv7511);
i2c_set_clientdata(i2c, adv7511);
@@ -1311,6 +1300,17 @@ static int adv7511_probe(struct i2c_client *i2c)
adv7511_audio_init(dev, adv7511);
+ if (i2c->irq) {
+ init_waitqueue_head(&adv7511->wq);
+
+ ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
+ adv7511_irq_handler,
+ IRQF_ONESHOT, dev_name(dev),
+ adv7511);
+ if (ret)
+ goto err_unregister_audio;
+ }
+
if (adv7511->info->has_dsi) {
ret = adv7533_attach_dsi(adv7511);
if (ret)
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 03532efb89..e5839c89a3 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -429,26 +429,24 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
- int ret = -1;
- int num = 0;
+ const struct drm_edid *drm_edid;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ int ret, num;
- edid = drm_bridge_get_edid(lt->hdmi_port, connector);
- if (edid) {
- drm_connector_update_edid_property(connector, edid);
- num = drm_add_edid_modes(connector, edid);
- } else {
- return ret;
- }
+ drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
+ drm_edid_connector_update(connector, drm_edid);
+ if (!drm_edid)
+ return 0;
+
+ num = drm_edid_connector_add_modes(connector);
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
- if (ret)
- num = ret;
+ if (ret < 0)
+ num = 0;
- kfree(edid);
+ drm_edid_free(drm_edid);
return num;
}
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index f285ed67eb..2aa4b19166 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -104,7 +104,10 @@ msm:apq8016:
DRIVER_NAME: msm
BM_DTB: https://${PIPELINE_ARTIFACTS_BASE}/arm64/apq8016-sbc.dtb
GPU_VERSION: apq8016
- BM_CMDLINE: "ip=dhcp console=ttyMSM0,115200n8 $BM_KERNEL_EXTRA_ARGS root=/dev/nfs rw nfsrootdebug nfsroot=,tcp,nfsvers=4.2 init=/init $BM_KERNELARGS"
+ # Disabling unused clocks conflicts with the MDSS runtime PM trying to
+ # disable those clocks, which causes boot to fail.
+ # Reproducer: DRM_MSM=y, DRM_I2C_ADV7511=m
+ BM_KERNEL_EXTRA_ARGS: clk_ignore_unused
RUNNER_TAG: google-freedreno-db410c
script:
- ./install/bare-metal/fastboot.sh
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 30d66bee0e..e1cfba2ff5 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -27,8 +27,9 @@
#include <linux/mutex.h>
#include <drm/drm_atomic_state_helper.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
#include <drm/drm_of.h>
@@ -1207,6 +1208,47 @@ int drm_bridge_get_modes(struct drm_bridge *bridge,
EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
/**
+ * drm_bridge_edid_read - read the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get
+ * the EDID and return it. Otherwise return NULL.
+ *
+ * If &drm_bridge_funcs.edid_read is not set, fall back to using
+ * drm_bridge_get_edid() and wrapping it in struct drm_edid.
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or NULL otherwise.
+ */
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+ return NULL;
+
+ /* Transitional: Fall back to ->get_edid. */
+ if (!bridge->funcs->edid_read) {
+ const struct drm_edid *drm_edid;
+ struct edid *edid;
+
+ edid = drm_bridge_get_edid(bridge, connector);
+ if (!edid)
+ return NULL;
+
+ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
+
+ kfree(edid);
+
+ return drm_edid;
+ }
+
+ return bridge->funcs->edid_read(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_edid_read);
+
+/**
* drm_bridge_get_edid - get the EDID data of the connected display
* @bridge: bridge control structure
* @connector: the connector to read EDID for
@@ -1215,6 +1257,8 @@ EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
* DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
* get the EDID and return it. Otherwise return NULL.
*
+ * Deprecated. Prefer using drm_bridge_edid_read().
+ *
* RETURNS:
* The retrieved EDID on success, or NULL otherwise.
*/
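
For context, a minimal sketch of how a bridge-based connector's .get_modes hook might use the new drm_bridge_edid_read() helper, mirroring the lontium-lt8912b conversion above; example_connector_to_bridge() is a hypothetical driver accessor, everything else comes from the calls shown in this patch:

#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

static int example_connector_get_modes(struct drm_connector *connector)
{
	/* example_connector_to_bridge() is a hypothetical driver accessor */
	struct drm_bridge *bridge = example_connector_to_bridge(connector);
	const struct drm_edid *drm_edid;
	int num;

	/* Read the EDID through the bridge and keep the connector's EDID
	 * property in sync; a NULL read clears the property. */
	drm_edid = drm_bridge_edid_read(bridge, connector);
	drm_edid_connector_update(connector, drm_edid);
	if (!drm_edid)
		return 0;

	num = drm_edid_connector_add_modes(connector);

	drm_edid_free(drm_edid);

	return num;
}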
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index f3a6ac908f..5ebdd6f8f3 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -771,8 +771,12 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
return -EINVAL;
/* Actual range allocation */
- if (start + size == end)
+ if (start + size == end) {
+ if (!IS_ALIGNED(start | end, min_block_size))
+ return -EINVAL;
+
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+ }
original_size = size;
original_min_size = min_block_size;
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index e814020bbc..cfbe020de5 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
- return -EINVAL;
+ return 0;
- if (panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel, connector);
+ if (panel->funcs && panel->funcs->get_modes) {
+ int num;
- return -EOPNOTSUPP;
+ num = panel->funcs->get_modes(panel, connector);
+ if (num > 0)
+ return num;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_get_modes);
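
As a usage note, drm_panel_get_modes() now never returns a negative error code, so a connector .get_modes implementation built on top of it can forward its return value directly. A minimal sketch under that assumption; example_connector_to_panel() is a hypothetical accessor:

#include <drm/drm_panel.h>

static int example_panel_connector_get_modes(struct drm_connector *connector)
{
	/* example_connector_to_panel() is a hypothetical driver accessor */
	struct drm_panel *panel = example_connector_to_panel(connector);

	/*
	 * drm_panel_get_modes() now returns 0 instead of a negative errno on
	 * failure, so the value can be handed straight back to the probe
	 * helper (which also clamps negative .get_modes() returns to 0).
	 */
	return drm_panel_get_modes(panel, connector);
}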
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 3f479483d7..15ed974bcb 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -419,6 +419,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
count = connector_funcs->get_modes(connector);
+ /* The .get_modes() callback should not return negative values. */
+ if (count < 0) {
+ drm_err(connector->dev, ".get_modes() returned %pe\n",
+ ERR_PTR(count));
+ count = 0;
+ }
+
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5860428da8..f0bdcfc0c2 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -1367,7 +1367,7 @@ static void syncobj_eventfd_entry_fence_func(struct dma_fence *fence,
struct syncobj_eventfd_entry *entry =
container_of(cb, struct syncobj_eventfd_entry, fence_cb);
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
@@ -1401,13 +1401,13 @@ syncobj_eventfd_entry_func(struct drm_syncobj *syncobj,
entry->fence = fence;
if (entry->flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
} else {
ret = dma_fence_add_callback(fence, &entry->fence_cb,
syncobj_eventfd_entry_fence_func);
if (ret == -ENOENT) {
- eventfd_signal(entry->ev_fd_ctx, 1);
+ eventfd_signal(entry->ev_fd_ctx);
syncobj_eventfd_entry_free(entry);
}
}
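
Side note on the eventfd_signal() conversions above: the explicit count argument has been dropped and each call now bumps the eventfd counter by one. A minimal sketch of the single-argument form:

#include <linux/eventfd.h>

/* Signal completion through an eventfd; the explicit count argument is gone
 * and each call adds one to the counter. */
static void example_signal_completion(struct eventfd_ctx *ev_fd_ctx)
{
	if (ev_fd_ctx)
		eventfd_signal(ev_fd_ctx);
}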
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a8d3fa81e4..f9bc837e22 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -494,7 +494,7 @@ static const struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 3,
+ .minor = 4,
};
/*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 6720124243..8665f2658d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -265,6 +265,9 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
{
struct etnaviv_chip_identity *ident = &gpu->identity;
+ const u32 product_id = ident->product_id;
+ const u32 customer_id = ident->customer_id;
+ const u32 eco_id = ident->eco_id;
int i;
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
@@ -278,6 +281,12 @@ bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
etnaviv_chip_identities[i].eco_id == ~0U)) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
+
+ /* Restore some id values, as ~0U aka 'don't care' might have been used. */
+ ident->product_id = product_id;
+ ident->customer_id = customer_id;
+ ident->eco_id = eco_id;
+
return true;
}
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5e1adfcaa..fb941a8c99 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
- return -EFAULT;
+ return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
- return -ENOMEM;
+ return 0;
}
drm_connector_update_edid_property(connector, edid);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index dd9903eab5..eff51bfc46 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return -ENODEV;
+ return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return -ENODEV;
+ return 0;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index bf0dbd7d06..67143a0f51 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1155,7 +1155,6 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
}
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
- intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
/* ensure all panel commands dispatched before enabling transcoder */
wait_for_cmds_dispatched_to_panel(encoder);
@@ -1256,6 +1255,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
/* step7: enable backlight */
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 4e8f1e91bb..e7ea287845 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1952,16 +1952,12 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *i915,
* these devices we split the init OTP sequence into a deassert sequence and
* the actual init OTP part.
*/
-static void fixup_mipi_sequences(struct drm_i915_private *i915,
- struct intel_panel *panel)
+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
{
u8 *init_otp;
int len;
- /* Limit this to VLV for now. */
- if (!IS_VALLEYVIEW(i915))
- return;
-
/* Limit this to v1 vid-mode sequences */
if (panel->vbt.dsi.config->is_cmd_mode ||
panel->vbt.dsi.seq_version != 1)
@@ -1997,6 +1993,41 @@ static void fixup_mipi_sequences(struct drm_i915_private *i915,
panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
}
+/*
+ * Some machines (e.g. Lenovo 82TQ) appear to have broken
+ * VBT sequences:
+ * - INIT_OTP is not present at all
+ * - what should be in INIT_OTP is in DISPLAY_ON
+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON
+ * (along with the actual backlight stuff)
+ *
+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.
+ *
+ * TODO: Do we need to limit this to specific machines,
+ * or examine the contents of the sequences to
+ * avoid false positives?
+ */
+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {
+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");
+
+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],
+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);
+ }
+}
+
+static void fixup_mipi_sequences(struct drm_i915_private *i915,
+ struct intel_panel *panel)
+{
+ if (DISPLAY_VER(i915) >= 11)
+ icl_fixup_mipi_sequences(i915, panel);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_fixup_mipi_sequences(i915, panel);
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *i915,
struct intel_panel *panel)
@@ -3321,6 +3352,9 @@ bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_da
{
const struct child_device_config *child = &devdata->child;
+ if (!devdata)
+ return false;
+
if (!intel_bios_encoder_supports_dp(devdata) ||
!intel_bios_encoder_supports_hdmi(devdata))
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 07d6500500..82dd5be72e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -246,7 +246,14 @@ static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);
- return intel_port_to_phy(i915, dig_port->base.port);
+ /*
+ * FIXME should we care about the (VBT defined) dig_port->aux_ch
+ * relationship or should this be purely defined by the hardware layout?
+ * Currently if the port doesn't appear in the VBT, or if it's declared
+ * as HDMI-only and routed to a combo PHY, the encoder either won't be
+ * present at all or it will not have an aux_ch assigned.
+ */
+ return dig_port ? intel_port_to_phy(i915, dig_port->base.port) : PHY_NONE;
}
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
@@ -414,7 +421,8 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
intel_de_rmw(dev_priv, regs->driver, 0, HSW_PWR_WELL_CTL_REQ(pw_idx));
- if (DISPLAY_VER(dev_priv) < 12)
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
0, ICL_LANE_ENABLE_AUX);
@@ -437,7 +445,10 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy), ICL_LANE_ENABLE_AUX, 0);
+ /* FIXME this is a mess */
+ if (phy != PHY_NONE)
+ intel_de_rmw(dev_priv, ICL_PORT_CL_DW12(phy),
+ ICL_LANE_ENABLE_AUX, 0);
intel_de_rmw(dev_priv, regs->driver, HSW_PWR_WELL_CTL_REQ(pw_idx), 0);
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 99bdb83359..7862e7cefe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -411,7 +411,7 @@ TRACE_EVENT(intel_fbc_activate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -438,7 +438,7 @@ TRACE_EVENT(intel_fbc_deactivate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -465,7 +465,7 @@ TRACE_EVENT(intel_fbc_nuke,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 399653a20f..fb694ff9dc 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -631,9 +631,9 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
};
static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
+ { .name = "PCH DPLL A", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_A, },
+ { .name = "PCH DPLL B", .funcs = &ibx_pch_dpll_funcs, .id = DPLL_ID_PCH_PLL_B, },
+ {}
};
static const struct intel_dpll_mgr pch_pll_mgr = {
@@ -1239,13 +1239,16 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
};
static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
+ { .name = "WRPLL 1", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL1, },
+ { .name = "WRPLL 2", .funcs = &hsw_ddi_wrpll_funcs, .id = DPLL_ID_WRPLL2, },
+ { .name = "SPLL", .funcs = &hsw_ddi_spll_funcs, .id = DPLL_ID_SPLL, },
+ { .name = "LCPLL 810", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_810,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "LCPLL 1350", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_1350,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "LCPLL 2700", .funcs = &hsw_ddi_lcpll_funcs, .id = DPLL_ID_LCPLL_2700,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ {}
};
static const struct intel_dpll_mgr hsw_pll_mgr = {
@@ -1921,11 +1924,12 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
};
static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &skl_ddi_dpll0_funcs, .id = DPLL_ID_SKL_DPLL0,
+ .flags = INTEL_DPLL_ALWAYS_ON, },
+ { .name = "DPLL 1", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
+ { .name = "DPLL 2", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
+ { .name = "DPLL 3", .funcs = &skl_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr skl_pll_mgr = {
@@ -2376,10 +2380,10 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
};
static const struct dpll_info bxt_plls[] = {
- { "PORT PLL A", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
- { "PORT PLL B", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "PORT PLL C", &bxt_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { },
+ { .name = "PORT PLL A", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL0, },
+ { .name = "PORT PLL B", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL1, },
+ { .name = "PORT PLL C", .funcs = &bxt_ddi_pll_funcs, .id = DPLL_ID_SKL_DPLL2, },
+ {}
};
static const struct intel_dpll_mgr bxt_pll_mgr = {
@@ -2485,7 +2489,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
static bool
ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
{
- return (((IS_ELKHARTLAKE(i915) || IS_JASPERLAKE(i915)) &&
+ return ((IS_ELKHARTLAKE(i915) &&
IS_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) &&
i915->display.dpll.ref_clks.nssc == 38400;
@@ -3284,6 +3288,8 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct icl_port_dpll *port_dpll =
&crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
struct skl_wrpll_params pll_params = {};
@@ -3302,7 +3308,11 @@ static int icl_compute_tc_phy_dplls(struct intel_atomic_state *state,
return ret;
/* this is mainly for the fastset check */
- icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
+ if (old_crtc_state->shared_dpll &&
+ old_crtc_state->shared_dpll->info->id == DPLL_ID_ICL_TBTPLL)
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_DEFAULT);
+ else
+ icl_set_active_port_dpll(crtc_state, ICL_PORT_DPLL_MG_PHY);
crtc_state->port_clock = icl_ddi_mg_pll_get_freq(i915, NULL,
&port_dpll->hw_state);
@@ -4014,14 +4024,15 @@ static const struct intel_shared_dpll_funcs mg_pll_funcs = {
};
static const struct dpll_info icl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .flags = INTEL_DPLL_IS_ALT_PORT_DPLL, },
+ { .name = "MG PLL 1", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "MG PLL 2", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "MG PLL 3", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "MG PLL 4", .funcs = &mg_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ {}
};
static const struct intel_dpll_mgr icl_pll_mgr = {
@@ -4035,10 +4046,10 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
};
static const struct dpll_info ehl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
+ {}
};
static const struct intel_dpll_mgr ehl_pll_mgr = {
@@ -4058,16 +4069,17 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
};
static const struct dpll_info tgl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { "TC PLL 5", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL5, 0 },
- { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .flags = INTEL_DPLL_IS_ALT_PORT_DPLL, },
+ { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ { .name = "TC PLL 5", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL5, },
+ { .name = "TC PLL 6", .funcs = &dkl_pll_funcs, .id = DPLL_ID_TGL_MGPLL6, },
+ {}
};
static const struct intel_dpll_mgr tgl_pll_mgr = {
@@ -4081,10 +4093,10 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
};
static const struct dpll_info rkl_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 4", &combo_pll_funcs, DPLL_ID_EHL_DPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
+ {}
};
static const struct intel_dpll_mgr rkl_pll_mgr = {
@@ -4097,11 +4109,11 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
};
static const struct dpll_info dg1_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_DG1_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_DG1_DPLL1, 0 },
- { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
- { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL1, },
+ { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
+ { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr dg1_pll_mgr = {
@@ -4114,11 +4126,11 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
};
static const struct dpll_info adls_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "DPLL 2", &combo_pll_funcs, DPLL_ID_DG1_DPLL2, 0 },
- { "DPLL 3", &combo_pll_funcs, DPLL_ID_DG1_DPLL3, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "DPLL 2", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL2, },
+ { .name = "DPLL 3", .funcs = &combo_pll_funcs, .id = DPLL_ID_DG1_DPLL3, },
+ {}
};
static const struct intel_dpll_mgr adls_pll_mgr = {
@@ -4131,14 +4143,15 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
};
static const struct dpll_info adlp_plls[] = {
- { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
- { },
+ { .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
+ { .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
+ { .name = "TBT PLL", .funcs = &tbt_pll_funcs, .id = DPLL_ID_ICL_TBTPLL,
+ .flags = INTEL_DPLL_IS_ALT_PORT_DPLL, },
+ { .name = "TC PLL 1", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL1, },
+ { .name = "TC PLL 2", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL2, },
+ { .name = "TC PLL 3", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL3, },
+ { .name = "TC PLL 4", .funcs = &dkl_pll_funcs, .id = DPLL_ID_ICL_MGPLL4, },
+ {}
};
static const struct intel_dpll_mgr adlp_pll_mgr = {
@@ -4462,31 +4475,29 @@ verify_single_dpll_state(struct drm_i915_private *i915,
struct intel_crtc *crtc,
const struct intel_crtc_state *new_crtc_state)
{
- struct intel_dpll_hw_state dpll_hw_state;
+ struct intel_dpll_hw_state dpll_hw_state = {};
u8 pipe_mask;
bool active;
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
-
- drm_dbg_kms(&i915->drm, "%s\n", pll->info->name);
-
active = intel_dpll_get_hw_state(i915, pll, &dpll_hw_state);
if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
I915_STATE_WARN(i915, !pll->on && pll->active_mask,
- "pll in active use but not on in sw tracking\n");
+ "%s: pll in active use but not on in sw tracking\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on && !pll->active_mask,
- "pll is on but not used by any active pipe\n");
+ "%s: pll is on but not used by any active pipe\n",
+ pll->info->name);
I915_STATE_WARN(i915, pll->on != active,
- "pll on state mismatch (expected %i, found %i)\n",
- pll->on, active);
+ "%s: pll on state mismatch (expected %i, found %i)\n",
+ pll->info->name, pll->on, active);
}
if (!crtc) {
I915_STATE_WARN(i915,
pll->active_mask & ~pll->state.pipe_mask,
- "more active pll users than references: 0x%x vs 0x%x\n",
- pll->active_mask, pll->state.pipe_mask);
+ "%s: more active pll users than references: 0x%x vs 0x%x\n",
+ pll->info->name, pll->active_mask, pll->state.pipe_mask);
return;
}
@@ -4495,21 +4506,30 @@ verify_single_dpll_state(struct drm_i915_private *i915,
if (new_crtc_state->hw.active)
I915_STATE_WARN(i915, !(pll->active_mask & pipe_mask),
- "pll active mismatch (expected pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (expected pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
- pipe_name(crtc->pipe), pll->active_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(i915, !(pll->state.pipe_mask & pipe_mask),
- "pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
- pipe_mask, pll->state.pipe_mask);
+ "%s: pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
+ pll->info->name, pipe_mask, pll->state.pipe_mask);
I915_STATE_WARN(i915,
pll->on && memcmp(&pll->state.hw_state, &dpll_hw_state,
sizeof(dpll_hw_state)),
- "pll hw state mismatch\n");
+ "%s: pll hw state mismatch\n",
+ pll->info->name);
+}
+
+static bool has_alt_port_dpll(const struct intel_shared_dpll *old_pll,
+ const struct intel_shared_dpll *new_pll)
+{
+ return old_pll && new_pll && old_pll != new_pll &&
+ (old_pll->info->flags & INTEL_DPLL_IS_ALT_PORT_DPLL ||
+ new_pll->info->flags & INTEL_DPLL_IS_ALT_PORT_DPLL);
}
void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
@@ -4531,11 +4551,15 @@ void intel_shared_dpll_state_verify(struct intel_atomic_state *state,
struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
I915_STATE_WARN(i915, pll->active_mask & pipe_mask,
- "pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->active_mask);
- I915_STATE_WARN(i915, pll->state.pipe_mask & pipe_mask,
- "pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
- pipe_name(crtc->pipe), pll->state.pipe_mask);
+ "%s: pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->active_mask);
+
+ /* TC ports have both MG/TC and TBT PLL referenced simultaneously */
+ I915_STATE_WARN(i915, !has_alt_port_dpll(old_crtc_state->shared_dpll,
+ new_crtc_state->shared_dpll) &&
+ pll->state.pipe_mask & pipe_mask,
+ "%s: pll enabled crtcs mismatch (found pipe %c in enabled mask (0x%x))\n",
+ pll->info->name, pipe_name(crtc->pipe), pll->state.pipe_mask);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index dd4796a617..1a88ccc983 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -271,12 +271,16 @@ struct dpll_info {
enum intel_dpll_id id;
#define INTEL_DPLL_ALWAYS_ON (1 << 0)
+#define INTEL_DPLL_IS_ALT_PORT_DPLL (1 << 1)
/**
* @flags:
*
* INTEL_DPLL_ALWAYS_ON
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
+ * INTEL_DPLL_IS_ALT_PORT_DPLL
+ * Inform the state checker that the DPLL can be used as a fallback
+ * (for TC->TBT fallback).
*/
u32 flags;
};
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 7fd6280c54..15c3dd5d38 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -339,6 +339,17 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
return max(0, vblank_start - intel_usecs_to_scanlines(adjusted_mode, latency));
}
+static u32 dsb_chicken(struct intel_crtc *crtc)
+{
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR)
+ return DSB_CTRL_WAIT_SAFE_WINDOW |
+ DSB_CTRL_NO_WAIT_VBLANK |
+ DSB_INST_WAIT_SAFE_WINDOW |
+ DSB_INST_NO_WAIT_VBLANK;
+ else
+ return 0;
+}
+
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
int dewake_scanline)
{
@@ -360,6 +371,9 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
ctrl | DSB_ENABLE);
+ intel_de_write_fw(dev_priv, DSB_CHICKEN(pipe, dsb->id),
+ dsb_chicken(crtc));
+
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5d905f932c..eb5bd07439 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -187,10 +187,11 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/*
- * TRANS_SET_CONTEXT_LATENCY with VRR enabled
- * requires this chicken bit on ADL/DG2.
+ * This bit seems to have two meanings depending on the platform:
+ * TGL: generate VRR "safe window" for DSB vblank waits
+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
*/
- if (DISPLAY_VER(dev_priv) == 13)
+ if (IS_DISPLAY_VER(dev_priv, 12, 13))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
0, PIPE_VBLANK_WITH_DELAY);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 1d3ebdf406..c08b675935 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -379,6 +379,9 @@ i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
GEM_WARN_ON(obj->userptr.page_ref);
+ if (!obj->userptr.notifier.mm)
+ return;
+
mmu_interval_notifier_remove(&obj->userptr.notifier);
obj->userptr.notifier.mm = NULL;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index e91fc881db..5a3a5b29d1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -278,9 +278,6 @@ static int __engine_park(struct intel_wakeref *wf)
intel_engine_park_heartbeat(engine);
intel_breadcrumbs_park(engine->breadcrumbs);
- /* Must be reset upon idling, or we may miss the busy wakeup. */
- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
-
if (engine->park)
engine->park(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index e8f42ec6b1..42e09f1589 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3272,6 +3272,9 @@ static void execlists_park(struct intel_engine_cs *engine)
{
cancel_timer(&engine->execlists.timer);
cancel_timer(&engine->execlists.preempt);
+
+ /* Reset upon idling, or we may delay the busy wakeup. */
+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
}
static void add_to_engine(struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index de3f5903d1..c8e7dfc9f7 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -422,7 +422,7 @@ static void init_irq_map(struct intel_gvt_irq *irq)
#define MSI_CAP_DATA(offset) (offset + 8)
#define MSI_CAP_EN 0x1
-static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
+static void inject_virtual_interrupt(struct intel_vgpu *vgpu)
{
unsigned long offset = vgpu->gvt->device_info.msi_cap_offset;
u16 control, data;
@@ -434,10 +434,10 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
/* Do not generate MSI if MSIEN is disabled */
if (!(control & MSI_CAP_EN))
- return 0;
+ return;
if (WARN(control & GENMASK(15, 1), "only support one MSI format\n"))
- return -EINVAL;
+ return;
trace_inject_msi(vgpu->id, addr, data);
@@ -451,10 +451,9 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
* returned and don't inject interrupt into guest.
*/
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -ESRCH;
- if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
- return -EFAULT;
- return 0;
+ return;
+ if (vgpu->msi_trigger)
+ eventfd_signal(vgpu->msi_trigger);
}
static void propagate_event(struct intel_gvt_irq *irq,
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 8c3f443c83..b758fd110c 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -72,12 +72,13 @@ hwm_locked_with_pm_intel_uncore_rmw(struct hwm_drvdata *ddat,
struct intel_uncore *uncore = ddat->uncore;
intel_wakeref_t wakeref;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
intel_uncore_rmw(uncore, reg, clear, set);
- mutex_unlock(&hwmon->hwmon_lock);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
/*
@@ -136,20 +137,21 @@ hwm_energy(struct hwm_drvdata *ddat, long *energy)
else
rgaddr = hwmon->rg.energy_status_all;
- mutex_lock(&hwmon->hwmon_lock);
+ with_intel_runtime_pm(uncore->rpm, wakeref) {
+ mutex_lock(&hwmon->hwmon_lock);
- with_intel_runtime_pm(uncore->rpm, wakeref)
reg_val = intel_uncore_read(uncore, rgaddr);
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
- ei->reg_val_prev = reg_val;
+ if (reg_val >= ei->reg_val_prev)
+ ei->accum_energy += reg_val - ei->reg_val_prev;
+ else
+ ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ ei->reg_val_prev = reg_val;
- *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
- hwmon->scl_shift_energy);
- mutex_unlock(&hwmon->hwmon_lock);
+ *energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
+ hwmon->scl_shift_energy);
+ mutex_unlock(&hwmon->hwmon_lock);
+ }
}
static ssize_t
@@ -404,6 +406,7 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
/* Block waiting for GuC reset to complete when needed */
for (;;) {
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
mutex_lock(&hwmon->hwmon_lock);
prepare_to_wait(&ddat->waitq, &wait, TASK_INTERRUPTIBLE);
@@ -417,14 +420,13 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
}
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
schedule();
}
finish_wait(&ddat->waitq, &wait);
if (ret)
- goto unlock;
-
- wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ goto exit;
/* Disable PL1 limit and verify, because the limit cannot be disabled on all platforms */
if (val == PL1_DISABLE) {
@@ -444,9 +446,8 @@ hwm_power_max_write(struct hwm_drvdata *ddat, long val)
intel_uncore_rmw(ddat->uncore, hwmon->rg.pkg_rapl_limit,
PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, nval);
exit:
- intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
-unlock:
mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
return ret;
}
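
The hwmon hunks above establish a consistent ordering: take the runtime PM wakeref first, then hwmon_lock. A minimal sketch of that pattern, with field names assumed to match the i915 hwmon code shown above (hwm_drvdata, i915_hwmon, ddat->hwmon):

/* Assumes the i915 hwmon-local types used above (hwm_drvdata, i915_hwmon). */
static void example_hwmon_rmw(struct hwm_drvdata *ddat, i915_reg_t reg,
			      u32 clear, u32 set)
{
	struct i915_hwmon *hwmon = ddat->hwmon;	/* field assumed, as above */
	intel_wakeref_t wakeref;

	/* Runtime PM wakeref first, then the hwmon lock, matching the
	 * ordering established by the hunks above. */
	with_intel_runtime_pm(ddat->uncore->rpm, wakeref) {
		mutex_lock(&hwmon->hwmon_lock);
		intel_uncore_rmw(ddat->uncore, reg, clear, set);
		mutex_unlock(&hwmon->hwmon_lock);
	}
}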
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 135e8d8dbd..b89cf2041f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -4599,7 +4599,7 @@
#define MTL_CHICKEN_TRANS(trans) _MMIO_TRANS((trans), \
_MTL_CHICKEN_TRANS_A, \
_MTL_CHICKEN_TRANS_B)
-#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* ADL/DG2 */
+#define PIPE_VBLANK_WITH_DELAY REG_BIT(31) /* tgl+ */
#define SKL_UNMASK_VBL_TO_PIPE_IN_SRD REG_BIT(30) /* skl+ */
#define HSW_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
#define HSW_FRAME_START_DELAY(x) REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 70349739dd..55dedd73f5 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
int ret;
if (!mode)
- return -EINVAL;
+ return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
- return ret;
+ return 0;
}
drm_mode_copy(mode, &imxpd->mode);
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 4f9736e5f9..7ea244d876 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -75,29 +75,34 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
} else {
bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
if (!bo->base.sgt) {
- sg_free_table(&sgt);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
}
ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
- if (ret) {
- sg_free_table(&sgt);
- kfree(bo->base.sgt);
- bo->base.sgt = NULL;
- return ret;
- }
+ if (ret)
+ goto err_out1;
*bo->base.sgt = sgt;
if (vm) {
ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
if (ret)
- return ret;
+ goto err_out2;
}
bo->heap_size = new_size;
return 0;
+
+err_out2:
+ dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
+err_out1:
+ kfree(bo->base.sgt);
+ bo->base.sgt = NULL;
+err_out0:
+ sg_free_table(&sgt);
+ return ret;
}
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index db43f9dff9..d645b85f97 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -95,11 +95,13 @@ static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
unsigned long flags;
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
- drm_crtc_vblank_put(crtc);
- mtk_crtc->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ if (mtk_crtc->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
+ drm_crtc_vblank_put(crtc);
+ mtk_crtc->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
}
static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index a2fdfc8ddb..cd19885ee0 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -71,8 +71,8 @@
#define DSI_PS_WC 0x3fff
#define DSI_PS_SEL (3 << 16)
#define PACKED_PS_16BIT_RGB565 (0 << 16)
-#define LOOSELY_PS_18BIT_RGB666 (1 << 16)
-#define PACKED_PS_18BIT_RGB666 (2 << 16)
+#define PACKED_PS_18BIT_RGB666 (1 << 16)
+#define LOOSELY_PS_24BIT_RGB666 (2 << 16)
#define PACKED_PS_24BIT_RGB888 (3 << 16)
#define DSI_VSA_NL 0x20
@@ -369,10 +369,10 @@ static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
ps_bpp_mode |= PACKED_PS_24BIT_RGB888;
break;
case MIPI_DSI_FMT_RGB666:
- ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
+ ps_bpp_mode |= LOOSELY_PS_24BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
- ps_bpp_mode |= LOOSELY_PS_18BIT_RGB666;
+ ps_bpp_mode |= PACKED_PS_18BIT_RGB666;
break;
case MIPI_DSI_FMT_RGB565:
ps_bpp_mode |= PACKED_PS_16BIT_RGB565;
@@ -426,7 +426,7 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666:
- tmp_reg = LOOSELY_PS_18BIT_RGB666;
+ tmp_reg = LOOSELY_PS_24BIT_RGB666;
dsi_tmp_buf_bpp = 3;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 500ed2d183..edc1eeef84 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2416,7 +2416,7 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate : a6xx_llc_activate(a6xx_gpu);
+ adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index cf0d44b6e7..b26d23b2ab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -226,6 +226,13 @@ bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
return dpu_enc->wide_bus_en;
}
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc)
+{
+ const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ return dpu_enc->dsc ? true : false;
+}
+
int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
{
struct dpu_encoder_virt *dpu_enc;
@@ -1860,7 +1867,9 @@ static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
dsc_common_mode = 0;
pic_width = dsc->pic_width;
- dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
+ dsc_common_mode = DSC_MODE_SPLIT_PANEL;
+ if (dpu_encoder_use_dsc_merge(enc_master->parent))
+ dsc_common_mode |= DSC_MODE_MULTIPLEX;
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 4c05fd5e9e..fe6b1d312a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -159,6 +159,13 @@ int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc);
/**
+ * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled
+ * for the encoder.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+bool dpu_encoder_is_dsc_enabled(const struct drm_encoder *drm_enc);
+
+/**
* dpu_encoder_get_crc_values_cnt - get number of physical encoders contained
* in virtual encoder that can collect CRC values
* @drm_enc: Pointer to previously created drm encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index eeb0acf966..f66e0f125a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -100,6 +100,7 @@ static void drm_mode_to_intf_timing_params(
}
timing->wide_bus_en = dpu_encoder_is_widebus_enabled(phys_enc->parent);
+ timing->compression_en = dpu_encoder_is_dsc_enabled(phys_enc->parent);
/*
* for DP, divide the horizonal parameters by 2 when
@@ -257,12 +258,14 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
+ mode.hskew >>= 1;
DPU_DEBUG_VIDENC(phys_enc,
- "split_role %d, halve horizontal %d %d %d %d\n",
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
- mode.hsync_start, mode.hsync_end);
+ mode.hsync_start, mode.hsync_end,
+ mode.hskew);
}
drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 86182c7346..e7b680a151 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -4,6 +4,9 @@
*/
#include <linux/delay.h>
+
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
@@ -680,14 +683,15 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
};
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer)
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer)
{
struct dpu_hw_ctl *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -702,8 +706,3 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
return c;
}
-
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
-{
- kfree(ctx);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 1c242298ff..279ebd8dfb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -274,20 +274,16 @@ static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
/**
* dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
* Should be called before accessing any ctl_path register.
+ * @dev: Corresponding device for devres management
* @cfg: ctl_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mixer_count: Number of mixers in @mixer
* @mixer: Pointer to an array of Layer Mixers defined in the catalog
*/
-struct dpu_hw_ctl *dpu_hw_ctl_init(const struct dpu_ctl_cfg *cfg,
- void __iomem *addr,
- u32 mixer_count,
- const struct dpu_lm_cfg *mixer);
-
-/**
- * dpu_hw_ctl_destroy(): Destroys ctl driver context
- * should be called to free the context
- */
-void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
+ const struct dpu_ctl_cfg *cfg,
+ void __iomem *addr,
+ u32 mixer_count,
+ const struct dpu_lm_cfg *mixer);
#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 509dbaa51d..5e9aad1b2a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -3,6 +3,8 @@
* Copyright (c) 2020-2022, Linaro Limited
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
@@ -188,12 +190,13 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -206,8 +209,3 @@ struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
return c;
}
-
-void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc)
-{
- kfree(dsc);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index d5b597ab8c..989c88d244 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -64,20 +64,24 @@ struct dpu_hw_dsc {
/**
* dpu_hw_dsc_init() - Initializes the DSC hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init(const struct dpu_dsc_cfg *cfg,
- void __iomem *addr);
+struct dpu_hw_dsc *dpu_hw_dsc_init(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
+ void __iomem *addr);
/**
* dpu_hw_dsc_init_1_2() - initializes the v1.2 DSC hw driver object
+ * @dev: Corresponding device for devres management
* @cfg: DSC catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Returns: Error code or allocated dpu_hw_dsc context
*/
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
index 24fe1d98eb..ba193b0376 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc_1_2.c
@@ -4,6 +4,8 @@
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include <drm/display/drm_dsc_helper.h>
#include "dpu_kms.h"
@@ -367,12 +369,13 @@ static void _setup_dcs_ops_1_2(struct dpu_hw_dsc_ops *ops,
ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk_1_2;
}
-struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(const struct dpu_dsc_cfg *cfg,
+struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
+ const struct dpu_dsc_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_dsc *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 9419b2209a..b1da88e293 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
@@ -68,15 +70,16 @@ static void _setup_dspp_ops(struct dpu_hw_dspp *c,
c->ops.setup_pcc = dpu_setup_dspp_pcc;
}
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_dspp *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -90,10 +93,3 @@ struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
return c;
}
-
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
-{
- kfree(dspp);
-}
-
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
index bea9656813..3b435690b6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -81,18 +81,14 @@ static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
/**
* dpu_hw_dspp_init() - Initializes the DSPP hw driver object.
* should be called once before accessing every DSPP.
+ * @dev: Corresponding device for devres management
* @cfg: DSPP catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: pointer to structure or ERR_PTR
*/
-struct dpu_hw_dspp *dpu_hw_dspp_init(const struct dpu_dspp_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_dspp_destroy(): Destroys DSPP driver context
- * @dspp: Pointer to DSPP driver context
- */
-void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+struct dpu_hw_dspp *dpu_hw_dspp_init(struct drm_device *dev,
+ const struct dpu_dspp_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 27b9373cd7..965692ef78 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -12,6 +12,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#define INTF_TIMING_ENGINE_EN 0x000
#define INTF_CONFIG 0x004
#define INTF_HSYNC_CTL 0x008
@@ -161,13 +163,8 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
- /*
- * DATA_HCTL_EN controls data timing which can be different from
- * video timing. It is recommended to enable it for all cases, except
- * if compression is enabled in 1 pixel per clock mode
- */
if (p->wide_bus_en)
- intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN | INTF_CFG2_DATA_HCTL_EN;
+ intf_cfg2 |= INTF_CFG2_DATABUS_WIDEN;
data_width = p->width;
@@ -227,6 +224,14 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
if (ctx->cap->features & BIT(DPU_DATA_HCTL_EN)) {
+ /*
+ * DATA_HCTL_EN controls data timing which can be different from
+ * video timing. It is recommended to enable it for all cases, except
+ * if compression is enabled in 1 pixel per clock mode
+ */
+ if (!(p->compression_en && !p->wide_bus_en))
+ intf_cfg2 |= INTF_CFG2_DATA_HCTL_EN;
+
DPU_REG_WRITE(c, INTF_CONFIG2, intf_cfg2);
DPU_REG_WRITE(c, INTF_DISPLAY_DATA_HCTL, display_data_hctl);
DPU_REG_WRITE(c, INTF_ACTIVE_DATA_HCTL, active_data_hctl);
@@ -527,8 +532,10 @@ static void dpu_hw_intf_program_intf_cmd_cfg(struct dpu_hw_intf *ctx,
DPU_REG_WRITE(&ctx->hw, INTF_CONFIG2, intf_cfg2);
}
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_intf *c;
@@ -537,7 +544,7 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -581,9 +588,3 @@ struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
return c;
}
-
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
-{
- kfree(intf);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 66a5603dc7..6f4c87244f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -33,6 +33,7 @@ struct dpu_hw_intf_timing_params {
u32 hsync_skew;
bool wide_bus_en;
+ bool compression_en;
};
struct dpu_hw_intf_prog_fetch {
@@ -131,17 +132,14 @@ struct dpu_hw_intf {
/**
* dpu_hw_intf_init() - Initializes the INTF driver for the passed
* interface catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: interface catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_intf *dpu_hw_intf_init(const struct dpu_intf_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_intf_destroy(): Destroys INTF driver context
- * @intf: Pointer to INTF driver context
- */
-void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+struct dpu_hw_intf *dpu_hw_intf_init(struct drm_device *dev,
+ const struct dpu_intf_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index a590c1f746..1d3ccf3228 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -4,6 +4,8 @@
* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_kms.h"
#include "dpu_hw_catalog.h"
#include "dpu_hwio.h"
@@ -156,8 +158,9 @@ static void _setup_mixer_ops(struct dpu_hw_lm_ops *ops,
ops->collect_misr = dpu_hw_lm_collect_misr;
}
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_mixer *c;
@@ -166,7 +169,7 @@ struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
return NULL;
}
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -180,8 +183,3 @@ struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
return c;
}
-
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
-{
- kfree(lm);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 98b77cda65..0a33817552 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -96,16 +96,12 @@ static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
/**
* dpu_hw_lm_init() - Initializes the mixer hw driver object.
* should be called once before accessing every mixer.
+ * @dev: Corresponding device for devres management
* @cfg: mixer catalog entry for which driver object is required
* @addr: mapped register io address of MDP
*/
-struct dpu_hw_mixer *dpu_hw_lm_init(const struct dpu_lm_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_lm_destroy(): Destroys layer mixer driver context
- * @lm: Pointer to LM driver context
- */
-void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+struct dpu_hw_mixer *dpu_hw_lm_init(struct drm_device *dev,
+ const struct dpu_lm_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
index 90e0e05eff..ddfa40a959 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.c
@@ -4,6 +4,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -37,12 +39,13 @@ static void _setup_merge_3d_ops(struct dpu_hw_merge_3d *c,
c->ops.setup_3d_mode = dpu_hw_merge_3d_setup_3d_mode;
};
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr)
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr)
{
struct dpu_hw_merge_3d *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -55,8 +58,3 @@ struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
return c;
}
-
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *hw)
-{
- kfree(hw);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
index 19cec5e887..c192f02ec1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
@@ -48,18 +48,13 @@ static inline struct dpu_hw_merge_3d *to_dpu_hw_merge_3d(struct dpu_hw_blk *hw)
/**
* dpu_hw_merge_3d_init() - Initializes the merge_3d driver for the passed
* merge3d catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* Return: Error code or allocated dpu_hw_merge_3d context
*/
-struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(const struct dpu_merge_3d_cfg *cfg,
- void __iomem *addr);
-
-/**
- * dpu_hw_merge_3d_destroy - destroys merge_3d driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_merge_3d_init
- */
-void dpu_hw_merge_3d_destroy(struct dpu_hw_merge_3d *pp);
+struct dpu_hw_merge_3d *dpu_hw_merge_3d_init(struct drm_device *dev,
+ const struct dpu_merge_3d_cfg *cfg,
+ void __iomem *addr);
#endif /*_DPU_HW_MERGE3D_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index 057cac7f5d..2db4c6fba3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -4,6 +4,8 @@
#include <linux/iopoll.h>
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -281,12 +283,14 @@ static int dpu_hw_pp_setup_dsc(struct dpu_hw_pingpong *pp)
return 0;
}
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_pingpong *c;
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -317,8 +321,3 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
return c;
}
-
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
-{
- kfree(pp);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 0d541ca5b0..a48b69fd79 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -121,19 +121,15 @@ static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
/**
* dpu_hw_pingpong_init() - initializes the pingpong driver for the passed
* pingpong catalog entry.
+ * @dev: Corresponding device for devres management
* @cfg: Pingpong catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_pingpong context
*/
-struct dpu_hw_pingpong *dpu_hw_pingpong_init(const struct dpu_pingpong_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_pingpong_destroy - destroys pingpong driver context
- * should be called to free the context
- * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
- */
-void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(struct drm_device *dev,
+ const struct dpu_pingpong_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 8e3c65989c..069bf429e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -11,6 +11,7 @@
#include "msm_mdss.h"
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -685,16 +686,18 @@ int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
}
#endif
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_sspp *hw_pipe;
if (!addr)
return ERR_PTR(-EINVAL);
- hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ hw_pipe = drmm_kzalloc(dev, sizeof(*hw_pipe), GFP_KERNEL);
if (!hw_pipe)
return ERR_PTR(-ENOMEM);
@@ -709,9 +712,3 @@ struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
return hw_pipe;
}
-
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx)
-{
- kfree(ctx);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index f93969fddb..3641ef6bef 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -339,21 +339,17 @@ struct dpu_kms;
/**
* dpu_hw_sspp_init() - Initializes the sspp hw driver object.
* Should be called once before accessing every pipe.
+ * @dev: Corresponding device for devres management
* @cfg: Pipe catalog entry for which driver object is required
* @addr: Mapped register io address of MDP
* @mdss_data: UBWC / MDSS configuration data
* @mdss_rev: dpu core's major and minor versions
*/
-struct dpu_hw_sspp *dpu_hw_sspp_init(const struct dpu_sspp_cfg *cfg,
- void __iomem *addr, const struct msm_mdss_data *mdss_data,
- const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_sspp_destroy(): Destroys SSPP driver context
- * should be called during Hw pipe cleanup.
- * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
- */
-void dpu_hw_sspp_destroy(struct dpu_hw_sspp *ctx);
+struct dpu_hw_sspp *dpu_hw_sspp_init(struct drm_device *dev,
+ const struct dpu_sspp_cfg *cfg,
+ void __iomem *addr,
+ const struct msm_mdss_data *mdss_data,
+ const struct dpu_mdss_version *mdss_rev);
int _dpu_hw_sspp_init_debugfs(struct dpu_hw_sspp *hw_pipe, struct dpu_kms *kms,
struct dentry *entry);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 24e734768a..05e48cf4ec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
@@ -247,16 +249,17 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
{
struct dpu_hw_mdp *mdp;
if (!addr)
return ERR_PTR(-EINVAL);
- mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ mdp = drmm_kzalloc(dev, sizeof(*mdp), GFP_KERNEL);
if (!mdp)
return ERR_PTR(-ENOMEM);
@@ -271,9 +274,3 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
return mdp;
}
-
-void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
-{
- kfree(mdp);
-}
-
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 8b1463d2b2..6f3dc98087 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -145,13 +145,15 @@ struct dpu_hw_mdp {
/**
* dpu_hw_mdptop_init - initializes the top driver for the passed config
+ * @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
* @m: Pointer to mdss catalog data
*/
-struct dpu_hw_mdp *dpu_hw_mdptop_init(const struct dpu_mdp_cfg *cfg,
- void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
+ const struct dpu_mdp_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index d49b3ef768..e75995f7fc 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -3,6 +3,8 @@
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved
*/
+#include <drm/drm_managed.h>
+
#include "dpu_hw_mdss.h"
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
@@ -211,15 +213,17 @@ static void _setup_wb_ops(struct dpu_hw_wb_ops *ops,
ops->setup_clk_force_ctrl = dpu_hw_wb_setup_clk_force_ctrl;
}
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev)
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_wb *c;
if (!addr)
return ERR_PTR(-EINVAL);
- c = kzalloc(sizeof(*c), GFP_KERNEL);
+ c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
@@ -233,8 +237,3 @@ struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
return c;
}
-
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb)
-{
- kfree(hw_wb);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
index 88792f450a..e671796ea3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
@@ -76,18 +76,15 @@ struct dpu_hw_wb {
/**
* dpu_hw_wb_init() - Initializes the writeback hw driver object.
+ * @dev: Corresponding device for devres management
* @cfg: wb_path catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* @mdss_rev: dpu core's major and minor versions
* Return: Error code or allocated dpu_hw_wb context
*/
-struct dpu_hw_wb *dpu_hw_wb_init(const struct dpu_wb_cfg *cfg,
- void __iomem *addr, const struct dpu_mdss_version *mdss_rev);
-
-/**
- * dpu_hw_wb_destroy(): Destroy writeback hw driver object.
- * @hw_wb: Pointer to writeback hw driver object
- */
-void dpu_hw_wb_destroy(struct dpu_hw_wb *hw_wb);
+struct dpu_hw_wb *dpu_hw_wb_init(struct drm_device *dev,
+ const struct dpu_wb_cfg *cfg,
+ void __iomem *addr,
+ const struct dpu_mdss_version *mdss_rev);
#endif /*_DPU_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index fe7267b3bf..81acbebace 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -385,6 +385,12 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
return 0;
}
+static void dpu_kms_global_obj_fini(struct dpu_kms *dpu_kms)
+{
+ drm_atomic_private_obj_fini(&dpu_kms->global_state);
+ drm_modeset_lock_fini(&dpu_kms->global_state_lock);
+}
+
static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
{
struct icc_path *path0;
@@ -822,14 +828,10 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
}
}
- if (dpu_kms->rm_init)
- dpu_rm_destroy(&dpu_kms->rm);
- dpu_kms->rm_init = false;
+ dpu_kms_global_obj_fini(dpu_kms);
dpu_kms->catalog = NULL;
- if (dpu_kms->hw_mdp)
- dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
dpu_kms->hw_mdp = NULL;
}
@@ -1104,15 +1106,14 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
+ rc = dpu_rm_init(dev, &dpu_kms->rm, dpu_kms->catalog, dpu_kms->mdss, dpu_kms->mmio);
if (rc) {
DPU_ERROR("rm init failed: %d\n", rc);
goto power_error;
}
- dpu_kms->rm_init = true;
-
- dpu_kms->hw_mdp = dpu_hw_mdptop_init(dpu_kms->catalog->mdp,
+ dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
+ dpu_kms->catalog->mdp,
dpu_kms->mmio,
dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_mdp)) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index f5473d4dea..9112a702a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -89,7 +89,6 @@ struct dpu_kms {
struct drm_private_obj global_state;
struct dpu_rm rm;
- bool rm_init;
struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
struct dpu_hw_mdp *hw_mdp;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 8759466e2f..0bb28cf4a6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -34,72 +34,8 @@ struct dpu_rm_requirements {
struct msm_display_topology topology;
};
-int dpu_rm_destroy(struct dpu_rm *rm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rm->dspp_blks); i++) {
- struct dpu_hw_dspp *hw;
-
- if (rm->dspp_blks[i]) {
- hw = to_dpu_hw_dspp(rm->dspp_blks[i]);
- dpu_hw_dspp_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->pingpong_blks); i++) {
- struct dpu_hw_pingpong *hw;
-
- if (rm->pingpong_blks[i]) {
- hw = to_dpu_hw_pingpong(rm->pingpong_blks[i]);
- dpu_hw_pingpong_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->merge_3d_blks); i++) {
- struct dpu_hw_merge_3d *hw;
-
- if (rm->merge_3d_blks[i]) {
- hw = to_dpu_hw_merge_3d(rm->merge_3d_blks[i]);
- dpu_hw_merge_3d_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->mixer_blks); i++) {
- struct dpu_hw_mixer *hw;
-
- if (rm->mixer_blks[i]) {
- hw = to_dpu_hw_mixer(rm->mixer_blks[i]);
- dpu_hw_lm_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->ctl_blks); i++) {
- struct dpu_hw_ctl *hw;
-
- if (rm->ctl_blks[i]) {
- hw = to_dpu_hw_ctl(rm->ctl_blks[i]);
- dpu_hw_ctl_destroy(hw);
- }
- }
- for (i = 0; i < ARRAY_SIZE(rm->hw_intf); i++)
- dpu_hw_intf_destroy(rm->hw_intf[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->dsc_blks); i++) {
- struct dpu_hw_dsc *hw;
-
- if (rm->dsc_blks[i]) {
- hw = to_dpu_hw_dsc(rm->dsc_blks[i]);
- dpu_hw_dsc_destroy(hw);
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_wb); i++)
- dpu_hw_wb_destroy(rm->hw_wb[i]);
-
- for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++)
- dpu_hw_sspp_destroy(rm->hw_sspp[i]);
-
- return 0;
-}
-
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio)
@@ -119,7 +55,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_mixer *hw;
const struct dpu_lm_cfg *lm = &cat->mixer[i];
- hw = dpu_hw_lm_init(lm, mmio);
+ hw = dpu_hw_lm_init(dev, lm, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed lm object creation: err %d\n", rc);
@@ -132,7 +68,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_merge_3d *hw;
const struct dpu_merge_3d_cfg *merge_3d = &cat->merge_3d[i];
- hw = dpu_hw_merge_3d_init(merge_3d, mmio);
+ hw = dpu_hw_merge_3d_init(dev, merge_3d, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed merge_3d object creation: err %d\n",
@@ -146,7 +82,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_pingpong *hw;
const struct dpu_pingpong_cfg *pp = &cat->pingpong[i];
- hw = dpu_hw_pingpong_init(pp, mmio, cat->mdss_ver);
+ hw = dpu_hw_pingpong_init(dev, pp, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed pingpong object creation: err %d\n",
@@ -162,7 +98,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_intf *hw;
const struct dpu_intf_cfg *intf = &cat->intf[i];
- hw = dpu_hw_intf_init(intf, mmio, cat->mdss_ver);
+ hw = dpu_hw_intf_init(dev, intf, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed intf object creation: err %d\n", rc);
@@ -175,7 +111,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_wb *hw;
const struct dpu_wb_cfg *wb = &cat->wb[i];
- hw = dpu_hw_wb_init(wb, mmio, cat->mdss_ver);
+ hw = dpu_hw_wb_init(dev, wb, mmio, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed wb object creation: err %d\n", rc);
@@ -188,7 +124,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
- hw = dpu_hw_ctl_init(ctl, mmio, cat->mixer_count, cat->mixer);
+ hw = dpu_hw_ctl_init(dev, ctl, mmio, cat->mixer_count, cat->mixer);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed ctl object creation: err %d\n", rc);
@@ -201,7 +137,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_dspp *hw;
const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
- hw = dpu_hw_dspp_init(dspp, mmio);
+ hw = dpu_hw_dspp_init(dev, dspp, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed dspp object creation: err %d\n", rc);
@@ -215,9 +151,9 @@ int dpu_rm_init(struct dpu_rm *rm,
const struct dpu_dsc_cfg *dsc = &cat->dsc[i];
if (test_bit(DPU_DSC_HW_REV_1_2, &dsc->features))
- hw = dpu_hw_dsc_init_1_2(dsc, mmio);
+ hw = dpu_hw_dsc_init_1_2(dev, dsc, mmio);
else
- hw = dpu_hw_dsc_init(dsc, mmio);
+ hw = dpu_hw_dsc_init(dev, dsc, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
@@ -231,7 +167,7 @@ int dpu_rm_init(struct dpu_rm *rm,
struct dpu_hw_sspp *hw;
const struct dpu_sspp_cfg *sspp = &cat->sspp[i];
- hw = dpu_hw_sspp_init(sspp, mmio, mdss_data, cat->mdss_ver);
+ hw = dpu_hw_sspp_init(dev, sspp, mmio, mdss_data, cat->mdss_ver);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed sspp object creation: err %d\n", rc);
@@ -243,8 +179,6 @@ int dpu_rm_init(struct dpu_rm *rm,
return 0;
fail:
- dpu_rm_destroy(rm);
-
return rc ? rc : -EFAULT;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 2b551566cb..36752d837b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -38,25 +38,20 @@ struct dpu_rm {
/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
+ * @dev: Corresponding device for devres management
* @rm: DPU Resource Manager handle
* @cat: Pointer to hardware catalog
* @mdss_data: Pointer to MDSS / UBWC configuration
* @mmio: mapped register io address of MDP
* @Return: 0 on Success otherwise -ERROR
*/
-int dpu_rm_init(struct dpu_rm *rm,
+int dpu_rm_init(struct drm_device *dev,
+ struct dpu_rm *rm,
const struct dpu_mdss_cfg *cat,
const struct msm_mdss_data *mdss_data,
void __iomem *mmio);
/**
- * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
- * @rm: DPU Resource Manager handle
- * @Return: 0 on Success otherwise -ERROR
- */
-int dpu_rm_destroy(struct dpu_rm *rm);
-
-/**
* dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
* the use connections and user requirements, specified through related
* topology control properties, and reserve hardware blocks to that
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
index 0d9fc741a7..932c9fd0b2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/client.h
@@ -11,6 +11,7 @@ struct nvkm_client {
u32 debug;
struct rb_root objroot;
+ spinlock_t obj_lock;
void *data;
int (*event)(u64 token, void *argv, u32 argc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 280d1d9a55..254d6c9ef2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1255,6 +1255,8 @@ out:
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 12feecf71e..6fb65b01d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -378,9 +378,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
dma_addr_t *dma_addrs;
struct nouveau_fence *fence;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
- dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -406,11 +406,11 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
for (i = 0; i < npages; i++)
dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kfree(dma_addrs);
+ kvfree(dma_addrs);
}
void
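
The dmem hunk above swaps kcalloc() for kvcalloc() so the per-chunk pfn and DMA-address arrays can fall back to vmalloc when npages is large, and adds __GFP_NOFAIL because eviction has no way to back out. A reduced sketch of the allocation/free pairing, with hypothetical array names and without the nofail flag:

#include <linux/mm.h>
#include <linux/slab.h>

static int alloc_pfn_arrays(unsigned long npages,
			    unsigned long **src, unsigned long **dst)
{
	/* kvcalloc() falls back to vmalloc() when the array is too large
	 * for kmalloc(); always pair it with kvfree(), never plain kfree(). */
	*src = kvcalloc(npages, sizeof(**src), GFP_KERNEL);
	*dst = kvcalloc(npages, sizeof(**dst), GFP_KERNEL);
	if (!*src || !*dst) {
		kvfree(*src);
		kvfree(*dst);
		return -ENOMEM;
	}
	return 0;
}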
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index a0d303e5ce..7b69e6df57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -758,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
if (unlikely(nouveau_cli_uvmm(cli)))
- return -ENOSYS;
+ return nouveau_abi16_put(abi16, -ENOSYS);
list_for_each_entry(temp, &abi16->channels, head) {
if (temp->chan->chid == req->channel) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c
index ebdeb8eb9e..c55662937a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/client.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c
@@ -180,6 +180,7 @@ nvkm_client_new(const char *name, u64 device, const char *cfg, const char *dbg,
client->device = device;
client->debug = nvkm_dbgopt(dbg, "CLIENT");
client->objroot = RB_ROOT;
+ spin_lock_init(&client->obj_lock);
client->event = event;
INIT_LIST_HEAD(&client->umem);
spin_lock_init(&client->lock);
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c
index 7c554c14e8..aea3ba7202 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/object.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c
@@ -30,8 +30,10 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
const struct nvkm_object_func *func)
{
struct nvkm_object *object;
+ unsigned long flags;
if (handle) {
+ spin_lock_irqsave(&client->obj_lock, flags);
struct rb_node *node = client->objroot.rb_node;
while (node) {
object = rb_entry(node, typeof(*object), node);
@@ -40,9 +42,12 @@ nvkm_object_search(struct nvkm_client *client, u64 handle,
else
if (handle > object->object)
node = node->rb_right;
- else
+ else {
+ spin_unlock_irqrestore(&client->obj_lock, flags);
goto done;
+ }
}
+ spin_unlock_irqrestore(&client->obj_lock, flags);
return ERR_PTR(-ENOENT);
} else {
object = &client->object;
@@ -57,30 +62,39 @@ done:
void
nvkm_object_remove(struct nvkm_object *object)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&object->client->obj_lock, flags);
if (!RB_EMPTY_NODE(&object->node))
rb_erase(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
}
bool
nvkm_object_insert(struct nvkm_object *object)
{
- struct rb_node **ptr = &object->client->objroot.rb_node;
+ struct rb_node **ptr;
struct rb_node *parent = NULL;
+ unsigned long flags;
+ spin_lock_irqsave(&object->client->obj_lock, flags);
+ ptr = &object->client->objroot.rb_node;
while (*ptr) {
struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
parent = *ptr;
- if (object->object < this->object)
+ if (object->object < this->object) {
ptr = &parent->rb_left;
- else
- if (object->object > this->object)
+ } else if (object->object > this->object) {
ptr = &parent->rb_right;
- else
+ } else {
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return false;
+ }
}
rb_link_node(&object->node, parent, ptr);
rb_insert_color(&object->node, &object->client->objroot);
+ spin_unlock_irqrestore(&object->client->obj_lock, flags);
return true;
}
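
The object.c hunks serialize all objroot accesses with the new client->obj_lock, taking the lock around the rbtree walk in search, insert and remove alike. A condensed sketch of the locked-lookup shape, with hypothetical obj/handle names:

#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	struct rb_node node;
	u64 handle;
};

static struct obj *obj_lookup(struct rb_root *root, spinlock_t *lock, u64 handle)
{
	struct rb_node *n;
	struct obj *o;
	unsigned long flags;

	/* Lookup and erase/insert must take the same lock, otherwise a
	 * concurrent rb_erase() can rebalance the tree under the walker. */
	spin_lock_irqsave(lock, flags);
	n = root->rb_node;
	while (n) {
		o = rb_entry(n, struct obj, node);
		if (handle < o->handle)
			n = n->rb_left;
		else if (handle > o->handle)
			n = n->rb_right;
		else {
			spin_unlock_irqrestore(lock, flags);
			return o;
		}
	}
	spin_unlock_irqrestore(lock, flags);
	return NULL;
}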
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 666eb93b17..11b4c9c274 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,7 +41,6 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
- rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index c4c0f08e92..bc08814954 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1871,6 +1871,8 @@ static int boe_panel_add(struct boe_panel *boe)
gpiod_set_value(boe->enable_gpio, 0);
+ boe->base.prepare_prev_first = true;
+
drm_panel_init(&boe->base, dev, &boe_panel_funcs,
DRM_MODE_CONNECTOR_DSI);
err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index cba5a93e60..70feee7876 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -413,8 +413,7 @@ static int panel_edp_unprepare(struct drm_panel *panel)
if (!p->prepared)
return 0;
- pm_runtime_mark_last_busy(panel->dev);
- ret = pm_runtime_put_autosuspend(panel->dev);
+ ret = pm_runtime_put_sync_suspend(panel->dev);
if (ret < 0)
return ret;
p->prepared = false;
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 927e5f42e9..3e48cbb522 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -813,7 +813,7 @@ int ni_init_microcode(struct radeon_device *rdev)
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
pr_err("ni_mc: Bogus length %zu in firmware \"%s\"\n",
- rdev->mc_fw->size, fw_name);
+ rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
}
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 6e5b922a12..345253e033 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -412,7 +412,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HBLANK_H, (value >> 8) & 0xFF);
- value = mode->hsync_start - mode->hdisplay;
+ value = mode->htotal - mode->hsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_L, value & 0xFF);
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_HDELAY_H, (value >> 8) & 0xFF);
@@ -427,7 +427,7 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
value = mode->vtotal - mode->vdisplay;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VBLANK, value & 0xFF);
- value = mode->vsync_start - mode->vdisplay;
+ value = mode->vtotal - mode->vsync_start;
hdmi_writeb(hdmi, HDMI_VIDEO_EXT_VDELAY, value & 0xFF);
value = mode->vsync_end - mode->vsync_start;
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f0f47e9abf..f2831d304e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -577,8 +577,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
ret = -EINVAL;
goto err_put_port;
} else if (ret) {
- DRM_DEV_ERROR(dev, "failed to find panel and bridge node\n");
- ret = -EPROBE_DEFER;
+ dev_err_probe(dev, ret, "failed to find panel and bridge node\n");
goto err_put_port;
}
if (lvds->panel)
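
The LVDS hunk above switches the panel/bridge lookup failure to dev_err_probe(), which stays silent for -EPROBE_DEFER, records the deferral reason for debugging, and prints dev_err() for real errors. A generic sketch of the helper in a probe path, built around a hypothetical GPIO lookup:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int foo_probe_gpio(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpiod;

	gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(gpiod))
		/* Quiet for -EPROBE_DEFER, logged otherwise; returns the error. */
		return dev_err_probe(dev, PTR_ERR(gpiod),
				     "failed to get enable GPIO\n");

	*out = gpiod;
	return 0;
}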
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 0a7a7e4ad8..9ee9289821 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -71,13 +71,19 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ /*
+ * It's perfectly valid to initialize an entity without having a valid
+ * scheduler attached. It's just not valid to use the scheduler before it
+ * is initialized itself.
+ */
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
- if (!sched_list[0]->sched_rq) {
- /* Warn drivers not to do this and to fix their DRM
- * calling order.
+ if (num_sched_list && !sched_list[0]->sched_rq) {
+ /* Since every entry covered by num_sched_list
+ * should be non-NULL and therefore we warn drivers
+ * not to do this and to fix their DRM calling order.
*/
pr_warn("%s: called with uninitialized scheduler\n", __func__);
} else if (num_sched_list) {
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index ef02d530f7..ae12d001a0 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -522,7 +522,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
dpaux->irq, err);
- return err;
+ goto err_pm_disable;
}
disable_irq(dpaux->irq);
@@ -542,7 +542,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
*/
err = tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_I2C);
if (err < 0)
- return err;
+ goto err_pm_disable;
#ifdef CONFIG_GENERIC_PINCONF
dpaux->desc.name = dev_name(&pdev->dev);
@@ -555,7 +555,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
if (IS_ERR(dpaux->pinctrl)) {
dev_err(&pdev->dev, "failed to register pincontrol\n");
- return PTR_ERR(dpaux->pinctrl);
+ err = PTR_ERR(dpaux->pinctrl);
+ goto err_pm_disable;
}
#endif
/* enable and clear all interrupts */
@@ -571,10 +572,15 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
err = devm_of_dp_aux_populate_ep_devices(&dpaux->aux);
if (err < 0) {
dev_err(dpaux->dev, "failed to populate AUX bus: %d\n", err);
- return err;
+ goto err_pm_disable;
}
return 0;
+
+err_pm_disable:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return err;
}
static void tegra_dpaux_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index fbfe92a816..db606e151a 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1544,9 +1544,11 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
if (np) {
struct platform_device *gangster = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!gangster)
+ return -EPROBE_DEFER;
dsi->slave = platform_get_drvdata(gangster);
- of_node_put(np);
if (!dsi->slave) {
put_device(&gangster->dev);
@@ -1594,44 +1596,58 @@ static int tegra_dsi_probe(struct platform_device *pdev)
if (!pdev->dev.pm_domain) {
dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
- if (IS_ERR(dsi->rst))
- return PTR_ERR(dsi->rst);
+ if (IS_ERR(dsi->rst)) {
+ err = PTR_ERR(dsi->rst);
+ goto remove;
+ }
}
dsi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(dsi->clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
- "cannot get DSI clock\n");
+ if (IS_ERR(dsi->clk)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk),
+ "cannot get DSI clock\n");
+ goto remove;
+ }
dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
- if (IS_ERR(dsi->clk_lp))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
- "cannot get low-power clock\n");
+ if (IS_ERR(dsi->clk_lp)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_lp),
+ "cannot get low-power clock\n");
+ goto remove;
+ }
dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
- if (IS_ERR(dsi->clk_parent))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
- "cannot get parent clock\n");
+ if (IS_ERR(dsi->clk_parent)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->clk_parent),
+ "cannot get parent clock\n");
+ goto remove;
+ }
dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
- if (IS_ERR(dsi->vdd))
- return dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
- "cannot get VDD supply\n");
+ if (IS_ERR(dsi->vdd)) {
+ err = dev_err_probe(&pdev->dev, PTR_ERR(dsi->vdd),
+ "cannot get VDD supply\n");
+ goto remove;
+ }
err = tegra_dsi_setup_clocks(dsi);
if (err < 0) {
dev_err(&pdev->dev, "cannot setup clocks\n");
- return err;
+ goto remove;
}
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(dsi->regs))
- return PTR_ERR(dsi->regs);
+ if (IS_ERR(dsi->regs)) {
+ err = PTR_ERR(dsi->regs);
+ goto remove;
+ }
dsi->mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
- if (IS_ERR(dsi->mipi))
- return PTR_ERR(dsi->mipi);
+ if (IS_ERR(dsi->mipi)) {
+ err = PTR_ERR(dsi->mipi);
+ goto remove;
+ }
dsi->host.ops = &tegra_dsi_host_ops;
dsi->host.dev = &pdev->dev;
@@ -1659,9 +1675,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
return 0;
unregister:
+ pm_runtime_disable(&pdev->dev);
mipi_dsi_host_unregister(&dsi->host);
mipi_free:
tegra_mipi_free(dsi->mipi);
+remove:
+ tegra_output_remove(&dsi->output);
return err;
}
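
The DSI probe above now routes every early failure through a remove label so that tegra_output_remove() undoes the earlier tegra_output_probe() instead of returning directly and leaking the output. A stripped-down sketch of that unwind pattern, with hypothetical foo_* helpers standing in for the paired setup/teardown:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo { struct clk *clk; };

int foo_output_probe(struct foo *priv);		/* hypothetical paired helpers */
void foo_output_remove(struct foo *priv);
int foo_register(struct foo *priv);

static int foo_probe(struct platform_device *pdev)
{
	struct foo *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	err = foo_output_probe(priv);
	if (err)
		return err;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		err = PTR_ERR(priv->clk);
		goto remove;		/* undo foo_output_probe() */
	}

	err = foo_register(priv);
	if (err)
		goto remove;

	return 0;

remove:
	foo_output_remove(priv);
	return err;
}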
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a719af1dc9..4617075369 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -159,6 +159,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
if (gem->size < size) {
err = -EINVAL;
+ drm_gem_object_put(gem);
goto unreference;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 0ba3ca3ac5..4cfa4df214 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1855,12 +1855,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
return err;
hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
+ if (IS_ERR(hdmi->regs)) {
+ err = PTR_ERR(hdmi->regs);
+ goto remove;
+ }
err = platform_get_irq(pdev, 0);
if (err < 0)
- return err;
+ goto remove;
hdmi->irq = err;
@@ -1869,18 +1871,18 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
hdmi->irq, err);
- return err;
+ goto remove;
}
platform_set_drvdata(pdev, hdmi);
err = devm_pm_runtime_enable(&pdev->dev);
if (err)
- return err;
+ goto remove;
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
if (err)
- return err;
+ goto remove;
INIT_LIST_HEAD(&hdmi->client.list);
hdmi->client.ops = &hdmi_client_ops;
@@ -1890,10 +1892,14 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto remove;
}
return 0;
+
+remove:
+ tegra_output_remove(&hdmi->output);
+ return err;
}
static void tegra_hdmi_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index dc2dcb5ca1..d7d2389ac2 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -142,8 +142,10 @@ int tegra_output_probe(struct tegra_output *output)
GPIOD_IN,
"HDMI hotplug detect");
if (IS_ERR(output->hpd_gpio)) {
- if (PTR_ERR(output->hpd_gpio) != -ENOENT)
- return PTR_ERR(output->hpd_gpio);
+ if (PTR_ERR(output->hpd_gpio) != -ENOENT) {
+ err = PTR_ERR(output->hpd_gpio);
+ goto put_i2c;
+ }
output->hpd_gpio = NULL;
}
@@ -152,7 +154,7 @@ int tegra_output_probe(struct tegra_output *output)
err = gpiod_to_irq(output->hpd_gpio);
if (err < 0) {
dev_err(output->dev, "gpiod_to_irq(): %d\n", err);
- return err;
+ goto put_i2c;
}
output->hpd_irq = err;
@@ -165,7 +167,7 @@ int tegra_output_probe(struct tegra_output *output)
if (err < 0) {
dev_err(output->dev, "failed to request IRQ#%u: %d\n",
output->hpd_irq, err);
- return err;
+ goto put_i2c;
}
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
@@ -179,6 +181,12 @@ int tegra_output_probe(struct tegra_output *output)
}
return 0;
+
+put_i2c:
+ if (output->ddc)
+ i2c_put_adapter(output->ddc);
+
+ return err;
}
void tegra_output_remove(struct tegra_output *output)
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index fc66bbd913..1e8ec50b75 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -225,26 +225,28 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
- return PTR_ERR(rgb->clk);
+ err = PTR_ERR(rgb->clk);
+ goto remove;
}
rgb->clk_parent = devm_clk_get(dc->dev, "parent");
if (IS_ERR(rgb->clk_parent)) {
dev_err(dc->dev, "failed to get parent clock\n");
- return PTR_ERR(rgb->clk_parent);
+ err = PTR_ERR(rgb->clk_parent);
+ goto remove;
}
err = clk_set_parent(rgb->clk, rgb->clk_parent);
if (err < 0) {
dev_err(dc->dev, "failed to set parent clock: %d\n", err);
- return err;
+ goto remove;
}
rgb->pll_d_out0 = clk_get_sys(NULL, "pll_d_out0");
if (IS_ERR(rgb->pll_d_out0)) {
err = PTR_ERR(rgb->pll_d_out0);
dev_err(dc->dev, "failed to get pll_d_out0: %d\n", err);
- return err;
+ goto remove;
}
if (dc->soc->has_pll_d2_out0) {
@@ -252,13 +254,19 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
if (IS_ERR(rgb->pll_d2_out0)) {
err = PTR_ERR(rgb->pll_d2_out0);
dev_err(dc->dev, "failed to get pll_d2_out0: %d\n", err);
- return err;
+ goto put_pll;
}
}
dc->rgb = &rgb->output;
return 0;
+
+put_pll:
+ clk_put(rgb->pll_d_out0);
+remove:
+ tegra_output_remove(&rgb->output);
+ return err;
}
void tegra_dc_rgb_remove(struct tegra_dc *dc)
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 7c78c074e3..1baa4ace12 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -269,6 +269,16 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
reinit_completion(&tcrtc->framedone_completion);
+ /*
+ * If a layer is left enabled when the videoport is disabled, and the
+ * vid pipeline that was used for the layer is taken into use on
+ * another videoport, the DSS will report sync lost issues. Disable all
+ * the layers here as a work-around.
+ */
+ for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ false);
+
dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index e1c0ef0c38..68fed531f6 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -213,7 +213,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
- drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ drm_plane_create_zpos_property(&tplane->plane, tidss->num_planes, 0,
num_planes - 1);
ret = drm_plane_create_color_properties(&tplane->plane,
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fd9fd3d151..0b3f426713 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -294,7 +294,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
enum ttm_caching caching;
man = ttm_manager_type(bo->bdev, res->mem_type);
- caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+ if (man->use_tt) {
+ caching = bo->ttm->caching;
+ if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+ } else {
+ caching = res->bus.caching;
+ }
return ttm_prot_from_caching(caching, tmp);
}
@@ -337,6 +343,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
.no_wait_gpu = false
};
struct ttm_tt *ttm = bo->ttm;
+ struct ttm_resource_manager *man =
+ ttm_manager_type(bo->bdev, bo->resource->mem_type);
pgprot_t prot;
int ret;
@@ -346,7 +354,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && ttm->caching == ttm_cached) {
+ if (num_pages == 1 && ttm->caching == ttm_cached &&
+ !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index e0a77671ed..43eaffa7fa 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -31,11 +31,14 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/cc_platform.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
+#include <drm/drm_device.h>
+#include <drm/drm_util.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -60,6 +63,7 @@ static atomic_long_t ttm_dma32_pages_allocated;
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
struct ttm_device *bdev = bo->bdev;
+ struct drm_device *ddev = bo->base.dev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -81,6 +85,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
pr_err("Illegal buffer object type\n");
return -EINVAL;
}
+ /*
+ * When using dma_alloc_coherent with memory encryption the
+ * mapped TT pages need to be decrypted or otherwise the drivers
+ * will end up sending encrypted mem to the gpu.
+ */
+ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ page_flags |= TTM_TT_FLAG_DECRYPTED;
+ drm_info(ddev, "TT memory decryption enabled.");
+ }
bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
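
The ttm_tt hunk above marks TT pages as decrypted when the page pool uses dma_alloc_coherent() and the platform runs with guest memory encryption, since coherent DMA memory is shared with the device and must not be mapped encrypted on the CPU side. A minimal sketch of just the detection step, with a hypothetical helper name:

#include <linux/cc_platform.h>

/* Decide whether DMA-able pages need decrypted CPU mappings: only when the
 * pool hands out coherent DMA memory inside an encrypted guest. */
static bool foo_needs_decrypted_mappings(bool use_dma_alloc)
{
	return use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
}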
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 25c9c71256..4626fe9aac 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -508,7 +508,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
- return -ENODEV;
+ return 0;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 3c99fb8b54..e7441b227b 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -123,6 +123,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
enum lut_channel channel)
{
s64 lut_index = get_lut_index(lut, channel_value);
+ u16 *floor_lut_value, *ceil_lut_value;
+ u16 floor_channel_value, ceil_channel_value;
/*
* This checks if `struct drm_color_lut` has any gap added by the compiler
@@ -130,11 +132,15 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
*/
static_assert(sizeof(struct drm_color_lut) == sizeof(__u16) * 4);
- u16 *floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
- u16 *ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
+ floor_lut_value = (__u16 *)&lut->base[drm_fixp2int(lut_index)];
+ if (drm_fixp2int(lut_index) == (lut->lut_length - 1))
+ /* We're at the end of the LUT array, use same value for ceil and floor */
+ ceil_lut_value = floor_lut_value;
+ else
+ ceil_lut_value = (__u16 *)&lut->base[drm_fixp2int_ceil(lut_index)];
- u16 floor_channel_value = floor_lut_value[channel];
- u16 ceil_channel_value = ceil_lut_value[channel];
+ floor_channel_value = floor_lut_value[channel];
+ ceil_channel_value = ceil_lut_value[channel];
return lerp_u16(floor_channel_value, ceil_channel_value,
lut_index & DRM_FIXED_DECIMAL_MASK);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index d3e308fdfd..c7d90f96d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1444,12 +1444,15 @@ static void vmw_debugfs_resource_managers_init(struct vmw_private *vmw)
root, "system_ttm");
ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, TTM_PL_VRAM),
root, "vram_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
- root, "gmr_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
- root, "mob_ttm");
- ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
- root, "system_mob_ttm");
+ if (vmw->has_gmr)
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_GMR),
+ root, "gmr_ttm");
+ if (vmw->has_mob) {
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_MOB),
+ root, "mob_ttm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&vmw->bdev, VMW_PL_SYSTEM),
+ root, "system_mob_ttm");
+ }
}
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 36987ef3fc..5fef0b31c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -447,7 +447,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
vmw_res_type(ctx) == vmw_res_dx_context) {
for (i = 0; i < cotable_max; ++i) {
res = vmw_context_cotable(ctx, i);
- if (IS_ERR(res))
+ if (IS_ERR_OR_NULL(res))
continue;
ret = vmw_execbuf_res_val_add(sw_context, res,
@@ -1266,6 +1266,8 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
+ if (IS_ERR_OR_NULL(cotable_res))
+ return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
@@ -2484,6 +2486,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
return ret;
res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->defined_id);
if (unlikely(ret != 0))
return ret;
@@ -2569,8 +2573,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cotable_notify(res, cmd->defined_id);
@@ -2689,6 +2693,8 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
return -EINVAL;
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.shaderId);
if (ret)
return ret;
@@ -3010,6 +3016,8 @@ static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
}
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
+ if (IS_ERR_OR_NULL(res))
+ return res ? PTR_ERR(res) : -EINVAL;
ret = vmw_cotable_notify(res, cmd->body.soid);
if (ret)
return ret;
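
The execbuf hunks above guard vmw_context_cotable() results with IS_ERR_OR_NULL(), since the helper can return NULL as well as an ERR_PTR(), and a plain IS_ERR() check would pass NULL through to the notify call. A condensed sketch of that guard, with a hypothetical getter:

#include <linux/err.h>

struct res;
struct res *get_cotable(int idx);	/* hypothetical: may return ERR_PTR() or NULL */

static int use_cotable(int idx)
{
	struct res *res = get_cotable(idx);

	/* NULL is not an ERR_PTR(), so IS_ERR() alone would let it through. */
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;

	return 0;
}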
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ceb4d3d3b9..a0b47c9b33 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -64,8 +64,11 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
ttm_resource_init(bo, place, *res);
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ttm_resource_fini(man, *res);
+ kfree(*res);
return id;
+ }
spin_lock(&gman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 818b7f109f..5681a1b42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -184,13 +184,12 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
- return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
+ return vmw_bo_map_and_cache(vps->bo);
return NULL;
}
@@ -272,6 +271,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
u32 i;
u32 cursor_max_dim, mob_max_size;
+ struct vmw_fence_obj *fence = NULL;
int ret;
if (!dev_priv->has_mob ||
@@ -313,7 +313,15 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
if (ret != 0)
goto teardown;
- vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
+ ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ if (ret != 0) {
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
+ goto teardown;
+ }
+
+ dma_fence_wait(&fence->base, false);
+ dma_fence_put(&fence->base);
+
ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
@@ -643,22 +651,12 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- bool is_iomem;
if (vps->surf_mapped) {
vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
- const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->tbo);
- }
- }
-
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
@@ -694,6 +692,10 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
int ret = 0;
if (vps->surf) {
+ if (vps->surf_mapped) {
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
+ vps->surf_mapped = false;
+ }
vmw_surface_unreference(&vps->surf);
vps->surf = NULL;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2530fa98b5..ce449da08e 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -35,6 +35,8 @@ static int sensor_mask_override = -1;
module_param_named(sensor_mask, sensor_mask_override, int, 0444);
MODULE_PARM_DESC(sensor_mask, "override the detected sensors mask");
+static bool intr_disable = true;
+
static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts)
{
union cmd_response cmd_resp;
@@ -55,7 +57,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -73,7 +75,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -87,7 +89,7 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
- cmd_base.cmd_v2.intr_disable = 1;
+ cmd_base.cmd_v2.intr_disable = intr_disable;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
@@ -292,6 +294,26 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static int mp2_disable_intr(const struct dmi_system_id *id)
+{
+ intr_disable = false;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_sfh_table[] = {
+ {
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=218104
+ */
+ .callback = mp2_disable_intr,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook x360 435 G7"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_nodevs[] = {
{
/*
@@ -315,6 +337,8 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (dmi_first_match(dmi_nodevs))
return -ENODEV;
+ dmi_check_system(dmi_sfh_table);
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
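
The amd_sfh change above keeps intr_disable as the default and only flips it for one DMI-matched laptop found in probe. As a rough, self-contained sketch of that quirk-table shape (plain C string matching standing in for the kernel's DMI infrastructure; the struct and helper names here are invented, not the dmi_check_system() API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool intr_disable = true;   /* default, as in the driver */

struct quirk {
    const char *vendor;
    const char *product;
    int (*callback)(const struct quirk *q);
};

static int disable_intr_quirk(const struct quirk *q)
{
    (void)q;
    intr_disable = false;          /* quirk: this model needs interrupts left on */
    return 0;
}

static const struct quirk quirk_table[] = {
    { "HP", "HP ProBook x360 435 G7", disable_intr_quirk },
    { NULL, NULL, NULL }           /* sentinel, like the trailing {} entry */
};

static void check_quirks(const char *vendor, const char *product)
{
    for (const struct quirk *q = quirk_table; q->vendor; q++)
        if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
            q->callback(q);
}

int main(void)
{
    check_quirks("HP", "HP ProBook x360 435 G7");
    printf("intr_disable = %d\n", intr_disable);
    return 0;
}
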
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index 70add75fc5..05e400a4a8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -90,10 +90,10 @@ enum mem_use_type {
struct hpd_status {
union {
struct {
- u32 human_presence_report : 4;
- u32 human_presence_actual : 4;
- u32 probablity : 8;
u32 object_distance : 16;
+ u32 probablity : 8;
+ u32 human_presence_actual : 4;
+ u32 human_presence_report : 4;
} shpd;
u32 val;
};
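
The amd_sfh_pcie.h hunk appears to only reorder the hpd_status bitfields so that object_distance lands in the low 16 bits of the register word on the little-endian machines this driver targets. A small, self-contained sketch of how the union decodes one 32-bit value (field names taken from the struct above, the register value is invented, and the layout assumes the usual little-endian GCC bitfield ordering):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the fixed layout: object_distance now sits in bits 15:0. */
struct hpd_status {
    union {
        struct {
            uint32_t object_distance : 16;
            uint32_t probablity : 8;            /* spelling kept from the driver */
            uint32_t human_presence_actual : 4;
            uint32_t human_presence_report : 4;
        } shpd;
        uint32_t val;
    };
};

int main(void)
{
    struct hpd_status s;

    /* Invented register value: distance 0x1234, probability 0x40,
     * actual 0x3, report 0x1. */
    s.val = (0x1u << 28) | (0x3u << 24) | (0x40u << 16) | 0x1234u;

    printf("distance=0x%04x probability=0x%02x actual=%u report=%u\n",
           s.shpd.object_distance, s.shpd.probablity,
           s.shpd.human_presence_actual, s.shpd.human_presence_report);
    return 0;
}
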
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 149a3c7434..f86c1ea83a 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -54,10 +54,10 @@ struct lenovo_drvdata {
/* 0: Up
* 1: Down (undecided)
* 2: Scrolling
- * 3: Patched firmware, disable workaround
*/
u8 middlebutton_state;
bool fn_lock;
+ bool middleclick_workaround_cptkbd;
};
#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
@@ -621,6 +621,36 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev,
return count;
}
+static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ cptkbd_data->middleclick_workaround_cptkbd);
+}
+
+static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+ struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
+ int value;
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ cptkbd_data->middleclick_workaround_cptkbd = !!value;
+
+ return count;
+}
+
static struct device_attribute dev_attr_fn_lock =
__ATTR(fn_lock, S_IWUSR | S_IRUGO,
@@ -632,10 +662,16 @@ static struct device_attribute dev_attr_sensitivity_cptkbd =
attr_sensitivity_show_cptkbd,
attr_sensitivity_store_cptkbd);
+static struct device_attribute dev_attr_middleclick_workaround_cptkbd =
+ __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO,
+ attr_middleclick_workaround_show_cptkbd,
+ attr_middleclick_workaround_store_cptkbd);
+
static struct attribute *lenovo_attributes_cptkbd[] = {
&dev_attr_fn_lock.attr,
&dev_attr_sensitivity_cptkbd.attr,
+ &dev_attr_middleclick_workaround_cptkbd.attr,
NULL
};
@@ -686,23 +722,7 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
{
struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev);
- if (cptkbd_data->middlebutton_state != 3) {
- /* REL_X and REL_Y events during middle button pressed
- * are only possible on patched, bug-free firmware
- * so set middlebutton_state to 3
- * to never apply workaround anymore
- */
- if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD &&
- cptkbd_data->middlebutton_state == 1 &&
- usage->type == EV_REL &&
- (usage->code == REL_X || usage->code == REL_Y)) {
- cptkbd_data->middlebutton_state = 3;
- /* send middle button press which was hold before */
- input_event(field->hidinput->input,
- EV_KEY, BTN_MIDDLE, 1);
- input_sync(field->hidinput->input);
- }
-
+ if (cptkbd_data->middleclick_workaround_cptkbd) {
/* "wheel" scroll events */
if (usage->type == EV_REL && (usage->code == REL_WHEEL ||
usage->code == REL_HWHEEL)) {
@@ -1166,6 +1186,7 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev)
cptkbd_data->middlebutton_state = 0;
cptkbd_data->fn_lock = true;
cptkbd_data->sensitivity = 0x05;
+ cptkbd_data->middleclick_workaround_cptkbd = true;
lenovo_features_set_cptkbd(hdev);
ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd);
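
The new middleclick_workaround attribute follows the common sysfs show/store shape: print the flag, and on store parse an integer, accept only 0 or 1, and collapse it with !!value. A hedged userspace analogue of just that parsing step (strtol standing in for kstrtoint; this is not the sysfs API itself):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a "0"/"1" attribute write; returns 0 on success, -EINVAL otherwise. */
static int store_bool(const char *buf, bool *out)
{
    char *end;
    long value;

    errno = 0;
    value = strtol(buf, &end, 10);
    if (errno || end == buf)
        return -EINVAL;
    if (value < 0 || value > 1)
        return -EINVAL;

    *out = !!value;    /* same !!value collapse as the driver */
    return 0;
}

int main(void)
{
    bool workaround = true;

    printf("store \"0\": %d -> %d\n", store_bool("0", &workaround), workaround);
    printf("store \"7\": %d -> %d\n", store_bool("7", &workaround), workaround);
    return 0;
}
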
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 6ef0c88e3e..d2f3f234f2 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -203,6 +203,8 @@ struct hidpp_device {
struct hidpp_scroll_counter vertical_wheel_counter;
u8 wireless_feature_index;
+
+ bool connected_once;
};
/* HID++ 1.0 error codes */
@@ -988,8 +990,13 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
hidpp->protocol_minor = response.rap.params[1];
print_version:
- hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
- hidpp->protocol_major, hidpp->protocol_minor);
+ if (!hidpp->connected_once) {
+ hid_info(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
+ hidpp->connected_once = true;
+ } else
+ hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
+ hidpp->protocol_major, hidpp->protocol_minor);
return 0;
}
@@ -4184,7 +4191,7 @@ static void hidpp_connect_event(struct work_struct *work)
/* Get device version to check if it is connected */
ret = hidpp_root_get_protocol_version(hidpp);
if (ret) {
- hid_info(hidpp->hid_dev, "Disconnected\n");
+ hid_dbg(hidpp->hid_dev, "Disconnected\n");
if (hidpp->battery.ps) {
hidpp->battery.online = false;
hidpp->battery.status = POWER_SUPPLY_STATUS_UNKNOWN;
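
The connected_once flag added above implements a simple "log at info once, then at debug" policy so repeated reconnects of the same receiver stop filling the log. A minimal sketch of that policy, with printf prefixes standing in for hid_info()/hid_dbg():

#include <stdbool.h>
#include <stdio.h>

struct device_state {
    bool connected_once;
};

static void report_connected(struct device_state *d, int major, int minor)
{
    if (!d->connected_once) {
        printf("info:  HID++ %d.%d device connected.\n", major, minor);
        d->connected_once = true;
    } else {
        printf("debug: HID++ %d.%d device connected.\n", major, minor);
    }
}

int main(void)
{
    struct device_state dev = { .connected_once = false };

    report_connected(&dev, 4, 5);   /* first connect: info level */
    report_connected(&dev, 4, 5);   /* later reconnects: debug level */
    return 0;
}
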
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index fd5b0637da..3e91e4d6ba 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2153,6 +2153,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_SYNAPTICS, 0xcddc) },
+
+ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_SYNAPTICS, 0xce08) },
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 00242107d6..862c47b191 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -16,6 +16,7 @@ config HYPERV
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
depends on X86_64 && HYPERV
+ depends on SMP
default n
help
Virtual Secure Mode (VSM) is a set of hypervisor capabilities and
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 2a7a4b6b00..9b02b304c2 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -934,10 +934,21 @@ static const struct i2c_device_id amc6821_id[] = {
MODULE_DEVICE_TABLE(i2c, amc6821_id);
+static const struct of_device_id __maybe_unused amc6821_of_match[] = {
+ {
+ .compatible = "ti,amc6821",
+ .data = (void *)amc6821,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, amc6821_of_match);
+
static struct i2c_driver amc6821_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "amc6821",
+ .of_match_table = of_match_ptr(amc6821_of_match),
},
.probe = amc6821_probe,
.id_table = amc6821_id,
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 9fabe00a40..4b80026db1 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -441,8 +441,26 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
}
}
+/*
+ * Helper function to call source_ops(csdev)->disable and also disable the
+ * helpers.
+ *
+ * There is an imbalance between coresight_enable_path() and
+ * coresight_disable_path(). Enabling also enables the source's helpers as part
+ * of the path, but disabling always skips the first item in the path (which is
+ * the source), so sources and their helpers don't get disabled as part of that
+ * function and we need the extra step here.
+ */
+void coresight_disable_source(struct coresight_device *csdev, void *data)
+{
+ if (source_ops(csdev)->disable)
+ source_ops(csdev)->disable(csdev, data);
+ coresight_disable_helpers(csdev);
+}
+EXPORT_SYMBOL_GPL(coresight_disable_source);
+
/**
- * coresight_disable_source - Drop the reference count by 1 and disable
+ * coresight_disable_source_sysfs - Drop the reference count by 1 and disable
* the device if there are no users left.
*
* @csdev: The coresight device to disable
@@ -451,17 +469,15 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
*
* Returns true if the device has been disabled.
*/
-bool coresight_disable_source(struct coresight_device *csdev, void *data)
+static bool coresight_disable_source_sysfs(struct coresight_device *csdev,
+ void *data)
{
if (atomic_dec_return(&csdev->refcnt) == 0) {
- if (source_ops(csdev)->disable)
- source_ops(csdev)->disable(csdev, data);
- coresight_disable_helpers(csdev);
+ coresight_disable_source(csdev, data);
csdev->enable = false;
}
return !csdev->enable;
}
-EXPORT_SYMBOL_GPL(coresight_disable_source);
/*
* coresight_disable_path_from : Disable components in the given path beyond
@@ -1202,7 +1218,7 @@ void coresight_disable(struct coresight_device *csdev)
if (ret)
goto out;
- if (!csdev->enable || !coresight_disable_source(csdev, NULL))
+ if (!csdev->enable || !coresight_disable_source_sysfs(csdev, NULL))
goto out;
switch (csdev->subtype.source_subtype) {
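
The coresight-core change splits the old exported function in two: an unconditional helper that disables the source and its helpers, and a sysfs-only wrapper that keeps the reference counting. A self-contained sketch of that split (stand-in types and names; C11 atomics playing the role of atomic_dec_return()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct source {
    atomic_int refcnt;
    bool enabled;
};

/* Analogue of the new unconditional coresight_disable_source(): disable the
 * source and its helpers with no reference counting. */
static void source_disable(struct source *s)
{
    (void)s;
    printf("disabling source and helpers\n");
}

/* Analogue of coresight_disable_source_sysfs(): only the last user actually
 * disables, mirroring atomic_dec_return(&csdev->refcnt) == 0. */
static bool source_disable_sysfs(struct source *s)
{
    if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
        source_disable(s);
        s->enabled = false;
    }
    return !s->enabled;
}

int main(void)
{
    struct source s = { .enabled = true };

    atomic_init(&s.refcnt, 2);
    printf("first put: disabled=%d\n", source_disable_sysfs(&s));
    printf("second put: disabled=%d\n", source_disable_sysfs(&s));
    return 0;
}
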
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 89e8ed214e..58b32b399f 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -587,7 +587,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- source_ops(csdev)->disable(csdev, event);
+ coresight_disable_source(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 34aee59dd1..18c4544f60 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1160,6 +1160,7 @@ static void etm4_init_arch_data(void *info)
struct etm4_init_arg *init_arg = info;
struct etmv4_drvdata *drvdata;
struct csdev_access *csa;
+ struct device *dev = init_arg->dev;
int i;
drvdata = dev_get_drvdata(init_arg->dev);
@@ -1173,6 +1174,10 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ if (!csa->io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Detect the support for OS Lock before we actually use it */
etm_detect_os_lock(drvdata, csa);
@@ -1998,11 +2003,6 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
if (!drvdata->arch)
return -EINVAL;
- /* TRCPDCR is not accessible with system instructions. */
- if (!desc.access.io_mem ||
- fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 767076e079..30c051055e 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -233,6 +233,6 @@ void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);
int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
void *data);
-bool coresight_disable_source(struct coresight_device *csdev, void *data);
+void coresight_disable_source(struct coresight_device *csdev, void *data);
#endif
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index a991ecb751..24a1f7797a 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -995,6 +995,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
int ret;
u32 val;
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ return -ENOENT;
+
if (event->cpu < 0) {
dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
return -EOPNOTSUPP;
@@ -1003,9 +1006,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
if (event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
- return -ENOENT;
-
ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
if (ret < 0)
return ret;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 6a5a93cf4e..643a816883 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1414,7 +1414,6 @@ static void i801_add_mux(struct i801_priv *priv)
lookup->table[i] = GPIO_LOOKUP(mux_config->gpio_chip,
mux_config->gpios[i], "mux", 0);
gpiod_add_lookup_table(lookup);
- priv->lookup = lookup;
/*
* Register the mux device, we use PLATFORM_DEVID_NONE here
@@ -1428,7 +1427,10 @@ static void i801_add_mux(struct i801_priv *priv)
sizeof(struct i2c_mux_gpio_platform_data));
if (IS_ERR(priv->mux_pdev)) {
gpiod_remove_lookup_table(lookup);
+ devm_kfree(dev, lookup);
dev_err(dev, "Failed to register i2c-mux-gpio device\n");
+ } else {
+ priv->lookup = lookup;
}
}
@@ -1740,9 +1742,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
i801_enable_host_notify(&priv->adapter);
- i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
+ i801_probe_optional_slaves(priv);
pci_set_drvdata(dev, priv);
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index ef5751e91c..276153e10f 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1163,8 +1163,10 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
global = reg == 0xffffffff;
reg &= ~BIT(idx);
} else {
- global = reg == 0;
+ bool hj_rejected = !!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_HOT_JOIN_NACK);
+
reg |= BIT(idx);
+ global = (reg == 0xffffffff) && hj_rejected;
}
writel(reg, master->regs + IBI_SIR_REQ_REJECT);
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 90b7ae6d42..484fe2e9fb 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -1429,9 +1429,11 @@ static int adxl367_verify_devid(struct adxl367_state *st)
unsigned int val;
int ret;
- ret = regmap_read_poll_timeout(st->regmap, ADXL367_REG_DEVID, val,
- val == ADXL367_DEVID_AD, 1000, 10000);
+ ret = regmap_read(st->regmap, ADXL367_REG_DEVID, &val);
if (ret)
+ return dev_err_probe(st->dev, ret, "Failed to read dev id\n");
+
+ if (val != ADXL367_DEVID_AD)
return dev_err_probe(st->dev, -ENODEV,
"Invalid dev id 0x%02X, expected 0x%02X\n",
val, ADXL367_DEVID_AD);
@@ -1510,6 +1512,8 @@ int adxl367_probe(struct device *dev, const struct adxl367_ops *ops,
if (ret)
return ret;
+ fsleep(15000);
+
ret = adxl367_verify_devid(st);
if (ret)
return ret;
diff --git a/drivers/iio/accel/adxl367_i2c.c b/drivers/iio/accel/adxl367_i2c.c
index b595fe94f3..62c74bdc0d 100644
--- a/drivers/iio/accel/adxl367_i2c.c
+++ b/drivers/iio/accel/adxl367_i2c.c
@@ -11,7 +11,7 @@
#include "adxl367.h"
-#define ADXL367_I2C_FIFO_DATA 0x42
+#define ADXL367_I2C_FIFO_DATA 0x18
struct adxl367_i2c_state {
struct regmap *regmap;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index dd94667a62..1c0042fbbb 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -52,7 +52,7 @@
#define SARADC2_START BIT(4)
#define SARADC2_SINGLE_MODE BIT(5)
-#define SARADC2_CONV_CHANNELS GENMASK(15, 0)
+#define SARADC2_CONV_CHANNELS GENMASK(3, 0)
struct rockchip_saradc;
@@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn)
writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
val = FIELD_PREP(SARADC2_EN_END_INT, 1);
- val |= val << 16;
+ val |= SARADC2_EN_END_INT << 16;
writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
val = FIELD_PREP(SARADC2_START, 1) |
FIELD_PREP(SARADC2_SINGLE_MODE, 1) |
FIELD_PREP(SARADC2_CONV_CHANNELS, chn);
- val |= val << 16;
+ val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16;
writel(val, info->regs + SARADC2_CONV_CON);
}
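
The rockchip_saradc fix matters because the SARADC2 registers (like many Rockchip blocks) use the upper 16 bits as a write-enable mask for the lower 16. The old code mirrored the value into the mask, so for channel 0 the channel bits were never write-enabled. A self-contained sketch reproducing that arithmetic (GENMASK/FIELD_PREP re-implemented locally; values invented):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)            (1u << (n))
#define GENMASK(h, l)     (((~0u) << (l)) & (~0u >> (31 - (h))))

#define SARADC2_START         BIT(4)
#define SARADC2_SINGLE_MODE   BIT(5)
#define SARADC2_CONV_CHANNELS GENMASK(3, 0)

/* Minimal FIELD_PREP: shift a value into the position given by its mask. */
static uint32_t field_prep(uint32_t mask, uint32_t val)
{
    return (val * (mask & -mask)) & mask;
}

int main(void)
{
    unsigned int chn = 0;   /* channel 0 exposes the bug most clearly */

    uint32_t lo = field_prep(SARADC2_START, 1) |
                  field_prep(SARADC2_SINGLE_MODE, 1) |
                  field_prep(SARADC2_CONV_CHANNELS, chn);

    uint32_t old_word = lo | (lo << 16);  /* old code: mask mirrors the value */
    uint32_t new_word = lo | ((SARADC2_START | SARADC2_SINGLE_MODE |
                               SARADC2_CONV_CHANNELS) << 16);

    /* With chn == 0 the old write mask never enables bits 3:0, so the
     * channel field in the hardware register would be left untouched. */
    printf("old: value=0x%04x mask=0x%04x\n", old_word & 0xffff, old_word >> 16);
    printf("new: value=0x%04x mask=0x%04x\n", new_word & 0xffff, new_word >> 16);
    return 0;
}
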
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 66d4ba088e..d4f9b5d8d2 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -109,6 +109,8 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
/* compute and process only all complete datum */
nb = fifo_count / bytes_per_datum;
fifo_count = nb * bytes_per_datum;
+ if (nb == 0)
+ goto end_session;
/* Each FIFO data contains all sensors, so same number for FIFO and sensor data */
fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 676704f915..e6e6e94452 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -111,6 +111,7 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
if (enable) {
/* reset timestamping */
inv_sensors_timestamp_reset(&st->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, 0, 0, 0);
/* reset FIFO */
d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
ret = regmap_write(st->map, st->reg->user_ctrl, d);
@@ -184,6 +185,10 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
if (result)
goto error_power_off;
} else {
+ st->chip_config.gyro_fifo_enable = 0;
+ st->chip_config.accl_fifo_enable = 0;
+ st->chip_config.temp_fifo_enable = 0;
+ st->chip_config.magn_fifo_enable = 0;
result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 7653261d2d..b51eb6cb76 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -34,24 +34,11 @@
static int iio_gts_get_gain(const u64 max, const u64 scale)
{
u64 full = max;
- int tmp = 1;
if (scale > full || !scale)
return -EINVAL;
- if (U64_MAX - full < scale) {
- /* Risk of overflow */
- if (full - scale < scale)
- return 1;
-
- full -= scale;
- tmp++;
- }
-
- while (full > scale * (u64)tmp)
- tmp++;
-
- return tmp;
+ return div64_u64(full, scale);
}
/**
diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c
index 30fb2de368..e3f0de020a 100644
--- a/drivers/iio/pressure/mprls0025pa.c
+++ b/drivers/iio/pressure/mprls0025pa.c
@@ -323,6 +323,7 @@ static int mpr_probe(struct i2c_client *client)
struct iio_dev *indio_dev;
struct device *dev = &client->dev;
s64 scale, offset;
+ u32 func;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
return dev_err_probe(dev, -EOPNOTSUPP,
@@ -362,10 +363,11 @@ static int mpr_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"honeywell,pmax-pascal could not be read\n");
ret = device_property_read_u32(dev,
- "honeywell,transfer-function", &data->function);
+ "honeywell,transfer-function", &func);
if (ret)
return dev_err_probe(dev, ret,
"honeywell,transfer-function could not be read\n");
+ data->function = func - 1;
if (data->function > MPR_FUNCTION_C)
return dev_err_probe(dev, -EINVAL,
"honeywell,transfer-function %d invalid\n",
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 67bcea7a15..07cb6c5ffd 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
{
int ret;
- down_write(&clients_rwsem);
+ lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
- goto out;
+ return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-
-out:
- up_write(&clients_rwsem);
- return ret;
+ return 0;
}
static void remove_client_id(struct ib_client *client)
@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
+ bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
+
+ /*
+ * The devices_rwsem is held in write mode to ensure that a racing
+ * ib_register_device() sees a consistent view of clients and devices.
+ */
+ down_write(&devices_rwsem);
+ down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
- return ret;
+ goto out;
- down_read(&devices_rwsem);
+ need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
- if (ret) {
- up_read(&devices_rwsem);
- ib_unregister_client(client);
- return ret;
- }
+ if (ret)
+ goto out;
}
- up_read(&devices_rwsem);
- return 0;
+ ret = 0;
+out:
+ up_write(&clients_rwsem);
+ up_write(&devices_rwsem);
+ if (need_unreg && ret)
+ ib_unregister_client(client);
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 1627f3b0ef..f08417d372 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -584,6 +584,13 @@ struct hns_roce_work {
u32 queue_num;
};
+enum hns_roce_cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -627,6 +634,7 @@ struct hns_roce_qp {
struct list_head sq_node; /* all send qps are on a list */
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
+ enum hns_roce_cong_type cong_type;
};
struct hns_roce_ib_iboe {
@@ -698,13 +706,6 @@ struct hns_roce_eq_table {
struct hns_roce_eq *eq;
};
-enum cong_type {
- CONG_TYPE_DCQCN,
- CONG_TYPE_LDCP,
- CONG_TYPE_HC3,
- CONG_TYPE_DIP,
-};
-
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@@ -834,7 +835,7 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
- enum cong_type cong_type;
+ enum hns_roce_cong_type cong_type;
};
enum hns_roce_device_state {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index aa9527ac2f..81d6d4331c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -4733,12 +4733,15 @@ static int check_cong_type(struct ib_qp *ibqp,
struct hns_roce_congestion_algorithm *cong_alg)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- if (ibqp->qp_type == IB_QPT_UD)
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == IB_QPT_GSI)
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ else
+ hr_qp->cong_type = hr_dev->caps.cong_type;
/* different congestion types match different configurations */
- switch (hr_dev->caps.cong_type) {
+ switch (hr_qp->cong_type) {
case CONG_TYPE_DCQCN:
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
@@ -4766,8 +4769,8 @@ static int check_cong_type(struct ib_qp *ibqp,
default:
ibdev_warn(&hr_dev->ib_dev,
"invalid type(%u) for congestion selection.\n",
- hr_dev->caps.cong_type);
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ hr_qp->cong_type);
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_INVALID;
@@ -4786,6 +4789,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_congestion_algorithm cong_field;
struct ib_device *ibdev = ibqp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
u32 dip_idx = 0;
int ret;
@@ -4798,7 +4802,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return ret;
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
- hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 0b046c0617..12704efb7b 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -719,7 +719,6 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
- ukinfo->qp_id = iwqp->ibqp.qp_num;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -944,7 +943,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 7be4c3adb4..ab91009aea 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -358,7 +358,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
sizeof(struct gdma_create_dma_region_resp));
create_req->length = umem->length;
- create_req->offset_in_page = umem->address & (page_sz - 1);
+ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->page_count = num_pages_total;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 8ba53edf23..253fea374a 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2498,7 +2498,7 @@ static void dispatch_event_fd(struct list_head *fd_list,
list_for_each_entry_rcu(item, fd_list, xa_list) {
if (item->eventfd)
- eventfd_signal(item->eventfd, 1);
+ eventfd_signal(item->eventfd);
else
deliver_event(item, data);
}
@@ -2949,7 +2949,7 @@ DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
UVERBS_IDR_ANY_OBJECT,
- UVERBS_ACCESS_WRITE,
+ UVERBS_ACCESS_READ,
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(
MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index df1d1b0a3e..9947feb7fb 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -78,7 +78,7 @@ static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
*/
copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
left);
- memcpy(eseg->inline_hdr.start, pdata, copysz);
+ memcpy(eseg->inline_hdr.data, pdata, copysz);
stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
*size += stride / 16;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index d3c436ead6..4aa80c9388 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
/* distinguish "mi" and "min-latency" with length */
len = strnlen(buf, NAME_MAX);
- if (buf[len - 1] == '\n')
+ if (len && buf[len - 1] == '\n')
len--;
if (!strncasecmp(buf, "round-robin", 11) ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 040234c01b..9632afbd72 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
- ib_register_event_handler(&sdev->event_handler);
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
}
}
+ ib_register_event_handler(&sdev->event_handler);
spin_lock(&srpt_dev_lock);
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
err_port:
srpt_unregister_mad_agent(sdev, i);
- ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d0bb3edfd0..c11af4441c 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -130,7 +130,12 @@ static const struct xpad_device {
{ 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
{ 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+ { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */
+ { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
+ { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
+ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -463,6 +468,7 @@ static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */
XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */
XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */
+ XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index ba00ecfbd3..b41fd1240f 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -315,12 +315,10 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
error = devm_gpio_request_one(dev, button->gpio,
flags, button->desc ? : DRV_NAME);
- if (error) {
- dev_err(dev,
- "unable to claim gpio %u, err=%d\n",
- button->gpio, error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error,
+ "unable to claim gpio %u\n",
+ button->gpio);
bdata->gpiod = gpio_to_desc(button->gpio);
if (!bdata->gpiod) {
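
The gpio_keys_polled hunk switches to the dev_err_probe() idiom: log and return the error in one step, but stay silent for probe deferral so a retried probe does not spam the log. A hedged userspace analogue (EPROBE_DEFER faked here with the kernel's numeric value; this is not the driver-core API):

#include <errno.h>
#include <stdarg.h>
#include <stdio.h>

#define EPROBE_DEFER 517   /* kernel-internal errno value, defined here for the sketch */

static int err_probe(int err, const char *fmt, ...)
{
    if (err != -EPROBE_DEFER) {
        va_list ap;

        va_start(ap, fmt);
        fprintf(stderr, "error %d: ", err);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
    }
    return err;
}

int main(void)
{
    unsigned int gpio = 42;
    int ret;

    /* Real failure: logged and propagated. */
    ret = err_probe(-EINVAL, "unable to claim gpio %u\n", gpio);
    printf("returned %d\n", ret);

    /* Deferral: propagated silently so the next probe attempt can retry. */
    ret = err_probe(-EPROBE_DEFER, "unable to claim gpio %u\n", gpio);
    printf("returned %d\n", ret);
    return 0;
}
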
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 36aeeae776..9ca5a743f1 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -622,6 +622,118 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
{
.prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 1,
+ .fw_minor = 2,
+ .touch_link = 1770,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 1,
+ .fw_minor = 1,
+ .touch_link = 1774,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
.fw_major = 0,
.fw_minor = 37,
.touch_link = 1770,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7673bb8294..a66f7df7a2 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -195,7 +195,7 @@ source "drivers/iommu/iommufd/Kconfig"
config IRQ_REMAP
bool "Support for Interrupt Remapping"
depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI
- select DMAR_TABLE
+ select DMAR_TABLE if INTEL_IOMMU
help
Supports Interrupt remapping for IO-APIC and MSI devices.
To use x2apic mode in the CPU's which support x2APIC enhancements or
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 64bcf3df37..7f65f3ecb2 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2068,6 +2068,9 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
/* Prevent binding other PCI device drivers to IOMMU devices */
iommu->dev->match_driver = false;
+ /* ACPI _PRT won't have an IRQ for IOMMU */
+ iommu->dev->irq_managed = 1;
+
pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
&iommu->cap);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 037fcf8264..a0767ce1bd 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1706,6 +1706,14 @@ static size_t iommu_dma_opt_mapping_size(void)
return iova_rcache_range();
}
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+ if (dev_is_untrusted(dev))
+ return swiotlb_max_mapping_size(dev);
+
+ return SIZE_MAX;
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
@@ -1728,6 +1736,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
+ .max_mapping_size = iommu_dma_max_mapping_size,
};
/*
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index 5dabf081a7..5402b699a1 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -5,5 +5,7 @@ obj-$(CONFIG_DMAR_TABLE) += trace.o cap_audit.o
obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
+ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
+endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 6e102cbbde..cc2490e5cf 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -481,6 +481,9 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
if (!info || !info->ats_enabled)
return;
+ if (pci_dev_is_disconnected(to_pci_dev(dev)))
+ return;
+
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
pfsid = info->pfsid;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 83314b9d8f..ee59647c20 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -99,7 +99,8 @@ int __init irq_remapping_prepare(void)
if (disable_irq_remap)
return -ENOSYS;
- if (intel_irq_remap_ops.prepare() == 0)
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) &&
+ intel_irq_remap_ops.prepare() == 0)
remap_ops = &intel_irq_remap_ops;
else if (IS_ENABLED(CONFIG_AMD_IOMMU) &&
amd_iommu_irq_ops.prepare() == 0)
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index fe8d516f36..dc822111fc 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -28,8 +28,7 @@
#define ISCR 0x10
#define IITSR 0x14
#define TSCR 0x20
-#define TITSR0 0x24
-#define TITSR1 0x28
+#define TITSR(n) (0x24 + (n) * 4)
#define TITSR0_MAX_INT 16
#define TITSEL_WIDTH 0x2
#define TSSR(n) (0x30 + ((n) * 4))
@@ -67,28 +66,43 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
return data->domain->host_data;
}
-static void rzg2l_irq_eoi(struct irq_data *d)
+static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
u32 bit = BIT(hw_irq);
- u32 reg;
+ u32 iitsr, iscr;
- reg = readl_relaxed(priv->base + ISCR);
- if (reg & bit)
- writel_relaxed(reg & ~bit, priv->base + ISCR);
+ iscr = readl_relaxed(priv->base + ISCR);
+ iitsr = readl_relaxed(priv->base + IITSR);
+
+ /*
+ * ISCR can only be cleared if the type is falling-edge, rising-edge or
+ * falling/rising-edge.
+ */
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
+ writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ /*
+ * Make sure the posted write is flushed so that the interrupt we just
+ * handled is not raised again.
+ */
+ readl_relaxed(priv->base + ISCR);
+ }
}
-static void rzg2l_tint_eoi(struct irq_data *d)
+static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 bit = BIT(hw_irq);
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
u32 reg;
reg = readl_relaxed(priv->base + TSCR);
- if (reg & bit)
+ if (reg & bit) {
writel_relaxed(reg & ~bit, priv->base + TSCR);
+ /*
+ * Make sure the posted write is flushed so that the interrupt we just
+ * handled is not raised again.
+ */
+ readl_relaxed(priv->base + TSCR);
+ }
}
static void rzg2l_irqc_eoi(struct irq_data *d)
@@ -98,9 +112,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
raw_spin_lock(&priv->lock);
if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
- rzg2l_irq_eoi(d);
+ rzg2l_clear_irq_int(priv, hw_irq);
else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
- rzg2l_tint_eoi(d);
+ rzg2l_clear_tint_int(priv, hw_irq);
raw_spin_unlock(&priv->lock);
irq_chip_eoi_parent(d);
}
@@ -148,8 +162,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 iitseln = hwirq - IRQC_IRQ_START;
+ bool clear_irq_int = false;
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -159,14 +175,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
+ clear_irq_int = true;
break;
default:
@@ -175,22 +194,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
- tmp |= IITSR_IITSEL(hw_irq, sense);
+ tmp &= ~IITSR_IITSEL_MASK(iitseln);
+ tmp |= IITSR_IITSEL(iitseln, sense);
+ if (clear_irq_int)
+ rzg2l_clear_irq_int(priv, hwirq);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
+static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
+ u32 reg, u32 tssr_offset, u8 tssr_index)
+{
+ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
+
+ /* Clear the relevant byte in reg */
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ /* Set TINT and leave TIEN clear */
+ reg |= tint << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+
+ return reg | tien;
+}
+
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
- u32 offset;
- u8 sense;
- u32 reg;
+ u32 tssr_offset = TSSR_OFFSET(titseln);
+ u8 tssr_index = TSSR_INDEX(titseln);
+ u8 index, sense;
+ u32 reg, tssr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -205,17 +242,21 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- offset = TITSR0;
+ index = 0;
if (titseln >= TITSR0_MAX_INT) {
titseln -= TITSR0_MAX_INT;
- offset = TITSR1;
+ index = 1;
}
raw_spin_lock(&priv->lock);
- reg = readl_relaxed(priv->base + offset);
+ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
+ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
+ reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
- writel_relaxed(reg, priv->base + offset);
+ writel_relaxed(reg, priv->base + TITSR(index));
+ rzg2l_clear_tint_int(priv, hwirq);
+ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
return 0;
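
The TITSR(n) macro introduced above replaces two separate register defines: each 32-bit TITSR register holds sixteen 2-bit sense fields, so a TINT number selects both a register bank and a bit position. A small sketch of that indexing arithmetic (offsets mirror the driver's defines; the loop exists only for illustration):

#include <stdio.h>

#define TITSR(n)        (0x24 + (n) * 4)
#define TITSR0_MAX_INT  16
#define TITSEL_WIDTH    2

int main(void)
{
    for (unsigned int titseln = 0; titseln < 32; titseln += 15) {
        unsigned int field = titseln;
        unsigned int index = 0;

        if (field >= TITSR0_MAX_INT) {      /* second bank: what used to be TITSR1 */
            field -= TITSR0_MAX_INT;
            index = 1;
        }
        printf("tint %2u -> reg 0x%02x, bits [%u:%u]\n",
               titseln, TITSR(index),
               field * TITSEL_WIDTH + TITSEL_WIDTH - 1,
               field * TITSEL_WIDTH);
    }
    return 0;
}
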
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index eb648ff54b..db0ac66419 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -114,8 +114,11 @@ static int sgm3140_brightness_set(struct led_classdev *led_cdev,
"failed to enable regulator: %d\n", ret);
return ret;
}
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 1);
} else {
+ del_timer_sync(&priv->powerdown_timer);
+ gpiod_set_value_cansleep(priv->flash_gpio, 0);
gpiod_set_value_cansleep(priv->enable_gpio, 0);
ret = regulator_disable(priv->vin_regulator);
if (ret) {
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 91f44b23cb..17235a5e57 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -405,6 +405,7 @@ error_reg:
chip->regulators);
error:
+ mutex_unlock(&chip->mutex);
mutex_destroy(&chip->mutex);
return ret;
}
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index d76214fa9a..79719fc8a0 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -462,12 +462,12 @@ static int netdev_trig_notify(struct notifier_block *nb,
trigger_data->duplex = DUPLEX_UNKNOWN;
switch (evt) {
case NETDEV_CHANGENAME:
- get_device_state(trigger_data);
- fallthrough;
case NETDEV_REGISTER:
dev_put(trigger_data->net_dev);
dev_hold(dev);
trigger_data->net_dev = dev;
+ if (evt == NETDEV_CHANGENAME)
+ get_device_state(trigger_data);
break;
case NETDEV_UNREGISTER:
dev_put(trigger_data->net_dev);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index f03d7dba27..4f2808ef38 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1315,7 +1315,7 @@ static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
io_req.mem.ptr.vma = (char *)b->data + offset;
}
- r = dm_io(&io_req, 1, &region, NULL);
+ r = dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
b->end_io(b, errno_to_blk_status(r));
}
@@ -2167,7 +2167,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL;
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
@@ -2191,7 +2191,7 @@ int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t c
if (WARN_ON_ONCE(dm_bufio_in_request()))
return -EINVAL; /* discards are optional */
- return dm_io(&io_req, 1, &io_reg, NULL);
+ return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4ab4e8dcfd..35f5019395 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -53,11 +53,11 @@
struct convert_context {
struct completion restart;
struct bio *bio_in;
- struct bio *bio_out;
struct bvec_iter iter_in;
+ struct bio *bio_out;
struct bvec_iter iter_out;
- u64 cc_sector;
atomic_t cc_pending;
+ u64 cc_sector;
union {
struct skcipher_request *req;
struct aead_request *req_aead;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e8e8fc33d3..2cc30b9ab2 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -555,7 +555,7 @@ static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
}
}
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
return r;
@@ -1073,7 +1073,7 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
io_loc.sector = ic->start + SB_SECTORS + sector;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
"reading journal" : "writing journal", r);
@@ -1190,7 +1190,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, u
io_loc.sector = target;
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
fn(-1UL, data);
@@ -1519,7 +1519,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
fr.io_reg.count = 0,
fr.ic = ic;
init_completion(&fr.comp);
- r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
+ r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
BUG_ON(r);
}
@@ -1699,7 +1699,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
struct page *page;
- void *buffer;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
@@ -1708,13 +1707,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
logical_sector = dio->range.logical_sector;
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
- buffer = page_to_virt(page);
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
+ sector_t alignment;
char *mem;
+ char *buffer = page_to_virt(page);
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1727,7 +1727,15 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ /* Align the bio to logical block size */
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
+ alignment &= -alignment;
+ io_loc.sector = round_down(io_loc.sector, alignment);
+ io_loc.count += sector - io_loc.sector;
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ io_loc.count = round_up(io_loc.count, alignment);
+
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
goto free_ret;
@@ -1848,12 +1856,12 @@ again:
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
if (r > 0) {
- integrity_recheck(dio, checksums);
+ integrity_recheck(dio, checksums_onstack);
goto skip_io;
}
- if (likely(checksums != checksums_onstack))
- kfree(checksums);
goto error;
}
@@ -2806,7 +2814,7 @@ next_chunk:
io_loc.sector = get_data_sector(ic, area, offset);
io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dm_integrity_io_error(ic, "reading data", r);
goto err;
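
The dm-integrity recheck fix rounds the read region to a power-of-two alignment derived with x & -x, which isolates the lowest set bit of the OR of the quantities involved, i.e. the largest power of two that divides all of them. A self-contained sketch of that arithmetic with invented sector numbers:

#include <stdio.h>
#include <stdint.h>

static uint64_t round_down_pow2(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t round_up_pow2(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
    uint64_t logical_sector = 24;          /* example values, in 512-byte sectors */
    uint64_t bio_sectors    = 8;
    uint64_t page_sectors   = 4096 >> 9;   /* PAGE_SIZE >> SECTOR_SHIFT */

    uint64_t alignment = logical_sector | bio_sectors | page_sectors;
    alignment &= -alignment;               /* lowest set bit == common pow-2 divisor */

    uint64_t sector = 25;                  /* sector the recheck wants to read */
    uint64_t count  = 1;

    uint64_t start = round_down_pow2(sector, alignment);
    uint64_t len   = round_up_pow2(count + (sector - start), alignment);

    printf("alignment=%llu start=%llu len=%llu\n",
           (unsigned long long)alignment,
           (unsigned long long)start,
           (unsigned long long)len);
    return 0;
}
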
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index f053ce2458..7409490259 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -305,7 +305,7 @@ static void km_dp_init(struct dpages *dp, void *data)
*/
static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
- struct io *io)
+ struct io *io, unsigned short ioprio)
{
struct bio *bio;
struct page *page;
@@ -354,6 +354,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
&io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_end_io = endio;
+ bio->bi_ioprio = ioprio;
store_io_and_region_in_bio(bio, io, region);
if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
@@ -383,7 +384,7 @@ static void do_region(const blk_opf_t opf, unsigned int region,
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
struct dm_io_region *where, struct dpages *dp,
- struct io *io, int sync)
+ struct io *io, int sync, unsigned short ioprio)
{
int i;
struct dpages old_pages = *dp;
@@ -400,7 +401,7 @@ static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
for (i = 0; i < num_regions; i++) {
*dp = old_pages;
if (where[i].count || (opf & REQ_PREFLUSH))
- do_region(opf, i, where + i, dp, io);
+ do_region(opf, i, where + i, dp, io, ioprio);
}
/*
@@ -425,7 +426,7 @@ static void sync_io_complete(unsigned long error, void *context)
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
- unsigned long *error_bits)
+ unsigned long *error_bits, unsigned short ioprio)
{
struct io *io;
struct sync_io sio;
@@ -447,7 +448,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 1);
+ dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
wait_for_completion_io(&sio.wait);
@@ -459,7 +460,8 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
static int async_io(struct dm_io_client *client, unsigned int num_regions,
struct dm_io_region *where, blk_opf_t opf,
- struct dpages *dp, io_notify_fn fn, void *context)
+ struct dpages *dp, io_notify_fn fn, void *context,
+ unsigned short ioprio)
{
struct io *io;
@@ -479,7 +481,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
io->vma_invalidate_address = dp->vma_invalidate_address;
io->vma_invalidate_size = dp->vma_invalidate_size;
- dispatch_io(opf, num_regions, where, dp, io, 0);
+ dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
return 0;
}
@@ -521,7 +523,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
}
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
- struct dm_io_region *where, unsigned long *sync_error_bits)
+ struct dm_io_region *where, unsigned long *sync_error_bits,
+ unsigned short ioprio)
{
int r;
struct dpages dp;
@@ -532,11 +535,11 @@ int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
if (!io_req->notify.fn)
return sync_io(io_req->client, num_regions, where,
- io_req->bi_opf, &dp, sync_error_bits);
+ io_req->bi_opf, &dp, sync_error_bits, ioprio);
return async_io(io_req->client, num_regions, where,
io_req->bi_opf, &dp, io_req->notify.fn,
- io_req->notify.context);
+ io_req->notify.context, ioprio);
}
EXPORT_SYMBOL(dm_io);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index d01807c50f..79c65c9ad5 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -578,9 +578,9 @@ static int run_io_job(struct kcopyd_job *job)
io_job_start(job->kc->throttle);
if (job->op == REQ_OP_READ)
- r = dm_io(&io_req, 1, &job->source, NULL);
+ r = dm_io(&io_req, 1, &job->source, NULL, IOPRIO_DEFAULT);
else
- r = dm_io(&io_req, job->num_dests, job->dests, NULL);
+ r = dm_io(&io_req, job->num_dests, job->dests, NULL, IOPRIO_DEFAULT);
return r;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index f9f84236df..f7f9c21009 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -300,7 +300,7 @@ static int rw_header(struct log_c *lc, enum req_op op)
{
lc->io_req.bi_opf = op;
- return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
+ return dm_io(&lc->io_req, 1, &lc->header_location, NULL, IOPRIO_DEFAULT);
}
static int flush_header(struct log_c *lc)
@@ -313,7 +313,7 @@ static int flush_header(struct log_c *lc)
lc->io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
- return dm_io(&lc->io_req, 1, &null_location, NULL);
+ return dm_io(&lc->io_req, 1, &null_location, NULL, IOPRIO_DEFAULT);
}
static int read_header(struct log_c *log)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index eb009d6bb0..d97355e9b9 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -213,6 +213,7 @@ struct raid_dev {
#define RT_FLAG_RS_IN_SYNC 6
#define RT_FLAG_RS_RESYNCING 7
#define RT_FLAG_RS_GROW 8
+#define RT_FLAG_RS_FROZEN 9
/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
@@ -3240,11 +3241,12 @@ size_check:
rs->md.ro = 1;
rs->md.in_sync = 1;
- /* Keep array frozen until resume. */
- set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
-
/* Has to be held on running the array */
mddev_suspend_and_lock_nointr(&rs->md);
+
+ /* Keep array frozen until resume. */
+ md_frozen_sync_thread(&rs->md);
+
r = md_run(&rs->md);
rs->md.in_sync = 0; /* Assume already marked dirty */
if (r) {
@@ -3329,17 +3331,18 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
struct mddev *mddev = &rs->md;
/*
- * If we're reshaping to add disk(s)), ti->len and
+ * If we're reshaping to add disk(s), ti->len and
* mddev->array_sectors will differ during the process
* (ti->len > mddev->array_sectors), so we have to requeue
* bios with addresses > mddev->array_sectors here or
* there will occur accesses past EOD of the component
* data images thus erroring the raid set.
*/
- if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
+ if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))
return DM_MAPIO_REQUEUE;
- md_handle_request(mddev, bio);
+ if (unlikely(!md_handle_request(mddev, bio)))
+ return DM_MAPIO_REQUEUE;
return DM_MAPIO_SUBMITTED;
}
@@ -3718,21 +3721,33 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
+ int ret = 0;
if (!mddev->pers || !mddev->pers->sync_request)
return -EINVAL;
- if (!strcasecmp(argv[0], "frozen"))
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- else
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (test_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags) ||
+ test_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags))
+ return -EBUSY;
- if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
- if (mddev->sync_thread) {
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- md_reap_sync_thread(mddev);
- }
- } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
+ if (!strcasecmp(argv[0], "frozen")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_frozen_sync_thread(mddev);
+ mddev_unlock(mddev);
+ } else if (!strcasecmp(argv[0], "idle")) {
+ ret = mddev_lock(mddev);
+ if (ret)
+ return ret;
+
+ md_idle_sync_thread(mddev);
+ mddev_unlock(mddev);
+ }
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
return -EBUSY;
else if (!strcasecmp(argv[0], "resync"))
; /* MD_RECOVERY_NEEDED set below */
@@ -3791,15 +3806,46 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
}
+static void raid_presuspend(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+ struct mddev *mddev = &rs->md;
+
+ /*
+ * From now on, disallow raid_message() from changing sync_thread until
+ * resume; raid_postsuspend() is too late.
+ */
+ set_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+
+ if (!reshape_interrupted(mddev))
+ return;
+
+ /*
+ * For raid456, if reshape is interrupted, IO across the reshape position
+ * will never make progress, while the caller will wait for IO to be done.
+ * Inform raid456 to handle that IO to prevent deadlock.
+ */
+ if (mddev->pers && mddev->pers->prepare_suspend)
+ mddev->pers->prepare_suspend(mddev);
+}
+
+static void raid_presuspend_undo(struct dm_target *ti)
+{
+ struct raid_set *rs = ti->private;
+
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
+}
+
static void raid_postsuspend(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
- /* Writes have to be stopped before suspending to avoid deadlocks. */
- if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
- md_stop_writes(&rs->md);
-
+ /*
+ * sync_thread must be stopped during suspend, and writes have
+ * to be stopped before suspending to avoid deadlocks.
+ */
+ md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
}
}
@@ -4012,8 +4058,6 @@ static int raid_preresume(struct dm_target *ti)
}
/* Check for any resize/reshape on @rs and adjust/initiate */
- /* Be prepared for mddev_resume() in raid_resume() */
- set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
mddev->resync_min = mddev->recovery_cp;
@@ -4047,7 +4091,9 @@ static void raid_resume(struct dm_target *ti)
* Take this opportunity to check whether any failed
* devices are reachable again.
*/
+ mddev_lock_nointr(mddev);
attempt_restore_of_faulty_devices(rs);
+ mddev_unlock(mddev);
}
if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
@@ -4055,10 +4101,13 @@ static void raid_resume(struct dm_target *ti)
if (mddev->delta_disks < 0)
rs_set_capacity(rs);
+ WARN_ON_ONCE(!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery));
+ WARN_ON_ONCE(test_bit(MD_RECOVERY_RUNNING, &mddev->recovery));
+ clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
mddev_lock_nointr(mddev);
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
mddev->ro = 0;
mddev->in_sync = 0;
+ md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
}
}
@@ -4074,6 +4123,8 @@ static struct target_type raid_target = {
.message = raid_message,
.iterate_devices = raid_iterate_devices,
.io_hints = raid_io_hints,
+ .presuspend = raid_presuspend,
+ .presuspend_undo = raid_presuspend_undo,
.postsuspend = raid_postsuspend,
.preresume = raid_preresume,
.resume = raid_resume,
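The dm-raid.c hunks above gate raid_message() behind new runtime state: presuspend sets RT_FLAG_RS_FROZEN so the sync thread cannot be changed mid-suspend, and presuspend_undo and resume clear it again. A rough userspace sketch of that gating, with hypothetical names and none of the real locking:

/*
 * Toy model of the RT_FLAG_RS_FROZEN gating introduced above: the
 * message handler refuses to touch the sync thread while a suspend is
 * in flight or the set is frozen.
 */
#include <stdio.h>

#define RS_SUSPENDED    (1u << 0)
#define RS_FROZEN       (1u << 1)

static unsigned int runtime_flags;

static void presuspend(void)      { runtime_flags |= RS_FROZEN; }
static void presuspend_undo(void) { runtime_flags &= ~RS_FROZEN; }
static void resume(void)          { runtime_flags &= ~(RS_FROZEN | RS_SUSPENDED); }

static int raid_message(const char *action)
{
        if (runtime_flags & (RS_SUSPENDED | RS_FROZEN)) {
                printf("message '%s' rejected: busy\n", action);
                return -1;      /* -EBUSY analogue */
        }
        printf("message '%s' accepted\n", action);
        return 0;
}

int main(void)
{
        presuspend();
        raid_message("idle");   /* rejected while frozen */
        presuspend_undo();      /* suspend aborted */
        raid_message("idle");   /* accepted again */
        resume();
        return 0;
}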
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index ddcb2bc4a6..9511dae5b5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -278,7 +278,7 @@ static int mirror_flush(struct dm_target *ti)
}
error_bits = -1;
- dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
+ dm_io(&io_req, ms->nr_mirrors, io, &error_bits, IOPRIO_DEFAULT);
if (unlikely(error_bits != 0)) {
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error_bits))
@@ -554,7 +554,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
map_region(&io, m, bio);
bio_set_m(bio, m);
- BUG_ON(dm_io(&io_req, 1, &io, NULL));
+ BUG_ON(dm_io(&io_req, 1, &io, NULL, IOPRIO_DEFAULT));
}
static inline int region_in_sync(struct mirror_set *ms, region_t region,
@@ -681,7 +681,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
*/
bio_set_m(bio, get_default_mirror(ms));
- BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
+ BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL, IOPRIO_DEFAULT));
}
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 15649921f2..568d10842b 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -223,7 +223,7 @@ static void do_metadata(struct work_struct *work)
{
struct mdata_req *req = container_of(work, struct mdata_req, work);
- req->result = dm_io(req->io_req, 1, req->where, NULL);
+ req->result = dm_io(req->io_req, 1, req->where, NULL, IOPRIO_DEFAULT);
}
/*
@@ -247,7 +247,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
struct mdata_req req;
if (!metadata)
- return dm_io(&io_req, 1, &where, NULL);
+ return dm_io(&io_req, 1, &where, NULL, IOPRIO_DEFAULT);
req.where = &where;
req.io_req = &io_req;
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index bf7a574499..0ace06d1be 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
for (i = 0; i < size; i++) {
slot = et->table + i;
- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
kmem_cache_free(mem, ex);
+ cond_resched();
+ }
}
kvfree(et->table);
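The dm-snap hunk above inserts cond_resched() into a potentially very long teardown loop so it does not monopolize the CPU. A userspace analogue of the same idea, yielding periodically while freeing a large table (sched_yield() standing in for cond_resched()):

/*
 * Userspace analogue, not the kernel API: when tearing down a very
 * large table, yield the CPU periodically instead of hogging it for
 * the whole loop.
 */
#include <sched.h>
#include <stdlib.h>

#define TABLE_SIZE 100000

int main(void)
{
        char **table = calloc(TABLE_SIZE, sizeof(*table));

        if (!table)
                return 1;

        for (size_t i = 0; i < TABLE_SIZE; i++)
                table[i] = malloc(16);

        for (size_t i = 0; i < TABLE_SIZE; i++) {
                free(table[i]);
                if ((i & 1023) == 0)
                        sched_yield();  /* stand-in for cond_resched() */
        }

        free(table);
        return 0;
}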
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 7b620b187d..49e4a35d70 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -511,7 +511,7 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
io_loc.bdev = v->data_dev->bdev;
io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
- r = dm_io(&io_req, 1, &io_loc, NULL);
+ r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
goto free_ret;
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 4620a98c99..db93a91169 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -80,12 +80,12 @@ struct dm_verity_io {
/* original value of bio->bi_end_io */
bio_end_io_t *orig_bi_end_io;
+ struct bvec_iter iter;
+
sector_t block;
unsigned int n_blocks;
bool in_tasklet;
- struct bvec_iter iter;
-
struct work_struct work;
char *recheck_buffer;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 074cb785ea..6a4279bfb1 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
req.notify.context = &endio;
/* writing via async dm-io (implied by notify.fn above) won't return an error */
- (void) dm_io(&req, 1, &region, NULL);
+ (void) dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
i = j;
}
@@ -568,7 +568,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc)
req.notify.fn = NULL;
req.notify.context = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error writing superblock");
}
@@ -596,7 +596,7 @@ static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
req.client = wc->dm_io;
req.notify.fn = NULL;
- r = dm_io(&req, 1, &region, NULL);
+ r = dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
if (unlikely(r))
writecache_error(wc, r, "error flushing metadata: %d", r);
}
@@ -990,7 +990,7 @@ static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors
req.client = wc->dm_io;
req.notify.fn = NULL;
- return dm_io(&req, 1, &region, NULL);
+ return dm_io(&req, 1, &region, NULL, IOPRIO_DEFAULT);
}
static void writecache_resume(struct dm_target *ti)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 23c32cd1f1..4ff9bebb81 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2945,6 +2945,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend
static void __dm_internal_resume(struct mapped_device *md)
{
+ int r;
+ struct dm_table *map;
+
BUG_ON(!md->internal_suspend_count);
if (--md->internal_suspend_count)
@@ -2953,12 +2956,23 @@ static void __dm_internal_resume(struct mapped_device *md)
if (dm_suspended_md(md))
goto done; /* resume from nested suspend */
- /*
- * NOTE: existing callers don't need to call dm_table_resume_targets
- * (which may fail -- so best to avoid it for now by passing NULL map)
- */
- (void) __dm_resume(md, NULL);
-
+ map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ r = __dm_resume(md, map);
+ if (r) {
+ /*
+ * If a preresume method of some target failed, we are in a
+ * tricky situation. We can't return an error to the caller. We
+ * can't fake success because then the "resume" and
+ * "postsuspend" methods would not be paired correctly, and it
+ * would break various targets, for example it would cause list
+ * corruption in the "origin" target.
+ *
+ * So, we fake normal suspend here, to make sure that the
+ * "resume" and "postsuspend" methods will be paired correctly.
+ */
+ DMERR("Preresume method failed: %d", r);
+ set_bit(DMF_SUSPENDED, &md->flags);
+ }
done:
clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
smp_mb__after_atomic();
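The dm.c hunk above resumes with the real table and, if a preresume method fails, falls back to the suspended state so that resume and postsuspend calls stay paired. A toy model of that fallback, with hypothetical names:

/*
 * Toy model of the pairing rule described in the comment above: if the
 * resume path fails partway, fall back to the "suspended" state rather
 * than pretending the device is live, so later hook calls stay matched.
 */
#include <stdbool.h>
#include <stdio.h>

static bool suspended = true;

static int preresume(bool fail) { return fail ? -5 /* -EIO analogue */ : 0; }

static void internal_resume(bool make_preresume_fail)
{
        int r = preresume(make_preresume_fail);

        if (r) {
                printf("preresume failed (%d), staying suspended\n", r);
                suspended = true;       /* fake normal suspend */
                return;
        }
        suspended = false;
        printf("resumed\n");
}

int main(void)
{
        internal_resume(true);          /* stays suspended */
        internal_resume(false);         /* now actually resumes */
        printf("suspended=%d\n", suspended);
        return 0;
}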
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 9672f75c30..a4976ceae8 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -234,7 +234,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
sector_t doff;
bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;
- if (pg_index == store->file_pages - 1) {
+ /* we compare length (page numbers), not page offset. */
+ if ((pg_index - store->sb_index) == store->file_pages - 1) {
unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1);
if (last_page_size == 0)
@@ -438,8 +439,8 @@ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index,
struct page *page = store->filemap[pg_index];
if (mddev_is_clustered(bitmap->mddev)) {
- pg_index += bitmap->cluster_slot *
- DIV_ROUND_UP(store->bytes, PAGE_SIZE);
+ /* go to node bitmap area starting point */
+ pg_index += store->sb_index;
}
if (store->file)
@@ -952,6 +953,7 @@ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
@@ -982,6 +984,7 @@ static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
unsigned long index = file_page_index(store, chunk);
unsigned long node_offset = 0;
+ index += store->sb_index;
if (mddev_is_clustered(bitmap->mddev))
node_offset = bitmap->cluster_slot * store->file_pages;
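The __write_sb_page() hunk above compares page counts relative to store->sb_index; the surrounding context sizes the final write from the tail of store->bytes. A small, self-contained illustration of that last-page-size calculation:

/*
 * Self-contained illustration of the "last page size" logic visible in
 * the context above: the final page of the bitmap store holds only the
 * tail of store->bytes, unless the size is page-aligned.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int last_page_size(unsigned int bytes)
{
        unsigned int tail = bytes & (PAGE_SIZE - 1);

        return tail ? tail : PAGE_SIZE;
}

int main(void)
{
        printf("%u\n", last_page_size(10000)); /* 1808: 10000 mod 4096 */
        printf("%u\n", last_page_size(8192));  /* aligned -> full page */
        return 0;
}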
diff --git a/drivers/md/md-multipath.c b/drivers/md/md-multipath.c
index d222768702..aa77133f31 100644
--- a/drivers/md/md-multipath.c
+++ b/drivers/md/md-multipath.c
@@ -258,15 +258,6 @@ static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
p->rdev = NULL;
- if (!test_bit(RemoveSynchronized, &rdev->flags)) {
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- }
- }
err = md_integrity_register(mddev);
}
abort:
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 58889bc726..67befb598c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -99,18 +99,6 @@ static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
-enum md_ro_state {
- MD_RDWR,
- MD_RDONLY,
- MD_AUTO_READ,
- MD_MAX_STATE
-};
-
-static bool md_is_rdwr(struct mddev *mddev)
-{
- return (mddev->ro == MD_RDWR);
-}
-
/*
* Default number of read corrections we'll attempt on an rdev
* before ejecting it from the array. We divide the read error
@@ -378,7 +366,7 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
return true;
}
-void md_handle_request(struct mddev *mddev, struct bio *bio)
+bool md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
if (is_suspended(mddev, bio)) {
@@ -386,7 +374,7 @@ check_suspended:
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
- return;
+ return true;
}
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
@@ -402,10 +390,13 @@ check_suspended:
if (!mddev->pers->make_request(mddev, bio)) {
percpu_ref_put(&mddev->active_io);
+ if (!mddev->gendisk && mddev->pers->prepare_suspend)
+ return false;
goto check_suspended;
}
percpu_ref_put(&mddev->active_io);
+ return true;
}
EXPORT_SYMBOL(md_handle_request);
@@ -2582,6 +2573,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
fail:
pr_warn("md: failed to register dev-%s for %s\n",
b, mdname(mddev));
+ mddev_destroy_serial_pool(mddev, rdev);
return err;
}
@@ -4944,6 +4936,35 @@ static void stop_sync_thread(struct mddev *mddev, bool locked, bool check_seq)
mddev_lock_nointr(mddev);
}
+void md_idle_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, true);
+}
+EXPORT_SYMBOL_GPL(md_idle_sync_thread);
+
+void md_frozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
+}
+EXPORT_SYMBOL_GPL(md_frozen_sync_thread);
+
+void md_unfrozen_sync_thread(struct mddev *mddev)
+{
+ lockdep_assert_held(&mddev->reconfig_mutex);
+
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ sysfs_notify_dirent_safe(mddev->sysfs_action);
+}
+EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread);
+
static void idle_sync_thread(struct mddev *mddev)
{
mutex_lock(&mddev->sync_mutex);
@@ -6063,7 +6084,10 @@ int md_run(struct mddev *mddev)
pr_warn("True protection against single-disk failure might be compromised.\n");
}
- mddev->recovery = 0;
+ /* dm-raid expects sync_thread to be frozen until resume */
+ if (mddev->gendisk)
+ mddev->recovery = 0;
+
/* may be over-ridden by personality */
mddev->resync_max_sectors = mddev->dev_sectors;
@@ -6303,7 +6327,15 @@ static void md_clean(struct mddev *mddev)
mddev->persistent = 0;
mddev->level = LEVEL_NONE;
mddev->clevel[0] = 0;
- mddev->flags = 0;
+ /*
+ * Don't clear MD_CLOSING, or mddev can be opened again.
+ * 'hold_active != 0' means mddev is still in the creation
+ * process and will be used later.
+ */
+ if (mddev->hold_active)
+ mddev->flags = 0;
+ else
+ mddev->flags &= BIT_ULL_MASK(MD_CLOSING);
mddev->sb_flags = 0;
mddev->ro = MD_RDWR;
mddev->metadata_type[0] = 0;
@@ -6340,7 +6372,6 @@ static void md_clean(struct mddev *mddev)
static void __md_stop_writes(struct mddev *mddev)
{
- stop_sync_thread(mddev, true, false);
del_timer_sync(&mddev->safemode_timer);
if (mddev->pers && mddev->pers->quiesce) {
@@ -6365,6 +6396,8 @@ static void __md_stop_writes(struct mddev *mddev)
void md_stop_writes(struct mddev *mddev)
{
mddev_lock_nointr(mddev);
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ stop_sync_thread(mddev, true, false);
__md_stop_writes(mddev);
mddev_unlock(mddev);
}
@@ -7649,7 +7682,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
int err = 0;
void __user *argp = (void __user *)arg;
struct mddev *mddev = NULL;
- bool did_set_md_closing = false;
if (!md_ioctl_valid(cmd))
return -ENOTTY;
@@ -7733,7 +7765,6 @@ static int md_ioctl(struct block_device *bdev, blk_mode_t mode,
err = -EBUSY;
goto out;
}
- did_set_md_closing = true;
mutex_unlock(&mddev->open_mutex);
sync_blockdev(bdev);
}
@@ -7875,7 +7906,7 @@ unlock:
mddev_unlock(mddev);
out:
- if(did_set_md_closing)
+ if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY))
clear_bit(MD_CLOSING, &mddev->flags);
return err;
}
@@ -8762,6 +8793,23 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
}
EXPORT_SYMBOL_GPL(md_account_bio);
+void md_free_cloned_bio(struct bio *bio)
+{
+ struct md_io_clone *md_io_clone = bio->bi_private;
+ struct bio *orig_bio = md_io_clone->orig_bio;
+ struct mddev *mddev = md_io_clone->mddev;
+
+ if (bio->bi_status && !orig_bio->bi_status)
+ orig_bio->bi_status = bio->bi_status;
+
+ if (md_io_clone->start_time)
+ bio_end_io_acct(orig_bio, md_io_clone->start_time);
+
+ bio_put(bio);
+ percpu_ref_put(&mddev->active_io);
+}
+EXPORT_SYMBOL_GPL(md_free_cloned_bio);
+
/* md_allow_write(mddev)
* Calling this ensures that the array is marked 'active' so that writes
* may proceed without blocking. It is important to call this before
@@ -9295,9 +9343,14 @@ static bool md_spares_need_change(struct mddev *mddev)
{
struct md_rdev *rdev;
- rdev_for_each(rdev, mddev)
- if (rdev_removeable(rdev) || rdev_addable(rdev))
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev_removeable(rdev) || rdev_addable(rdev)) {
+ rcu_read_unlock();
return true;
+ }
+ }
+ rcu_read_unlock();
return false;
}
@@ -9307,44 +9360,19 @@ static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *rdev;
int spares = 0;
int removed = 0;
- bool remove_some = false;
if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
/* Mustn't remove devices when resync thread is running */
return 0;
rdev_for_each(rdev, mddev) {
- if ((this == NULL || rdev == this) &&
- rdev->raid_disk >= 0 &&
- !test_bit(Blocked, &rdev->flags) &&
- test_bit(Faulty, &rdev->flags) &&
- atomic_read(&rdev->nr_pending)==0) {
- /* Faulty non-Blocked devices with nr_pending == 0
- * never get nr_pending incremented,
- * never get Faulty cleared, and never get Blocked set.
- * So we can synchronize_rcu now rather than once per device
- */
- remove_some = true;
- set_bit(RemoveSynchronized, &rdev->flags);
- }
- }
-
- if (remove_some)
- synchronize_rcu();
- rdev_for_each(rdev, mddev) {
- if ((this == NULL || rdev == this) &&
- (test_bit(RemoveSynchronized, &rdev->flags) ||
- rdev_removeable(rdev))) {
- if (mddev->pers->hot_remove_disk(
- mddev, rdev) == 0) {
- sysfs_unlink_rdev(mddev, rdev);
- rdev->saved_raid_disk = rdev->raid_disk;
- rdev->raid_disk = -1;
- removed++;
- }
+ if ((this == NULL || rdev == this) && rdev_removeable(rdev) &&
+ !mddev->pers->hot_remove_disk(mddev, rdev)) {
+ sysfs_unlink_rdev(mddev, rdev);
+ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->raid_disk = -1;
+ removed++;
}
- if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
- clear_bit(RemoveSynchronized, &rdev->flags);
}
if (removed && mddev->kobj.sd)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index ade83af123..375ad4a2df 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -190,11 +190,6 @@ enum flag_bits {
* than other devices in the array
*/
ClusterRemove,
- RemoveSynchronized, /* synchronize_rcu() was called after
- * this device was known to be faulty,
- * so it is safe to remove without
- * another synchronize_rcu() call.
- */
ExternalBbl, /* External metadata provides bad
* block management for a disk
*/
@@ -212,6 +207,7 @@ enum flag_bits {
* check if there is collision between raid1
* serial bios.
*/
+ Nonrot, /* non-rotational device (SSD) */
};
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -563,6 +559,37 @@ enum recovery_flags {
MD_RESYNCING_REMOTE, /* remote node is running resync thread */
};
+enum md_ro_state {
+ MD_RDWR,
+ MD_RDONLY,
+ MD_AUTO_READ,
+ MD_MAX_STATE
+};
+
+static inline bool md_is_rdwr(struct mddev *mddev)
+{
+ return (mddev->ro == MD_RDWR);
+}
+
+static inline bool reshape_interrupted(struct mddev *mddev)
+{
+ /* reshape never started */
+ if (mddev->reshape_position == MaxSector)
+ return false;
+
+ /* interrupted */
+ if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ return true;
+
+ /* running reshape will be interrupted soon. */
+ if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+ return true;
+
+ return false;
+}
+
static inline int __must_check mddev_lock(struct mddev *mddev)
{
return mutex_lock_interruptible(&mddev->reconfig_mutex);
@@ -622,6 +649,7 @@ struct md_personality
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
void (*update_reshape_pos) (struct mddev *mddev);
+ void (*prepare_suspend) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
@@ -755,6 +783,7 @@ extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
void md_account_bio(struct mddev *mddev, struct bio **bio);
+void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
@@ -783,9 +812,12 @@ extern void md_stop_writes(struct mddev *mddev);
extern int md_rdev_init(struct md_rdev *rdev);
extern void md_rdev_clear(struct md_rdev *rdev);
-extern void md_handle_request(struct mddev *mddev, struct bio *bio);
+extern bool md_handle_request(struct mddev *mddev, struct bio *bio);
extern int mddev_suspend(struct mddev *mddev, bool interruptible);
extern void mddev_resume(struct mddev *mddev);
+extern void md_idle_sync_thread(struct mddev *mddev);
+extern void md_frozen_sync_thread(struct mddev *mddev);
+extern void md_unfrozen_sync_thread(struct mddev *mddev);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
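md.h above gains the reshape_interrupted() helper: a reshape counts as interrupted once it has started but the recovery thread is not running, or when the WAIT/INTR/FROZEN recovery bits say it is about to stop. A simplified userspace sketch of the same test (not the kernel implementation):

/*
 * Simplified sketch of reshape_interrupted(); names and types are
 * flattened for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

enum { RECOVERY_RUNNING, RECOVERY_WAIT, RECOVERY_INTR, RECOVERY_FROZEN };

static bool test_bit(int nr, unsigned long flags) { return flags & (1ul << nr); }

static bool reshape_interrupted(unsigned long long reshape_pos,
                                unsigned long recovery)
{
        const unsigned long long MAX_SECTOR = ~0ull;

        if (reshape_pos == MAX_SECTOR)          /* reshape never started */
                return false;
        if (!test_bit(RECOVERY_RUNNING, recovery))
                return true;                    /* already interrupted */
        /* running reshape will be interrupted soon */
        return test_bit(RECOVERY_WAIT, recovery) ||
               test_bit(RECOVERY_INTR, recovery) ||
               test_bit(RECOVERY_FROZEN, recovery);
}

int main(void)
{
        printf("%d\n", reshape_interrupted(~0ull, 0));                      /* 0 */
        printf("%d\n", reshape_interrupted(1024, 1ul << RECOVERY_RUNNING)); /* 0 */
        printf("%d\n", reshape_interrupted(1024, (1ul << RECOVERY_RUNNING) |
                                                 (1ul << RECOVERY_INTR)));  /* 1 */
        return 0;
}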
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e138922d51..750a802478 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -600,16 +600,13 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
const sector_t this_sector = r1_bio->sector;
int sectors;
int best_good_sectors;
- int best_disk, best_dist_disk, best_pending_disk;
- int has_nonrot_disk;
+ int best_disk, best_dist_disk, best_pending_disk, sequential_disk;
int disk;
sector_t best_dist;
unsigned int min_pending;
struct md_rdev *rdev;
int choose_first;
- int choose_next_idle;
- rcu_read_lock();
/*
* Check if we can balance. We can balance on the whole
* device if no resync is going on, or below the resync window.
@@ -619,12 +616,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
sectors = r1_bio->sectors;
best_disk = -1;
best_dist_disk = -1;
+ sequential_disk = -1;
best_dist = MaxSector;
best_pending_disk = -1;
min_pending = UINT_MAX;
best_good_sectors = 0;
- has_nonrot_disk = 0;
- choose_next_idle = 0;
clear_bit(R1BIO_FailFast, &r1_bio->state);
if ((conf->mddev->recovery_cp < this_sector + sectors) ||
@@ -640,9 +636,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
sector_t first_bad;
int bad_sectors;
unsigned int pending;
- bool nonrot;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ rdev = conf->mirrors[disk].rdev;
if (r1_bio->bios[disk] == IO_BLOCKED
|| rdev == NULL
|| test_bit(Faulty, &rdev->flags))
@@ -706,8 +701,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
/* At least two disks to choose from so failfast is OK */
set_bit(R1BIO_FailFast, &r1_bio->state);
- nonrot = bdev_nonrot(rdev->bdev);
- has_nonrot_disk |= nonrot;
pending = atomic_read(&rdev->nr_pending);
dist = abs(this_sector - conf->mirrors[disk].head_position);
if (choose_first) {
@@ -720,7 +713,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
struct raid1_info *mirror = &conf->mirrors[disk];
- best_disk = disk;
/*
* If buffered sequential IO size exceeds optimal
* iosize, check if there is idle disk. If yes, choose
@@ -734,20 +726,27 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
* small, but not a big deal since when the second disk
* starts IO, the first disk is likely still busy.
*/
- if (nonrot && opt_iosize > 0 &&
+ if (test_bit(Nonrot, &rdev->flags) && opt_iosize > 0 &&
mirror->seq_start != MaxSector &&
mirror->next_seq_sect > opt_iosize &&
mirror->next_seq_sect - opt_iosize >=
mirror->seq_start) {
- choose_next_idle = 1;
- continue;
+ /*
+ * Add 'pending' to avoid choosing this disk if
+ * there is another idle disk.
+ */
+ pending++;
+ /*
+ * If there is no other idle disk, this disk
+ * will be chosen.
+ */
+ sequential_disk = disk;
+ } else {
+ best_disk = disk;
+ break;
}
- break;
}
- if (choose_next_idle)
- continue;
-
if (min_pending > pending) {
min_pending = pending;
best_pending_disk = disk;
@@ -760,20 +759,27 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
}
/*
+ * The sequential IO size exceeds the optimal iosize; however, there is no
+ * other idle disk, so choose the sequential disk.
+ */
+ if (best_disk == -1 && min_pending != 0)
+ best_disk = sequential_disk;
+
+ /*
* If all disks are rotational, choose the closest disk. If any disk is
* non-rotational, choose the disk with fewer pending requests even if the
* disk is rotational, which may or may not be optimal for raids with
* mixed rotational/non-rotational disks depending on workload.
*/
if (best_disk == -1) {
- if (has_nonrot_disk || min_pending == 0)
+ if (READ_ONCE(conf->nonrot_disks) || min_pending == 0)
best_disk = best_pending_disk;
else
best_disk = best_dist_disk;
}
if (best_disk >= 0) {
- rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
+ rdev = conf->mirrors[best_disk].rdev;
if (!rdev)
goto retry;
atomic_inc(&rdev->nr_pending);
@@ -784,7 +790,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
}
- rcu_read_unlock();
*max_sectors = sectors;
return best_disk;
@@ -1235,14 +1240,12 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
if (r1bio_existed) {
/* Need to get the block device name carefully */
- struct md_rdev *rdev;
- rcu_read_lock();
- rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
+ struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
+
if (rdev)
snprintf(b, sizeof(b), "%pg", rdev->bdev);
else
strcpy(b, "???");
- rcu_read_unlock();
}
/*
@@ -1396,10 +1399,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
disks = conf->raid_disks * 2;
blocked_rdev = NULL;
- rcu_read_lock();
max_sectors = r1_bio->sectors;
for (i = 0; i < disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
/*
* The write-behind io is only attempted on drives marked as
@@ -1465,7 +1467,6 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
}
r1_bio->bios[i] = bio;
}
- rcu_read_unlock();
if (unlikely(blocked_rdev)) {
/* Wait for this device to become unblocked */
@@ -1617,15 +1618,16 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev)
struct r1conf *conf = mddev->private;
int i;
+ lockdep_assert_held(&mddev->lock);
+
seq_printf(seq, " [%d/%d] [", conf->raid_disks,
conf->raid_disks - mddev->degraded);
- rcu_read_lock();
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = READ_ONCE(conf->mirrors[i].rdev);
+
seq_printf(seq, "%s",
rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
}
- rcu_read_unlock();
seq_printf(seq, "]");
}
@@ -1691,16 +1693,15 @@ static void print_conf(struct r1conf *conf)
pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
conf->raid_disks);
- rcu_read_lock();
+ lockdep_assert_held(&conf->mddev->reconfig_mutex);
for (i = 0; i < conf->raid_disks; i++) {
- struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
+ struct md_rdev *rdev = conf->mirrors[i].rdev;
if (rdev)
pr_debug(" disk %d, wo:%d, o:%d, dev:%pg\n",
i, !test_bit(In_sync, &rdev->flags),
!test_bit(Faulty, &rdev->flags),
rdev->bdev);
}
- rcu_read_unlock();
}
static void close_sync(struct r1conf *conf)
@@ -1767,6 +1768,52 @@ static int raid1_spare_active(struct mddev *mddev)
return count;
}
+static bool raid1_add_conf(struct r1conf *conf, struct md_rdev *rdev, int disk,
+ bool replacement)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+
+ if (replacement)
+ info += conf->raid_disks;
+
+ if (info->rdev)
+ return false;
+
+ if (bdev_nonrot(rdev->bdev)) {
+ set_bit(Nonrot, &rdev->flags);
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks + 1);
+ }
+
+ rdev->raid_disk = disk;
+ info->head_position = 0;
+ info->seq_start = MaxSector;
+ WRITE_ONCE(info->rdev, rdev);
+
+ return true;
+}
+
+static bool raid1_remove_conf(struct r1conf *conf, int disk)
+{
+ struct raid1_info *info = conf->mirrors + disk;
+ struct md_rdev *rdev = info->rdev;
+
+ if (!rdev || test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending))
+ return false;
+
+ /* Only remove non-faulty devices if recovery is not possible. */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ rdev->mddev->recovery_disabled != conf->recovery_disabled &&
+ rdev->mddev->degraded < conf->raid_disks)
+ return false;
+
+ if (test_and_clear_bit(Nonrot, &rdev->flags))
+ WRITE_ONCE(conf->nonrot_disks, conf->nonrot_disks - 1);
+
+ WRITE_ONCE(info->rdev, NULL);
+ return true;
+}
+
static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
@@ -1802,15 +1849,13 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
- p->head_position = 0;
- rdev->raid_disk = mirror;
+ raid1_add_conf(conf, rdev, mirror, false);
err = 0;
/* As all devices are equivalent, we don't need a full recovery
* if this was recently any drive of the array
*/
if (rdev->saved_raid_disk < 0)
conf->fullsync = 1;
- rcu_assign_pointer(p->rdev, rdev);
break;
}
if (test_bit(WantReplacement, &p->rdev->flags) &&
@@ -1820,13 +1865,11 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (err && repl_slot >= 0) {
/* Add this device as a replacement */
- p = conf->mirrors + repl_slot;
clear_bit(In_sync, &rdev->flags);
set_bit(Replacement, &rdev->flags);
- rdev->raid_disk = repl_slot;
+ raid1_add_conf(conf, rdev, repl_slot, true);
err = 0;
conf->fullsync = 1;
- rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
}
print_conf(conf);
@@ -1843,36 +1886,20 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
if (unlikely(number >= conf->raid_disks))
goto abort;
- if (rdev != p->rdev)
- p = conf->mirrors + conf->raid_disks + number;
+ if (rdev != p->rdev) {
+ number += conf->raid_disks;
+ p = conf->mirrors + number;
+ }
print_conf(conf);
if (rdev == p->rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
+ if (!raid1_remove_conf(conf, number)) {
err = -EBUSY;
goto abort;
}
- /* Only remove non-faulty devices if recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- mddev->degraded < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- if (!test_bit(RemoveSynchronized, &rdev->flags)) {
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- }
- }
- if (conf->mirrors[conf->raid_disks + number].rdev) {
+
+ if (number < conf->raid_disks &&
+ conf->mirrors[conf->raid_disks + number].rdev) {
/* We just removed a device that is being replaced.
* Move down the replacement. We drain all IO before
* doing this to avoid confusion.
@@ -1892,7 +1919,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
clear_bit(Replacement, &repl->flags);
- p->rdev = repl;
+ WRITE_ONCE(p->rdev, repl);
conf->mirrors[conf->raid_disks + number].rdev = NULL;
unfreeze_array(conf);
}
@@ -2290,8 +2317,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
sector_t first_bad;
int bad_sectors;
- rcu_read_lock();
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (rdev &&
(test_bit(In_sync, &rdev->flags) ||
(!test_bit(Faulty, &rdev->flags) &&
@@ -2299,15 +2325,14 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
is_badblock(rdev, sect, s,
&first_bad, &bad_sectors) == 0) {
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
if (sync_page_io(rdev, sect, s<<9,
conf->tmppage, REQ_OP_READ, false))
success = 1;
rdev_dec_pending(rdev, mddev);
if (success)
break;
- } else
- rcu_read_unlock();
+ }
+
d++;
if (d == conf->raid_disks * 2)
d = 0;
@@ -2326,29 +2351,24 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
if (d==0)
d = conf->raid_disks * 2;
d--;
- rcu_read_lock();
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (rdev &&
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
r1_sync_page_io(rdev, sect, s,
conf->tmppage, REQ_OP_WRITE);
rdev_dec_pending(rdev, mddev);
- } else
- rcu_read_unlock();
+ }
}
d = start;
while (d != read_disk) {
if (d==0)
d = conf->raid_disks * 2;
d--;
- rcu_read_lock();
- rdev = rcu_dereference(conf->mirrors[d].rdev);
+ rdev = conf->mirrors[d].rdev;
if (rdev &&
!test_bit(Faulty, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
if (r1_sync_page_io(rdev, sect, s,
conf->tmppage, REQ_OP_READ)) {
atomic_add(s, &rdev->corrected_errors);
@@ -2359,8 +2379,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
rdev->bdev);
}
rdev_dec_pending(rdev, mddev);
- } else
- rcu_read_unlock();
+ }
}
sectors -= s;
sect += s;
@@ -2741,7 +2760,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
r1_bio = raid1_alloc_init_r1buf(conf);
- rcu_read_lock();
/*
* If we get a correctably read error during resync or recovery,
* we might want to read from a different device. So we
@@ -2762,7 +2780,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
struct md_rdev *rdev;
bio = r1_bio->bios[i];
- rdev = rcu_dereference(conf->mirrors[i].rdev);
+ rdev = conf->mirrors[i].rdev;
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
if (i < conf->raid_disks)
@@ -2820,7 +2838,6 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_opf |= MD_FAILFAST;
}
}
- rcu_read_unlock();
if (disk < 0)
disk = wonly;
r1_bio->read_disk = disk;
@@ -3025,23 +3042,17 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
+ conf->raid_disks = mddev->raid_disks;
rdev_for_each(rdev, mddev) {
int disk_idx = rdev->raid_disk;
- if (disk_idx >= mddev->raid_disks
- || disk_idx < 0)
+
+ if (disk_idx >= conf->raid_disks || disk_idx < 0)
continue;
- if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + mddev->raid_disks + disk_idx;
- else
- disk = conf->mirrors + disk_idx;
- if (disk->rdev)
+ if (!raid1_add_conf(conf, rdev, disk_idx,
+ test_bit(Replacement, &rdev->flags)))
goto abort;
- disk->rdev = rdev;
- disk->head_position = 0;
- disk->seq_start = MaxSector;
}
- conf->raid_disks = mddev->raid_disks;
conf->mddev = mddev;
INIT_LIST_HEAD(&conf->retry_list);
INIT_LIST_HEAD(&conf->bio_end_io_list);
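The read_balance() rework above drops choose_next_idle: a disk in the middle of a large sequential read gets its pending count bumped and is remembered as sequential_disk, so an idle disk wins when one exists and the sequential disk is used only as a fallback. A much-simplified model of that selection (illustrative only, not the kernel code):

/*
 * Simplified model: penalize the sequential disk by one pending request
 * so a truly idle disk is preferred, but fall back to the sequential
 * disk when every disk is busy.
 */
#include <stdio.h>

#define NDISKS 3

static int pick_disk(const unsigned int pending[NDISKS], int sequential_disk)
{
        unsigned int min_pending = ~0u;
        int best = -1;

        for (int d = 0; d < NDISKS; d++) {
                unsigned int p = pending[d];

                if (d == sequential_disk)
                        p++;            /* penalize, but keep as fallback */
                if (p < min_pending) {
                        min_pending = p;
                        best = d;
                }
        }

        /* If nothing is idle, fall back to the sequential disk. */
        if (min_pending != 0 && sequential_disk >= 0)
                best = sequential_disk;
        return best;
}

int main(void)
{
        unsigned int idle_exists[NDISKS] = { 3, 0, 2 };
        unsigned int all_busy[NDISKS]    = { 3, 4, 2 };

        printf("%d\n", pick_disk(idle_exists, 0));  /* 1: the idle disk */
        printf("%d\n", pick_disk(all_busy, 0));     /* 0: the sequential disk */
        return 0;
}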
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 14d4211a12..5300cbaa58 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -71,6 +71,7 @@ struct r1conf {
* allow for replacements.
*/
int raid_disks;
+ int nonrot_disks;
spinlock_t device_lock;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b7b0a573e7..6e828a6aa0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2247,15 +2247,6 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
*rdevp = NULL;
- if (!test_bit(RemoveSynchronized, &rdev->flags)) {
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- *rdevp = rdev;
- goto abort;
- }
- }
if (p->replacement) {
/* We must have just cleared 'rdev' */
p->rdev = p->replacement;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6fe334bb95..e1d8b5199f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -763,6 +763,7 @@ enum stripe_result {
STRIPE_RETRY,
STRIPE_SCHEDULE_AND_RETRY,
STRIPE_FAIL,
+ STRIPE_WAIT_RESHAPE,
};
struct stripe_request_ctx {
@@ -2422,7 +2423,7 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
atomic_inc(&conf->active_stripes);
raid5_release_stripe(sh);
- conf->max_nr_stripes++;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
return 1;
}
@@ -2717,7 +2718,7 @@ static int drop_one_stripe(struct r5conf *conf)
shrink_buffers(sh);
free_stripe(conf->slab_cache, sh);
atomic_dec(&conf->active_stripes);
- conf->max_nr_stripes--;
+ WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
return 1;
}
@@ -5991,7 +5992,8 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
if (ahead_of_reshape(mddev, logical_sector,
conf->reshape_safe)) {
spin_unlock_irq(&conf->device_lock);
- return STRIPE_SCHEDULE_AND_RETRY;
+ ret = STRIPE_SCHEDULE_AND_RETRY;
+ goto out;
}
}
spin_unlock_irq(&conf->device_lock);
@@ -6070,6 +6072,12 @@ static enum stripe_result make_stripe_request(struct mddev *mddev,
out_release:
raid5_release_stripe(sh);
+out:
+ if (ret == STRIPE_SCHEDULE_AND_RETRY && reshape_interrupted(mddev)) {
+ bi->bi_status = BLK_STS_RESOURCE;
+ ret = STRIPE_WAIT_RESHAPE;
+ pr_err_ratelimited("dm-raid456: io across reshape position while reshape can't make progress");
+ }
return ret;
}
@@ -6191,7 +6199,7 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
while (1) {
res = make_stripe_request(mddev, conf, &ctx, logical_sector,
bi);
- if (res == STRIPE_FAIL)
+ if (res == STRIPE_FAIL || res == STRIPE_WAIT_RESHAPE)
break;
if (res == STRIPE_RETRY)
@@ -6229,6 +6237,11 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
if (rw == WRITE)
md_write_end(mddev);
+ if (res == STRIPE_WAIT_RESHAPE) {
+ md_free_cloned_bio(bi);
+ return false;
+ }
+
bio_endio(bi);
return true;
}
@@ -6878,7 +6891,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
if (size <= 16 || size > 32768)
return -EINVAL;
- conf->min_nr_stripes = size;
+ WRITE_ONCE(conf->min_nr_stripes, size);
mutex_lock(&conf->cache_size_mutex);
while (size < conf->max_nr_stripes &&
drop_one_stripe(conf))
@@ -6890,7 +6903,7 @@ raid5_set_cache_size(struct mddev *mddev, int size)
mutex_lock(&conf->cache_size_mutex);
while (size > conf->max_nr_stripes)
if (!grow_one_stripe(conf, GFP_KERNEL)) {
- conf->min_nr_stripes = conf->max_nr_stripes;
+ WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
result = -ENOMEM;
break;
}
@@ -7448,11 +7461,13 @@ static unsigned long raid5_cache_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct r5conf *conf = shrink->private_data;
+ int max_stripes = READ_ONCE(conf->max_nr_stripes);
+ int min_stripes = READ_ONCE(conf->min_nr_stripes);
- if (conf->max_nr_stripes < conf->min_nr_stripes)
+ if (max_stripes < min_stripes)
/* unlikely, but not impossible */
return 0;
- return conf->max_nr_stripes - conf->min_nr_stripes;
+ return max_stripes - min_stripes;
}
static struct r5conf *setup_conf(struct mddev *mddev)
@@ -8241,15 +8256,6 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
goto abort;
}
*rdevp = NULL;
- if (!test_bit(RemoveSynchronized, &rdev->flags)) {
- lockdep_assert_held(&mddev->reconfig_mutex);
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- rcu_assign_pointer(*rdevp, rdev);
- }
- }
if (!err) {
err = log_modify(conf, rdev, false);
if (err)
@@ -8990,6 +8996,18 @@ static int raid5_start(struct mddev *mddev)
return r5l_start(conf->log);
}
+/*
+ * This is only used for dm-raid456, where the caller has already frozen the
+ * sync_thread; hence if reshape is still in progress, IO that is waiting for
+ * the reshape can never be done now, so wake up and handle that IO.
+ */
+static void raid5_prepare_suspend(struct mddev *mddev)
+{
+ struct r5conf *conf = mddev->private;
+
+ wake_up(&conf->wait_for_overlap);
+}
+
static struct md_personality raid6_personality =
{
.name = "raid6",
@@ -9013,6 +9031,7 @@ static struct md_personality raid6_personality =
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid5_personality =
{
@@ -9037,6 +9056,7 @@ static struct md_personality raid5_personality =
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static struct md_personality raid4_personality =
@@ -9062,6 +9082,7 @@ static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.change_consistency_policy = raid5_change_consistency_policy,
+ .prepare_suspend = raid5_prepare_suspend,
};
static int __init raid5_init(void)
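raid5_cache_count() above snapshots max_nr_stripes and min_nr_stripes with READ_ONCE() before comparing them, since raid5_set_cache_size() now updates them with WRITE_ONCE() concurrently; the comparison and the subtraction therefore see one consistent pair of values. A userspace analogue using C11 relaxed atomics as a stand-in:

/*
 * Userspace analogue of the shrinker change above: load each counter
 * once into a local, then compute with the snapshot, so a concurrent
 * update cannot slip in between the comparison and the subtraction.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int max_nr_stripes = 256;
static _Atomic int min_nr_stripes = 128;

static unsigned long cache_count(void)
{
        int max = atomic_load_explicit(&max_nr_stripes, memory_order_relaxed);
        int min = atomic_load_explicit(&min_nr_stripes, memory_order_relaxed);

        if (max < min)          /* unlikely, but not impossible */
                return 0;
        return (unsigned long)(max - min);
}

int main(void)
{
        printf("%lu\n", cache_count());         /* 128 */
        return 0;
}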
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index a366566f22..642c48e8c1 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -113,6 +113,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
{
unsigned pat;
unsigned plane;
+ int ret = 0;
tpg->max_line_width = max_w;
for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++) {
@@ -121,14 +122,18 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
if (plane == 0)
continue;
tpg->downsampled_lines[pat][plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->downsampled_lines[pat][plane])
- return -ENOMEM;
+ if (!tpg->downsampled_lines[pat][plane]) {
+ ret = -ENOMEM;
+ goto free_lines;
+ }
}
}
for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
@@ -136,18 +141,45 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
tpg->contrast_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->contrast_line[plane])
- return -ENOMEM;
+ if (!tpg->contrast_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->black_line[plane] =
vzalloc(array_size(pixelsz, max_w));
- if (!tpg->black_line[plane])
- return -ENOMEM;
+ if (!tpg->black_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
tpg->random_line[plane] =
vzalloc(array3_size(max_w, 2, pixelsz));
- if (!tpg->random_line[plane])
- return -ENOMEM;
+ if (!tpg->random_line[plane]) {
+ ret = -ENOMEM;
+ goto free_contrast_line;
+ }
}
return 0;
+
+free_contrast_line:
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->contrast_line[plane]);
+ vfree(tpg->black_line[plane]);
+ vfree(tpg->random_line[plane]);
+ tpg->contrast_line[plane] = NULL;
+ tpg->black_line[plane] = NULL;
+ tpg->random_line[plane] = NULL;
+ }
+free_lines:
+ for (pat = 0; pat < TPG_MAX_PAT_LINES; pat++)
+ for (plane = 0; plane < TPG_MAX_PLANES; plane++) {
+ vfree(tpg->lines[pat][plane]);
+ tpg->lines[pat][plane] = NULL;
+ if (plane == 0)
+ continue;
+ vfree(tpg->downsampled_lines[pat][plane]);
+ tpg->downsampled_lines[pat][plane] = NULL;
+ }
+ return ret;
}
EXPORT_SYMBOL_GPL(tpg_alloc);
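tpg_alloc() above switches from returning -ENOMEM mid-loop to unwinding through the free_contrast_line/free_lines labels, so earlier allocations are released and their pointers cleared. A minimal example of that goto-unwind pattern, with hypothetical names:

/*
 * Minimal example of the unwind pattern adopted above: on a failed
 * allocation, jump to a label that frees everything allocated so far
 * and clears the pointers, instead of returning and leaking.
 */
#include <stdlib.h>

#define NBUFS 8

int alloc_buffers(char *bufs[NBUFS], size_t size)
{
        int i;

        for (i = 0; i < NBUFS; i++) {
                bufs[i] = malloc(size);
                if (!bufs[i])
                        goto free_bufs;
        }
        return 0;

free_bufs:
        while (--i >= 0) {
                free(bufs[i]);
                bufs[i] = NULL;
        }
        return -1;              /* -ENOMEM analogue */
}

int main(void)
{
        char *bufs[NBUFS] = { 0 };

        if (alloc_buffers(bufs, 4096))
                return 1;
        for (int i = 0; i < NBUFS; i++)
                free(bufs[i]);
        return 0;
}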
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 49f0eb7d0b..733d0bc4b4 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -490,6 +490,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
if (!dvbdevfops) {
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -498,6 +499,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (!new_node) {
kfree(dvbdevfops);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return -ENOMEM;
}
@@ -531,6 +533,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
}
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
@@ -553,6 +556,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return ret;
}
@@ -571,6 +575,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
dvb_media_device_free(dvbdev);
list_del(&dvbdev->list_head);
kfree(dvbdev);
+ *pdvbdev = NULL;
mutex_unlock(&dvbdev_register_lock);
return PTR_ERR(clsdev);
}
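The dvb_register_device() hunks above clear *pdvbdev on every error path, so the caller is never left holding a pointer to freed memory. A small illustration of that out-pointer discipline, with hypothetical names:

/*
 * Illustration of clearing the caller-provided out pointer on failure,
 * so a failed registration cannot leave a dangling pointer behind.
 */
#include <stdio.h>
#include <stdlib.h>

struct device { int id; };

static int register_device(struct device **out, int fail)
{
        struct device *dev = malloc(sizeof(*dev));

        if (!dev) {
                *out = NULL;
                return -1;
        }
        *out = dev;

        if (fail) {             /* a later registration step fails */
                free(dev);
                *out = NULL;    /* don't leave a dangling pointer behind */
                return -1;
        }
        dev->id = 42;
        return 0;
}

int main(void)
{
        struct device *dev = (void *)0x1;       /* stale garbage on purpose */

        if (register_device(&dev, 1))
                printf("failed, dev=%p\n", (void *)dev);
        free(dev);              /* safe: free(NULL) is a no-op */
        return 0;
}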
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 4832643448..72540ef4e5 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -118,50 +118,32 @@ static const s32 stv0367cab_RF_LookUp2[RF_LOOKUP_TABLE2_SIZE][RF_LOOKUP_TABLE2_S
}
};
-static
-int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
+static noinline_for_stack
+int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
{
- u8 buf[MAX_XFER_SIZE];
+ u8 buf[3] = { MSB(reg), LSB(reg), data };
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
- .len = len + 2
+ .len = 3,
};
int ret;
- if (2 + len > sizeof(buf)) {
- printk(KERN_WARNING
- "%s: i2c wr reg=%04x: len=%d is too big!\n",
- KBUILD_MODNAME, reg, len);
- return -EINVAL;
- }
-
-
- buf[0] = MSB(reg);
- buf[1] = LSB(reg);
- memcpy(buf + 2, data, len);
-
if (i2cdebug)
printk(KERN_DEBUG "%s: [%02x] %02x: %02x\n", __func__,
- state->config->demod_address, reg, buf[2]);
+ state->config->demod_address, reg, data);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk(KERN_ERR "%s: i2c write error! ([%02x] %02x: %02x)\n",
- __func__, state->config->demod_address, reg, buf[2]);
+ __func__, state->config->demod_address, reg, data);
return (ret != 1) ? -EREMOTEIO : 0;
}
-static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
-{
- u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
-
- return stv0367_writeregs(state, reg, &tmp, 1);
-}
-
-static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
+static noinline_for_stack
+u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
{
u8 b0[] = { 0, 0 };
u8 b1[] = { 0 };
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index c6fea5837a..6dacd38ae9 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -150,10 +150,10 @@
#define IMX290_PIXEL_ARRAY_WIDTH 1945
#define IMX290_PIXEL_ARRAY_HEIGHT 1097
-#define IMX920_PIXEL_ARRAY_MARGIN_LEFT 12
-#define IMX920_PIXEL_ARRAY_MARGIN_RIGHT 13
-#define IMX920_PIXEL_ARRAY_MARGIN_TOP 8
-#define IMX920_PIXEL_ARRAY_MARGIN_BOTTOM 9
+#define IMX290_PIXEL_ARRAY_MARGIN_LEFT 12
+#define IMX290_PIXEL_ARRAY_MARGIN_RIGHT 13
+#define IMX290_PIXEL_ARRAY_MARGIN_TOP 8
+#define IMX290_PIXEL_ARRAY_MARGIN_BOTTOM 9
#define IMX290_PIXEL_ARRAY_RECORDING_WIDTH 1920
#define IMX290_PIXEL_ARRAY_RECORDING_HEIGHT 1080
@@ -1161,10 +1161,10 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
* The sensor moves the readout by 1 pixel based on flips to
* keep the Bayer order the same.
*/
- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP
+ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP
+ (IMX290_PIXEL_ARRAY_RECORDING_HEIGHT - format->height) / 2
+ imx290->vflip->val;
- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT
+ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT
+ (IMX290_PIXEL_ARRAY_RECORDING_WIDTH - format->width) / 2
+ imx290->hflip->val;
sel->r.width = format->width;
@@ -1183,8 +1183,8 @@ static int imx290_get_selection(struct v4l2_subdev *sd,
return 0;
case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = IMX920_PIXEL_ARRAY_MARGIN_TOP;
- sel->r.left = IMX920_PIXEL_ARRAY_MARGIN_LEFT;
+ sel->r.top = IMX290_PIXEL_ARRAY_MARGIN_TOP;
+ sel->r.left = IMX290_PIXEL_ARRAY_MARGIN_LEFT;
sel->r.width = IMX290_PIXEL_ARRAY_RECORDING_WIDTH;
sel->r.height = IMX290_PIXEL_ARRAY_RECORDING_HEIGHT;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 2785935da4..558152575d 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -2091,9 +2091,6 @@ static int tc358743_probe(struct i2c_client *client)
state->mbus_fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
sd->dev = &client->dev;
- err = v4l2_async_register_subdev(sd);
- if (err < 0)
- goto err_hdl;
mutex_init(&state->confctl_mutex);
@@ -2151,6 +2148,10 @@ static int tc358743_probe(struct i2c_client *client)
if (err)
goto err_work_queues;
+ err = v4l2_async_register_subdev(sd);
+ if (err < 0)
+ goto err_work_queues;
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 543a392f86..0e28b9a793 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -535,14 +535,15 @@ static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
/*
* Move the top entry link cursor to the next link. If all links of the entry
- * have been visited, pop the entry itself.
+ * have been visited, pop the entry itself. Return true if the entry has been
+ * popped.
*/
-static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+static bool media_pipeline_walk_pop(struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry;
if (WARN_ON(walk->stack.top < 0))
- return;
+ return false;
entry = media_pipeline_walk_top(walk);
@@ -552,7 +553,7 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
walk->stack.top);
walk->stack.top--;
- return;
+ return true;
}
entry->links = entry->links->next;
@@ -560,6 +561,8 @@ static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
dev_dbg(walk->mdev->dev,
"media pipeline: moved entry %u to next link\n",
walk->stack.top);
+
+ return false;
}
/* Free all memory allocated while walking the pipeline. */
@@ -605,30 +608,24 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
struct media_pipeline_walk *walk)
{
struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
- struct media_pad *pad;
+ struct media_pad *origin;
struct media_link *link;
struct media_pad *local;
struct media_pad *remote;
+ bool last_link;
int ret;
- pad = entry->pad;
+ origin = entry->pad;
link = list_entry(entry->links, typeof(*link), list);
- media_pipeline_walk_pop(walk);
+ last_link = media_pipeline_walk_pop(walk);
dev_dbg(walk->mdev->dev,
"media pipeline: exploring link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index);
- /* Skip links that are not enabled. */
- if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
- dev_dbg(walk->mdev->dev,
- "media pipeline: skipping link (disabled)\n");
- return 0;
- }
-
/* Get the local pad and remote pad. */
- if (link->source->entity == pad->entity) {
+ if (link->source->entity == origin->entity) {
local = link->source;
remote = link->sink;
} else {
@@ -640,25 +637,64 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
* Skip links that originate from a different pad than the incoming pad
* that is not connected internally in the entity to the incoming pad.
*/
- if (pad != local &&
- !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
+ if (origin != local &&
+ !media_entity_has_pad_interdep(origin->entity, origin->index,
+ local->index)) {
dev_dbg(walk->mdev->dev,
"media pipeline: skipping link (no route)\n");
- return 0;
+ goto done;
}
/*
- * Add the local and remote pads of the link to the pipeline and push
- * them to the stack, if they're not already present.
+ * Add the local pad of the link to the pipeline and push it to the
+ * stack, if not already present.
*/
ret = media_pipeline_add_pad(pipe, walk, local);
if (ret)
return ret;
+ /* Similarly, add the remote pad, but only if the link is enabled. */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (disabled)\n");
+ goto done;
+ }
+
ret = media_pipeline_add_pad(pipe, walk, remote);
if (ret)
return ret;
+done:
+ /*
+ * If we're done iterating over links, iterate over pads of the entity.
+ * This is necessary to discover pads that are not connected with any
+ * link. Those are dead ends from a pipeline exploration point of view,
+ * but are still part of the pipeline and need to be added to enable
+ * proper validation.
+ */
+ if (!last_link)
+ return 0;
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: adding unconnected pads of '%s'\n",
+ local->entity->name);
+
+ media_entity_for_each_pad(origin->entity, local) {
+ /*
+ * Skip the origin pad (already handled), pads that have links
+ * (already discovered through iterating over links) and pads
+ * not internally connected.
+ */
+ if (origin == local || !local->num_links ||
+ !media_entity_has_pad_interdep(origin->entity, origin->index,
+ local->index))
+ continue;
+
+ ret = media_pipeline_add_pad(pipe, walk, local);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -770,7 +806,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
struct media_pad *pad = ppad->pad;
struct media_entity *entity = pad->entity;
bool has_enabled_link = false;
- bool has_link = false;
struct media_link *link;
dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
@@ -800,7 +835,6 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
/* Record if the pad has links and enabled links. */
if (link->flags & MEDIA_LNK_FL_ENABLED)
has_enabled_link = true;
- has_link = true;
/*
* Validate the link if it's enabled and has the
@@ -838,7 +872,7 @@ __must_check int __media_pipeline_start(struct media_pad *pad,
* 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
* ensure that it has either no link or an enabled link.
*/
- if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
+ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) &&
!has_enabled_link) {
dev_dbg(mdev->dev,
"Pad '%s':%u must be connected by an enabled link\n",
@@ -1038,6 +1072,9 @@ static void __media_entity_remove_link(struct media_entity *entity,
/* Remove the reverse links for a data link. */
if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
+ link->source->num_links--;
+ link->sink->num_links--;
+
if (link->source->entity == entity)
remote = link->sink->entity;
else
@@ -1092,6 +1129,11 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
struct media_link *link;
struct media_link *backlink;
+ if (flags & MEDIA_LNK_FL_LINK_TYPE)
+ return -EINVAL;
+
+ flags |= MEDIA_LNK_FL_DATA_LINK;
+
if (WARN_ON(!source || !sink) ||
WARN_ON(source_pad >= source->num_pads) ||
WARN_ON(sink_pad >= sink->num_pads))
@@ -1107,7 +1149,7 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
link->source = &source->pads[source_pad];
link->sink = &sink->pads[sink_pad];
- link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
+ link->flags = flags;
/* Initialize graph object embedded at the new link */
media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
@@ -1138,6 +1180,9 @@ media_create_pad_link(struct media_entity *source, u16 source_pad,
sink->num_links++;
source->num_links++;
+ link->source->num_links++;
+ link->sink->num_links++;
+
return 0;
}
EXPORT_SYMBOL_GPL(media_create_pad_link);
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
index 2a6b828fd8..e111fd6ff6 100644
--- a/drivers/media/pci/intel/ivsc/mei_csi.c
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -71,8 +71,8 @@ enum ivsc_privacy_status {
};
enum csi_pads {
- CSI_PAD_SOURCE,
CSI_PAD_SINK,
+ CSI_PAD_SOURCE,
CSI_NUM_PADS
};
@@ -584,7 +584,7 @@ static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
csi->remote_pad = pad;
return media_create_pad_link(&subdev->entity, pad,
- &csi->subdev.entity, 1,
+ &csi->subdev.entity, CSI_PAD_SINK,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 230b104a7c..a47c5850ef 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -1463,7 +1463,8 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
budget_av->has_saa7113 = 1;
err = saa7146_vv_init(dev, &vv_data);
if (err != 0) {
- /* fixme: proper cleanup here */
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
ERR("cannot init vv subsystem\n");
return err;
}
@@ -1472,9 +1473,10 @@ static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extensio
vv_data.vid_ops.vidioc_s_input = vidioc_s_input;
if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_VIDEO))) {
- /* fixme: proper cleanup here */
- ERR("cannot register capture v4l2 device\n");
saa7146_vv_release(dev);
+ ttpci_budget_deinit(&budget_av->budget);
+ kfree(budget_av);
+ ERR("cannot register capture v4l2 device\n");
return err;
}
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 889f4fbbaf..c2a979397b 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -465,7 +465,7 @@ static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity,
- s_subdev->fwnode,
+ asd->match.fwnode,
MEDIA_PAD_FL_SOURCE);
if (csi2rx->source_pad < 0) {
dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n",
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
index b065ccd069..378a1cba01 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_vpu.c
@@ -26,7 +26,7 @@ static void mtk_mdp_vpu_handle_init_ack(const struct mdp_ipi_comm_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
}
-static void mtk_mdp_vpu_ipi_handler(const void *data, unsigned int len,
+static void mtk_mdp_vpu_ipi_handler(void *data, unsigned int len,
void *priv)
{
const struct mdp_ipi_comm_ack *msg = data;
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index 9f6e4b5945..4c34344dc7 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -29,15 +29,7 @@ static int mtk_vcodec_vpu_set_ipi_register(struct mtk_vcodec_fw *fw, int id,
mtk_vcodec_ipi_handler handler,
const char *name, void *priv)
{
- /*
- * The handler we receive takes a void * as its first argument. We
- * cannot change this because it needs to be passed down to the rproc
- * subsystem when SCP is used. VPU takes a const argument, which is
- * more constrained, so the conversion below is safe.
- */
- ipi_handler_t handler_const = (ipi_handler_t)handler;
-
- return vpu_ipi_register(fw->pdev, id, handler_const, name, priv);
+ return vpu_ipi_register(fw->pdev, id, handler, name, priv);
}
static int mtk_vcodec_vpu_ipi_send(struct mtk_vcodec_fw *fw, int id, void *buf,
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
index 7243604a82..724ae7c2ab 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
@@ -635,7 +635,7 @@ OUT_LOAD_FW:
}
EXPORT_SYMBOL_GPL(vpu_load_firmware);
-static void vpu_init_ipi_handler(const void *data, unsigned int len, void *priv)
+static void vpu_init_ipi_handler(void *data, unsigned int len, void *priv)
{
struct mtk_vpu *vpu = priv;
const struct vpu_run *run = data;
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.h b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
index a56053ff13..da05f3e740 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.h
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.h
@@ -17,7 +17,7 @@
* VPU interfaces with other blocks by share memory and interrupt.
*/
-typedef void (*ipi_handler_t) (const void *data,
+typedef void (*ipi_handler_t) (void *data,
unsigned int len,
void *priv);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index 792f031e03..c9a4d091b5 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -161,7 +161,6 @@ mxc_isi_crossbar_xlate_streams(struct mxc_isi_crossbar *xbar,
pad = media_pad_remote_pad_first(&xbar->pads[sink_pad]);
sd = media_entity_to_v4l2_subdev(pad->entity);
-
if (!sd) {
dev_dbg(xbar->isi->dev,
"no entity connected to crossbar input %u\n",
@@ -465,7 +464,8 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
}
for (i = 0; i < xbar->num_sinks; ++i)
- xbar->pads[i].flags = MEDIA_PAD_FL_SINK;
+ xbar->pads[i].flags = MEDIA_PAD_FL_SINK
+ | MEDIA_PAD_FL_MUST_CONNECT;
for (i = 0; i < xbar->num_sources; ++i)
xbar->pads[i + xbar->num_sinks].flags = MEDIA_PAD_FL_SOURCE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index c6d7e01c89..3752b702e2 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -725,6 +725,9 @@ irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
unsigned int i;
u32 status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index 2d7f06281c..a4e272adc1 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -449,6 +449,7 @@ struct rkisp1_debug {
* @debug: debug params to be exposed on debugfs
* @info: version-specific ISP information
* @irqs: IRQ line numbers
+ * @irqs_enabled: the hardware is enabled and can cause interrupts
*/
struct rkisp1_device {
void __iomem *base_addr;
@@ -470,6 +471,7 @@ struct rkisp1_device {
struct rkisp1_debug debug;
const struct rkisp1_info *info;
int irqs[RKISP1_NUM_IRQS];
+ bool irqs_enabled;
};
/*
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index 702adee833..7320c1c72e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -196,6 +196,9 @@ irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index acc559652d..73cf08a740 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -305,6 +305,24 @@ static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
+ rkisp1->irqs_enabled = false;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
+ /*
+ * Wait until any running IRQ handler has returned. The IRQ handler
+ * may get called even after this (as it's a shared interrupt line)
+ * but the 'irqs_enabled' flag will make the handler return immediately.
+ */
+ for (unsigned int il = 0; il < ARRAY_SIZE(rkisp1->irqs); ++il) {
+ if (rkisp1->irqs[il] == -1)
+ continue;
+
+ /* Skip if the irq line is the same as previous */
+ if (il == 0 || rkisp1->irqs[il - 1] != rkisp1->irqs[il])
+ synchronize_irq(rkisp1->irqs[il]);
+ }
+
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
return pinctrl_pm_select_sleep_state(dev);
}
@@ -321,6 +339,10 @@ static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
if (ret)
return ret;
+ rkisp1->irqs_enabled = true;
+ /* Make sure the IRQ handler will see the above */
+ mb();
+
return 0;
}
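
The rkisp1 hunks above combine three pieces: an irqs_enabled flag tested at the top of every ISR, a memory barrier after clearing it, and a synchronize_irq() loop that skips duplicate entries of a shared line before the clocks go off. A minimal user-space analogue of that shutdown sequence, using C11 atomics and invented names (demo_irqs, fake_sync_irq), might look like this sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_NUM_IRQS 3

static atomic_bool irqs_enabled = true;
/* A shared line shows up twice, as it can for the rkisp1 variants. */
static int demo_irqs[DEMO_NUM_IRQS] = { 35, 35, 41 };

/* Stand-in for synchronize_irq(): here it only logs the call. */
static void fake_sync_irq(int irq)
{
        printf("synchronize irq %d\n", irq);
}

/* Mirrors the ISRs above: bail out once the flag has been cleared. */
static bool demo_isr(void)
{
        if (!atomic_load(&irqs_enabled))
                return false;   /* IRQ_NONE in the driver */
        /* ... read and handle the hardware status here ... */
        return true;
}

static void demo_runtime_suspend(void)
{
        atomic_store(&irqs_enabled, false);
        /* The mb() in the patch: make the flag visible to the handler. */
        atomic_thread_fence(memory_order_seq_cst);

        for (int i = 0; i < DEMO_NUM_IRQS; i++) {
                if (demo_irqs[i] == -1)
                        continue;
                /* Skip consecutive duplicates of a shared line. */
                if (i == 0 || demo_irqs[i - 1] != demo_irqs[i])
                        fake_sync_irq(demo_irqs[i]);
        }
}

int main(void)
{
        demo_runtime_suspend();
        printf("isr handled after suspend: %d\n", demo_isr());
        return 0;
}
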
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 5fbc47bda6..caffea6a46 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -971,6 +971,9 @@ irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
+ if (!rkisp1->irqs_enabled)
+ return IRQ_NONE;
+
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
return IRQ_NONE;
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 90ab1d77b6..f7ff093782 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -66,6 +66,7 @@ static void deinterlace_device_run(void *priv)
struct vb2_v4l2_buffer *src, *dst;
unsigned int hstep, vstep;
dma_addr_t addr;
+ int i;
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -160,6 +161,26 @@ static void deinterlace_device_run(void *priv)
deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep);
deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep);
+ /* neutral filter coefficients */
+ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS);
+ readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
+ val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
+
+ for (i = 0; i < 32; i++) {
+ deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
+ DEINTERLACE_IDENTITY_COEF);
+ }
+
+ deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
+ DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+
deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL,
DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK,
DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field));
@@ -248,7 +269,6 @@ static irqreturn_t deinterlace_irq(int irq, void *data)
static void deinterlace_init(struct deinterlace_dev *dev)
{
u32 val;
- int i;
deinterlace_write(dev, DEINTERLACE_BYPASS,
DEINTERLACE_BYPASS_CSC);
@@ -284,27 +304,7 @@ static void deinterlace_init(struct deinterlace_dev *dev)
deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF,
DEINTERLACE_CHROMA_DIFF_TH_MSK,
- DEINTERLACE_CHROMA_DIFF_TH(5));
-
- /* neutral filter coefficients */
- deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL,
- DEINTERLACE_FRM_CTRL_COEF_ACCESS);
- readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val,
- val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40);
-
- for (i = 0; i < 32; i++) {
- deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4,
- DEINTERLACE_IDENTITY_COEF);
- }
-
- deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL,
- DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0);
+ DEINTERLACE_CHROMA_DIFF_TH(31));
}
static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file)
@@ -929,11 +929,18 @@ static int deinterlace_runtime_resume(struct device *device)
return ret;
}
+ ret = reset_control_deassert(dev->rstc);
+ if (ret) {
+ dev_err(dev->dev, "Failed to apply reset\n");
+
+ goto err_exclusive_rate;
+ }
+
ret = clk_prepare_enable(dev->bus_clk);
if (ret) {
dev_err(dev->dev, "Failed to enable bus clock\n");
- goto err_exclusive_rate;
+ goto err_rst;
}
ret = clk_prepare_enable(dev->mod_clk);
@@ -950,23 +957,16 @@ static int deinterlace_runtime_resume(struct device *device)
goto err_mod_clk;
}
- ret = reset_control_deassert(dev->rstc);
- if (ret) {
- dev_err(dev->dev, "Failed to apply reset\n");
-
- goto err_ram_clk;
- }
-
deinterlace_init(dev);
return 0;
-err_ram_clk:
- clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
clk_disable_unprepare(dev->mod_clk);
err_bus_clk:
clk_disable_unprepare(dev->bus_clk);
+err_rst:
+ reset_control_assert(dev->rstc);
err_exclusive_rate:
clk_rate_exclusive_put(dev->mod_clk);
@@ -977,11 +977,12 @@ static int deinterlace_runtime_suspend(struct device *device)
{
struct deinterlace_dev *dev = dev_get_drvdata(device);
- reset_control_assert(dev->rstc);
-
clk_disable_unprepare(dev->ram_clk);
clk_disable_unprepare(dev->mod_clk);
clk_disable_unprepare(dev->bus_clk);
+
+ reset_control_assert(dev->rstc);
+
clk_rate_exclusive_put(dev->mod_clk);
return 0;
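
The reordered runtime-resume path above deasserts the reset line before any clock is enabled and unwinds in exact reverse order through the error labels; suspend mirrors this by asserting reset only after the clocks are off. Reduced to a compilable toy, with all demo_* helpers standing in for the real reset/clock calls, the shape is:

#include <stdio.h>

static int demo_deassert_reset(void)        { printf("reset deasserted\n"); return 0; }
static void demo_assert_reset(void)         { printf("reset asserted\n"); }
static int demo_enable_clk(const char *n)   { printf("%s clock on\n", n); return 0; }
static void demo_disable_clk(const char *n) { printf("%s clock off\n", n); }

/* Post-fix ordering: reset first, clocks after; unwind in reverse on error. */
static int demo_runtime_resume(void)
{
        int ret;

        ret = demo_deassert_reset();
        if (ret)
                return ret;

        ret = demo_enable_clk("bus");
        if (ret)
                goto err_rst;

        ret = demo_enable_clk("mod");
        if (ret)
                goto err_bus_clk;

        return 0;

err_bus_clk:
        demo_disable_clk("bus");
err_rst:
        demo_assert_reset();
        return ret;
}

int main(void)
{
        return demo_runtime_resume();
}
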
diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c
index 57ded9ff3f..29bc63021c 100644
--- a/drivers/media/tuners/xc4000.c
+++ b/drivers/media/tuners/xc4000.c
@@ -1515,10 +1515,10 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
{
struct xc4000_priv *priv = fe->tuner_priv;
+ mutex_lock(&priv->lock);
*freq = priv->freq_hz + priv->freq_offset;
if (debug) {
- mutex_lock(&priv->lock);
if ((priv->cur_fw.type
& (BASE | FM | DTV6 | DTV7 | DTV78 | DTV8)) == BASE) {
u16 snr = 0;
@@ -1529,8 +1529,8 @@ static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
return 0;
}
}
- mutex_unlock(&priv->lock);
}
+ mutex_unlock(&priv->lock);
dprintk(1, "%s()\n", __func__);
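
The xc4000 hunk widens the critical section so that the freq_hz + freq_offset read happens under the same mutex the writers hold, rather than only locking the debug branch. The same pattern, shrunk to a pthread example with invented demo_* names:

#include <pthread.h>
#include <stdio.h>

struct demo_tuner {
        pthread_mutex_t lock;
        unsigned int freq_hz;
        unsigned int freq_offset;
};

/* Post-fix shape: the whole combined read sits inside the critical section. */
static unsigned int demo_get_frequency(struct demo_tuner *t)
{
        unsigned int freq;

        pthread_mutex_lock(&t->lock);
        freq = t->freq_hz + t->freq_offset;
        pthread_mutex_unlock(&t->lock);
        return freq;
}

static void demo_set_frequency(struct demo_tuner *t, unsigned int hz, unsigned int off)
{
        pthread_mutex_lock(&t->lock);
        t->freq_hz = hz;
        t->freq_offset = off;
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct demo_tuner t = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        demo_set_frequency(&t, 474000000, 2250000);
        printf("freq = %u\n", demo_get_frequency(&t));
        return 0;
}
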
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 4d037c92af..bae76023cf 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -4094,6 +4094,10 @@ static int em28xx_usb_probe(struct usb_interface *intf,
* topology will likely change after the load of the em28xx subdrivers.
*/
#ifdef CONFIG_MEDIA_CONTROLLER
+ /*
+ * No need to check the return value, the device will still be
+ * usable without media controller API.
+ */
retval = media_device_register(dev->media_dev);
#endif
diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
index 0c24e29843..eb03f98b2e 100644
--- a/drivers/media/usb/go7007/go7007-driver.c
+++ b/drivers/media/usb/go7007/go7007-driver.c
@@ -80,7 +80,7 @@ static int go7007_load_encoder(struct go7007 *go)
const struct firmware *fw_entry;
char fw_name[] = "go7007/go7007fw.bin";
void *bounce;
- int fw_len, rv = 0;
+ int fw_len;
u16 intr_val, intr_data;
if (go->boot_fw == NULL) {
@@ -109,9 +109,11 @@ static int go7007_load_encoder(struct go7007 *go)
go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x5a5a) {
v4l2_err(go, "error transferring firmware\n");
- rv = -1;
+ kfree(go->boot_fw);
+ go->boot_fw = NULL;
+ return -1;
}
- return rv;
+ return 0;
}
MODULE_FIRMWARE("go7007/go7007fw.bin");
diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
index eeb85981e0..762c13e49b 100644
--- a/drivers/media/usb/go7007/go7007-usb.c
+++ b/drivers/media/usb/go7007/go7007-usb.c
@@ -1201,7 +1201,9 @@ static int go7007_usb_probe(struct usb_interface *intf,
u16 channel;
/* read channel number from GPIO[1:0] */
- go7007_read_addr(go, 0x3c81, &channel);
+ if (go7007_read_addr(go, 0x3c81, &channel))
+ goto allocfail;
+
channel &= 0x3;
go->board_id = GO7007_BOARDID_ADLINK_MPG24;
usb->board = board = &board_adlink_mpg24;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-context.c b/drivers/media/usb/pvrusb2/pvrusb2-context.c
index 1764674de9..73c95ba232 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-context.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-context.c
@@ -90,8 +90,10 @@ static void pvr2_context_destroy(struct pvr2_context *mp)
}
-static void pvr2_context_notify(struct pvr2_context *mp)
+static void pvr2_context_notify(void *ptr)
{
+ struct pvr2_context *mp = ptr;
+
pvr2_context_set_notify(mp,!0);
}
@@ -106,9 +108,7 @@ static void pvr2_context_check(struct pvr2_context *mp)
pvr2_trace(PVR2_TRACE_CTXT,
"pvr2_context %p (initialize)", mp);
/* Finish hardware initialization */
- if (pvr2_hdw_initialize(mp->hdw,
- (void (*)(void *))pvr2_context_notify,
- mp)) {
+ if (pvr2_hdw_initialize(mp->hdw, pvr2_context_notify, mp)) {
mp->video_stream.stream =
pvr2_hdw_get_video_stream(mp->hdw);
/* Trigger interface initialization. By doing this
@@ -267,9 +267,9 @@ static void pvr2_context_exit(struct pvr2_context *mp)
void pvr2_context_disconnect(struct pvr2_context *mp)
{
pvr2_hdw_disconnect(mp->hdw);
- mp->disconnect_flag = !0;
if (!pvr2_context_shutok())
pvr2_context_notify(mp);
+ mp->disconnect_flag = !0;
}
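
The pvrusb2 changes drop function-pointer casts such as (void (*)(void *))pvr2_context_notify and instead give every notify helper a void * parameter that is cast back to its real type inside the function. A stand-alone sketch of that callback pattern (the demo_* names are invented for illustration):

#include <stdio.h>

typedef void (*demo_callback_t)(void *ptr);

struct demo_stream {
        demo_callback_t cb;
        void *cb_data;
};

struct demo_context {
        int notify_count;
};

/* Correctly typed: take void *, recover the real type inside. */
static void demo_notify(void *ptr)
{
        struct demo_context *ctx = ptr;

        ctx->notify_count++;
}

static void demo_set_callback(struct demo_stream *sp, demo_callback_t cb, void *data)
{
        sp->cb = cb;
        sp->cb_data = data;
}

int main(void)
{
        struct demo_context ctx = { 0 };
        struct demo_stream stream;

        /* No function-pointer cast needed, unlike the old code. */
        demo_set_callback(&stream, demo_notify, &ctx);
        stream.cb(stream.cb_data);
        printf("notify_count = %d\n", ctx.notify_count);
        return 0;
}
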
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
index 26811efe0f..9a9bae21c6 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-dvb.c
@@ -88,8 +88,10 @@ static int pvr2_dvb_feed_thread(void *data)
return stat;
}
-static void pvr2_dvb_notify(struct pvr2_dvb_adapter *adap)
+static void pvr2_dvb_notify(void *ptr)
{
+ struct pvr2_dvb_adapter *adap = ptr;
+
wake_up(&adap->buffer_wait_data);
}
@@ -149,7 +151,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
}
pvr2_stream_set_callback(pvr->video_stream.stream,
- (pvr2_stream_callback) pvr2_dvb_notify, adap);
+ pvr2_dvb_notify, adap);
ret = pvr2_stream_set_buffer_count(stream, PVR2_DVB_BUFFER_COUNT);
if (ret < 0) return ret;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index c04ab7258d..d608b793fa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -1033,8 +1033,10 @@ static int pvr2_v4l2_open(struct file *file)
}
-static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
+static void pvr2_v4l2_notify(void *ptr)
{
+ struct pvr2_v4l2_fh *fhp = ptr;
+
wake_up(&fhp->wait_data);
}
@@ -1067,7 +1069,7 @@ static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
hdw = fh->channel.mc_head->hdw;
sp = fh->pdi->stream->stream;
- pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
+ pvr2_stream_set_callback(sp, pvr2_v4l2_notify, fh);
pvr2_hdw_set_stream_type(hdw,fh->pdi->config);
if ((ret = pvr2_hdw_set_streaming(hdw,!0)) < 0) return ret;
return pvr2_ioread_set_enabled(fh->rhp,!0);
@@ -1198,11 +1200,6 @@ static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
dip->minor_type = pvr2_v4l_type_video;
nr_ptr = video_nr;
caps |= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
- if (!dip->stream) {
- pr_err(KBUILD_MODNAME
- ": Failed to set up pvrusb2 v4l video dev due to missing stream instance\n");
- return;
- }
break;
case VFL_TYPE_VBI:
dip->config = pvr2_config_vbi;
diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
index 10005c80f4..ee3475bed3 100644
--- a/drivers/media/v4l2-core/v4l2-cci.c
+++ b/drivers/media/v4l2-core/v4l2-cci.c
@@ -32,7 +32,7 @@ int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
ret = regmap_bulk_read(map, reg, buf, len);
if (ret) {
- dev_err(regmap_get_device(map), "Error reading reg 0x%4x: %d\n",
+ dev_err(regmap_get_device(map), "Error reading reg 0x%04x: %d\n",
reg, ret);
goto out;
}
@@ -131,7 +131,7 @@ int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
ret = regmap_bulk_write(map, reg, buf, len);
if (ret)
- dev_err(regmap_get_device(map), "Error writing reg 0x%4x: %d\n",
+ dev_err(regmap_get_device(map), "Error writing reg 0x%04x: %d\n",
reg, ret);
out:
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 0cc30397fb..8db9ac9c14 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -1084,11 +1084,17 @@ static int v4l2_m2m_register_entity(struct media_device *mdev,
entity->function = function;
ret = media_entity_pads_init(entity, num_pads, pads);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
ret = media_device_register_entity(mdev, entity);
- if (ret)
+ if (ret) {
+ kfree(entity->name);
+ entity->name = NULL;
return ret;
+ }
return 0;
}
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index abff87f917..b8a7af2d36 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -121,7 +121,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB,
- .name = "dla0rdb",
+ .name = "dla1rdb",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -407,7 +407,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDB1,
- .name = "dla0rdb1",
+ .name = "dla1rdb1",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -417,7 +417,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRB,
- .name = "dla0wrb",
+ .name = "dla1wrb",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -699,7 +699,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA,
- .name = "dla0rda",
+ .name = "dla1rda",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -709,7 +709,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALRDB,
- .name = "dla0falrdb",
+ .name = "dla1falrdb",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -719,7 +719,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1WRA,
- .name = "dla0wra",
+ .name = "dla1wra",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -729,7 +729,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1FALWRB,
- .name = "dla0falwrb",
+ .name = "dla1falwrb",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
@@ -917,7 +917,7 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
},
}, {
.id = TEGRA234_MEMORY_CLIENT_DLA1RDA1,
- .name = "dla0rda1",
+ .name = "dla1rda1",
.sid = TEGRA234_SID_NVDLA1,
.regs = {
.sid = {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 68d71b4b55..f8fdf82238 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1773,6 +1773,7 @@ config TWL4030_CORE
bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 Support"
depends on I2C=y
select IRQ_DOMAIN
+ select MFD_CORE
select REGMAP_I2C
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index 0e52bd2ebd..fb5f988e61 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -109,7 +109,9 @@ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
dev = driver_find_device_by_of_node(&altr_sysmgr_driver.driver,
(void *)sysmgr_np);
- of_node_put(sysmgr_np);
+ if (property)
+ of_node_put(sysmgr_np);
+
if (!dev)
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 7b6d07cbe6..1cea3f8f46 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -84,7 +84,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_DRV_CTRL_5, 0x136C00C0 },
{ CS42L43_GPIO_CTRL1, 0x00000707 },
{ CS42L43_GPIO_CTRL2, 0x00000000 },
- { CS42L43_GPIO_FN_SEL, 0x00000000 },
+ { CS42L43_GPIO_FN_SEL, 0x00000004 },
{ CS42L43_MCLK_SRC_SEL, 0x00000000 },
{ CS42L43_SAMPLE_RATE1, 0x00000003 },
{ CS42L43_SAMPLE_RATE2, 0x00000003 },
@@ -131,38 +131,38 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_ASP_TX_CH4_CTRL, 0x00170091 },
{ CS42L43_ASP_TX_CH5_CTRL, 0x001700C1 },
{ CS42L43_ASP_TX_CH6_CTRL, 0x001700F1 },
- { CS42L43_ASPTX1_INPUT, 0x00800000 },
- { CS42L43_ASPTX2_INPUT, 0x00800000 },
- { CS42L43_ASPTX3_INPUT, 0x00800000 },
- { CS42L43_ASPTX4_INPUT, 0x00800000 },
- { CS42L43_ASPTX5_INPUT, 0x00800000 },
- { CS42L43_ASPTX6_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00800000 },
- { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00800000 },
- { CS42L43_ASRC_INT1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_INT4_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC1_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC2_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC3_INPUT1, 0x00800000 },
- { CS42L43_ASRC_DEC4_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC1DEC2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2INT2_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC1_INPUT1, 0x00800000 },
- { CS42L43_ISRC2DEC2_INPUT1, 0x00800000 },
+ { CS42L43_ASPTX1_INPUT, 0x00000000 },
+ { CS42L43_ASPTX2_INPUT, 0x00000000 },
+ { CS42L43_ASPTX3_INPUT, 0x00000000 },
+ { CS42L43_ASPTX4_INPUT, 0x00000000 },
+ { CS42L43_ASPTX5_INPUT, 0x00000000 },
+ { CS42L43_ASPTX6_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH3_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP1_CH4_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP2_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP3_CH2_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH1_INPUT, 0x00000000 },
+ { CS42L43_SWIRE_DP4_CH2_INPUT, 0x00000000 },
+ { CS42L43_ASRC_INT1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_INT4_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC3_INPUT1, 0x00000000 },
+ { CS42L43_ASRC_DEC4_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC1DEC2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2INT2_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC1_INPUT1, 0x00000000 },
+ { CS42L43_ISRC2DEC2_INPUT1, 0x00000000 },
{ CS42L43_EQ1MIX_INPUT1, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ1MIX_INPUT3, 0x00800000 },
@@ -171,8 +171,8 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_EQ2MIX_INPUT2, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT3, 0x00800000 },
{ CS42L43_EQ2MIX_INPUT4, 0x00800000 },
- { CS42L43_SPDIF1_INPUT1, 0x00800000 },
- { CS42L43_SPDIF2_INPUT1, 0x00800000 },
+ { CS42L43_SPDIF1_INPUT1, 0x00000000 },
+ { CS42L43_SPDIF2_INPUT1, 0x00000000 },
{ CS42L43_AMP1MIX_INPUT1, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT2, 0x00800000 },
{ CS42L43_AMP1MIX_INPUT3, 0x00800000 },
@@ -217,7 +217,7 @@ const struct reg_default cs42l43_reg_default[CS42L43_N_DEFAULTS] = {
{ CS42L43_CTRL_REG, 0x00000006 },
{ CS42L43_FDIV_FRAC, 0x40000000 },
{ CS42L43_CAL_RATIO, 0x00000080 },
- { CS42L43_SPI_CLK_CONFIG1, 0x00000000 },
+ { CS42L43_SPI_CLK_CONFIG1, 0x00000001 },
{ CS42L43_SPI_CONFIG1, 0x00000000 },
{ CS42L43_SPI_CONFIG2, 0x00000000 },
{ CS42L43_SPI_CONFIG3, 0x00000001 },
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index ae57592006..cab11ed23b 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -18,18 +18,29 @@
#include "intel-lpss.h"
-/* Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources */
-static const struct pci_device_id ignore_resource_conflicts_ids[] = {
- /* Microsoft Surface Go (version 1) I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182), },
- /* Microsoft Surface Go 2 I2C4 */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237), },
+static const struct pci_device_id quirk_ids[] = {
+ {
+ /* Microsoft Surface Go (version 1) I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1182),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Microsoft Surface Go 2 I2C4 */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x9d64, 0x152d, 0x1237),
+ .driver_data = QUIRK_IGNORE_RESOURCE_CONFLICTS,
+ },
+ {
+ /* Dell XPS 9530 (2023) */
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_INTEL, 0x51fb, 0x1028, 0x0beb),
+ .driver_data = QUIRK_CLOCK_DIVIDER_UNITY,
+ },
{ }
};
static int intel_lpss_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
+ const struct pci_device_id *quirk_pci_info;
struct intel_lpss_platform_info *info;
int ret;
@@ -45,8 +56,9 @@ static int intel_lpss_pci_probe(struct pci_dev *pdev,
info->mem = &pdev->resource[0];
info->irq = pdev->irq;
- if (pci_match_id(ignore_resource_conflicts_ids, pdev))
- info->ignore_resource_conflicts = true;
+ quirk_pci_info = pci_match_id(quirk_ids, pdev);
+ if (quirk_pci_info)
+ info->quirks = quirk_pci_info->driver_data;
pdev->d3cold_delay = 0;
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 00e7b578bb..d422e88ba4 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -292,6 +292,7 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
{
char name[32];
struct clk *tmp = *clk;
+ int ret;
snprintf(name, sizeof(name), "%s-enable", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp), 0,
@@ -308,6 +309,12 @@ static int intel_lpss_register_clock_divider(struct intel_lpss *lpss,
return PTR_ERR(tmp);
*clk = tmp;
+ if (lpss->info->quirks & QUIRK_CLOCK_DIVIDER_UNITY) {
+ ret = clk_set_rate(tmp, lpss->info->clk_rate);
+ if (ret)
+ return ret;
+ }
+
snprintf(name, sizeof(name), "%s-update", devname);
tmp = clk_register_gate(NULL, name, __clk_get_name(tmp),
CLK_SET_RATE_PARENT, lpss->priv, 31, 0, NULL);
@@ -401,7 +408,7 @@ int intel_lpss_probe(struct device *dev,
return ret;
lpss->cell->swnode = info->swnode;
- lpss->cell->ignore_resource_conflicts = info->ignore_resource_conflicts;
+ lpss->cell->ignore_resource_conflicts = info->quirks & QUIRK_IGNORE_RESOURCE_CONFLICTS;
intel_lpss_init_dev(lpss);
diff --git a/drivers/mfd/intel-lpss.h b/drivers/mfd/intel-lpss.h
index 062ce95b68..f50d11d60d 100644
--- a/drivers/mfd/intel-lpss.h
+++ b/drivers/mfd/intel-lpss.h
@@ -11,16 +11,28 @@
#ifndef __MFD_INTEL_LPSS_H
#define __MFD_INTEL_LPSS_H
+#include <linux/bits.h>
#include <linux/pm.h>
+/*
+ * Some DSDTs have an unused GEXP ACPI device conflicting with I2C4 resources.
+ * Set to ignore resource conflicts with ACPI declared SystemMemory regions.
+ */
+#define QUIRK_IGNORE_RESOURCE_CONFLICTS BIT(0)
+/*
+ * Some devices have misconfigured clock divider due to a firmware bug.
+ * Set this to force the clock divider to 1:1 ratio.
+ */
+#define QUIRK_CLOCK_DIVIDER_UNITY BIT(1)
+
struct device;
struct resource;
struct software_node;
struct intel_lpss_platform_info {
struct resource *mem;
- bool ignore_resource_conflicts;
int irq;
+ unsigned int quirks;
unsigned long clk_rate;
const char *clk_con_id;
const struct software_node *swnode;
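
The intel-lpss rework replaces a single bool with a quirks bitmask carried in the driver_data of the matching PCI ID. A self-contained illustration of the same flag scheme follows; BIT(), demo_match() and the table below are simplified stand-ins rather than the kernel definitions:

#include <stdio.h>

#define BIT(n)                          (1u << (n))
#define QUIRK_IGNORE_RESOURCE_CONFLICTS BIT(0)
#define QUIRK_CLOCK_DIVIDER_UNITY       BIT(1)

struct demo_id {
        unsigned short subvendor;
        unsigned short subdevice;
        unsigned int driver_data;       /* quirk bits for this device */
};

static const struct demo_id quirk_ids[] = {
        { 0x152d, 0x1237, QUIRK_IGNORE_RESOURCE_CONFLICTS },    /* Surface Go 2 I2C4 */
        { 0x1028, 0x0beb, QUIRK_CLOCK_DIVIDER_UNITY },          /* Dell XPS 9530 (2023) */
        { 0, 0, 0 }
};

/* Simplified stand-in for pci_match_id(). */
static const struct demo_id *demo_match(unsigned short subvendor, unsigned short subdevice)
{
        for (const struct demo_id *id = quirk_ids; id->subvendor; id++)
                if (id->subvendor == subvendor && id->subdevice == subdevice)
                        return id;
        return NULL;
}

int main(void)
{
        const struct demo_id *id = demo_match(0x1028, 0x0beb);
        unsigned int quirks = id ? id->driver_data : 0;

        if (quirks & QUIRK_CLOCK_DIVIDER_UNITY)
                printf("force 1:1 clock divider\n");
        if (quirks & QUIRK_IGNORE_RESOURCE_CONFLICTS)
                printf("ignore ACPI resource conflicts\n");
        return 0;
}
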
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index c9550368d9..7d0e91164c 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -238,7 +238,9 @@ struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np,
return ERR_PTR(-ENODEV);
regmap = syscon_node_to_regmap(syscon_np);
- of_node_put(syscon_np);
+
+ if (property)
+ of_node_put(syscon_np);
return regmap;
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 03319a1fa9..dbd26c3b24 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -263,7 +263,6 @@ struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
int vmcount;
- u64 perms;
struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@@ -1279,9 +1278,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
if (fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
- &fl->cctx->perms,
+ &src_perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1915,8 +1916,10 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+ u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
- &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
+ &src_perms, fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
@@ -2290,7 +2293,6 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
if (vmcount) {
data->vmcount = vmcount;
- data->perms = BIT(QCOM_SCM_VMID_HLOS);
for (i = 0; i < data->vmcount; i++) {
data->vmperms[i].vmid = vmids[i];
data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
index c6eb27d46c..1511958447 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c
@@ -198,8 +198,14 @@ static int lis3lv02d_i2c_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- if (!lis3->pdata || !lis3->pdata->wakeup_flags)
+ /* Turn on for wakeup if turned off by runtime suspend */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweron(lis3);
+ /* For non wakeup turn off if not already turned off by runtime suspend */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweroff(lis3);
+
return 0;
}
@@ -208,13 +214,12 @@ static int lis3lv02d_i2c_resume(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
struct lis3lv02d *lis3 = i2c_get_clientdata(client);
- /*
- * pm_runtime documentation says that devices should always
- * be powered on at resume. Pm_runtime turns them off after system
- * wide resume is complete.
- */
- if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
- pm_runtime_suspended(dev))
+ /* Turn back off if turned on for wakeup and runtime suspended */
+ if (lis3->pdata && lis3->pdata->wakeup_flags) {
+ if (pm_runtime_suspended(dev))
+ lis3lv02d_poweroff(lis3);
+ /* For non wakeup turn back on if not runtime suspended */
+ } else if (!pm_runtime_suspended(dev))
lis3lv02d_poweron(lis3);
return 0;
diff --git a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
index be52b113ae..89364bdbb1 100644
--- a/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
+++ b/drivers/misc/mei/gsc_proxy/mei_gsc_proxy.c
@@ -96,7 +96,8 @@ static const struct component_master_ops mei_component_master_ops = {
*
* The function checks if the device is pci device and
* Intel VGA adapter, the subcomponent is SW Proxy
- * and the parent of MEI PCI and the parent of VGA are the same PCH device.
+ * and the VGA is on the bus 0 reserved for built-in devices
+ * to reject discrete GFX.
*
* @dev: master device
* @subcomponent: subcomponent to match (I915_COMPONENT_SWPROXY)
@@ -123,7 +124,8 @@ static int mei_gsc_proxy_component_match(struct device *dev, int subcomponent,
if (subcomponent != I915_COMPONENT_GSC_PROXY)
return 0;
- return component_compare_dev(dev->parent, ((struct device *)data)->parent);
+ /* Only built-in GFX */
+ return (pdev->bus->number == 0);
}
static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
@@ -146,7 +148,7 @@ static int mei_gsc_proxy_probe(struct mei_cl_device *cldev,
}
component_match_add_typed(&cldev->dev, &master_match,
- mei_gsc_proxy_component_match, cldev->dev.parent);
+ mei_gsc_proxy_component_match, NULL);
if (IS_ERR_OR_NULL(master_match)) {
ret = -ENOMEM;
goto err_exit;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 961e5d53a2..aac36750d2 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -112,6 +112,8 @@
#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */
#define MEI_DEV_ID_MTL_M 0x7E70 /* Meteor Lake Point M */
+#define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */
+#define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */
/*
* MEI HW Section
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 676d566f38..8cf636c540 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -119,6 +119,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)},
/* required last entry */
{0, }
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index ac69b7f361..7eb74711ac 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -184,7 +184,7 @@ static irqreturn_t irq_handler(void *private)
{
struct eventfd_ctx *ev_ctx = private;
- eventfd_signal(ev_ctx, 1);
+ eventfd_signal(ev_ctx);
return IRQ_HANDLED;
}
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 32d49100df..3564a0f63c 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -413,7 +413,7 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
struct mmc_blk_ioc_data *idata;
int err;
- idata = kmalloc(sizeof(*idata), GFP_KERNEL);
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
if (!idata) {
err = -ENOMEM;
goto out;
@@ -488,7 +488,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->flags & MMC_BLK_IOC_DROP)
return 0;
- if (idata->flags & MMC_BLK_IOC_SBC)
+ if (idata->flags & MMC_BLK_IOC_SBC && i > 0)
prev_idata = idatas[i - 1];
/*
@@ -874,10 +874,11 @@ static const struct block_device_operations mmc_bdops = {
static int mmc_blk_part_switch_pre(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
if (card->ext_csd.cmdq_en) {
ret = mmc_cmdq_disable(card);
if (ret)
@@ -892,10 +893,11 @@ static int mmc_blk_part_switch_pre(struct mmc_card *card,
static int mmc_blk_part_switch_post(struct mmc_card *card,
unsigned int part_type)
{
- const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_RPMB;
+ const unsigned int mask = EXT_CSD_PART_CONFIG_ACC_MASK;
+ const unsigned int rpmb = EXT_CSD_PART_CONFIG_ACC_RPMB;
int ret = 0;
- if ((part_type & mask) == mask) {
+ if ((part_type & mask) == rpmb) {
mmc_retune_unpause(card->host);
if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
ret = mmc_cmdq_enable(card);
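
Before the fix, EXT_CSD_PART_CONFIG_ACC_RPMB served as both mask and expected value, so any partition whose low two access bits were set was treated as RPMB. The new code masks with the full access field and compares against the RPMB value. A toy check, assuming the conventional field values (mask 0x7, RPMB 0x3, a non-RPMB partition 0x7) -- these constants are assumptions for the demo, not quoted from the patch:

#include <stdio.h>

/* Assumed values of the EXT_CSD PARTITION_CONFIG access field. */
#define ACC_MASK        0x7
#define ACC_RPMB        0x3
#define ACC_GP_EXAMPLE  0x7     /* a non-RPMB partition that trips the old test */

static int old_is_rpmb(unsigned int part_type)
{
        const unsigned int mask = ACC_RPMB;     /* pre-fix: RPMB doubles as the mask */

        return (part_type & mask) == mask;
}

static int new_is_rpmb(unsigned int part_type)
{
        const unsigned int mask = ACC_MASK;
        const unsigned int rpmb = ACC_RPMB;

        return (part_type & mask) == rpmb;
}

int main(void)
{
        printf("old: gp=%d rpmb=%d\n", old_is_rpmb(ACC_GP_EXAMPLE), old_is_rpmb(ACC_RPMB));
        printf("new: gp=%d rpmb=%d\n", new_is_rpmb(ACC_GP_EXAMPLE), new_is_rpmb(ACC_RPMB));
        return 0;
}
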
diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c b/drivers/mmc/host/sdhci-of-dwcmshc.c
index 3a3bae6948..a0524127ca 100644
--- a/drivers/mmc/host/sdhci-of-dwcmshc.c
+++ b/drivers/mmc/host/sdhci-of-dwcmshc.c
@@ -584,6 +584,17 @@ free_pltfm:
return err;
}
+static void dwcmshc_disable_card_clk(struct sdhci_host *host)
+{
+ u16 ctrl;
+
+ ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (ctrl & SDHCI_CLOCK_CARD_EN) {
+ ctrl &= ~SDHCI_CLOCK_CARD_EN;
+ sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
+ }
+}
+
static void dwcmshc_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
@@ -591,8 +602,14 @@ static void dwcmshc_remove(struct platform_device *pdev)
struct dwcmshc_priv *priv = sdhci_pltfm_priv(pltfm_host);
struct rk35xx_priv *rk_priv = priv->priv;
+ pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
sdhci_remove_host(host, 0);
+ dwcmshc_disable_card_clk(host);
+
clk_disable_unprepare(pltfm_host->clk);
clk_disable_unprepare(priv->bus_clk);
if (rk_priv)
@@ -684,17 +701,6 @@ static void dwcmshc_enable_card_clk(struct sdhci_host *host)
}
}
-static void dwcmshc_disable_card_clk(struct sdhci_host *host)
-{
- u16 ctrl;
-
- ctrl = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
- if (ctrl & SDHCI_CLOCK_CARD_EN) {
- ctrl &= ~SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, ctrl, SDHCI_CLOCK_CONTROL);
- }
-}
-
static int dwcmshc_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 1e0bc7bace..0a26831b3b 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1439,6 +1439,9 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
+ if (host->tuning_mode != SDHCI_TUNING_MODE_3)
+ mmc_retune_needed(host->mmc);
+
if (omap_host->con != -EINVAL)
sdhci_runtime_suspend_host(host);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index be7f18fd48..c253d176db 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -259,6 +259,8 @@ static void tmio_mmc_reset_work(struct work_struct *work)
else
mrq->cmd->error = -ETIMEDOUT;
+ /* No new calls yet, but disallow concurrent tmio_mmc_done_work() */
+ host->mrq = ERR_PTR(-EBUSY);
host->cmd = NULL;
host->data = NULL;
diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c
index 77d5f1d244..860380931b 100644
--- a/drivers/mmc/host/wmt-sdmmc.c
+++ b/drivers/mmc/host/wmt-sdmmc.c
@@ -883,7 +883,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
{
struct mmc_host *mmc;
struct wmt_mci_priv *priv;
- struct resource *res;
u32 reg_tmp;
mmc = platform_get_drvdata(pdev);
@@ -911,9 +910,6 @@ static void wmt_mci_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk_sdmmc);
clk_put(priv->clk_sdmmc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
mmc_free_host(mmc);
dev_info(&pdev->dev, "WMT MCI device removed\n");
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 746a27d15d..96eb2e782c 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -518,7 +518,7 @@ static int physmap_flash_probe(struct platform_device *dev)
if (!info->maps[i].phys)
info->maps[i].phys = res->start;
- info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->win_order = fls64(resource_size(res)) - 1;
info->maps[i].size = BIT(info->win_order +
(info->gpios ?
info->gpios->ndescs : 0));
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index 488fd45261..677fcb03f9 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -303,8 +303,9 @@ static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
return 0;
}
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
+ struct lpc32xx_nand_host *host = data;
uint8_t sr;
/* Clear interrupt flag by reading status */
@@ -780,7 +781,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
goto release_dma_chan;
}
- if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+ if (request_irq(host->irq, &lpc3xxx_nand_irq,
IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
res = -ENXIO;
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index 71ec4052e5..b3a881cbcd 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -63,7 +63,7 @@
#define CMDRWGEN(cmd_dir, ran, bch, short_mode, page_size, pages) \
( \
(cmd_dir) | \
- ((ran) << 19) | \
+ (ran) | \
((bch) << 14) | \
((short_mode) << 13) | \
(((page_size) & 0x7f) << 6) | \
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index bbdcfbe643..ccbd42a427 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1207,21 +1207,36 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
return nand_exec_op(chip, &op);
}
+static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
+{
+ /* lun is expected to be very small */
+ return (lun * pages_per_lun) + pages_per_lun - 1;
+}
+
static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
struct nand_memory_organization *memorg;
- unsigned int pages_per_lun, first_lun, last_lun;
+ unsigned int ppl, first_lun, last_lun;
memorg = nanddev_get_memorg(&chip->base);
- pages_per_lun = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
- first_lun = chip->cont_read.first_page / pages_per_lun;
- last_lun = chip->cont_read.last_page / pages_per_lun;
+ ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
+ first_lun = chip->cont_read.first_page / ppl;
+ last_lun = chip->cont_read.last_page / ppl;
/* Prevent sequential cache reads across LUN boundaries */
if (first_lun != last_lun)
- chip->cont_read.pause_page = first_lun * pages_per_lun + pages_per_lun - 1;
+ chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
else
chip->cont_read.pause_page = chip->cont_read.last_page;
+
+ if (chip->cont_read.first_page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ chip->cont_read.pause_page = min(chip->cont_read.last_page,
+ rawnand_last_page_of_lun(ppl, first_lun + 1));
+ }
+
+ if (chip->cont_read.first_page >= chip->cont_read.last_page)
+ chip->cont_read.ongoing = false;
}
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
@@ -1288,12 +1303,11 @@ static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int p
if (!chip->cont_read.ongoing)
return 0;
- if (page == chip->cont_read.pause_page &&
- page != chip->cont_read.last_page) {
- chip->cont_read.first_page = chip->cont_read.pause_page + 1;
- rawnand_cap_cont_reads(chip);
- } else if (page == chip->cont_read.last_page) {
+ if (page == chip->cont_read.last_page) {
chip->cont_read.ongoing = false;
+ } else if (page == chip->cont_read.pause_page) {
+ chip->cont_read.first_page++;
+ rawnand_cap_cont_reads(chip);
}
return 0;
@@ -3460,30 +3474,36 @@ static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
u32 readlen, int col)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- unsigned int end_page, end_col;
+ unsigned int first_page, last_page;
chip->cont_read.ongoing = false;
if (!chip->controller->supported_op.cont_read)
return;
- end_page = DIV_ROUND_UP(col + readlen, mtd->writesize);
- end_col = (col + readlen) % mtd->writesize;
+ /*
+ * Don't bother making any calculations if the length is too small.
+ * Side effect: avoids possible integer underflows below.
+ */
+ if (readlen < (2 * mtd->writesize))
+ return;
+ /* Derive the page where continuous read should start (the first full page read) */
+ first_page = page;
if (col)
- page++;
+ first_page++;
- if (end_col && end_page)
- end_page--;
+ /* Derive the page where continuous read should stop (the last full page read) */
+ last_page = page + ((col + readlen) / mtd->writesize) - 1;
- if (page + 1 > end_page)
- return;
-
- chip->cont_read.first_page = page;
- chip->cont_read.last_page = end_page;
- chip->cont_read.ongoing = true;
-
- rawnand_cap_cont_reads(chip);
+ /* Configure and enable continuous read when suitable */
+ if (first_page < last_page) {
+ chip->cont_read.first_page = first_page;
+ chip->cont_read.last_page = last_page;
+ chip->cont_read.ongoing = true;
+ /* May reset the ongoing flag */
+ rawnand_cap_cont_reads(chip);
+ }
}
static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
@@ -3492,10 +3512,7 @@ static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned i
return;
chip->cont_read.first_page++;
- if (chip->cont_read.first_page == chip->cont_read.pause_page)
- chip->cont_read.first_page++;
- if (chip->cont_read.first_page >= chip->cont_read.last_page)
- chip->cont_read.ongoing = false;
+ rawnand_cap_cont_reads(chip);
}
/**
@@ -3571,7 +3588,8 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
oob = ops->oobbuf;
oob_required = oob ? 1 : 0;
- rawnand_enable_cont_reads(chip, page, readlen, col);
+ if (likely(ops->mode != MTD_OPS_RAW))
+ rawnand_enable_cont_reads(chip, page, readlen, col);
while (1) {
struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
@@ -5189,6 +5207,15 @@ static void rawnand_late_check_supported_ops(struct nand_chip *chip)
if (!nand_has_exec_op(chip))
return;
+ /*
+ * For now, continuous reads can only be used with the core page helpers.
+ * This can be extended later.
+ */
+ if (!(chip->ecc.read_page == nand_read_page_hwecc ||
+ chip->ecc.read_page == nand_read_page_syndrome ||
+ chip->ecc.read_page == nand_read_page_swecc))
+ return;
+
rawnand_check_cont_read_support(chip);
}
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 31c439a557..4597a82de2 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -104,7 +104,8 @@ static const struct mtd_ooblayout_ops f50l1g41lb_ooblayout = {
static const struct spinand_info esmt_c8_spinand_table[] = {
SPINAND_INFO("F50L1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x01, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -113,7 +114,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D1G41LB",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
NAND_ECCREQ(1, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
@@ -122,7 +124,8 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
SPINAND_INFO("F50D2G41KA",
- SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51, 0x7f,
+ 0x7f, 0x7f),
NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
NAND_ECCREQ(8, 512),
SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 2a728c31e6..9a4940874b 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2..6e5489e233 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6cf7f36470..b094c48beb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1811,7 +1811,7 @@ void bond_xdp_set_features(struct net_device *bond_dev)
ASSERT_RTNL();
- if (!bond_xdp_check(bond)) {
+ if (!bond_xdp_check(bond) || !bond_has_slaves(bond)) {
xdp_clear_features_flag(bond_dev);
return;
}
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 16ecc11c7f..2395b1225c 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -418,6 +418,13 @@ static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
{
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Start hrtimer\n");
+ hrtimer_start(&cdev->hrtimer,
+ ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
+ HRTIMER_MODE_REL_PINNED);
+ }
+
/* Only interrupt line 0 is used in this driver */
m_can_write(cdev, M_CAN_ILE, ILE_EINT0);
}
@@ -425,6 +432,11 @@ static inline void m_can_enable_all_interrupts(struct m_can_classdev *cdev)
static inline void m_can_disable_all_interrupts(struct m_can_classdev *cdev)
{
m_can_write(cdev, M_CAN_ILE, 0x0);
+
+ if (!cdev->net->irq) {
+ dev_dbg(cdev->dev, "Stop hrtimer\n");
+ hrtimer_cancel(&cdev->hrtimer);
+ }
}
/* Retrieve internal timestamp counter from TSCV.TSC, and shift it to 32-bit
@@ -1417,12 +1429,6 @@ static int m_can_start(struct net_device *dev)
m_can_enable_all_interrupts(cdev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Start hrtimer\n");
- hrtimer_start(&cdev->hrtimer, ms_to_ktime(HRTIMER_POLL_INTERVAL_MS),
- HRTIMER_MODE_REL_PINNED);
- }
-
return 0;
}
@@ -1577,11 +1583,6 @@ static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
- if (!dev->irq) {
- dev_dbg(cdev->dev, "Stop hrtimer\n");
- hrtimer_cancel(&cdev->hrtimer);
- }
-
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 4bf4d67557..9048d1f196 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -49,9 +49,9 @@ static int ksz8_ind_write8(struct ksz_device *dev, u8 table, u16 addr, u8 data)
mutex_lock(&dev->alu_mutex);
ctrl_addr = IND_ACC_TABLE(table) | addr;
- ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
if (!ret)
- ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write8(dev, regs[REG_IND_BYTE], data);
mutex_unlock(&dev->alu_mutex);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index ff4b39601c..d5d7e48b7b 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2176,6 +2176,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
return ksz_irq_common_setup(dev, pirq);
}
+static int ksz_parse_drive_strength(struct ksz_device *dev);
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -2197,6 +2199,10 @@ static int ksz_setup(struct dsa_switch *ds)
return ret;
}
+ ret = ksz_parse_drive_strength(dev);
+ if (ret)
+ return ret;
+
/* set broadcast storm protection 10% rate */
regmap_update_bits(ksz_regmap_16(dev), regs[S_BROADCAST_CTRL],
BROADCAST_STORM_RATE,
@@ -4236,10 +4242,6 @@ int ksz_switch_register(struct ksz_device *dev)
for (port_num = 0; port_num < dev->info->port_cnt; ++port_num)
dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA;
if (dev->dev->of_node) {
- ret = ksz_parse_drive_strength(dev);
- if (ret)
- return ret;
-
ret = of_get_phy_mode(dev->dev->of_node, &interface);
if (ret == 0)
dev->compat_interface = interface;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 2333f6383b..e6b8bf6035 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -998,20 +998,56 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
+/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
+ * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
+ * must only be propagated to C-VLAN and MAC Bridge components. That means
+ * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
+ * these frames are supposed to be processed by the CPU (software). So we make
+ * the switch only forward them to the CPU port. And if received from a CPU
+ * port, forward to a single port. The software is responsible of making the
+ * switch conform to the latter by setting a single port as destination port on
+ * the special tag.
+ *
+ * This switch intellectual property cannot conform to this part of the standard
+ * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
+ * DAs, it also includes :22-FF, for which the scope of propagation is not
+ * supposed to be restricted.
+ */
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
- /* Trap BPDUs to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ * VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
- /* Trap 802.1X PAE frames to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
+ MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
- /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
+ MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
static int
@@ -2243,11 +2279,11 @@ mt7530_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
@@ -2449,11 +2485,11 @@ mt7531_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 17e42d30ff..75bc9043c8 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -65,14 +65,33 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
+#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for :01 and :02 MAC DA frame control */
+#define MT753X_RGAC1 0x28
+#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
+#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
+#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
+#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
enum mt753x_bpdu_port_fw {
MT753X_BPDU_FOLLOW_MFC,
@@ -253,6 +272,7 @@ enum mt7530_port_mode {
enum mt7530_vlan_port_eg_tag {
MT7530_VLAN_EG_DISABLED = 0,
MT7530_VLAN_EG_CONSISTENT = 1,
+ MT7530_VLAN_EG_UNTAGGED = 4,
};
enum mt7530_vlan_port_attr {
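
The new MT753X_* field definitions in mt7530.h follow the usual GENMASK()/FIELD_PREP() pattern for packing register fields. The sketch below rebuilds that pattern in plain C with simplified 32-bit versions of the two macros (not the kernel's full implementations); the FW_CPU_ONLY value is illustrative only, not the real MT753X_BPDU_CPU_ONLY encoding.

#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers,
 * good enough for 32-bit masks in this example. */
#define GENMASK(h, l)       (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, v) (((unsigned)(v) << __builtin_ctz(mask)) & (mask))

#define PAE_EG_TAG_MASK   GENMASK(24, 22)
#define PAE_PORT_FW_MASK  GENMASK(18, 16)
#define BPDU_EG_TAG_MASK  GENMASK(8, 6)
#define BPDU_PORT_FW_MASK GENMASK(2, 0)

#define EG_UNTAGGED 4u   /* MT7530_VLAN_EG_UNTAGGED */
#define FW_CPU_ONLY 4u   /* illustrative value, not the real MT753X_BPDU_CPU_ONLY */

int main(void)
{
	unsigned int mask = PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
			    BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK;
	unsigned int val  = FIELD_PREP(PAE_EG_TAG_MASK, EG_UNTAGGED) |
			    FIELD_PREP(PAE_PORT_FW_MASK, FW_CPU_ONLY) |
			    FIELD_PREP(BPDU_EG_TAG_MASK, EG_UNTAGGED) |
			    FIELD_PREP(BPDU_PORT_FW_MASK, FW_CPU_ONLY);
	unsigned int reg  = 0xdeadbeef;

	/* A read-modify-write clears every field named in 'mask' and ORs in
	 * 'val' -- conceptually what mt7530_rmw(priv, MT753X_BPC, mask, val)
	 * does to the BPC register in the trap_frames hunk above. */
	reg = (reg & ~mask) | val;
	printf("BPC <- 0x%08x\n", reg);
	return 0;
}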
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c44c44e26d..4fa27c9a33 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3238,22 +3238,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- u16 qid;
- /* we suspect that this is good for in--kernel network services that
- * want to loop incoming skb rx to tx in normal user generated traffic,
- * most probably we will not get to this
- */
- if (skb_rx_queue_recorded(skb))
- qid = skb_get_rx_queue(skb);
- else
- qid = netdev_pick_tx(dev, skb, NULL);
-
- return qid;
-}
-
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -3424,7 +3408,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_open = ena_open,
.ndo_stop = ena_close,
.ndo_start_xmit = ena_start_xmit,
- .ndo_select_queue = ena_select_queue,
.ndo_get_stats64 = ena_get_stats64,
.ndo_tx_timeout = ena_tx_timeout,
.ndo_change_mtu = ena_change_mtu,
diff --git a/drivers/net/ethernet/amd/pds_core/auxbus.c b/drivers/net/ethernet/amd/pds_core/auxbus.c
index 11c23a7f31..fd1a5149c0 100644
--- a/drivers/net/ethernet/amd/pds_core/auxbus.c
+++ b/drivers/net/ethernet/amd/pds_core/auxbus.c
@@ -160,23 +160,19 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
if (err < 0) {
dev_warn(cf->dev, "auxiliary_device_init of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out;
+ kfree(padev);
+ return ERR_PTR(err);
}
err = auxiliary_device_add(aux_dev);
if (err) {
dev_warn(cf->dev, "auxiliary_device_add of %s failed: %pe\n",
name, ERR_PTR(err));
- goto err_out_uninit;
+ auxiliary_device_uninit(aux_dev);
+ return ERR_PTR(err);
}
return padev;
-
-err_out_uninit:
- auxiliary_device_uninit(aux_dev);
-err_out:
- kfree(padev);
- return ERR_PTR(err);
}
int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d8b1824c33..0bc1367fd6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index 3b6dbf158b..f72dc0cee3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -76,7 +76,7 @@ static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
if (hns3_nic_resetting(ndev))
return -EBUSY;
- if (h->kinfo.dcb_ops->ieee_setapp)
+ if (h->kinfo.dcb_ops->ieee_delapp)
return h->kinfo.dcb_ops->ieee_delapp(h, app);
return -EOPNOTSUPP;
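
The one-character hns3 fix above matters because the guard must test the same callback that is then invoked: the old code checked ->ieee_setapp but called ->ieee_delapp, so a populated delapp handler could still be skipped (or a NULL one dereferenced). A small standalone sketch of the guard pattern, with a hypothetical ops structure in place of the driver's kinfo.dcb_ops:

#include <errno.h>
#include <stdio.h>

struct dcb_ops {
	int (*ieee_setapp)(int app);
	int (*ieee_delapp)(int app);
};

/* Guard and call must name the same member. */
static int delapp(const struct dcb_ops *ops, int app)
{
	if (ops->ieee_delapp)
		return ops->ieee_delapp(app);

	return -EOPNOTSUPP;
}

static int del_impl(int app)
{
	printf("deleting app entry %d\n", app);
	return 0;
}

int main(void)
{
	struct dcb_ops ops = { .ieee_delapp = del_impl };

	/* With only ieee_delapp populated, checking ieee_setapp would have
	 * reported -EOPNOTSUPP even though the operation is supported. */
	return delapp(&ops, 3);
}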
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 5ea9e59569..609d3799d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2890,7 +2890,10 @@ static int hclge_mac_init(struct hclge_dev *hdev)
int ret;
hdev->support_sfp_query = true;
- hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
+ if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 80a2a0073d..507d7ce26d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -108,7 +108,7 @@ void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
u64 ns = nsec;
u32 sec_h;
- if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ if (!hdev->ptp || !test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
return;
/* Since the BD does not have enough space for the higher 16 bits of
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 8510b88d49..f3cd5a376e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_GET_MBX_LEN)
),
@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index 5d4895bb57..b259e95dd5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_GET_MBX_LEN)
),
@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index d9716bcec8..b0dc0fc6b1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -13629,9 +13629,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
return err;
i40e_queue_pair_disable_irq(vsi, queue_pair);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
- i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
i40e_queue_pair_clean_rings(vsi, queue_pair);
i40e_queue_pair_reset_stats(vsi, queue_pair);
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index 2b657d43c7..68b894bb68 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -2146,6 +2146,7 @@ void ice_dpll_init(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
int err = 0;
+ mutex_init(&d->lock);
err = ice_dpll_init_info(pf, cgu);
if (err)
goto err_exit;
@@ -2158,7 +2159,6 @@ void ice_dpll_init(struct ice_pf *pf)
err = ice_dpll_init_pins(pf, cgu);
if (err)
goto deinit_pps;
- mutex_init(&d->lock);
if (cgu) {
err = ice_dpll_init_worker(pf);
if (err)
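
The ice_dpll hunk moves mutex_init() ahead of the first call that can fail, so every later error path and the eventual deinit can rely on the lock existing. A pthread-based sketch of the same ordering; init_info() is a made-up stand-in for the driver's ice_dpll_init_info() step, and the teardown is assumed to run in reverse order of setup:

#include <pthread.h>
#include <stdio.h>

struct dpll_ctx {
	pthread_mutex_t lock;
	int ready;
};

static int init_info(int fail) { return fail ? -1 : 0; }

/* Create the lock before anything that can fail. */
static int dpll_init(struct dpll_ctx *c, int fail)
{
	pthread_mutex_init(&c->lock, NULL);

	if (init_info(fail))
		goto err;

	c->ready = 1;
	return 0;

err:
	pthread_mutex_destroy(&c->lock);	/* always safe: lock is initialized */
	return -1;
}

int main(void)
{
	struct dpll_ctx c = { .ready = 0 };

	printf("init (failing step): %d\n", dpll_init(&c, 1));
	return 0;
}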
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index adfdea1e28..dabf33cec3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -6572,6 +6572,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
{
struct rtnl_link_stats64 *net_stats, *stats_prev;
struct rtnl_link_stats64 *vsi_stats;
+ struct ice_pf *pf = vsi->back;
u64 pkts, bytes;
int i;
@@ -6617,21 +6618,18 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
net_stats = &vsi->net_stats;
stats_prev = &vsi->net_stats_prev;
- /* clear prev counters after reset */
- if (vsi_stats->tx_packets < stats_prev->tx_packets ||
- vsi_stats->rx_packets < stats_prev->rx_packets) {
- stats_prev->tx_packets = 0;
- stats_prev->tx_bytes = 0;
- stats_prev->rx_packets = 0;
- stats_prev->rx_bytes = 0;
+ /* Update netdev counters, but keep in mind that values could start at
+ * a random value after PF reset. And as we increase the reported stat by
+ * the Cur - Prev diff, we need to be sure that Prev is valid. If it's not,
+ * let's skip this round.
+ */
+ if (likely(pf->stat_prev_loaded)) {
+ net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
+ net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
+ net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
+ net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
}
- /* update netdev counters */
- net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets;
- net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes;
- net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets;
- net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes;
-
stats_prev->tx_packets = vsi_stats->tx_packets;
stats_prev->tx_bytes = vsi_stats->tx_bytes;
stats_prev->rx_packets = vsi_stats->rx_packets;
@@ -7800,6 +7798,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
pf_sw = pf->first_sw;
/* find the attribute in the netlink message */
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
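
The reworked ice statistics path only accumulates current-minus-previous deltas once the previous snapshot is known to be valid (pf->stat_prev_loaded), instead of trying to detect wrap-around after a reset; the same file also gains the NULL check on the IFLA_AF_SPEC attribute. A standalone sketch of the delta-accumulation pattern, with field names trimmed down from the driver's rtnl_link_stats64 usage:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct counters {
	uint64_t tx_packets;
	uint64_t rx_packets;
};

/* Accumulate Cur - Prev only when the previous snapshot is valid, so
 * counters that restart at an arbitrary value after a reset cannot
 * corrupt the running totals. */
static void update_stats(struct counters *total, struct counters *prev,
			 const struct counters *cur, bool prev_loaded)
{
	if (prev_loaded) {
		total->tx_packets += cur->tx_packets - prev->tx_packets;
		total->rx_packets += cur->rx_packets - prev->rx_packets;
	}
	*prev = *cur;	/* always resynchronize the snapshot */
}

int main(void)
{
	struct counters total = { 0 }, prev = { 0 };
	struct counters cur = { .tx_packets = 100, .rx_packets = 50 };

	update_stats(&total, &prev, &cur, false);	/* first pass after reset */
	cur.tx_packets = 150;
	cur.rx_packets = 70;
	update_stats(&total, &prev, &cur, true);

	printf("tx=%llu rx=%llu\n",
	       (unsigned long long)total.tx_packets,
	       (unsigned long long)total.rx_packets);
	return 0;
}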
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index e1494f24f6..cd61928700 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -763,24 +763,6 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_create_vsi - Create a new VSI for a VF
- * @vf: VF to create the VSI for
- *
- * This is called by ice_vf_recreate_vsi to create the new VSI after the old
- * VSI has been released.
- */
-static int ice_sriov_create_vsi(struct ice_vf *vf)
-{
- struct ice_vsi *vsi;
-
- vsi = ice_vf_vsi_setup(vf);
- if (!vsi)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
* ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
* @vf: VF to perform tasks on
*/
@@ -799,7 +781,6 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
.irq_close = NULL,
- .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -1093,6 +1074,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
struct ice_pf *pf = pci_get_drvdata(pdev);
u16 prev_msix, prev_queues, queues;
bool needs_rebuild = false;
+ struct ice_vsi *vsi;
struct ice_vf *vf;
int id;
@@ -1127,6 +1109,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (!vf)
return -ENOENT;
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -ENOENT;
+
prev_msix = vf->num_msix;
prev_queues = vf->num_vf_qs;
@@ -1147,8 +1133,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (vf->first_vector_idx < 0)
goto unroll;
- ice_vf_vsi_release(vf);
- if (vf->vf_ops->create_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) {
/* Try to rebuild with previous values */
needs_rebuild = true;
goto unroll;
@@ -1174,8 +1159,10 @@ unroll:
if (vf->first_vector_idx < 0)
return -EINVAL;
- if (needs_rebuild)
- vf->vf_ops->create_vsi(vf);
+ if (needs_rebuild) {
+ ice_vf_reconfig_vsi(vf);
+ ice_vf_init_host_cfg(vf, vsi);
+ }
ice_ena_vf_mappings(vf);
ice_put_vf(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index b7ae099521..88e3cd09f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -248,29 +248,44 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
}
/**
- * ice_vf_recreate_vsi - Release and re-create the VF's VSI
- * @vf: VF to recreate the VSI for
+ * ice_vf_reconfig_vsi - Reconfigure a VF VSI with the device
+ * @vf: VF to reconfigure the VSI for
*
- * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
- * VF configuration change, etc)
+ * This is called when a single VF is being reset (i.e. VVF, VFLR, host VF
+ * configuration change, etc).
*
- * It releases and then re-creates a new VSI.
+ * It brings the VSI down and then reconfigures it with the hardware.
*/
-static int ice_vf_recreate_vsi(struct ice_vf *vf)
+int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
int err;
- ice_vf_vsi_release(vf);
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_NO_INIT;
- err = vf->vf_ops->create_vsi(vf);
+ ice_vsi_decfg(vsi);
+ ice_fltr_remove_all(vsi);
+
+ err = ice_vsi_cfg(vsi, &params);
if (err) {
dev_err(ice_pf_to_dev(pf),
- "Failed to recreate the VF%u's VSI, error %d\n",
+ "Failed to reconfigure the VF%u's VSI, error %d\n",
vf->vf_id, err);
return err;
}
+ /* Update the lan_vsi_num field since it might have been changed. The
+ * PF lan_vsi_idx number remains the same so we don't need to change
+ * that.
+ */
+ vf->lan_vsi_num = vsi->vsi_num;
+
return 0;
}
@@ -929,7 +944,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (ice_vf_recreate_vsi(vf)) {
+ if (ice_vf_reconfig_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 93c774f2f4..6b41e0f3d3 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -62,7 +62,6 @@ struct ice_vf_ops {
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
void (*irq_close)(struct ice_vf *vf);
- int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a0..91ba7fe0ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+int ice_vf_reconfig_vsi(struct ice_vf *vf);
void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 8872f7a4f4..d6348f2082 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vf->driver_caps = *(u32 *)msg;
else
vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
- VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
@@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi,
vf->driver_caps);
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
- } else {
- if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
- else
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
- }
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
index 7d547fa616..588b77f1a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
@@ -13,8 +13,6 @@
* - opcodes needed by VF when caps are activated
*
* Caps that don't use new opcodes (no opcodes should be allowed):
- * - VIRTCHNL_VF_OFFLOAD_RSS_AQ
- * - VIRTCHNL_VF_OFFLOAD_RSS_REG
* - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR
* - VIRTCHNL_VF_OFFLOAD_CRC
* - VIRTCHNL_VF_OFFLOAD_RX_POLLING
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index f3663b3f63..0fd5551b10 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
return -EBUSY;
usleep_range(1000, 2000);
}
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
@@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
}
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
-
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
- ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
@@ -259,11 +260,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
- clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ clear_bit(ICE_CFG_BUSY, vsi->state);
return 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 2c1b051fdc..b0c52f1784 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2087,8 +2087,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
/* schedule the napi to receive all the marker packets */
+ local_bh_disable();
for (i = 0; i < vport->num_q_vectors; i++)
napi_schedule(&vport->q_vectors[i].napi);
+ local_bh_enable();
return idpf_wait_for_marker_event(vport);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b2295caa2f..ada42ba635 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6984,44 +6984,31 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 ack = 0, tsicr = rd32(E1000_TSICR);
+ u32 tsicr = rd32(E1000_TSICR);
struct ptp_clock_event event;
if (tsicr & TSINTR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= TSINTR_SYS_WRAP;
}
if (tsicr & E1000_TSICR_TXTS) {
/* retrieve hardware timestamp */
schedule_work(&adapter->ptp_tx_work);
- ack |= E1000_TSICR_TXTS;
}
- if (tsicr & TSINTR_TT0) {
+ if (tsicr & TSINTR_TT0)
igb_perout(adapter, 0);
- ack |= TSINTR_TT0;
- }
- if (tsicr & TSINTR_TT1) {
+ if (tsicr & TSINTR_TT1)
igb_perout(adapter, 1);
- ack |= TSINTR_TT1;
- }
- if (tsicr & TSINTR_AUTT0) {
+ if (tsicr & TSINTR_AUTT0)
igb_extts(adapter, 0);
- ack |= TSINTR_AUTT0;
- }
- if (tsicr & TSINTR_AUTT1) {
+ if (tsicr & TSINTR_AUTT1)
igb_extts(adapter, 1);
- ack |= TSINTR_AUTT1;
- }
-
- /* acknowledge the interrupts */
- wr32(E1000_TSICR, ack);
}
static irqreturn_t igb_msix_other(int irq, void *data)
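
The igb hunk (and the matching igc change further down) drops the software acknowledge of TSICR. Assuming the cause register is cleared by the read itself, a separate write-back is redundant and can even discard bits latched between the read and the write. A toy model of dispatching from a read-to-clear cause register; the bit names and the fake register are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define CAUSE_PPS  (1u << 0)
#define CAUSE_TXTS (1u << 1)

static uint32_t fake_tsicr = CAUSE_PPS | CAUSE_TXTS;

/* The read returns and clears the latched bits in one step, so there is
 * nothing left to acknowledge afterwards. */
static uint32_t read_cause(void)
{
	uint32_t val = fake_tsicr;

	fake_tsicr = 0;
	return val;
}

int main(void)
{
	uint32_t cause = read_cause();

	if (cause & CAUSE_PPS)
		puts("handle PPS event");
	if (cause & CAUSE_TXTS)
		puts("handle TX timestamp");

	printf("cause register now 0x%x, no ack write needed\n",
	       (unsigned)fake_tsicr);
	return 0;
}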
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index e9bb403bba..45716d271d 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5304,25 +5304,22 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev,
static void igc_tsync_interrupt(struct igc_adapter *adapter)
{
- u32 ack, tsauxc, sec, nsec, tsicr;
struct igc_hw *hw = &adapter->hw;
+ u32 tsauxc, sec, nsec, tsicr;
struct ptp_clock_event event;
struct timespec64 ts;
tsicr = rd32(IGC_TSICR);
- ack = 0;
if (tsicr & IGC_TSICR_SYS_WRAP) {
event.type = PTP_CLOCK_PPS;
if (adapter->ptp_caps.pps)
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_SYS_WRAP;
}
if (tsicr & IGC_TSICR_TXTS) {
/* retrieve hardware timestamp */
igc_ptp_tx_tstamp_event(adapter);
- ack |= IGC_TSICR_TXTS;
}
if (tsicr & IGC_TSICR_TT0) {
@@ -5336,7 +5333,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[0].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT0;
}
if (tsicr & IGC_TSICR_TT1) {
@@ -5350,7 +5346,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
wr32(IGC_TSAUXC, tsauxc);
adapter->perout[1].start = ts;
spin_unlock(&adapter->tmreg_lock);
- ack |= IGC_TSICR_TT1;
}
if (tsicr & IGC_TSICR_AUTT0) {
@@ -5360,7 +5355,6 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 0;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT0;
}
if (tsicr & IGC_TSICR_AUTT1) {
@@ -5370,11 +5364,7 @@ static void igc_tsync_interrupt(struct igc_adapter *adapter)
event.index = 1;
event.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(adapter->ptp_clock, &event);
- ack |= IGC_TSICR_AUTT1;
}
-
- /* acknowledge the interrupts */
- wr32(IGC_TSICR, ack);
}
/**
@@ -6489,7 +6479,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
int cpu = smp_processor_id();
struct netdev_queue *nq;
struct igc_ring *ring;
- int i, drops;
+ int i, nxmit;
if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
@@ -6505,16 +6495,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
/* Avoid transmit queue timeout since we share it with the slow path */
txq_trans_cond_update(nq);
- drops = 0;
+ nxmit = 0;
for (i = 0; i < num_frames; i++) {
int err;
struct xdp_frame *xdpf = frames[i];
err = igc_xdp_init_tx_descriptor(ring, xdpf);
- if (err) {
- xdp_return_frame_rx_napi(xdpf);
- drops++;
- }
+ if (err)
+ break;
+ nxmit++;
}
if (flags & XDP_XMIT_FLUSH)
@@ -6522,7 +6511,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_unlock(nq);
- return num_frames - drops;
+ return nxmit;
}
static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
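
The igc_xdp_xmit() rework follows the ndo_xdp_xmit convention of stopping at the first descriptor-setup failure and returning how many frames were actually queued; ownership of the untransmitted frames stays with the caller rather than being freed in the driver. A compact sketch of that contract, with a fixed-size fake ring standing in for igc_xdp_init_tx_descriptor():

#include <stdio.h>

#define RING_CAP 4

static int ring_used;

/* Stand-in for the descriptor setup: fails once the ring is full. */
static int ring_enqueue(int frame)
{
	(void)frame;
	if (ring_used >= RING_CAP)
		return -1;

	ring_used++;
	return 0;
}

/* Stop at the first failure and report how many frames were accepted. */
static int xdp_xmit(const int *frames, int num_frames)
{
	int i, nxmit = 0;

	for (i = 0; i < num_frames; i++) {
		if (ring_enqueue(frames[i]))
			break;
		nxmit++;
	}
	return nxmit;
}

int main(void)
{
	int frames[6] = { 0, 1, 2, 3, 4, 5 };

	printf("queued %d of 6 frames\n", xdp_xmit(frames, 6));
	return 0;
}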
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6a3f633406..ce234e76ea 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2939,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
u64 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -10525,6 +10525,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring)
}
/**
+ * ixgbe_irq_disable_single - Disable single IRQ vector
+ * @adapter: adapter structure
+ * @ring: ring index
+ **/
+static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 qmask = BIT_ULL(ring);
+ u32 mask;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
+ mask = qmask & IXGBE_EIMC_RTX_QUEUE;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ mask = (qmask & 0xFFFFFFFF);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ mask = (qmask >> 32);
+ if (mask)
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ synchronize_irq(adapter->msix_entries[ring].vector);
+ else
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
* ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings
* @adapter: adapter structure
* @ring: ring index
@@ -10540,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
+ ixgbe_irq_disable_single(adapter, ring);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_disable(&rx_ring->q_vector->napi);
+
ixgbe_disable_txr(adapter, tx_ring);
if (xdp_ring)
ixgbe_disable_txr(adapter, xdp_ring);
@@ -10548,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
if (xdp_ring)
synchronize_rcu();
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_disable(&rx_ring->q_vector->napi);
-
ixgbe_clean_tx_ring(tx_ring);
if (xdp_ring)
ixgbe_clean_tx_ring(xdp_ring);
@@ -10578,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
tx_ring = adapter->tx_ring[ring];
xdp_ring = adapter->xdp_ring[ring];
- /* Rx/Tx/XDP Tx share the same napi context. */
- napi_enable(&rx_ring->q_vector->napi);
-
ixgbe_configure_tx_ring(adapter, tx_ring);
if (xdp_ring)
ixgbe_configure_tx_ring(adapter, xdp_ring);
@@ -10589,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
if (xdp_ring)
clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+
+ /* Rx/Tx/XDP Tx share the same napi context. */
+ napi_enable(&rx_ring->q_vector->napi);
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(ring));
+ IXGBE_WRITE_FLUSH(&adapter->hw);
}
/**
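
The new ixgbe_irq_disable_single() has to split a 64-bit per-queue interrupt mask across two 32-bit mask registers on the newer MACs, writing each half only when it is non-zero. A sketch of just that split; reg_write() is a printf stub rather than real MMIO, and the register naming is illustrative:

#include <stdint.h>
#include <stdio.h>

static void reg_write(int idx, uint32_t val)
{
	printf("write EX(%d) = 0x%08x\n", idx, (unsigned)val);
}

/* Route each half of the 64-bit queue mask to its 32-bit register. */
static void write_queue_mask(uint64_t qmask)
{
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFFu);
	uint32_t hi = (uint32_t)(qmask >> 32);

	if (lo)
		reg_write(0, lo);
	if (hi)
		reg_write(1, hi);
}

int main(void)
{
	write_queue_mask(1ull << 5);	/* ring 5  -> low half  */
	write_queue_mask(1ull << 40);	/* ring 40 -> high half */
	return 0;
}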
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6c70c84986..3c0f55b3e4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1338,7 +1338,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 9690ac01f0..7d741e3ba8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
+ /* Check if interrupt pending */
+ intr_val = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
- writeq(1, (void __iomem *)mbox->reg_base +
+ writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
EXPORT_SYMBOL(otx2_mbox_msg_send);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send_up);
+
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
+{
+ u64 data;
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ /* If data is non-zero, wait for ~1ms and return to the caller
+ * whether or not data has changed to zero after the wait.
+ */
+ if (!data)
+ return true;
+
+ usleep_range(950, 1000);
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ return data == 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
+
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{
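
otx2_mbox_wait_for_zero() gives a sender a bounded way to check that the peer has consumed a previously signalled doorbell before raising a new one: read the trigger register, sleep roughly a millisecond if it is still non-zero, and report the final state. A userspace sketch of that shape, with a plain variable standing in for the memory-mapped trigger register:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static volatile uint64_t doorbell;	/* stand-in for the mbox trigger register */

static uint64_t reg_read(void)
{
	return doorbell;
}

/* One short sleep, then report whether the doorbell dropped to zero. */
static bool wait_for_zero(void)
{
	if (!reg_read())
		return true;

	usleep(1000);	/* roughly the driver's usleep_range(950, 1000) */

	return reg_read() == 0;
}

int main(void)
{
	doorbell = 0;
	printf("idle: %s\n", wait_for_zero() ? "yes" : "no");

	doorbell = 2;	/* an UP message is still pending */
	printf("idle: %s\n", wait_for_zero() ? "yes" : "no");
	return 0;
}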
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 5df42634ce..bd4b9661ee 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG 2
+
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
+
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index dfd23580e3..d39d86e694 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
- int err, pf;
+ int pf;
pf = rvu_get_pf(event->pcifunc);
+ mutex_lock(&rvu->mbox_lock);
+
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
- if (!req)
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
+ }
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
- if (err)
- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 731bb82b57..32645aefd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2114,7 +2114,7 @@ bad_message:
}
}
-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@@ -2181,6 +2181,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
+ if (poll)
+ otx2_mbox_wait_for_zero(mbox, devid);
+
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@@ -2188,15 +2191,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
- __rvu_mbox_handler(mwork, TYPE_AFPF);
+ mutex_lock(&rvu->mbox_lock);
+ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
+ mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
- __rvu_mbox_handler(mwork, TYPE_AFVF);
+ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@@ -2371,6 +2377,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ mutex_init(&rvu->mbox_lock);
+
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@@ -2520,10 +2528,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
- int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@@ -2537,6 +2544,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@@ -2874,7 +2893,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_intr_handler, 0,
+ rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 8802961b88..185c296eaa 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -551,6 +551,8 @@ struct rvu {
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
+
+ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 38acdc7a73..72e060cf6b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
- int err, pfid;
+ int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
@@ -255,16 +255,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
+ mutex_lock(&rvu->mbox_lock);
+
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
- if (!msg)
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
continue;
+ }
+
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
- if (err)
- dev_warn(rvu->dev, "notification to pf %d failed\n",
- pfid);
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 02d0b707ae..a85ac039d7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1592,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
- otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 0691030708..7e16a341ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
- otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e5fe67e738..b40bd0e467 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr, int type)
+static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
/* The hdr->num_msgs is set to zero immediately in the interrupt
- * handler to ensure that it holds a correct value next time
- * when the interrupt handler is called.
- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
- * pf>mbox.up_num_msgs holds the data for use in
- * pfaf_mbox_up_handler.
+ * handler to ensure that it holds a correct value next time
+ * when the interrupt handler is called. pf->mw[i].num_msgs
+ * holds the data for use in otx2_pfvf_mbox_handler and
+ * pf->mw[i].up_num_msgs holds the data for use in
+ * otx2_pfvf_mbox_up_handler.
*/
if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs) {
mw[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
}
}
@@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
/* Msgs are already copied, trigger VF's mbox irq */
smp_wmb();
+ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
+
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
/* Restore VF's mbox bounce buffer region address */
src_mdev->mbase = bbuf_base;
@@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
end:
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
- __otx2_mbox_reset(mbox, 0);
+ __otx2_mbox_reset(mbox, vf_idx);
mdev->msgs_acked++;
}
}
@@ -564,8 +552,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
if (vfs > 64) {
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
- TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
@@ -574,7 +561,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
@@ -597,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
- pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
- WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 0);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@@ -821,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
struct mbox *af_mbox;
struct otx2_nic *pf;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ if (mdev->msgs_acked == (num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
@@ -945,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
int offset, id, devid = 0;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ u16 num_msgs;
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -959,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
- if (devid) {
+ /* Forward to VF iff VFs are really present */
+ if (devid && pci_num_vf(pf->pdev)) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
- af_mbox->up_num_msgs);
+ num_msgs);
return;
}
@@ -972,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
- struct mbox *mbox;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- mbox = &pf->mbox;
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
+
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(0));
+ }
return IRQ_HANDLED;
}
@@ -3087,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
struct otx2_vf_config *config;
struct cgx_link_info_msg *req;
struct mbox_msghdr *msghdr;
+ struct delayed_work *dwork;
struct otx2_nic *pf;
int vf_idx;
@@ -3095,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
+ if (config->intf_down)
+ return;
+
+ mutex_lock(&pf->mbox.lock);
+
+ dwork = &config->link_event_work;
+
+ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
sizeof(*req), sizeof(struct msg_rsp));
if (!msghdr) {
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -3107,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
+
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
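
With the doorbell now carrying separate MBOX_DOWN_MSG and MBOX_UP_MSG bits, the reworked PF interrupt handler clears each bit individually and queues the matching work item instead of treating every interrupt as a down-direction event. A compact model of that demultiplexing; the work-queueing calls are printf stubs, not the driver's queue_work() plumbing:

#include <stdint.h>
#include <stdio.h>

#define MBOX_DOWN_MSG 1ull
#define MBOX_UP_MSG   2ull

static void queue_down_work(void) { puts("queue DOWN (request/reply) work"); }
static void queue_up_work(void)   { puts("queue UP (notification) work"); }

/* Clear each doorbell bit and dispatch its own handler. */
static void mbox_intr(uint64_t *doorbell)
{
	if (*doorbell & MBOX_UP_MSG) {
		*doorbell &= ~MBOX_UP_MSG;
		queue_up_work();
	}

	if (*doorbell & MBOX_DOWN_MSG) {
		*doorbell &= ~MBOX_DOWN_MSG;
		queue_down_work();
	}
}

int main(void)
{
	uint64_t db = MBOX_DOWN_MSG | MBOX_UP_MSG;

	mbox_intr(&db);
	printf("doorbell now 0x%llx\n", (unsigned long long)db);
	return 0;
}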
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 35e0604835..cf0aa16d75 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (af_mbox->num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
+
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
+ u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (vf_mbox->up_num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
+
/* Read latest mbox data */
smp_rmb();
- /* Check for PF => VF response messages */
- mbox = &vf->mbox.mbox;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+ BIT_ULL(0));
}
- /* Check for PF => VF notification messages */
- mbox = &vf->mbox.mbox_up;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.up_num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+ BIT_ULL(0));
}
return IRQ_HANDLED;
@@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2_shutdown_qos(vf);
- otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d2c039f830..a1231203ec 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -677,8 +677,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
- MAC_MCR_RX_FIFO_CLR_DIS;
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
@@ -694,7 +693,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phylink_config);
u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
@@ -803,7 +802,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
if (rx_pause)
mcr |= MAC_MCR_FORCE_RX_FC;
- mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index b2a5d9c373..6ce0db3a1a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -994,7 +994,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_KEEPALIVE_DISABLE) |
FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
- MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ MTK_PPE_SCAN_MODE_CHECK_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
if (mtk_is_netsys_v2_or_greater(ppe->eth))
@@ -1090,17 +1090,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, false);
- /* disable offload engine */
- ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
- ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
-
/* disable aging */
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
- MTK_PPE_TB_CFG_AGE_TCP_FIN;
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
+ MTK_PPE_TB_CFG_SCAN_MODE;
ppe_clear(ppe, MTK_PPE_TB_CFG, val);
- return mtk_ppe_wait_busy(ppe);
+ if (mtk_ppe_wait_busy(ppe))
+ return -ETIMEDOUT;
+
+ /* disable offload engine */
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3e064234f6..98d4306929 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -157,6 +157,12 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (action == DEVLINK_RELOAD_ACTION_FW_ACTIVATE &&
+ !dev->priv.fw_reset) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate is unsupported for this function");
+ return -EOPNOTSUPP;
+ }
+
if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 803035d4e5..15d97c685a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -42,9 +42,9 @@ mlx5e_ptp_port_ts_cqe_list_add(struct mlx5e_ptp_port_ts_cqe_list *list, u8 metad
WARN_ON_ONCE(tracker->inuse);
tracker->inuse = true;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_add_tail(&tracker->entry, &list->tracker_list_head);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
static void
@@ -54,9 +54,9 @@ mlx5e_ptp_port_ts_cqe_list_remove(struct mlx5e_ptp_port_ts_cqe_list *list, u8 me
WARN_ON_ONCE(!tracker->inuse);
tracker->inuse = false;
- spin_lock(&list->tracker_list_lock);
+ spin_lock_bh(&list->tracker_list_lock);
list_del(&tracker->entry);
- spin_unlock(&list->tracker_list_lock);
+ spin_unlock_bh(&list->tracker_list_lock);
}
void mlx5e_ptpsq_track_metadata(struct mlx5e_ptpsq *ptpsq, u8 metadata)
@@ -155,7 +155,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
struct mlx5e_ptp_metadata_map *metadata_map = &ptpsq->metadata_map;
struct mlx5e_ptp_port_ts_cqe_tracker *pos, *n;
- spin_lock(&cqe_list->tracker_list_lock);
+ spin_lock_bh(&cqe_list->tracker_list_lock);
list_for_each_entry_safe(pos, n, &cqe_list->tracker_list_head, entry) {
struct sk_buff *skb =
mlx5e_ptp_metadata_map_lookup(metadata_map, pos->metadata_id);
@@ -170,7 +170,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
pos->inuse = false;
list_del(&pos->entry);
}
- spin_unlock(&cqe_list->tracker_list_lock);
+ spin_unlock_bh(&cqe_list->tracker_list_lock);
}
#define PTP_WQE_CTR2IDX(val) ((val) & ptpsq->ts_cqe_ctr_mask)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 86bf007fd0..b500cc2c96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -37,7 +37,7 @@ mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ mlx5_core_dbg(priv->mdev, "firmware flow level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index d4ebd87431..b2cabd6ab8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -310,9 +310,9 @@ static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_o
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
-static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
- struct mlx5e_macsec_sa *sa,
- bool is_tx, struct net_device *netdev, u32 fs_id)
+static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
{
int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
@@ -322,20 +322,49 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
fs_id);
- mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
sa->macsec_rule = NULL;
}
+static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
+ struct mlx5e_macsec_sa *sa, bool is_tx,
+ struct net_device *netdev, u32 fs_id)
+{
+ mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
+ mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
+}
+
+static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ struct mlx5e_macsec_sa *sa, bool encrypt,
+ bool is_tx, u32 *fs_id)
+{
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+ rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
+ MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
+
+ macsec_rule = mlx5_macsec_fs_add_rule(macsec_fs, ctx, &rule_attrs, fs_id);
+ if (!macsec_rule)
+ return -ENOMEM;
+
+ sa->macsec_rule = macsec_rule;
+
+ return 0;
+}
+
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt, bool is_tx, u32 *fs_id)
{
struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
struct mlx5e_macsec *macsec = priv->macsec;
- struct mlx5_macsec_rule_attrs rule_attrs;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_macsec_obj_attrs obj_attrs;
- union mlx5_macsec_rule *macsec_rule;
int err;
obj_attrs.next_pn = sa->next_pn;
@@ -357,20 +386,12 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
if (err)
return err;
- rule_attrs.macsec_obj_id = sa->macsec_obj_id;
- rule_attrs.sci = sa->sci;
- rule_attrs.assoc_num = sa->assoc_num;
- rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
- MLX5_ACCEL_MACSEC_ACTION_DECRYPT;
-
- macsec_rule = mlx5_macsec_fs_add_rule(mdev->macsec_fs, ctx, &rule_attrs, fs_id);
- if (!macsec_rule) {
- err = -ENOMEM;
- goto destroy_macsec_object;
+ if (sa->active) {
+ err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
+ if (err)
+ goto destroy_macsec_object;
}
- sa->macsec_rule = macsec_rule;
-
return 0;
destroy_macsec_object:
@@ -526,9 +547,7 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
goto destroy_sa;
macsec_device->tx_sa[assoc_num] = tx_sa;
- if (!secy->operational ||
- assoc_num != tx_sc->encoding_sa ||
- !tx_sa->active)
+ if (!secy->operational)
goto out;
err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
@@ -595,7 +614,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
if (ctx_tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
} else {
@@ -604,7 +623,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
out:
mutex_unlock(&macsec->lock);
@@ -1030,8 +1049,9 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
goto out;
}
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ if (rx_sa->active)
+ mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
kfree(rx_sa);
rx_sc->rx_sa[assoc_num] = NULL;
@@ -1112,8 +1132,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
if (!rx_sa || !rx_sa->macsec_rule)
continue;
- mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
- rx_sc->sc_xarray_element->fs_id);
+ mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
+ rx_sc->sc_xarray_element->fs_id);
}
}
@@ -1124,8 +1144,8 @@ static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
continue;
if (rx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false,
- &rx_sc->sc_xarray_element->fs_id);
+ err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
+ &rx_sc->sc_xarray_element->fs_id);
if (err)
goto out;
}
@@ -1178,7 +1198,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
if (!tx_sa)
continue;
- mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
+ mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
}
for (i = 0; i < MACSEC_NUM_AN; ++i) {
@@ -1187,7 +1207,7 @@ static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
continue;
if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
- err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
+ err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
if (err)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f0b506e562..1ead69c5f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -401,6 +401,8 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
+ /* ensure skb is put on metadata_map before tracking the index */
+ wmb();
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
index 190f10aba1..5a0047bdcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -152,7 +152,7 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
xa_for_each(&esw->offloads.vport_reps, i, rep) {
rpriv = rep->rep_data[REP_ETH].priv;
- if (!rpriv || !rpriv->netdev || !atomic_read(&rpriv->tc_ht.nelems))
+ if (!rpriv || !rpriv->netdev)
continue;
rhashtable_walk_enter(&rpriv->tc_ht, &iter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index b0455134c9..baaae628b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -535,21 +535,26 @@ esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
}
static bool
-esw_dests_to_vf_pf_vports(struct mlx5_flow_destination *dests, int max_dest)
+esw_dests_to_int_external(struct mlx5_flow_destination *dests, int max_dest)
{
- bool vf_dest = false, pf_dest = false;
+ bool internal_dest = false, external_dest = false;
int i;
for (i = 0; i < max_dest; i++) {
- if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
+ if (dests[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dests[i].type != MLX5_FLOW_DESTINATION_TYPE_UPLINK)
continue;
- if (dests[i].vport.num == MLX5_VPORT_UPLINK)
- pf_dest = true;
+ /* Uplink dest is external, but considered as internal
+ * if there is reformat because firmware uses LB+hairpin to support it.
+ */
+ if (dests[i].vport.num == MLX5_VPORT_UPLINK &&
+ !(dests[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID))
+ external_dest = true;
else
- vf_dest = true;
+ internal_dest = true;
- if (vf_dest && pf_dest)
+ if (internal_dest && external_dest)
return true;
}
@@ -695,9 +700,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
/* Header rewrite with combined wire+loopback in FDB is not allowed */
if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) &&
- esw_dests_to_vf_pf_vports(dest, i)) {
+ esw_dests_to_int_external(dest, i)) {
esw_warn(esw->dev,
- "FDB: Header rewrite with forwarding to both PF and VF is not allowed\n");
+ "FDB: Header rewrite with forwarding to both internal and external dests is not allowed\n");
rule = ERR_PTR(-EINVAL);
goto err_esw_get;
}
@@ -3658,22 +3663,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
return 0;
}
-static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct net *devl_net, *netdev_net;
- bool ret = false;
-
- mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
- if (dev->mlx5e_res.uplink_netdev) {
- netdev_net = dev_net(dev->mlx5e_res.uplink_netdev);
- devl_net = devlink_net(devlink);
- ret = net_eq(devl_net, netdev_net);
- }
- mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
- return ret;
-}
-
int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -3718,13 +3707,6 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
- if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
- !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
- return -EPERM;
- }
-
mlx5_lag_disable_change(esw->dev);
err = mlx5_esw_try_lock(esw);
if (err < 0) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index c4e19d627d..3a9cdf7940 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -679,19 +679,30 @@ void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
mlx5_eq_notifier_register(dev, &fw_reset->nb);
}
void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
{
- mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (!fw_reset)
+ return;
+
+ mlx5_eq_notifier_unregister(dev, &fw_reset->nb);
}
void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
@@ -709,9 +720,13 @@ static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
- struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ struct mlx5_fw_reset *fw_reset;
int err;
+ if (!MLX5_CAP_MCAM_REG(dev, mfrl))
+ return 0;
+
+ fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
if (!fw_reset)
return -ENOMEM;
fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
@@ -747,6 +762,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ if (!fw_reset)
+ return;
+
devl_params_unregister(priv_to_devlink(dev),
mlx5_fw_reset_devlink_params,
ARRAY_SIZE(mlx5_fw_reset_devlink_params));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8ff6dc9bc8..b5c709bba1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -452,10 +452,10 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct health_buffer __iomem *h = health->health;
u8 synd = ioread8(&h->synd);
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
if (!synd)
return 0;
- devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
return 0;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
index 4af285918e..75868b3f54 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mactable.c
@@ -347,10 +347,10 @@ int sparx5_del_mact_entry(struct sparx5 *sparx5,
list) {
if ((vid == 0 || mact_entry->vid == vid) &&
ether_addr_equal(addr, mact_entry->mac)) {
+ sparx5_mact_forget(sparx5, addr, mact_entry->vid);
+
list_del(&mact_entry->list);
devm_kfree(sparx5->dev, mact_entry);
-
- sparx5_mact_forget(sparx5, addr, mact_entry->vid);
}
}
mutex_unlock(&sparx5->mact_lock);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 88d6d992e7..86db8e8141 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -338,6 +338,11 @@ static void nfp_fl_lag_do_work(struct work_struct *work)
acti_netdevs = kmalloc_array(entry->slave_cnt,
sizeof(*acti_netdevs), GFP_KERNEL);
+ if (!acti_netdevs) {
+ schedule_delayed_work(&lag->work,
+ NFP_FL_LAG_DELAY);
+ continue;
+ }
/* Include sanity check in the loop. It may be that a bond has
* changed between processing the last notification and the
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index b6c06adb86..b95187a084 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -551,7 +551,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+ snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9df39cf8b0..1072e2210a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1443,7 +1443,7 @@ static int temac_probe(struct platform_device *pdev)
}
/* map device registers */
- lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
+ lp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lp->regs)) {
dev_err(&pdev->dev, "could not map TEMAC registers\n");
return -ENOMEM;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index acd9c615d1..356da958ee 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -221,7 +221,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
unsigned int len;
- int err = 0;
+ int nh, err = 0;
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
@@ -272,9 +272,23 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
skb->pkt_type = PACKET_HOST;
}
- oiph = skb_network_header(skb);
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_reset_network_header(skb);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(geneve->dev, rx_length_errors);
+ DEV_STATS_INC(geneve->dev, rx_errors);
+ goto drop;
+ }
+
+ /* Get the outer header. */
+ oiph = skb->head + nh;
+
if (geneve_get_sk_family(gs) == AF_INET)
err = IP_ECN_decapsulate(oiph, skb);
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index b7cb718177..29e1cbea6d 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -380,7 +380,7 @@ static int dp83822_config_init(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
- int rgmii_delay;
+ int rgmii_delay = 0;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
@@ -390,30 +390,33 @@ static int dp83822_config_init(struct phy_device *phydev)
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
true);
- if (rx_int_delay <= 0)
- rgmii_delay = 0;
- else
- rgmii_delay = DP83822_RX_CLK_SHIFT;
+ /* Set DP83822_RX_CLK_SHIFT to enable rx clk internal delay */
+ if (rx_int_delay > 0)
+ rgmii_delay |= DP83822_RX_CLK_SHIFT;
tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
false);
+
+ /* Set DP83822_TX_CLK_SHIFT to disable tx clk internal delay */
if (tx_int_delay <= 0)
- rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
- else
rgmii_delay |= DP83822_TX_CLK_SHIFT;
- if (rgmii_delay) {
- err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, rgmii_delay);
- if (err)
- return err;
- }
+ err = phy_modify_mmd(phydev, DP83822_DEVADDR, MII_DP83822_RCSR,
+ DP83822_RX_CLK_SHIFT | DP83822_TX_CLK_SHIFT, rgmii_delay);
+ if (err)
+ return err;
+
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
- phy_set_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ if (err)
+ return err;
} else {
- phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+ err = phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_RCSR, DP83822_RGMII_MODE_EN);
+
+ if (err)
+ return err;
}
if (dp83822->fx_enabled) {
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index a42df2c1bd..813b753e21 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2701,8 +2701,8 @@ EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
if (enable) {
- u16 val, ctl = BMCR_LOOPBACK;
- int ret;
+ u16 ctl = BMCR_LOOPBACK;
+ int ret, val;
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
@@ -2954,7 +2954,7 @@ s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
if (delay < 0)
return delay;
- if (delay && size == 0)
+ if (size == 0)
return delay;
if (delay < delay_values[0] || delay > delay_values[size - 1]) {
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a2dde84499..f0fb9cd1ff 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3137,7 +3137,8 @@ static int lan78xx_open(struct net_device *net)
done:
mutex_unlock(&dev->dev_mutex);
- usb_autopm_put_interface(dev->intf);
+ if (ret < 0)
+ usb_autopm_put_interface(dev->intf);
return ret;
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index a530f20ee2..2fa46baa58 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -2105,6 +2105,11 @@ static const struct usb_device_id products[] = {
.driver_info = (unsigned long) &smsc95xx_info,
},
{
+ /* SYSTEC USB-SPEmodule1 10BASE-T1L Ethernet Device */
+ USB_DEVICE(0x0878, 0x1400),
+ .driver_info = (unsigned long)&smsc95xx_info,
+ },
+ {
/* Microchip's EVB-LAN8670-USB 10BASE-T1S Ethernet Device */
USB_DEVICE(0x184F, 0x0051),
.driver_info = (unsigned long)&smsc95xx_info,
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 143bd4ab16..57947a5590 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -737,7 +737,9 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
data->eeprom_len = SR9800_EEPROM_LEN;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
/* LED Setting Rule :
* AABB:CCDD
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a2e80278eb..2f3fd28737 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1533,8 +1533,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
if (peer_priv->_xdp_prog)
features &= ~NETIF_F_GSO_SOFTWARE;
}
- if (priv->_xdp_prog)
- features |= NETIF_F_GRO;
return features;
}
@@ -1638,14 +1636,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding
- * XDP is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1661,14 +1651,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 80ddaff759..a6c787454a 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -382,12 +382,12 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
page = rbi->page;
dma_sync_single_for_cpu(&adapter->pdev->dev,
page_pool_get_dma_addr(page) +
- rq->page_pool->p.offset, rcd->len,
+ rq->page_pool->p.offset, rbi->len,
page_pool_get_dma_dir(rq->page_pool));
- xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rcd->len, false);
+ rbi->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index e220d761b1..f7055180ba 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
if (!allowedips_node)
goto no_allowedips;
if (!ctx->allowedips_seq)
- ctx->allowedips_seq = peer->device->peer_allowedips.seq;
- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
goto no_allowedips;
allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!peers_nest)
goto out;
ret = 0;
- /* If the last cursor was removed via list_del_init in peer_remove, then
+ lockdep_assert_held(&wg->device_update_lock);
+ /* If the last cursor was removed in peer_remove or peer_remove_all, then
* we just treat this the same as there being no more peers left. The
* reason is that seq_nr should indicate to userspace that this isn't a
* coherent dump anyway, so they'll try again.
*/
if (list_empty(&wg->peer_list) ||
- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ (ctx->next_peer && ctx->next_peer->is_dead)) {
nla_nest_cancel(skb, peers_nest);
goto out;
}
- lockdep_assert_held(&wg->device_update_lock);
peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
if (get_peer(peer, skb, ctx)) {
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index a176653c88..db01ec03bd 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
- keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
for (i = 1; i <= top; ++i)
counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->counter = their_counter;
+ WRITE_ONCE(counter->counter, their_counter);
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving_counter.counter);
+ READ_ONCE(keypair->receiving_counter.counter));
goto next;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 6b6aa3c367..0ce08e9a0a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -851,6 +851,10 @@ ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
}
ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
arg->desc_id = ev->desc_id;
arg->status = ev->status;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 71c6dab1ae..1c4613b1bd 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2297,6 +2297,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ /* Initialize rx_mcs_160 to 9 which is an invalid value */
+ rx_mcs_160 = 9;
if (support_160) {
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
@@ -2308,6 +2310,8 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
}
}
+ /* Initialize rx_mcs_80 to 9 which is an invalid value */
+ rx_mcs_80 = 9;
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
@@ -3026,7 +3030,14 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
rcu_read_unlock();
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
peer_arg.is_assoc = true;
+
ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
@@ -3049,12 +3060,6 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
return;
}
- if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
- ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
- arvif->vdev_id, bss_conf->bssid);
- return;
- }
-
WARN_ON(arvif->is_up);
arvif->aid = vif->cfg.aid;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 68c42ca44f..d67494de44 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -418,7 +418,7 @@ struct ath12k_sta {
};
#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
#define ATH12K_NUM_CHANS 100
#define ATH12K_MAX_5G_CHAN 173
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
index 8fbf868e6f..788160c84c 100644
--- a/drivers/net/wireless/ath/ath12k/dbring.c
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
index 45d33279e6..fe5a732ba9 100644
--- a/drivers/net/wireless/ath/ath12k/debug.c
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 492ca6ce67..62f9cdbb81 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index b896dfe66d..1bdab8604d 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -449,8 +449,8 @@ static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
{
- return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
- RX_MSDU_END_INFO5_DA_IS_MCBC;
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
}
static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 66035a787c..fc47e7e6b4 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_H
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
index f6afbd8196..4f25eb9f77 100644
--- a/drivers/net/wireless/ath/ath12k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index 4095fd82b1..c653ca1f59 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HIF_H
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index b55cf33e37..de60d988d8 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 2d6427cf41..d2622bfef9 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HW_H
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index b698e55a5b..237fb8e7cf 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <net/mac80211.h>
@@ -5261,7 +5261,7 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
do {
if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
vdev_stats_id++;
- if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) {
vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
break;
}
@@ -7188,7 +7188,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
- if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
channels = kmemdup(ath12k_6ghz_channels,
sizeof(ath12k_6ghz_channels), GFP_KERNEL);
if (!channels) {
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 59b4e8f5ee..7d71ae1aba 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_MAC_H
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index 39e640293c..27eb38b2b1 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/msi.h>
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 3006cd3fbe..a6a5f9bcff 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 0f24fd9395..9a17a7dcdd 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PCI_H
#define ATH12K_PCI_H
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
index c6edb24cbe..7b3500b5c8 100644
--- a/drivers/net/wireless/ath/ath12k/peer.h
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_PEER_H
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index f6e949c618..77a132f6bb 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/elf.h>
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index e20d6511d1..e25bbaa125 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_QMI_H
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index 5c006256c8..1dac843980 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include "core.h"
@@ -103,7 +103,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
bands = hw->wiphy->bands;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
@@ -129,7 +129,7 @@ int ath12k_reg_update_chan_list(struct ath12k *ar)
ch = arg->channel;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!bands[band])
+ if (!(ar->mac.sbands[band].channels && bands[band]))
continue;
for (i = 0; i < bands[band]->n_channels; i++) {
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
index 35569f0304..d4a0776e10 100644
--- a/drivers/net/wireless/ath/ath12k/reg.h
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_REG_H
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
index c4058abc51..55f20c446c 100644
--- a/drivers/net/wireless/ath/ath12k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_RX_DESC_H
#define ATH12K_RX_DESC_H
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 0e5bf5ce8d..11cc3005c0 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 237f4ec2cf..6c33e898b3 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -306,7 +306,6 @@ struct ath9k_htc_tx {
DECLARE_BITMAP(tx_slot, MAX_TX_BUF_NUM);
struct timer_list cleanup_timer;
spinlock_t tx_lock;
- bool initialized;
};
struct ath9k_htc_tx_ctl {
@@ -515,6 +514,7 @@ struct ath9k_htc_priv {
unsigned long ps_usecount;
bool ps_enabled;
bool ps_idle;
+ bool initialized;
#ifdef CONFIG_MAC80211_LEDS
enum led_brightness brightness;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index dae3d9c7b6..fc339079ee 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -966,6 +966,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
htc_handle->drv_priv = priv;
+ /* Allow ath9k_wmi_event_tasklet() to operate. */
+ smp_wmb();
+ priv->initialized = true;
+
return 0;
err_init:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index efcaeccb05..ce9c04e418 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -815,10 +815,6 @@ int ath9k_tx_init(struct ath9k_htc_priv *priv)
skb_queue_head_init(&priv->tx.data_vo_queue);
skb_queue_head_init(&priv->tx.tx_failed);
- /* Allow ath9k_wmi_event_tasklet(WMI_TXSTATUS_EVENTID) to operate. */
- smp_wmb();
- priv->tx.initialized = true;
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 1476b42b52..805ad31edb 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -155,6 +155,12 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
}
spin_unlock_irqrestore(&wmi->wmi_lock, flags);
+ /* Check if ath9k_htc_probe_device() completed. */
+ if (!data_race(priv->initialized)) {
+ kfree_skb(skb);
+ continue;
+ }
+
hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_id = be16_to_cpu(hdr->command_id);
wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -169,10 +175,6 @@ void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
&wmi->drv_priv->fatal_work);
break;
case WMI_TXSTATUS_EVENTID:
- /* Check if ath9k_tx_init() completed. */
- if (!data_race(priv->tx.initialized))
- break;
-
spin_lock_bh(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
spin_unlock_bh(&priv->tx.tx_lock);
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 67b4bac048..c0d8fc0b22 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -1082,6 +1082,22 @@ static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
return dev->__using_pio_transfers;
}
+static inline void b43_wake_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_wake_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_wake_queue(dev->wl->hw, 0);
+}
+
+static inline void b43_stop_queue(struct b43_wldev *dev, int queue_prio)
+{
+ if (dev->qos_enabled)
+ ieee80211_stop_queue(dev->wl->hw, queue_prio);
+ else
+ ieee80211_stop_queue(dev->wl->hw, 0);
+}
+
/* Message printing */
__printf(2, 3) void b43info(struct b43_wl *wl, const char *fmt, ...);
__printf(2, 3) void b43err(struct b43_wl *wl, const char *fmt, ...);
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 760d1a28ed..6ac7dcebff 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1399,7 +1399,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
should_inject_overflow(ring)) {
/* This TX ring is full. */
unsigned int skb_mapping = skb_get_queue_mapping(skb);
- ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ b43_stop_queue(dev, skb_mapping);
dev->wl->tx_queue_stopped[skb_mapping] = true;
ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
@@ -1570,7 +1570,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
} else {
/* If the driver queue is running wake the corresponding
* mac80211 queue. */
- ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
+ b43_wake_queue(dev, ring->queue_prio);
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 92ca0b2ca2..effb6c23f8 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -2587,7 +2587,8 @@ static void b43_request_firmware(struct work_struct *work)
start_ieee80211:
wl->hw->queues = B43_QOS_QUEUE_NUM;
- if (!modparam_qos || dev->fw.opensource)
+ if (!modparam_qos || dev->fw.opensource ||
+ dev->dev->chip_id == BCMA_CHIP_ID_BCM4331)
wl->hw->queues = 1;
err = ieee80211_register_hw(wl->hw);
@@ -3603,7 +3604,7 @@ static void b43_tx_work(struct work_struct *work)
err = b43_dma_tx(dev, skb);
if (err == -ENOSPC) {
wl->tx_queue_stopped[queue_num] = true;
- ieee80211_stop_queue(wl->hw, queue_num);
+ b43_stop_queue(dev, queue_num);
skb_queue_head(&wl->tx_queue[queue_num], skb);
break;
}
@@ -3627,6 +3628,7 @@ static void b43_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
+ u16 skb_queue_mapping;
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
@@ -3635,12 +3637,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
- if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ skb_queue_mapping = skb_get_queue_mapping(skb);
+ skb_queue_tail(&wl->tx_queue[skb_queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb_queue_mapping])
ieee80211_queue_work(wl->hw, &wl->tx_work);
- } else {
- ieee80211_stop_queue(wl->hw, skb->queue_mapping);
- }
+ else
+ b43_stop_queue(wl->current_dev, skb_queue_mapping);
}
static void b43_qos_params_upload(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 0cf70fdb60..e41f2f5b4c 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -525,7 +525,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (total_len > (q->buffer_size - q->buffer_used)) {
/* Not enough memory on the queue. */
err = -EBUSY;
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
goto out;
}
@@ -552,7 +552,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
(q->free_packet_slots == 0)) {
/* The queue is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
+ b43_stop_queue(dev, skb_get_queue_mapping(skb));
q->stopped = true;
}
@@ -587,7 +587,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
list_add(&pack->list, &q->packets_list);
if (q->stopped) {
- ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
+ b43_wake_queue(dev, q->queue_prio);
q->stopped = false;
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
index ac3a36fa36..a963c24297 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/core.c
@@ -7,21 +7,16 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <feature.h>
#include "vops.h"
-static int brcmf_bca_attach(struct brcmf_pub *drvr)
+static void brcmf_bca_feat_attach(struct brcmf_if *ifp)
{
- pr_err("%s: executing\n", __func__);
- return 0;
-}
-
-static void brcmf_bca_detach(struct brcmf_pub *drvr)
-{
- pr_err("%s: executing\n", __func__);
+ /* SAE support not confirmed so disabling for now */
+ ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_SAE);
}
const struct brcmf_fwvid_ops brcmf_bca_ops = {
- .attach = brcmf_bca_attach,
- .detach = brcmf_bca_detach,
+ .feat_attach = brcmf_bca_feat_attach,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 44cea18dd2..0ff15d4083 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -32,6 +32,7 @@
#include "vendor.h"
#include "bus.h"
#include "common.h"
+#include "fwvid.h"
#define BRCMF_SCAN_IE_LEN_MAX 2048
@@ -1179,8 +1180,7 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg,
scan_request = cfg->scan_request;
cfg->scan_request = NULL;
- if (timer_pending(&cfg->escan_timeout))
- del_timer_sync(&cfg->escan_timeout);
+ timer_delete_sync(&cfg->escan_timeout);
if (fw_abort) {
/* Do a scan abort to stop the driver's scan engine */
@@ -1687,52 +1687,39 @@ static u16 brcmf_map_fw_linkdown_reason(const struct brcmf_event_msg *e)
return reason;
}
-static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags)
{
struct brcmf_pub *drvr = ifp->drvr;
struct brcmf_wsec_pmk_le pmk;
int err;
+ if (key_len > sizeof(pmk.key)) {
+ bphy_err(drvr, "key must be less than %zu bytes\n",
+ sizeof(pmk.key));
+ return -EINVAL;
+ }
+
memset(&pmk, 0, sizeof(pmk));
- /* pass pmk directly */
- pmk.key_len = cpu_to_le16(pmk_len);
- pmk.flags = cpu_to_le16(0);
- memcpy(pmk.key, pmk_data, pmk_len);
+ /* pass key material directly */
+ pmk.key_len = cpu_to_le16(key_len);
+ pmk.flags = cpu_to_le16(flags);
+ memcpy(pmk.key, key, key_len);
- /* store psk in firmware */
+ /* store key material in firmware */
err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_WSEC_PMK,
&pmk, sizeof(pmk));
if (err < 0)
bphy_err(drvr, "failed to change PSK in firmware (len=%u)\n",
- pmk_len);
+ key_len);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_set_wsec);
-static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
- u16 pwd_len)
+static int brcmf_set_pmk(struct brcmf_if *ifp, const u8 *pmk_data, u16 pmk_len)
{
- struct brcmf_pub *drvr = ifp->drvr;
- struct brcmf_wsec_sae_pwd_le sae_pwd;
- int err;
-
- if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
- bphy_err(drvr, "sae_password must be less than %d\n",
- BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
- return -EINVAL;
- }
-
- sae_pwd.key_len = cpu_to_le16(pwd_len);
- memcpy(sae_pwd.key, pwd_data, pwd_len);
-
- err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
- sizeof(sae_pwd));
- if (err < 0)
- bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
- pwd_len);
-
- return err;
+ return brcmf_set_wsec(ifp, pmk_data, pmk_len, 0);
}
static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
@@ -2503,8 +2490,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
bphy_err(drvr, "failed to clean up user-space RSNE\n");
goto done;
}
- err = brcmf_set_sae_password(ifp, sme->crypto.sae_pwd,
- sme->crypto.sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, &sme->crypto);
if (!err && sme->crypto.psk)
err = brcmf_set_pmk(ifp, sme->crypto.psk,
BRCMF_WSEC_MAX_PSK_LEN);
@@ -4322,6 +4308,9 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
int ret;
pmk_op = kzalloc(sizeof(*pmk_op), GFP_KERNEL);
+ if (!pmk_op)
+ return -ENOMEM;
+
pmk_op->version = cpu_to_le16(BRCMF_PMKSA_VER_3);
if (!pmksa) {
@@ -5254,8 +5243,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
if (crypto->sae_pwd) {
brcmf_dbg(INFO, "using SAE offload\n");
profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
- err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
- crypto->sae_pwd_len);
+ err = brcmf_fwvid_set_sae_password(ifp, crypto);
if (err < 0)
goto exit;
}
@@ -5362,10 +5350,12 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
msleep(400);
if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ struct cfg80211_crypto_settings crypto = {};
+
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
brcmf_set_pmk(ifp, NULL, 0);
if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
- brcmf_set_sae_password(ifp, NULL, 0);
+ brcmf_fwvid_set_sae_password(ifp, &crypto);
profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
}
@@ -8437,6 +8427,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg)
brcmf_btcoex_detach(cfg);
wiphy_unregister(cfg->wiphy);
wl_deinit_priv(cfg);
+ cancel_work_sync(&cfg->escan_timeout_work);
brcmf_free_wiphy(cfg->wiphy);
kfree(cfg);
}
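
Note on the brcmfmac change above: the old brcmf_set_pmk()/brcmf_set_sae_password() pair is collapsed into a single exported brcmf_set_wsec() helper that takes the key material plus a flags word; the PMK path passes flags == 0 and the vendor code passes BRCMF_WSEC_PASSPHRASE. Below is a minimal standalone sketch of that pattern, using hypothetical names and a simplified record layout rather than the driver's real structures.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MAX_KEY_LEN     128        /* stands in for BRCMF_WSEC_MAX_SAE_PASSWORD_LEN */
#define FLAG_PASSPHRASE 0x0008     /* stands in for BRCMF_WSEC_PASSPHRASE */

struct wsec_rec {                  /* firmware-facing record (little-endian on the wire) */
	uint16_t key_len;
	uint16_t flags;
	uint8_t  key[MAX_KEY_LEN];
};

/* One helper serves both the raw-PMK case and the passphrase case. */
static int set_wsec(struct wsec_rec *out, const uint8_t *key, uint16_t key_len,
		    uint16_t flags)
{
	if (key_len > sizeof(out->key))
		return -1;                 /* reject oversized key material */
	memset(out, 0, sizeof(*out));
	out->key_len = key_len;            /* the kernel code uses cpu_to_le16() here */
	out->flags = flags;
	memcpy(out->key, key, key_len);
	return 0;
}

int main(void)
{
	struct wsec_rec rec;
	const uint8_t psk[32] = { 0 };

	set_wsec(&rec, psk, sizeof(psk), 0);                /* PMK path: flags == 0 */
	set_wsec(&rec, (const uint8_t *)"passphrase", 10,
		 FLAG_PASSPHRASE);                          /* SAE passphrase path */
	printf("len=%u flags=0x%x\n", (unsigned)rec.key_len, (unsigned)rec.flags);
	return 0;
}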
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 0e1fa3f0de..dc3a6a5375 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -468,4 +468,6 @@ void brcmf_set_mpc(struct brcmf_if *ndev, int mpc);
void brcmf_abort_scanning(struct brcmf_cfg80211_info *cfg);
void brcmf_cfg80211_free_netdev(struct net_device *ndev);
+int brcmf_set_wsec(struct brcmf_if *ifp, const u8 *key, u16 key_len, u16 flags);
+
#endif /* BRCMFMAC_CFG80211_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
index b75652ba93..bec5748310 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/core.c
@@ -7,21 +7,36 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <fwil.h>
#include "vops.h"
-static int brcmf_cyw_attach(struct brcmf_pub *drvr)
+static int brcmf_cyw_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_err("%s: executing\n", __func__);
- return 0;
-}
+ struct brcmf_pub *drvr = ifp->drvr;
+ struct brcmf_wsec_sae_pwd_le sae_pwd;
+ u16 pwd_len = crypto->sae_pwd_len;
+ int err;
-static void brcmf_cyw_detach(struct brcmf_pub *drvr)
-{
- pr_err("%s: executing\n", __func__);
+ if (pwd_len > BRCMF_WSEC_MAX_SAE_PASSWORD_LEN) {
+ bphy_err(drvr, "sae_password must be less than %d\n",
+ BRCMF_WSEC_MAX_SAE_PASSWORD_LEN);
+ return -EINVAL;
+ }
+
+ sae_pwd.key_len = cpu_to_le16(pwd_len);
+ memcpy(sae_pwd.key, crypto->sae_pwd, pwd_len);
+
+ err = brcmf_fil_iovar_data_set(ifp, "sae_password", &sae_pwd,
+ sizeof(sae_pwd));
+ if (err < 0)
+ bphy_err(drvr, "failed to set SAE password in firmware (len=%u)\n",
+ pwd_len);
+
+ return err;
}
const struct brcmf_fwvid_ops brcmf_cyw_ops = {
- .attach = brcmf_cyw_attach,
- .detach = brcmf_cyw_detach,
+ .set_sae_password = brcmf_cyw_set_sae_pwd,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 6d10c9efbe..909a34a1ab 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -13,6 +13,7 @@
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
+#include "fwvid.h"
#include "feature.h"
#include "common.h"
@@ -339,6 +340,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_FWSUP, "sup_wpa");
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_SCAN_V2, "scan_ver");
+ brcmf_fwvid_feat_attach(ifp);
+
if (drvr->settings->feature_disable) {
brcmf_dbg(INFO, "Features: 0x%02x, disable: 0x%02x\n",
ifp->drvr->feat_flags,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index 72fe8bce6e..a9514d72f7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -239,6 +239,7 @@ brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *dat
mutex_unlock(&drvr->proto_block);
return err;
}
+BRCMF_EXPORT_SYMBOL_GPL(brcmf_fil_iovar_data_set);
s32
brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 9d248ba1c0..e74a23e118 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -584,7 +584,7 @@ struct brcmf_wsec_key_le {
struct brcmf_wsec_pmk_le {
__le16 key_len;
__le16 flags;
- u8 key[2 * BRCMF_WSEC_MAX_PSK_LEN + 1];
+ u8 key[BRCMF_WSEC_MAX_SAE_PASSWORD_LEN];
};
/**
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
index 86eafdb405..b427782554 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.c
@@ -89,8 +89,7 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *vmod,
if (fwvid >= BRCMF_FWVENDOR_NUM)
return -ERANGE;
- if (WARN_ON(!vmod) || WARN_ON(!vops) ||
- WARN_ON(!vops->attach) || WARN_ON(!vops->detach))
+ if (WARN_ON(!vmod) || WARN_ON(!vops))
return -EINVAL;
if (WARN_ON(fwvid_list[fwvid].vmod))
@@ -150,7 +149,7 @@ static inline int brcmf_fwvid_request_module(enum brcmf_fwvendor fwvid)
}
#endif
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
+int brcmf_fwvid_attach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
int ret;
@@ -175,7 +174,7 @@ int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr)
return ret;
}
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
+void brcmf_fwvid_detach(struct brcmf_pub *drvr)
{
enum brcmf_fwvendor fwvid = drvr->bus_if->fwvid;
@@ -187,9 +186,10 @@ void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr)
mutex_lock(&fwvid_list_lock);
- drvr->vops = NULL;
- list_del(&drvr->bus_if->list);
-
+ if (drvr->vops) {
+ drvr->vops = NULL;
+ list_del(&drvr->bus_if->list);
+ }
mutex_unlock(&fwvid_list_lock);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
index 43df58bb70..dac22534d0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwvid.h
@@ -6,12 +6,14 @@
#define FWVID_H_
#include "firmware.h"
+#include "cfg80211.h"
struct brcmf_pub;
+struct brcmf_if;
struct brcmf_fwvid_ops {
- int (*attach)(struct brcmf_pub *drvr);
- void (*detach)(struct brcmf_pub *drvr);
+ void (*feat_attach)(struct brcmf_if *ifp);
+ int (*set_sae_password)(struct brcmf_if *ifp, struct cfg80211_crypto_settings *crypto);
};
/* exported functions */
@@ -20,28 +22,29 @@ int brcmf_fwvid_register_vendor(enum brcmf_fwvendor fwvid, struct module *mod,
int brcmf_fwvid_unregister_vendor(enum brcmf_fwvendor fwvid, struct module *mod);
/* core driver functions */
-int brcmf_fwvid_attach_ops(struct brcmf_pub *drvr);
-void brcmf_fwvid_detach_ops(struct brcmf_pub *drvr);
+int brcmf_fwvid_attach(struct brcmf_pub *drvr);
+void brcmf_fwvid_detach(struct brcmf_pub *drvr);
const char *brcmf_fwvid_vendor_name(struct brcmf_pub *drvr);
-static inline int brcmf_fwvid_attach(struct brcmf_pub *drvr)
+static inline void brcmf_fwvid_feat_attach(struct brcmf_if *ifp)
{
- int ret;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
- ret = brcmf_fwvid_attach_ops(drvr);
- if (ret)
- return ret;
+ if (!vops->feat_attach)
+ return;
- return drvr->vops->attach(drvr);
+ vops->feat_attach(ifp);
}
-static inline void brcmf_fwvid_detach(struct brcmf_pub *drvr)
+static inline int brcmf_fwvid_set_sae_password(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- if (!drvr->vops)
- return;
+ const struct brcmf_fwvid_ops *vops = ifp->drvr->vops;
+
+ if (!vops || !vops->set_sae_password)
+ return -EOPNOTSUPP;
- drvr->vops->detach(drvr);
- brcmf_fwvid_detach_ops(drvr);
+ return vops->set_sae_password(ifp, crypto);
}
#endif /* FWVID_H_ */
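
Note on the fwvid.h rework above: the vendor table becomes a set of optional hooks, and the inline wrappers probe each function pointer, either silently skipping (feat_attach) or returning -EOPNOTSUPP (set_sae_password) when a vendor does not implement it. A standalone sketch of that optional-ops dispatch, with hypothetical names:

#include <stdio.h>
#include <errno.h>

struct device;                       /* opaque placeholder */

struct vendor_ops {
	void (*feat_attach)(struct device *dev);            /* optional */
	int  (*set_sae_password)(struct device *dev,
				 const char *pwd, int len); /* optional */
};

/* Optional hook with no meaningful return value: silently skip if absent. */
static void call_feat_attach(const struct vendor_ops *ops, struct device *dev)
{
	if (ops && ops->feat_attach)
		ops->feat_attach(dev);
}

/* Optional hook whose absence the caller must be able to detect. */
static int call_set_sae_password(const struct vendor_ops *ops, struct device *dev,
				 const char *pwd, int len)
{
	if (!ops || !ops->set_sae_password)
		return -EOPNOTSUPP;
	return ops->set_sae_password(dev, pwd, len);
}

static int demo_set_pwd(struct device *dev, const char *pwd, int len)
{
	(void)dev;
	printf("setting %d-byte password: %s\n", len, pwd);
	return 0;
}

int main(void)
{
	struct vendor_ops partial = { .set_sae_password = demo_set_pwd };

	call_feat_attach(&partial, NULL);                          /* silently skipped */
	return call_set_sae_password(&partial, NULL, "secret", 6); /* dispatched */
}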
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
index 5573a47766..fd593b93ad 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/core.c
@@ -7,21 +7,17 @@
#include <core.h>
#include <bus.h>
#include <fwvid.h>
+#include <cfg80211.h>
#include "vops.h"
-static int brcmf_wcc_attach(struct brcmf_pub *drvr)
+static int brcmf_wcc_set_sae_pwd(struct brcmf_if *ifp,
+ struct cfg80211_crypto_settings *crypto)
{
- pr_debug("%s: executing\n", __func__);
- return 0;
-}
-
-static void brcmf_wcc_detach(struct brcmf_pub *drvr)
-{
- pr_debug("%s: executing\n", __func__);
+ return brcmf_set_wsec(ifp, crypto->sae_pwd, crypto->sae_pwd_len,
+ BRCMF_WSEC_PASSPHRASE);
}
const struct brcmf_fwvid_ops brcmf_wcc_ops = {
- .attach = brcmf_wcc_attach,
- .detach = brcmf_wcc_detach,
+ .set_sae_password = brcmf_wcc_set_sae_pwd,
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index ccc621b8ed..4a1fe982a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -383,8 +383,9 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
return sh;
}
-static void wlc_phy_timercb_phycal(struct brcms_phy *pi)
+static void wlc_phy_timercb_phycal(void *ptr)
{
+ struct brcms_phy *pi = ptr;
uint delay = 5;
if (PHY_PERICAL_MPHASE_PENDING(pi)) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index a0de5db0cd..b723817915 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -57,12 +57,11 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim)
}
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name)
{
return (struct wlapi_timer *)
- brcms_init_timer(physhim->wl, (void (*)(void *))fn,
- arg, name);
+ brcms_init_timer(physhim->wl, fn, arg, name);
}
void wlapi_free_timer(struct wlapi_timer *t)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
index dd8774717a..27d0934e60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.h
@@ -131,7 +131,7 @@ void wlc_phy_shim_detach(struct phy_shim_info *physhim);
/* PHY to WL utility functions */
struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
- void (*fn)(struct brcms_phy *pi),
+ void (*fn)(void *pi),
void *arg, const char *name);
void wlapi_free_timer(struct wlapi_timer *t);
void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
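
Note on the brcmsmac phy_shim change above: the timer callback prototype switches from void (*)(struct brcms_phy *) to void (*)(void *), so the pointer can be handed to brcms_init_timer() without a cast; calling a function through a pointer of a different type is undefined behaviour and trips kernel control-flow-integrity checks. A standalone sketch of the corrected shape, hypothetical names only:

#include <stdio.h>

struct phy { int id; };

/* A generic timer layer only knows about void (*)(void *). */
static void run_timer(void (*fn)(void *), void *arg)
{
	fn(arg);
}

/* After the fix: the callback takes void * and recovers its real type inside. */
static void phycal_cb(void *ptr)
{
	struct phy *pi = ptr;

	printf("calibrating phy %d\n", pi->id);
}

int main(void)
{
	struct phy pi = { .id = 0 };

	run_timer(phycal_cb, &pi);   /* no cast needed; the types already match */
	return 0;
}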
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index dcc4810cb3..e161b44539 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -767,7 +767,7 @@ read_table:
* from index 1, so the maximum value allowed here is
* ACPI_SAR_PROFILES_NUM - 1.
*/
- if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+ if (n_profiles >= ACPI_SAR_PROFILE_NUM) {
ret = -EINVAL;
goto out_free;
}
@@ -1296,7 +1296,6 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (IS_ERR(data))
return;
- /* try to read wtas table revision 1 or revision 0*/
wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
ACPI_WPFC_WIFI_DATA_SIZE,
&tbl_rev);
@@ -1306,13 +1305,14 @@ void iwl_acpi_get_phy_filters(struct iwl_fw_runtime *fwrt,
if (tbl_rev != 0)
goto out_free;
- BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) != ACPI_WPFC_WIFI_DATA_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(filters->filter_cfg_chains) !=
+ ACPI_WPFC_WIFI_DATA_SIZE - 1);
for (i = 0; i < ARRAY_SIZE(filters->filter_cfg_chains); i++) {
- if (wifi_pkg->package.elements[i].type != ACPI_TYPE_INTEGER)
- return;
+ if (wifi_pkg->package.elements[i + 1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
tmp.filter_cfg_chains[i] =
- cpu_to_le32(wifi_pkg->package.elements[i].integer.value);
+ cpu_to_le32(wifi_pkg->package.elements[i + 1].integer.value);
}
IWL_DEBUG_RADIO(fwrt, "Loaded WPFC filter config from ACPI\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index e9277f6f35..39106ccb4b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -56,7 +56,7 @@
#define ACPI_EWRD_WIFI_DATA_SIZE_REV2 ((ACPI_SAR_PROFILE_NUM - 1) * \
ACPI_SAR_NUM_CHAINS_REV2 * \
ACPI_SAR_NUM_SUB_BANDS_REV2 + 3)
-#define ACPI_WPFC_WIFI_DATA_SIZE 4 /* 4 filter config words */
+#define ACPI_WPFC_WIFI_DATA_SIZE 5 /* domain and 4 filter config words */
/* revision 0 and 1 are identical, except for the semantics in the FW */
#define ACPI_GEO_NUM_BANDS_REV0 2
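
Note on the iwlwifi ACPI hunks above: they fix an off-by-one in reading the WPFC table, since element 0 of the ACPI package is the domain word and the four filter-config words live at indices 1..4, making the expected package size 5 rather than 4. A standalone sketch of reading a table whose first element is a header word (hypothetical layout):

#include <stdio.h>

#define DATA_SIZE  5              /* domain word + 4 filter-config words */
#define NUM_CHAINS (DATA_SIZE - 1)

int main(void)
{
	/* element 0 is the domain, elements 1..4 are the payload */
	const unsigned int package[DATA_SIZE] = { 0x07, 0x11, 0x22, 0x33, 0x44 };
	unsigned int chains[NUM_CHAINS];
	int i;

	for (i = 0; i < NUM_CHAINS; i++)
		chains[i] = package[i + 1];   /* skip the domain word */

	for (i = 0; i < NUM_CHAINS; i++)
		printf("chain %d: 0x%x\n", i, chains[i]);
	return 0;
}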
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 9c69d36743..e6c0f928a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2019-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -66,6 +66,16 @@ enum iwl_gen2_tx_fifo {
IWL_GEN2_TRIG_TX_FIFO_VO,
};
+enum iwl_bz_tx_fifo {
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+};
/**
* enum iwl_tx_queue_cfg_actions - TXQ config options
* @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 3975a53a9f..57ddd8aefa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -3075,8 +3075,6 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
struct iwl_fw_dbg_params params = {0};
struct iwl_fwrt_dump_data *dump_data =
&fwrt->dump.wks[wk_idx].dump_data;
- u32 policy;
- u32 time_point;
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
@@ -3107,13 +3105,16 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);
- policy = le32_to_cpu(dump_data->trig->apply_policy);
- time_point = le32_to_cpu(dump_data->trig->time_point);
+ if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
+ u32 policy = le32_to_cpu(dump_data->trig->apply_policy);
+ u32 time_point = le32_to_cpu(dump_data->trig->time_point);
- if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
- IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
- iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ if (policy & IWL_FW_INI_APPLY_POLICY_DUMP_COMPLETE_CMD) {
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: sending dump complete\n");
+ iwl_send_dbg_dump_complete_cmd(fwrt, time_point, 0);
+ }
}
+
if (fwrt->trans->dbg.last_tp_resetfw == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY)
iwl_force_nmi(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index 650e4bde9c..56ee0ceed7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -255,21 +255,27 @@ static u8 *iwl_get_pnvm_image(struct iwl_trans *trans_p, size_t *len)
struct pnvm_sku_package *package;
u8 *image = NULL;
- /* First attempt to get the PNVM from BIOS */
- package = iwl_uefi_get_pnvm(trans_p, len);
- if (!IS_ERR_OR_NULL(package)) {
- if (*len >= sizeof(*package)) {
- /* we need only the data */
- *len -= sizeof(*package);
- image = kmemdup(package->data, *len, GFP_KERNEL);
+ /* Get PNVM from BIOS for non-Intel SKU */
+ if (trans_p->sku_id[2]) {
+ package = iwl_uefi_get_pnvm(trans_p, len);
+ if (!IS_ERR_OR_NULL(package)) {
+ if (*len >= sizeof(*package)) {
+ /* we need only the data */
+ *len -= sizeof(*package);
+ image = kmemdup(package->data,
+ *len, GFP_KERNEL);
+ }
+ /*
+ * free package regardless of whether kmemdup
+ * succeeded
+ */
+ kfree(package);
+ if (image)
+ return image;
}
- /* free package regardless of whether kmemdup succeeded */
- kfree(package);
- if (image)
- return image;
}
- /* If it's not available, try from the filesystem */
+ /* If it's not available, or for Intel SKU, try from the filesystem */
if (iwl_pnvm_get_from_fs(trans_p, &image, len))
return NULL;
return image;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 357727774d..baabb8e321 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -173,9 +173,9 @@ struct iwl_fw_runtime {
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
u8 reduced_power_flags;
- bool uats_enabled;
struct iwl_uats_table_cmd uats_table;
#endif
+ bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 9160d81a87..55d2ed13a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -103,6 +103,12 @@ static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
return -EINVAL;
+ /* we use this as a string, ensure input was NUL terminated */
+ if (strnlen(debug_info->debug_cfg_name,
+ sizeof(debug_info->debug_cfg_name)) ==
+ sizeof(debug_info->debug_cfg_name))
+ return -EINVAL;
+
IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
debug_info->debug_cfg_name);
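
Note on the iwl-dbg-tlv.c hunk above: it rejects a debug_cfg_name that fills its buffer without a terminating NUL before the name is ever printed with %s. A standalone sketch of that validation, assuming a fixed-size name field:

#include <stdio.h>
#include <string.h>

#define NAME_LEN 16

/* Returns 0 if name is safely NUL-terminated within its buffer, -1 otherwise. */
static int check_name(const char name[NAME_LEN])
{
	if (strnlen(name, NAME_LEN) == NAME_LEN)
		return -1;          /* no NUL inside the buffer: reject */
	printf("loading debug cfg: %s\n", name);
	return 0;
}

int main(void)
{
	char good[NAME_LEN] = "cfg_a";
	char bad[NAME_LEN];

	memset(bad, 'x', sizeof(bad));   /* deliberately unterminated */
	printf("good: %d, bad: %d\n", check_name(good), check_name(bad));
	return 0;
}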
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 678c4a0718..d85cadd8e1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -2090,7 +2090,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
nvm->sku_cap_mimo_disabled =
!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
- if (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM)
+ if (CSR_HW_RFID_TYPE(trans->hw_rf_id) >= IWL_CFG_RF_TYPE_FM)
nvm->sku_cap_11be_enable = true;
/* Initialize PHY sku data */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 92c45571bd..d99a712e7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -461,12 +461,10 @@ static int iwl_mvm_wowlan_config_rsc_tsc(struct iwl_mvm *mvm,
struct wowlan_key_rsc_v5_data data = {};
int i;
- data.rsc = kmalloc(sizeof(*data.rsc), GFP_KERNEL);
+ data.rsc = kzalloc(sizeof(*data.rsc), GFP_KERNEL);
if (!data.rsc)
return -ENOMEM;
- memset(data.rsc, 0xff, sizeof(*data.rsc));
-
for (i = 0; i < ARRAY_SIZE(data.rsc->mcast_key_id_map); i++)
data.rsc->mcast_key_id_map[i] =
IWL_MCAST_KEY_MAP_INVALID;
@@ -1286,7 +1284,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->net_detect = true;
} else {
- struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .offloading_tid = 0,
+ };
wowlan_config_cmd.sta_id = mvmvif->deflink.ap_sta_id;
@@ -1298,6 +1298,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
goto out_noreset;
}
+ ret = iwl_mvm_sta_ensure_queue(
+ mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]);
+ if (ret)
+ goto out_noreset;
+
ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
vif, mvmvif, ap_sta);
if (ret)
@@ -2200,7 +2205,10 @@ static void iwl_mvm_convert_gtk_v3(struct iwl_wowlan_status_data *status,
static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
struct iwl_wowlan_igtk_status *data)
{
+ int i;
+
BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(sizeof(status->igtk.ipn) != sizeof(data->ipn));
if (!data->key_len)
return;
@@ -2212,7 +2220,10 @@ static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ WOWLAN_IGTK_MIN_INDEX;
memcpy(status->igtk.key, data->key, sizeof(data->key));
- memcpy(status->igtk.ipn, data->ipn, sizeof(data->ipn));
+
+ /* mac80211 expects big endian for memcmp() to work, convert */
+ for (i = 0; i < sizeof(data->ipn); i++)
+ status->igtk.ipn[i] = data->ipn[sizeof(data->ipn) - i - 1];
}
static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
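
Note on the iwl_mvm_convert_igtk() hunk above: instead of memcpy()ing the firmware's little-endian IGTK packet number, the driver now reverses it byte by byte, because mac80211 compares the stored IPN as a big-endian byte string. A standalone sketch of that reversal over a hypothetical 6-byte counter:

#include <stdio.h>

#define IPN_LEN 6

static void reverse_ipn(unsigned char dst[IPN_LEN], const unsigned char src[IPN_LEN])
{
	int i;

	for (i = 0; i < IPN_LEN; i++)
		dst[i] = src[IPN_LEN - i - 1];   /* LE byte string -> BE byte string */
}

int main(void)
{
	const unsigned char fw_ipn[IPN_LEN] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	unsigned char mac80211_ipn[IPN_LEN];
	int i;

	reverse_ipn(mac80211_ipn, fw_ipn);
	for (i = 0; i < IPN_LEN; i++)
		printf("%02x", mac80211_ipn[i]);   /* prints 060504030201 */
	printf("\n");
	return 0;
}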
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index e8b881596b..34b8771d1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -735,7 +735,9 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct dentry *dbgfs_dir = vif->debugfs_dir;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- char buf[100];
+ char buf[3 * 3 + 11 + (NL80211_WIPHY_NAME_MAXLEN + 1) +
+ (7 + IFNAMSIZ + 1) + 6 + 1];
+ char name[7 + IFNAMSIZ + 1];
/* this will happen in monitor mode */
if (!dbgfs_dir)
@@ -748,10 +750,11 @@ void iwl_mvm_vif_dbgfs_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
* find
* netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
*/
- snprintf(buf, 100, "../../../%pd3/iwlmvm", dbgfs_dir);
+ snprintf(name, sizeof(name), "%pd", dbgfs_dir);
+ snprintf(buf, sizeof(buf), "../../../%pd3/iwlmvm", dbgfs_dir);
- mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
- mvm->debugfs_dir, buf);
+ mvmvif->dbgfs_slink =
+ debugfs_create_symlink(name, mvm->debugfs_dir, buf);
}
void iwl_mvm_vif_dbgfs_rm_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index c4f96125cf..25a5a31e63 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -31,6 +31,17 @@ const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
IWL_GEN2_TRIG_TX_FIFO_BK,
};
+const u8 iwl_mvm_ac_to_bz_tx_fifo[] = {
+ IWL_BZ_EDCA_TX_FIFO_VO,
+ IWL_BZ_EDCA_TX_FIFO_VI,
+ IWL_BZ_EDCA_TX_FIFO_BE,
+ IWL_BZ_EDCA_TX_FIFO_BK,
+ IWL_BZ_TRIG_TX_FIFO_VO,
+ IWL_BZ_TRIG_TX_FIFO_VI,
+ IWL_BZ_TRIG_TX_FIFO_BE,
+ IWL_BZ_TRIG_TX_FIFO_BK,
+};
+
struct iwl_mvm_mac_iface_iterator_data {
struct iwl_mvm *mvm;
struct ieee80211_vif *vif;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 350dc6b8a3..168e4a2ffe 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -344,7 +344,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
!iwlwifi_mod_params.disable_11be)
- hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -3684,6 +3684,19 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
mvmvif->ap_sta = sta;
+ /*
+ * Initialize the rates here already - this really tells
+ * the firmware only what the supported legacy rates are
+ * (may be) since it's initialized already from what the
+ * AP advertised in the beacon/probe response. This will
+ * allow the firmware to send auth/assoc frames with one
+ * of the supported rates already, rather than having to
+ * use a mandatory rate.
+ * If we're the AP, we'll just assume mandatory rates at
+ * this point, but we know nothing about the STA anyway.
+ */
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
+
return 0;
}
@@ -3782,13 +3795,17 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvm_sta->authorized = true;
- iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
-
/* MFP is set by default before the station is authorized.
* Clear it here in case it's not used.
*/
- if (!sta->mfp)
- return callbacks->update_sta(mvm, vif, sta);
+ if (!sta->mfp) {
+ int ret = callbacks->update_sta(mvm, vif, sta);
+
+ if (ret)
+ return ret;
+ }
+
+ iwl_mvm_rs_rate_init_all_links(mvm, vif, sta);
return 0;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
index ea3e9e9c6e..fe4b39b19a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <net/mac80211.h>
@@ -62,11 +62,13 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
struct ieee80211_key_conf *keyconf)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool pairwise = keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ bool igtk = keyconf->keyidx == 4 || keyconf->keyidx == 5;
u32 flags = 0;
lockdep_assert_held(&mvm->mutex);
- if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ if (!pairwise)
flags |= IWL_SEC_KEY_FLAG_MCAST_KEY;
switch (keyconf->cipher) {
@@ -96,12 +98,14 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm,
if (!sta && vif->type == NL80211_IFTYPE_STATION)
sta = mvmvif->ap_sta;
- /* Set the MFP flag also for an AP interface where the key is an IGTK
- * key as in such a case the station would always be NULL
+ /*
+ * If we are installing an iGTK (in AP or STA mode), we need to tell
+ * the firmware this key will en/decrypt MGMT frames.
+ * Same goes if we are installing a pairwise key for an MFP station.
+ * In case we're installing a groupwise key (which is not an iGTK),
+ * then, we will not use this key for MGMT frames.
*/
- if ((!IS_ERR_OR_NULL(sta) && sta->mfp) ||
- (vif->type == NL80211_IFTYPE_AP &&
- (keyconf->keyidx == 4 || keyconf->keyidx == 5)))
+ if ((!IS_ERR_OR_NULL(sta) && sta->mfp && pairwise) || igtk)
flags |= IWL_SEC_KEY_FLAG_MFP;
return flags;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
index f313a8d771..ad78c69cc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c
@@ -167,7 +167,7 @@ static int iwl_mvm_mld_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
iwl_mvm_mld_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
cmd.filter_flags = cpu_to_le32(MAC_CFG_FILTER_PROMISC |
- MAC_FILTER_IN_CONTROL_AND_MGMT |
+ MAC_CFG_FILTER_ACCEPT_CONTROL_AND_MGMT |
MAC_CFG_FILTER_ACCEPT_BEACON |
MAC_CFG_FILTER_ACCEPT_PROBE_REQ |
MAC_CFG_FILTER_ACCEPT_GRP);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f2af3e5714..1e33de05d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -121,7 +121,7 @@ struct iwl_mvm_time_event_data {
* if the te is in the time event list or not (when id == TE_MAX)
*/
u32 id;
- u8 link_id;
+ s8 link_id;
};
/* Power management */
@@ -1574,12 +1574,16 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
extern const u8 iwl_mvm_ac_to_tx_fifo[];
extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_bz_tx_fifo[];
static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac)
{
- return iwl_mvm_has_new_tx_api(mvm) ?
- iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ return iwl_mvm_ac_to_bz_tx_fifo[ac];
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return iwl_mvm_ac_to_gen2_tx_fifo[ac];
+ return iwl_mvm_ac_to_tx_fifo[ac];
}
struct iwl_rate_info {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index af15d470c6..7bf2a5947e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -282,6 +282,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
u32 status,
struct ieee80211_rx_status *stats)
{
+ struct wireless_dev *wdev;
struct iwl_mvm_sta *mvmsta;
struct iwl_mvm_vif *mvmvif;
u8 keyid;
@@ -303,9 +304,15 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
+ if (!sta)
+ return -1;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
/* key mismatch - will also report !MIC_OK but we shouldn't count it */
if (!(status & IWL_RX_MPDU_STATUS_KEY_VALID))
- return -1;
+ goto report;
/* good cases */
if (likely(status & IWL_RX_MPDU_STATUS_MIC_OK &&
@@ -314,13 +321,6 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return 0;
}
- if (!sta)
- return -1;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
- mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
/*
* both keys will have the same cipher and MIC length, use
* whichever one is available
@@ -329,11 +329,11 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
if (!key) {
key = rcu_dereference(mvmvif->bcn_prot.keys[1]);
if (!key)
- return -1;
+ goto report;
}
if (len < key->icv_len + IEEE80211_GMAC_PN_LEN + 2)
- return -1;
+ goto report;
/* get the real key ID */
keyid = frame[len - key->icv_len - IEEE80211_GMAC_PN_LEN - 2];
@@ -347,7 +347,7 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
return -1;
key = rcu_dereference(mvmvif->bcn_prot.keys[keyid - 6]);
if (!key)
- return -1;
+ goto report;
}
/* Report status to mac80211 */
@@ -355,6 +355,10 @@ static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
ieee80211_key_mic_failure(key);
else if (status & IWL_RX_MPDU_STATUS_REPLAY_ERROR)
ieee80211_key_replay(key);
+report:
+ wdev = ieee80211_vif_to_wdev(mvmsta->vif);
+ if (wdev->netdev)
+ cfg80211_rx_unprot_mlme_mgmt(wdev->netdev, (void *)hdr, len);
return -1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index bba96a9688..9905925142 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1502,6 +1502,34 @@ out_err:
return ret;
}
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
+ struct ieee80211_txq *txq)
+{
+ struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
+ int ret = -EINVAL;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
+ !txq->sta) {
+ return 0;
+ }
+
+ if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
+ set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
+ ret = 0;
+ }
+
+ local_bh_disable();
+ spin_lock(&mvm->add_stream_lock);
+ if (!list_empty(&mvmtxq->list))
+ list_del_init(&mvmtxq->list);
+ spin_unlock(&mvm->add_stream_lock);
+ local_bh_enable();
+
+ return ret;
+}
+
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b33a0ce096..3cf8a70274 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -571,6 +571,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 2e653a417d..da00ef6e4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -692,7 +692,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
/* Determine whether mac or link id should be used, and validate the link id */
static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 link_id)
+ s8 link_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ver = iwl_fw_lookup_cmd_ver(mvm->fw,
@@ -706,8 +706,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
"Invalid link ID for session protection: %u\n", link_id))
return -EINVAL;
- if (WARN(ieee80211_vif_is_mld(vif) &&
- !(vif->active_links & BIT(link_id)),
+ if (WARN(!mvmvif->link[link_id]->active,
"Session Protection on an inactive link: %u\n", link_id))
return -EINVAL;
@@ -716,7 +715,7 @@ static int iwl_mvm_get_session_prot_id(struct iwl_mvm *mvm,
static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- u32 id, u32 link_id)
+ u32 id, s8 link_id)
{
int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
struct iwl_mvm_session_prot_cmd cmd = {
@@ -745,7 +744,7 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct ieee80211_vif *vif = te_data->vif;
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
- unsigned int link_id;
+ s8 link_id;
if (!vif)
return false;
@@ -1297,7 +1296,7 @@ void iwl_mvm_schedule_session_protection(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
const u16 notif[] = { WIDE_ID(MAC_CONF_GROUP, SESSION_PROTECTION_NOTIF) };
struct iwl_notification_wait wait_notif;
- int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, link_id);
+ int mac_link_id = iwl_mvm_get_session_prot_id(mvm, vif, (s8)link_id);
struct iwl_mvm_session_prot_cmd cmd = {
.id_and_color = cpu_to_le32(mac_link_id),
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index db986bfc4d..930742e75c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -520,13 +520,24 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
}
}
+static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
+ const u8 *addr3_override)
+{
+ struct ieee80211_hdr *out_hdr = cmd;
+
+ memcpy(cmd, hdr, hdrlen);
+ if (addr3_override)
+ memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
+}
+
/*
* Allocates and sets the Tx cmd the driver data pointers in the skb
*/
static struct iwl_device_tx_cmd *
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info, int hdrlen,
- struct ieee80211_sta *sta, u8 sta_id)
+ struct ieee80211_sta *sta, u8 sta_id,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_device_tx_cmd *dev_cmd;
@@ -584,7 +595,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -599,7 +610,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
cmd->len = cpu_to_le16((u16)skb->len);
/* Copy MAC header from skb into command buffer */
- memcpy(cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
@@ -617,7 +628,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
/* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdrlen);
+ iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
out:
return dev_cmd;
@@ -820,7 +831,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
- dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+ dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
+ NULL);
if (!dev_cmd)
return -1;
@@ -1140,7 +1152,8 @@ static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
*/
static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
struct ieee80211_tx_info *info,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ const u8 *addr3_override)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct iwl_mvm_sta *mvmsta;
@@ -1172,7 +1185,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
iwl_mvm_probe_resp_set_noa(mvm, skb);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
- sta, mvmsta->deflink.sta_id);
+ sta, mvmsta->deflink.sta_id,
+ addr3_override);
if (!dev_cmd)
goto drop;
@@ -1294,9 +1308,11 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_tx_info info;
struct sk_buff_head mpdus_skbs;
+ struct ieee80211_vif *vif;
unsigned int payload_len;
int ret;
struct sk_buff *orig_skb = skb;
+ const u8 *addr3;
if (WARN_ON_ONCE(!mvmsta))
return -1;
@@ -1307,26 +1323,59 @@ int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
memcpy(&info, skb->cb, sizeof(info));
if (!skb_is_gso(skb))
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
tcp_hdrlen(skb) + skb->data_len;
if (payload_len <= skb_shinfo(skb)->gso_size)
- return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
__skb_queue_head_init(&mpdus_skbs);
+ vif = info.control.vif;
+ if (!vif)
+ return -1;
+
ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
if (ret)
return ret;
WARN_ON(skb_queue_empty(&mpdus_skbs));
+ /*
+ * As described in IEEE Std 802.11-2020, table 9-30 (Address
+ * field contents), A-MSDU address 3 should contain the BSSID
+ * address.
+ * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
+ * in the command header. We need to preserve the original
+ * address 3 in the skb header to correctly create all the
+ * A-MSDU subframe headers from it.
+ */
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ addr3 = vif->cfg.ap_addr;
+ break;
+ case NL80211_IFTYPE_AP:
+ addr3 = vif->addr;
+ break;
+ default:
+ addr3 = NULL;
+ break;
+ }
+
while (!skb_queue_empty(&mpdus_skbs)) {
+ struct ieee80211_hdr *hdr;
+ bool amsdu;
+
skb = __skb_dequeue(&mpdus_skbs);
+ hdr = (void *)skb->data;
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
- ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+ ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
+ amsdu ? addr3 : NULL);
if (ret) {
/* Free skbs created as part of TSO logic that have not yet been dequeued */
__skb_queue_purge(&mpdus_skbs);
@@ -1587,12 +1636,18 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
* of the batch. This is why the SSN of the SCD is written at the end of the
* whole struct at a variable offset. This function knows how to cope with the
* variable offset and returns the SSN of the SCD.
+ *
+ * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
*/
static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
struct iwl_mvm_tx_resp *tx_resp)
{
- return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
- tx_resp->frame_count) & 0xfff;
+ u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+ tx_resp->frame_count);
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ return val & 0xFFFF;
+ return val & 0xFFF;
}
static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
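
Note on the iwl_mvm_get_scd_ssn() hunk above: the added comment explains that the SSN read from the end of the Tx response is only 12 bits wide on 22000-series and older devices but 16 bits on AX210 and later, so the mask must follow the device family. A standalone sketch of that conditional masking, with a hypothetical family enum:

#include <stdio.h>
#include <stdint.h>

enum family { FAMILY_22000, FAMILY_AX210 };

static uint32_t scd_ssn(enum family fam, uint32_t raw)
{
	/* newer devices carry a 16-bit SSN, older ones only 12 bits */
	if (fam >= FAMILY_AX210)
		return raw & 0xFFFF;
	return raw & 0xFFF;
}

int main(void)
{
	uint32_t raw = 0xDEAD1234;

	printf("22000: 0x%x, ax210: 0x%x\n",
	       scd_ssn(FAMILY_22000, raw), scd_ssn(FAMILY_AX210, raw));
	return 0;
}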
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 104d2b6dc9..5a525da434 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -1132,7 +1132,7 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
if (!cmdarray[i].cmdbuf) {
lbs_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
ret = -1;
- goto done;
+ goto free_cmd_array;
}
}
@@ -1140,8 +1140,17 @@ int lbs_allocate_cmd_buffer(struct lbs_private *priv)
init_waitqueue_head(&cmdarray[i].cmdwait_q);
lbs_cleanup_and_insert_cmd(priv, &cmdarray[i]);
}
- ret = 0;
+ return 0;
+free_cmd_array:
+ for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
+ if (cmdarray[i].cmdbuf) {
+ kfree(cmdarray[i].cmdbuf);
+ cmdarray[i].cmdbuf = NULL;
+ }
+ }
+ kfree(priv->cmd_array);
+ priv->cmd_array = NULL;
done:
return ret;
}
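
Note on the lbs_allocate_cmd_buffer() hunk above: the bare "goto done" on allocation failure is replaced by an unwind label that frees every command buffer already allocated plus the array itself, so a mid-loop failure no longer leaks earlier allocations. A standalone sketch of that partial-allocation unwind, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

#define NUM_BUFS 4
#define BUF_SIZE 256

struct cmd { void *buf; };

/* Entries are assumed NULL-initialized by the caller (kzalloc in the driver). */
static int alloc_all(struct cmd *cmds)
{
	int i;

	for (i = 0; i < NUM_BUFS; i++) {
		cmds[i].buf = malloc(BUF_SIZE);
		if (!cmds[i].buf)
			goto free_all;      /* unwind everything allocated so far */
	}
	return 0;

free_all:
	for (i = 0; i < NUM_BUFS; i++) {
		free(cmds[i].buf);          /* free(NULL) is a no-op */
		cmds[i].buf = NULL;
	}
	return -1;
}

int main(void)
{
	struct cmd cmds[NUM_BUFS] = { { 0 } };
	int i, ret = alloc_all(cmds);

	if (!ret)
		for (i = 0; i < NUM_BUFS; i++)
			free(cmds[i].buf);
	printf("alloc_all: %d\n", ret);
	return 0;
}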
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index f9c9fec7c7..d14a0f4c1b 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -970,9 +970,6 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
priv->dfs_dev_dir = debugfs_create_dir(priv->netdev->name,
mwifiex_dfs_dir);
- if (!priv->dfs_dev_dir)
- return;
-
MWIFIEX_DFS_ADD_FILE(info);
MWIFIEX_DFS_ADD_FILE(debug);
MWIFIEX_DFS_ADD_FILE(getlog);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index b475555097..633f562105 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -2100,7 +2100,7 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
int j, msg_len, num_ch;
struct sk_buff *skb;
- num_ch = i == batch_size - 1 ? n_chan % batch_len : batch_len;
+ num_ch = i == batch_size - 1 ? n_chan - i * batch_len : batch_len;
msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv);
skb = mt76_mcu_msg_alloc(dev, NULL, msg_len);
if (!skb) {
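
Note on the mt76_connac_mcu_rate_txpower_band() fix above: the size of the final batch is now computed as n_chan - i * batch_len instead of n_chan % batch_len, which wrongly yields zero whenever n_chan is an exact multiple of batch_len. A standalone sketch of the difference:

#include <stdio.h>

int main(void)
{
	const int n_chan = 16, batch_len = 8;
	const int batch_size = (n_chan + batch_len - 1) / batch_len;  /* number of batches */
	int i;

	for (i = 0; i < batch_size; i++) {
		int buggy = (i == batch_size - 1) ? n_chan % batch_len : batch_len;
		int fixed = (i == batch_size - 1) ? n_chan - i * batch_len : batch_len;

		printf("batch %d: buggy=%d fixed=%d\n", i, buggy, fixed);
	}
	/* last line prints "batch 1: buggy=0 fixed=8" */
	return 0;
}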
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 0563b1b22f..bf023f3170 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -416,6 +416,14 @@ struct sta_rec_he_6g_capa {
u8 rsv[2];
} __packed;
+struct sta_rec_pn_info {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 tsc_type;
+ u8 rsv;
+} __packed;
+
struct sec_key {
u8 cipher_id;
u8 cipher_len;
@@ -768,6 +776,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
+ sizeof(struct sta_rec_pn_info) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@@ -798,6 +807,8 @@ enum {
STA_REC_HE_V2 = 0x19,
STA_REC_MLD = 0x20,
STA_REC_EHT = 0x22,
+ STA_REC_PN_INFO = 0x26,
+ STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -925,6 +936,9 @@ enum {
PHY_TYPE_INDEX_NUM
};
+#define HR_DSSS_ERP_BASIC_RATE GENMASK(3, 0)
+#define OFDM_BASIC_RATE (BIT(6) | BIT(8) | BIT(10))
+
#define PHY_TYPE_BIT_HR_DSSS BIT(PHY_TYPE_HR_DSSS_INDEX)
#define PHY_TYPE_BIT_ERP BIT(PHY_TYPE_ERP_INDEX)
#define PHY_TYPE_BIT_OFDM BIT(PHY_TYPE_OFDM_INDEX)
@@ -1088,6 +1102,13 @@ enum mcu_cipher_type {
MCU_CIPHER_GCMP_256,
MCU_CIPHER_WAPI,
MCU_CIPHER_BIP_CMAC_128,
+ MCU_CIPHER_BIP_CMAC_256,
+ MCU_CIPHER_BCN_PROT_CMAC_128,
+ MCU_CIPHER_BCN_PROT_CMAC_256,
+ MCU_CIPHER_BCN_PROT_GMAC_128,
+ MCU_CIPHER_BCN_PROT_GMAC_256,
+ MCU_CIPHER_BIP_GMAC_128,
+ MCU_CIPHER_BIP_GMAC_256,
};
enum {
@@ -1307,6 +1328,7 @@ enum {
UNI_BSS_INFO_RATE = 11,
UNI_BSS_INFO_QBSS = 15,
UNI_BSS_INFO_SEC = 16,
+ UNI_BSS_INFO_BCN_PROT = 17,
UNI_BSS_INFO_TXCMD = 18,
UNI_BSS_INFO_UAPSD = 19,
UNI_BSS_INFO_PS = 21,
@@ -1768,6 +1790,12 @@ mt76_connac_mcu_get_cipher(int cipher)
return MCU_CIPHER_GCMP;
case WLAN_CIPHER_SUITE_GCMP_256:
return MCU_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ return MCU_CIPHER_BIP_GMAC_128;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ return MCU_CIPHER_BIP_GMAC_256;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ return MCU_CIPHER_BIP_CMAC_256;
case WLAN_CIPHER_SUITE_SMS4:
return MCU_CIPHER_WAPI;
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 399d7ca6be..18056045d9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -1270,7 +1270,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
- u16 buf_len = 0;
+ u32 buf_len = 0;
u8 *pos;
if (!clc)
@@ -1281,7 +1281,7 @@ int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
if (mt76_find_power_limits_node(&dev->mt76))
req.cap |= CLC_CAP_DTS_EN;
- buf_len = le16_to_cpu(clc->len) - sizeof(*clc);
+ buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
pos = clc->data;
while (buf_len > 16) {
struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
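
Note on the __mt7921_mcu_set_clc() hunk above: buf_len is widened from u16 to u32 and the CLC header length is read with le32_to_cpu() so a large firmware-provided length is neither misread nor silently wrapped. A standalone sketch of the truncation pitfall the widening avoids, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t clc_len = 70000;              /* larger than 65535 */
	uint16_t truncated = (uint16_t)clc_len;
	uint32_t widened = clc_len;

	/* the u16 copy silently wraps, the u32 copy keeps the real length */
	printf("truncated=%u widened=%u\n", (unsigned)truncated, (unsigned)widened);
	return 0;
}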
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index 42fd456eb6..4d04911245 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -387,6 +387,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7921e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index aa918b9b04..6f0b18ae31 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -360,6 +360,7 @@ mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mvif->sta.vif = mvif;
mt76_wcid_init(&mvif->sta.wcid);
mt7925_mac_wtbl_update(dev, idx,
@@ -527,7 +528,7 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (cmd == SET_KEY && !mvif->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
}
@@ -711,7 +712,7 @@ static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
if (slottime != phy->slottime) {
phy->slottime = slottime;
- mt792x_mac_set_timeing(phy);
+ mt7925_mcu_set_timing(phy, vif);
}
}
@@ -1275,6 +1276,25 @@ mt7925_channel_switch_beacon(struct ieee80211_hw *hw,
}
static int
+mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ static const u8 mq_to_aci[] = {
+ [IEEE80211_AC_VO] = 3,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ };
+
+ /* firmware uses access class index */
+ mvif->queue_params[mq_to_aci[queue]] = *params;
+
+ return 0;
+}
+
+static int
mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
{
@@ -1397,7 +1417,7 @@ const struct ieee80211_ops mt7925_ops = {
.add_interface = mt7925_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7925_config,
- .conf_tx = mt792x_conf_tx,
+ .conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
.bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index 9c0e397537..8e72012472 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -814,6 +814,7 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
struct mt76_wcid *wcid;
struct tlv *tlv;
@@ -827,7 +828,11 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (sta)
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ else
+ wcid = &mvif->sta.wcid;
+
if (!wcid)
return;
@@ -895,7 +900,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
e = (struct edca *)tlv;
e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->queue = ac;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -921,61 +926,67 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
{
+ struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
struct sta_rec_sec_uni *sec;
+ struct mt792x_vif *mvif = msta->vif;
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
struct tlv *tlv;
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sta = msta == &mvif->sta ?
+ NULL :
+ container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->add = cmd;
+ sec->bss_idx = mvif->mt76.idx;
+ sec->is_authenticator = 0;
+ sec->mgmt_prot = 0;
+ sec->wlan_idx = (u8)wcid->idx;
+
+ if (sta) {
+ sec->tx_key = 1;
+ sec->key_type = 1;
+ memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ } else {
+ memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ }
if (cmd == SET_KEY) {
- struct sec_key_uni *sec_key;
u8 cipher;
- cipher = mt76_connac_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
+ sec->add = 1;
+ cipher = mt7925_mcu_get_cipher(key->cipher);
+ if (cipher == CONNAC3_CIPHER_NONE)
return -EOPNOTSUPP;
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = sta_key_conf->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, sta_key_conf->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
- sec->n_cipher = 2;
+ if (cipher == CONNAC3_CIPHER_BIP_CMAC_128) {
+ sec->cipher_id = CONNAC3_CIPHER_BIP_CMAC_128;
+ sec->key_id = sta_key_conf->keyidx;
+ sec->key_len = 32;
+ memcpy(sec->key, sta_key_conf->key, 16);
+ memcpy(sec->key + 16, key->key, 16);
} else {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
+ sec->cipher_id = cipher;
+ sec->key_id = key->keyidx;
+ sec->key_len = key->keylen;
+ memcpy(sec->key, key->key, key->keylen);
- if (cipher == MCU_CIPHER_TKIP) {
+ if (cipher == CONNAC3_CIPHER_TKIP) {
/* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
+ memcpy(sec->key + 16, key->key + 24, 8);
+ memcpy(sec->key + 24, key->key + 16, 8);
}
/* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
+ if (cipher == CONNAC3_CIPHER_AES_CCMP) {
memcpy(sta_key_conf->key, key->key, key->keylen);
sta_key_conf->keyidx = key->keyidx;
}
-
- sec->n_cipher = 1;
}
} else {
- sec->n_cipher = 0;
+ sec->add = 0;
}
return 0;
@@ -1460,12 +1471,10 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
struct tlv *tlv;
u8 af = 0, mm = 0;
- if (!sta->deflink.ht_cap.ht_supported && !sta->deflink.he_6ghz_capa.capa)
- return;
-
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
+ phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
if (sta->deflink.ht_cap.ht_supported) {
af = sta->deflink.ht_cap.ampdu_factor;
mm = sta->deflink.ht_cap.ampdu_density;
@@ -1573,8 +1582,6 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
struct sk_buff *skb;
skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, info->wcid,
@@ -1598,30 +1605,11 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
}
- sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
- sizeof(struct tlv));
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, info->wcid,
- WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- if (info->enable) {
- mt76_connac_mcu_wtbl_generic_tlv(dev, skb, info->vif,
- info->sta, sta_wtbl,
- wtbl_hdr);
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, info->vif, info->wcid,
- sta_wtbl, wtbl_hdr);
- if (info->sta)
- mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr,
- true, true);
- }
+ if (info->enable)
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
@@ -2049,9 +2037,9 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
- u8 idx, basic_phy;
struct tlv *tlv;
int conn_type;
+ u8 idx;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
@@ -2062,8 +2050,10 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
- basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, sta);
- basic_req->nonht_basic_phy = cpu_to_le16(basic_phy);
+ if (band == NL80211_BAND_2GHZ)
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
+ else
+ basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
@@ -2122,21 +2112,21 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
sec = (struct bss_sec_tlv *)tlv;
switch (mvif->cipher) {
- case MCU_CIPHER_GCMP_256:
- case MCU_CIPHER_GCMP:
+ case CONNAC3_CIPHER_GCMP_256:
+ case CONNAC3_CIPHER_GCMP:
sec->mode = MODE_WPA3_SAE;
sec->status = 8;
break;
- case MCU_CIPHER_AES_CCMP:
+ case CONNAC3_CIPHER_AES_CCMP:
sec->mode = MODE_WPA2_PSK;
sec->status = 6;
break;
- case MCU_CIPHER_TKIP:
+ case CONNAC3_CIPHER_TKIP:
sec->mode = MODE_WPA2_PSK;
sec->status = 4;
break;
- case MCU_CIPHER_WEP104:
- case MCU_CIPHER_WEP40:
+ case CONNAC3_CIPHER_WEP104:
+ case CONNAC3_CIPHER_WEP40:
sec->mode = MODE_SHARED;
sec->status = 0;
break;
@@ -2167,6 +2157,11 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
bmc = (struct bss_rate_tlv *)tlv;
+ if (band == NL80211_BAND_2GHZ)
+ bmc->basic_rate = cpu_to_le16(HR_DSSS_ERP_BASIC_RATE);
+ else
+ bmc->basic_rate = cpu_to_le16(OFDM_BASIC_RATE);
+
bmc->short_preamble = (band == NL80211_BAND_2GHZ);
bmc->bc_fixed_rate = idx;
bmc->mc_fixed_rate = idx;
@@ -2249,6 +2244,38 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
vif->bss_conf.he_bss_color.color : 0;
}
+static void
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mvif->phy;
+ struct bss_ifs_time_tlv *ifs_time;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_IFS_TIME, sizeof(*ifs_time));
+ ifs_time = (struct bss_ifs_time_tlv *)tlv;
+ ifs_time->slot_valid = true;
+ ifs_time->slot_time = cpu_to_le16(phy->slottime);
+}
+
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
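
The new mt7925_mcu_set_timing() helper follows the usual mt76 uni-command pattern: allocate a request buffer sized for the worst case, append one or more tag/length TLVs, then hand the buffer to the MCU. A compacted, kernel-free sketch of the TLV append step; the 4-byte tag/len header mirrors the driver's struct tlv, but the tag value, byte-order conversion (cpu_to_le16) and bounds checking are deliberately simplified.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {                      /* 4-byte tag/length header */
        uint16_t tag;
        uint16_t len;
};

struct ifs_time_tlv {             /* trimmed-down bss_ifs_time_tlv */
        struct tlv hdr;
        uint8_t  slot_valid;
        uint8_t  pad;
        uint16_t slot_time;
};

/* Append a zeroed TLV to a flat buffer and return a pointer to it. */
static void *add_tlv(uint8_t *buf, size_t *used, uint16_t tag, uint16_t len)
{
        struct tlv *hdr = (struct tlv *)(buf + *used);

        memset(hdr, 0, len);
        hdr->tag = tag;
        hdr->len = len;
        *used += len;
        return hdr;
}

int main(void)
{
        uint8_t buf[256];
        size_t used = 0;
        struct ifs_time_tlv *ifs;

        ifs = add_tlv(buf, &used, 0x17 /* stand-in tag */, sizeof(*ifs));
        ifs->slot_valid = 1;
        ifs->slot_time = 9;       /* short slot time, microseconds */

        printf("message is %zu bytes\n", used);
        return 0;
}
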
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
struct ieee80211_vif *vif,
@@ -2273,6 +2300,7 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
mt7925_mcu_bss_qos_tlv(skb, vif);
mt7925_mcu_bss_mld_tlv(skb, vif, sta);
+ mt7925_mcu_bss_ifs_tlv(skb, vif);
if (vif->bss_conf.he_support) {
mt7925_mcu_bss_he_tlv(skb, vif, phy);
@@ -2845,12 +2873,16 @@ int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (cmd & __MCU_CMD_FIELD_UNI) {
uni_txd = (struct mt76_connac2_mcu_uni_txd *)txd;
uni_txd->len = cpu_to_le16(skb->len - sizeof(uni_txd->txd));
- uni_txd->option = MCU_CMD_UNI_EXT_ACK;
uni_txd->cid = cpu_to_le16(mcu_cmd);
uni_txd->s2d_index = MCU_S2D_H2N;
uni_txd->pkt_type = MCU_PKT_ID;
uni_txd->seq = seq;
+ if (cmd & __MCU_CMD_FIELD_QUERY)
+ uni_txd->option = MCU_CMD_UNI_QUERY_ACK;
+ else
+ uni_txd->option = MCU_CMD_UNI_EXT_ACK;
+
goto exit;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 3c41e21303..2cf3927611 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -159,6 +159,20 @@ enum {
UNI_EVENT_SCAN_DONE_NLO = 3,
};
+enum connac3_mcu_cipher_type {
+ CONNAC3_CIPHER_NONE = 0,
+ CONNAC3_CIPHER_WEP40 = 1,
+ CONNAC3_CIPHER_TKIP = 2,
+ CONNAC3_CIPHER_AES_CCMP = 4,
+ CONNAC3_CIPHER_WEP104 = 5,
+ CONNAC3_CIPHER_BIP_CMAC_128 = 6,
+ CONNAC3_CIPHER_WEP128 = 7,
+ CONNAC3_CIPHER_WAPI = 8,
+ CONNAC3_CIPHER_CCMP_256 = 10,
+ CONNAC3_CIPHER_GCMP = 11,
+ CONNAC3_CIPHER_GCMP_256 = 12,
+};
+
struct mt7925_mcu_scan_chinfo_event {
u8 nr_chan;
u8 alpha2[3];
@@ -334,7 +348,8 @@ struct bss_req_hdr {
struct bss_rate_tlv {
__le16 tag;
__le16 len;
- u8 __rsv1[4];
+ u8 __rsv1[2];
+ __le16 basic_rate;
__le16 bc_trans;
__le16 mc_trans;
u8 short_preamble;
@@ -382,25 +397,22 @@ struct sta_rec_eht {
u8 _rsv2[3];
} __packed;
-struct sec_key_uni {
- __le16 wlan_idx;
- u8 mgmt_prot;
- u8 cipher_id;
- u8 cipher_len;
- u8 key_id;
- u8 key_len;
- u8 need_resp;
- u8 key[32];
-} __packed;
-
struct sta_rec_sec_uni {
__le16 tag;
__le16 len;
u8 add;
- u8 n_cipher;
- u8 rsv[2];
-
- struct sec_key_uni key[2];
+ u8 tx_key;
+ u8 key_type;
+ u8 is_authenticator;
+ u8 peer_addr[6];
+ u8 bss_idx;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 wlan_idx;
+ u8 mgmt_prot;
+ u8 key[32];
+ u8 key_rsc[16];
} __packed;
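
The reworked sta_rec_sec_uni carries a single key per record, with the peer address, cipher and key material inline instead of the old nested sec_key_uni array. The firmware parses the record byte-for-byte, so the layout must contain no implicit padding; a quick host-side check of what the declaration above implies (field names copied from the header, __attribute__((packed)) standing in for the kernel's __packed):

#include <stdint.h>
#include <stdio.h>

struct sta_rec_sec_uni {
        uint16_t tag;
        uint16_t len;
        uint8_t  add;
        uint8_t  tx_key;
        uint8_t  key_type;
        uint8_t  is_authenticator;
        uint8_t  peer_addr[6];
        uint8_t  bss_idx;
        uint8_t  cipher_id;
        uint8_t  key_id;
        uint8_t  key_len;
        uint8_t  wlan_idx;
        uint8_t  mgmt_prot;
        uint8_t  key[32];
        uint8_t  key_rsc[16];
} __attribute__((packed));

int main(void)
{
        /* 4-byte TLV header + 64 bytes of key record = 68 bytes. */
        printf("sizeof(struct sta_rec_sec_uni) = %zu\n",
               sizeof(struct sta_rec_sec_uni));
        return 0;
}
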
struct sta_rec_hdr_trans {
@@ -428,6 +440,22 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct bss_ifs_time_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 slot_valid;
+ u8 sifs_valid;
+ u8 rifs_valid;
+ u8 eifs_valid;
+ __le16 slot_time;
+ __le16 sifs_time;
+ __le16 rifs_time;
+ __le16 eifs_time;
+ u8 eifs_cck_valid;
+ u8 rsv;
+ __le16 eifs_cck_time;
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -440,7 +468,7 @@ struct sta_rec_mld {
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_phy) + \
sizeof(struct sta_rec_ra) + \
- sizeof(struct sta_rec_sec) + \
+ sizeof(struct sta_rec_sec_uni) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_eht) + \
@@ -455,6 +483,7 @@ struct sta_rec_mld {
sizeof(struct bss_mld_tlv) + \
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
+ sizeof(struct bss_ifs_time_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -509,6 +538,33 @@ struct mt7925_wow_pattern_tlv {
u8 rsv[4];
} __packed;
+static inline enum connac3_mcu_cipher_type
+mt7925_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return CONNAC3_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return CONNAC3_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return CONNAC3_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return CONNAC3_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return CONNAC3_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return CONNAC3_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return CONNAC3_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return CONNAC3_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return CONNAC3_CIPHER_WAPI;
+ default:
+ return CONNAC3_CIPHER_NONE;
+ }
+}
+
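
mt7925_mcu_get_cipher() translates 802.11 cipher suite selectors (a 24-bit OUI plus a one-byte type, 00-0F-AC:xx for the standard suites) into the small CONNAC3 firmware enum added above. A stand-alone version of the same mapping for a subset of the suites; the WLAN_CIPHER_SUITE_* values are the standard selectors as defined in the cfg80211/ieee80211 headers, reproduced here only so the sketch compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define SUITE(oui, id)  ((uint32_t)(((oui) << 8) | (id)))

#define WLAN_CIPHER_SUITE_WEP40     SUITE(0x000FAC, 1)
#define WLAN_CIPHER_SUITE_TKIP      SUITE(0x000FAC, 2)
#define WLAN_CIPHER_SUITE_CCMP      SUITE(0x000FAC, 4)
#define WLAN_CIPHER_SUITE_WEP104    SUITE(0x000FAC, 5)
#define WLAN_CIPHER_SUITE_GCMP      SUITE(0x000FAC, 8)
#define WLAN_CIPHER_SUITE_GCMP_256  SUITE(0x000FAC, 9)

enum connac3_cipher {             /* subset of the enum added in mcu.h */
        CIPHER_NONE     = 0,
        CIPHER_WEP40    = 1,
        CIPHER_TKIP     = 2,
        CIPHER_AES_CCMP = 4,
        CIPHER_WEP104   = 5,
        CIPHER_GCMP     = 11,
        CIPHER_GCMP_256 = 12,
};

static enum connac3_cipher get_cipher(uint32_t suite)
{
        switch (suite) {
        case WLAN_CIPHER_SUITE_WEP40:    return CIPHER_WEP40;
        case WLAN_CIPHER_SUITE_WEP104:   return CIPHER_WEP104;
        case WLAN_CIPHER_SUITE_TKIP:     return CIPHER_TKIP;
        case WLAN_CIPHER_SUITE_CCMP:     return CIPHER_AES_CCMP;
        case WLAN_CIPHER_SUITE_GCMP:     return CIPHER_GCMP;
        case WLAN_CIPHER_SUITE_GCMP_256: return CIPHER_GCMP_256;
        default:                         return CIPHER_NONE;
        }
}

int main(void)
{
        printf("CCMP maps to firmware cipher %d\n",
               get_cipher(WLAN_CIPHER_SUITE_CCMP));
        return 0;
}
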
int mt7925_mcu_set_dbdc(struct mt76_phy *phy);
int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_scan_request *scan_req);
@@ -525,6 +581,8 @@ int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
int enable);
+int mt7925_mcu_set_timing(struct mt792x_phy *phy,
+ struct ieee80211_vif *vif);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 08ef75e24e..bf02a15067 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -386,6 +386,8 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_rmw_field(dev, MT_HW_EMI_CTL, MT_HW_EMI_CTL_SLPPROT_EN, 1);
+
ret = mt792x_wfsys_reset(dev);
if (ret)
goto err_free_dev;
@@ -425,6 +427,7 @@ static void mt7925_pci_remove(struct pci_dev *pdev)
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
mt7925e_unregister_device(dev);
+ set_bit(MT76_REMOVED, &mdev->phy.state);
devm_free_irq(&pdev->dev, pdev->irq, dev);
mt76_free_device(&dev->mt76);
pci_free_irq_vectors(pdev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
index 303c0f5c9c..c4e3bfcc51 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c
@@ -66,13 +66,15 @@ free:
}
/* MTCL : Country List Table for 6G band */
-static void
+static int
mt792x_asar_acpi_read_mtcl(struct mt792x_dev *dev, u8 **table, u8 *version)
{
- if (mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL) < 0)
- *version = 1;
- else
- *version = 2;
+ int ret;
+
+ *version = ((ret = mt792x_acpi_read(dev, MT792x_ACPI_MTCL, table, NULL)) < 0)
+ ? 1 : 2;
+
+ return ret;
}
/* MTDS : Dynamic SAR Power Table */
@@ -166,16 +168,16 @@ int mt792x_init_acpi_sar(struct mt792x_dev *dev)
if (!asar)
return -ENOMEM;
- mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ ret = mt792x_asar_acpi_read_mtcl(dev, (u8 **)&asar->countrylist, &asar->ver);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->countrylist);
+ asar->countrylist = NULL;
+ }
- /* MTDS is mandatory. Return error if table is invalid */
ret = mt792x_asar_acpi_read_mtds(dev, (u8 **)&asar->dyn, asar->ver);
if (ret) {
devm_kfree(dev->mt76.dev, asar->dyn);
- devm_kfree(dev->mt76.dev, asar->countrylist);
- devm_kfree(dev->mt76.dev, asar);
-
- return ret;
+ asar->dyn = NULL;
}
/* MTGS is optional */
@@ -290,7 +292,7 @@ int mt792x_init_acpi_sar_power(struct mt792x_phy *phy, bool set_default)
const struct cfg80211_sar_capa *capa = phy->mt76->hw->wiphy->sar_capa;
int i;
- if (!phy->acpisar)
+ if (!phy->acpisar || !((struct mt792x_acpi_sar *)phy->acpisar)->dyn)
return 0;
/* When ACPI SAR enabled in HW, we should apply rules for .frp
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index 502be22dbe..fb35eff6dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -354,6 +354,7 @@ static const char mt792x_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 488326ce5e..5cc2d59b77 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -12,6 +12,8 @@ irqreturn_t mt792x_irq_handler(int irq, void *dev_instance)
{
struct mt792x_dev *dev = dev_instance;
+ if (test_bit(MT76_REMOVED, &dev->mt76.phy.state))
+ return IRQ_NONE;
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -123,14 +125,13 @@ static void mt792x_dma_prefetch(struct mt792x_dev *dev)
int mt792x_dma_enable(struct mt792x_dev *dev)
{
- if (is_mt7925(&dev->mt76))
- mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
-
/* configure prefetch settings */
mt792x_dma_prefetch(dev);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (is_mt7925(&dev->mt76))
+ mt76_wr(dev, MT_WFDMA0_RST_DRX_PTR, ~0);
/* configure delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
@@ -140,12 +141,20 @@ int mt792x_dma_enable(struct mt792x_dev *dev)
MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN |
MT_WFDMA0_GLO_CFG_CLK_GAT_DIS |
MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3) |
+ MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK |
+ MT_WFDMA0_GLO_CFG_RX_WB_DDONE |
MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN |
MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
mt76_set(dev, MT_WFDMA0_GLO_CFG,
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
+ if (is_mt7925(&dev->mt76)) {
+ mt76_rmw(dev, MT_UWFDMA0_GLO_CFG_EXT1, BIT(28), BIT(28));
+ mt76_set(dev, MT_WFDMA0_INT_RX_PRI, 0x0F00);
+ mt76_set(dev, MT_WFDMA0_INT_TX_PRI, 0x7F00);
+ }
mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
/* enable interrupts for TX/RX rings */
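
The DMA enable path now seeds MT_WFDMA0_GLO_CFG with FIELD_PREP(MT_WFDMA0_GLO_CFG_DMA_SIZE, 3), i.e. it shifts the value 3 into the 2-bit field declared as GENMASK(5, 4) in the mt792x_regs.h hunk below. A minimal re-implementation of those helpers just to show the bit arithmetic; the kernel versions add compile-time mask checks, and __builtin_ctz is a gcc/clang builtin.

#include <stdint.h>
#include <stdio.h>

/* Simplified forms of the kernel's GENMASK/FIELD_PREP/FIELD_GET. */
#define GENMASK(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

#define GLO_CFG_DMA_SIZE     GENMASK(5, 4)   /* 2-bit field, bits 5:4 */

int main(void)
{
        uint32_t reg = 0;

        reg |= FIELD_PREP(GLO_CFG_DMA_SIZE, 3);   /* program burst size 3 */
        printf("reg=0x%08x field=%u\n",
               reg, FIELD_GET(GLO_CFG_DMA_SIZE, reg));
        return 0;
}
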
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
index a99af23e4b..458cfd0260 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_regs.h
@@ -292,9 +292,12 @@
#define MT_WFDMA0_GLO_CFG_TX_DMA_BUSY BIT(1)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA0_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WFDMA0_GLO_CFG_DMA_SIZE GENMASK(5, 4)
#define MT_WFDMA0_GLO_CFG_TX_WB_DDONE BIT(6)
#define MT_WFDMA0_GLO_CFG_FW_DWLD_BYPASS_DMASHDL BIT(9)
+#define MT_WFDMA0_GLO_CFG_FIFO_DIS_CHECK BIT(11)
#define MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WFDMA0_GLO_CFG_RX_WB_DDONE BIT(13)
#define MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN BIT(15)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
@@ -322,6 +325,8 @@
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_RST_DRX_PTR MT_WFDMA0(0x280)
+#define MT_WFDMA0_INT_RX_PRI MT_WFDMA0(0x298)
+#define MT_WFDMA0_INT_TX_PRI MT_WFDMA0(0x29c)
#define MT_WFDMA0_GLO_CFG_EXT0 MT_WFDMA0(0x2b0)
#define MT_WFDMA0_CSR_TX_DMASHDL_ENABLE BIT(6)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
@@ -389,6 +394,9 @@
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_HW_EMI_CTL 0x18011100
+#define MT_HW_EMI_CTL_SLPPROT_EN BIT(1)
+
#define MT_PCIE_MAC_BASE 0x10000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 55cb1770fa..f8b61df15f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -297,7 +297,7 @@ static void mt7996_mac_init_basic_rates(struct mt7996_dev *dev)
void mt7996_mac_init(struct mt7996_dev *dev)
{
-#define HIF_TXD_V2_1 4
+#define HIF_TXD_V2_1 0x21
int i;
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
@@ -578,11 +578,12 @@ mt7996_set_stream_he_txbf_caps(struct mt7996_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (!(vif == NL80211_IFTYPE_AP || vif == NL80211_IFTYPE_STATION))
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ if (vif == NL80211_IFTYPE_AP)
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index fa3001e59a..7d33b0c895 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1178,25 +1178,28 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_tx_info *info;
struct sk_buff_head list;
struct rate_info rate = {};
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
bool cck = false;
u32 txrate, txs, mode, stbc;
txs = le32_to_cpu(txs_data[0]);
mt76_tx_status_lock(mdev, &list);
- skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
- if (skb) {
- info = IEEE80211_SKB_CB(skb);
- if (!(txs & MT_TXS0_ACK_ERROR_MASK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ /* only report MPDU TXS */
+ if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (skb) {
+ info = IEEE80211_SKB_CB(skb);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ampdu_len = 1;
- info->status.ampdu_ack_len =
- !!(info->flags & IEEE80211_TX_STAT_ACK);
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len =
+ !!(info->flags & IEEE80211_TX_STAT_ACK);
- info->status.rates[0].idx = -1;
+ info->status.rates[0].idx = -1;
+ }
}
if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
@@ -2458,6 +2461,34 @@ static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
return 0;
}
+static bool
+mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
+ struct mt7996_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
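
mt7996_mac_twt_param_equal() lets the setup handler reject a TWT request that duplicates an agreement the station already has: it walks the in-use flows and compares duration, mantissa, exponent and the protection/flow-type/trigger flags. A stripped-down, self-contained version of that duplicate check; the structures are simplifications of mt7996_twt_flow and of the fields decoded from req_type.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_STA_FLOWS 8

struct twt_flow {
        uint8_t  duration;
        uint16_t mantissa;
        uint8_t  exp;
        bool protection, flowtype, trigger;
};

struct twt_req {
        uint8_t  min_twt_dur;
        uint16_t mantissa;
        uint8_t  exp;
        bool protection, flowtype, trigger;
};

static bool twt_param_equal(const struct twt_flow *flows, uint8_t flowid_mask,
                            const struct twt_req *req)
{
        for (int i = 0; i < MAX_STA_FLOWS; i++) {
                const struct twt_flow *f = &flows[i];

                if (!(flowid_mask & (1u << i)))   /* slot not in use */
                        continue;

                if (f->duration == req->min_twt_dur &&
                    f->mantissa == req->mantissa &&
                    f->exp == req->exp &&
                    f->protection == req->protection &&
                    f->flowtype == req->flowtype &&
                    f->trigger == req->trigger)
                        return true;              /* identical agreement */
        }
        return false;
}

int main(void)
{
        struct twt_flow flows[MAX_STA_FLOWS] = {
                [0] = { .duration = 64, .mantissa = 512, .exp = 10 },
        };
        struct twt_req req = { .min_twt_dur = 64, .mantissa = 512, .exp = 10 };

        printf("duplicate request: %d\n", twt_param_equal(flows, 0x01, &req));
        return 0;
}
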
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
struct ieee80211_twt_setup *twt)
@@ -2469,8 +2500,7 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
enum ieee80211_twt_setup_cmd sta_setup_cmd;
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_twt_flow *flow;
- int flowid, table_id;
- u8 exp;
+ u8 flowid, table_id, exp;
if (mt7996_mac_check_twt_req(twt))
goto out;
@@ -2483,9 +2513,19 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
goto unlock;
+ if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
+ goto unlock;
+ }
+
+ if (mt7996_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
flowid = ffs(~msta->twt.flowid_mask) - 1;
- le16p_replace_bits(&twt_agrt->req_type, flowid,
- IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
table_id = ffs(~dev->twt.table_mask) - 1;
exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
@@ -2532,10 +2572,10 @@ void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
unlock:
mutex_unlock(&dev->mt76.mutex);
out:
- le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
- IEEE80211_TWT_REQTYPE_SETUP_CMD);
- twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
- (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 09c7a28a3d..482a8f7d75 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -342,6 +342,8 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
@@ -365,9 +367,13 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
- err = mt7996_mcu_add_key(&dev->mt76, vif, &msta->bip,
- key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+
+ if (key->keyidx == 6 || key->keyidx == 7)
+ err = mt7996_mcu_bcn_prot_enable(dev, vif, key);
+ else
+ err = mt7996_mcu_add_key(&dev->mt76, vif, key,
+ MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index bf917beb94..0586f5bd41 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -1079,6 +1079,9 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
static void
mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
+ struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
+ struct ieee80211_vif *vif = container_of((void *)msta->vif,
+ struct ieee80211_vif, drv_priv);
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
@@ -1098,8 +1101,17 @@ mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
- memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) {
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz,
+ sizeof(eht->mcs_map_bw20));
+ return;
+ }
+
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
@@ -2058,7 +2070,6 @@ out:
static int
mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
- struct mt76_connac_sta_key_conf *sta_key_conf,
struct sk_buff *skb,
struct ieee80211_key_conf *key,
enum set_key_cmd cmd)
@@ -2079,43 +2090,22 @@ mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
return -EOPNOTSUPP;
sec_key = &sec->key[0];
+ sec_key->wlan_idx = cpu_to_le16(wcid->idx);
+ sec_key->mgmt_prot = 0;
+ sec_key->cipher_id = cipher;
sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = sta_key_conf->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, sta_key_conf->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
- sec->n_cipher = 2;
- } else {
- sec_key->wlan_idx = cpu_to_le16(wcid->idx);
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(sta_key_conf->key, key->key, key->keylen);
- sta_key_conf->keyidx = key->keyidx;
- }
-
- sec->n_cipher = 1;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ sec_key->need_resp = 0;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
}
+
+ sec->n_cipher = 1;
} else {
sec->n_cipher = 0;
}
@@ -2124,7 +2114,6 @@ mt7996_mcu_sta_key_tlv(struct mt76_wcid *wcid,
}
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd)
{
@@ -2137,13 +2126,99 @@ int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = mt7996_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd);
+ ret = mt7996_mcu_sta_key_tlv(wcid, skb, key, cmd);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
+static int mt7996_mcu_get_pn(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ u8 *pn)
+{
+#define TSC_TYPE_BIGTK_PN 2
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct sta_rec_pn_info *pn_info;
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &mvif->sta.wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PN_INFO, sizeof(*pn_info));
+ pn_info = (struct sta_rec_pn_info *)tlv;
+
+ pn_info->tsc_type = TSC_TYPE_BIGTK_PN;
+ ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
+ MCU_WM_UNI_CMD_QUERY(STA_REC_UPDATE),
+ true, &rskb);
+ if (ret)
+ return ret;
+
+ skb_pull(rskb, 4);
+
+ pn_info = (struct sta_rec_pn_info *)rskb->data;
+ if (le16_to_cpu(pn_info->tag) == STA_REC_PN_INFO)
+ memcpy(pn, pn_info->pn, 6);
+
+ dev_kfree_skb(rskb);
+ return 0;
+}
+
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct mt7996_mcu_bcn_prot_tlv *bcn_prot;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+ u8 pn[6] = {};
+ int len = sizeof(struct bss_req_hdr) +
+ sizeof(struct mt7996_mcu_bcn_prot_tlv);
+ int ret;
+
+ skb = __mt7996_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BCN_PROT, sizeof(*bcn_prot));
+
+ bcn_prot = (struct mt7996_mcu_bcn_prot_tlv *)tlv;
+
+ ret = mt7996_mcu_get_pn(dev, vif, pn);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_CMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ bcn_prot->cipher_id = MCU_CIPHER_BCN_PROT_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ default:
+ dev_err(dev->mt76.dev, "unsupported BIGTK cipher\n");
+ dev_kfree_skb(skb);
+ return -EOPNOTSUPP;
+ }
+
+ pn[0]++;
+ memcpy(bcn_prot->pn, pn, 6);
+ bcn_prot->enable = BP_SW_MODE;
+ memcpy(bcn_prot->key, key->key, WLAN_MAX_KEY_LEN);
+ bcn_prot->key_id = key->keyidx;
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_WMWA_UNI_CMD(BSS_INFO_UPDATE), true);
+}
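
For software beacon protection the driver first queries the firmware for the current BIGTK packet number, bumps it, and programs it back alongside the key and cipher. The hunk above only increments the lowest byte; for illustration, a full little-endian 48-bit PN increment with carry, which is what a generic implementation would need (this generalisation is mine, not the driver's code):

#include <stdint.h>
#include <stdio.h>

#define PN_LEN 6   /* BIGTK packet numbers are 48 bits, stored LSB first */

/* Increment a little-endian PN, propagating the carry across all bytes. */
static void pn_increment(uint8_t pn[PN_LEN])
{
        for (int i = 0; i < PN_LEN; i++) {
                if (++pn[i] != 0)       /* stop once a byte did not wrap */
                        break;
        }
}

int main(void)
{
        uint8_t pn[PN_LEN] = { 0xff, 0xff, 0x01, 0x00, 0x00, 0x00 };

        pn_increment(pn);
        printf("%02x %02x %02x %02x %02x %02x\n",
               pn[0], pn[1], pn[2], pn[3], pn[4], pn[5]);
        return 0;
}
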
int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
struct ieee80211_vif *vif, bool enable)
{
@@ -3351,7 +3426,7 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
u32 addr = le32_to_cpu(*(__le32 *)(skb->data + 12));
u8 *buf = (u8 *)dev->mt76.eeprom.data + addr;
- skb_pull(skb, 64);
+ skb_pull(skb, 48);
memcpy(buf, skb->data, MT7996_EEPROM_BLOCK_SIZE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 9300cd8eeb..32ce57c8c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -250,6 +250,23 @@ struct bss_rate_tlv {
u8 __rsv2[9];
} __packed;
+enum {
+ BP_DISABLE,
+ BP_SW_MODE,
+ BP_HW_MODE,
+};
+
+struct mt7996_mcu_bcn_prot_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 enable;
+ u8 cipher_id;
+ u8 key[WLAN_MAX_KEY_LEN];
+ u8 key_id;
+ u8 __rsv[3];
+} __packed;
+
struct bss_ra_tlv {
__le16 tag;
__le16 len;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index e53cf6a370..6733ee9744 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -43,6 +43,7 @@
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
+#define MT7996_MIN_TWT_DUR 64
#define MT7996_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 3)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
@@ -236,7 +237,7 @@ struct mt7996_dev {
struct rchan *relay_fwlog;
struct {
- u8 table_mask;
+ u16 table_mask;
u8 n_agrt;
} twt;
@@ -485,9 +486,10 @@ int mt7996_init_debugfs(struct mt7996_phy *phy);
void mt7996_debugfs_rx_fw_monitor(struct mt7996_dev *dev, const void *data, int len);
bool mt7996_debugfs_rx_log(struct mt7996_dev *dev, const void *data, int len);
int mt7996_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
- struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
struct mt76_wcid *wcid, enum set_key_cmd cmd);
+int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *key);
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index da52f91693..e9a047a8c7 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1615,7 +1615,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(vif->ndev);
vif->monitor_flag = 0;
- wilc_set_operation_mode(vif, 0, 0, 0);
mutex_lock(&wl->vif_mutex);
list_del_rcu(&vif->list);
wl->vif_num--;
@@ -1810,15 +1809,24 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
INIT_LIST_HEAD(&wl->rxq_head.list);
INIT_LIST_HEAD(&wl->vif_list);
+ wl->hif_workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
+ wiphy_name(wl->wiphy));
+ if (!wl->hif_workqueue) {
+ ret = -ENOMEM;
+ goto free_cfg;
+ }
vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
NL80211_IFTYPE_STATION, false);
if (IS_ERR(vif)) {
ret = PTR_ERR(vif);
- goto free_cfg;
+ goto free_hq;
}
return 0;
+free_hq:
+ destroy_workqueue(wl->hif_workqueue);
+
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index a28da59384..e202013e6f 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -374,38 +374,49 @@ out:
void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct cfg80211_crypto_settings *crypto)
{
- struct wilc_join_bss_param *param;
- struct ieee80211_p2p_noa_attr noa_attr;
- u8 rates_len = 0;
- const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
+ const u8 *ies_data, *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie;
const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie;
+ struct ieee80211_p2p_noa_attr noa_attr;
+ const struct cfg80211_bss_ies *ies;
+ struct wilc_join_bss_param *param;
+ u8 rates_len = 0, ies_len;
int ret;
- const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies);
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return NULL;
+ rcu_read_lock();
+ ies = rcu_dereference(bss->ies);
+ ies_data = kmemdup(ies->data, ies->len, GFP_ATOMIC);
+ if (!ies_data) {
+ rcu_read_unlock();
+ kfree(param);
+ return NULL;
+ }
+ ies_len = ies->len;
+ rcu_read_unlock();
+
param->beacon_period = cpu_to_le16(bss->beacon_interval);
param->cap_info = cpu_to_le16(bss->capability);
param->bss_type = WILC_FW_BSS_TYPE_INFRA;
param->ch = ieee80211_frequency_to_channel(bss->channel->center_freq);
ether_addr_copy(param->bssid, bss->bssid);
- ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies_data, ies_len);
if (ssid_elm) {
if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN)
memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]);
}
- tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len);
+ tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies_data, ies_len);
if (tim_elm && tim_elm[1] >= 2)
param->dtim_period = tim_elm[3];
memset(param->p_suites, 0xFF, 3);
memset(param->akm_suites, 0xFF, 3);
- rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len);
+ rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies_data, ies_len);
if (rates_ie) {
rates_len = rates_ie[1];
if (rates_len > WILC_MAX_RATES_SUPPORTED)
@@ -416,7 +427,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
if (rates_len < WILC_MAX_RATES_SUPPORTED) {
supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
- ies->data, ies->len);
+ ies_data, ies_len);
if (supp_rates_ie) {
u8 ext_rates = supp_rates_ie[1];
@@ -431,11 +442,11 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
}
- ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len);
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies_data, ies_len);
if (ht_ie)
param->ht_capable = true;
- ret = cfg80211_get_p2p_attr(ies->data, ies->len,
+ ret = cfg80211_get_p2p_attr(ies_data, ies_len,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *)&noa_attr, sizeof(noa_attr));
if (ret > 0) {
@@ -459,7 +470,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
}
wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wmm_ie) {
struct ieee80211_wmm_param_ie *ie;
@@ -474,13 +485,13 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies->data, ies->len);
+ ies_data, ies_len);
if (wpa_ie) {
param->mode_802_11i = 1;
param->rsn_found = true;
}
- rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len);
+ rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies_data, ies_len);
if (rsn_ie) {
int rsn_ie_len = sizeof(struct element) + rsn_ie[1];
int offset = 8;
@@ -514,6 +525,7 @@ void *wilc_parse_join_bss_param(struct cfg80211_bss *bss,
param->akm_suites[i] = crypto->akm_suites[i] & 0xFF;
}
+ kfree(ies_data);
return (void *)param;
}
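
The wilc1000 change fixes an RCU misuse: the IE buffer returned by rcu_dereference() was parsed long after any read-side protection had ended, so it could be freed underneath the parser. The fix copies the IEs while the read lock is held and parses the private copy afterwards. The same copy-then-parse pattern in plain user-space C, with a rwlock standing in for RCU (illustration only; real RCU has different semantics and the names below are invented):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t ies_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned char shared_ies[64] = { 0x00, 0x04, 'w', 'l', 'a', 'n' };
static size_t shared_ies_len = 6;

/* Take a private snapshot so parsing can run without holding the lock. */
static unsigned char *snapshot_ies(size_t *len)
{
        unsigned char *copy;

        pthread_rwlock_rdlock(&ies_lock);
        copy = malloc(shared_ies_len);
        if (copy) {
                memcpy(copy, shared_ies, shared_ies_len);
                *len = shared_ies_len;
        }
        pthread_rwlock_unlock(&ies_lock);
        return copy;
}

int main(void)
{
        size_t len = 0;
        unsigned char *ies = snapshot_ies(&len);

        if (!ies)
                return 1;
        /* ...find SSID, rates, RSN etc. in the copy here... */
        printf("parsed %zu IE bytes from a private copy\n", len);
        free(ies);
        return 0;
}
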
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 91d71e0f7e..87fce5f418 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -890,8 +890,7 @@ static const struct net_device_ops wilc_netdev_ops = {
void wilc_netdev_cleanup(struct wilc *wilc)
{
- struct wilc_vif *vif;
- int srcu_idx, ifc_cnt = 0;
+ struct wilc_vif *vif, *vif_tmp;
if (!wilc)
return;
@@ -901,32 +900,19 @@ void wilc_netdev_cleanup(struct wilc *wilc)
wilc->firmware = NULL;
}
- srcu_idx = srcu_read_lock(&wilc->srcu);
- list_for_each_entry_rcu(vif, &wilc->vif_list, list) {
+ list_for_each_entry_safe(vif, vif_tmp, &wilc->vif_list, list) {
+ mutex_lock(&wilc->vif_mutex);
+ list_del_rcu(&vif->list);
+ wilc->vif_num--;
+ mutex_unlock(&wilc->vif_mutex);
+ synchronize_srcu(&wilc->srcu);
if (vif->ndev)
unregister_netdev(vif->ndev);
}
- srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
destroy_workqueue(wilc->hif_workqueue);
- while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
- mutex_lock(&wilc->vif_mutex);
- if (wilc->vif_num <= 0) {
- mutex_unlock(&wilc->vif_mutex);
- break;
- }
- vif = wilc_get_wl_to_vif(wilc);
- if (!IS_ERR(vif))
- list_del_rcu(&vif->list);
-
- wilc->vif_num--;
- mutex_unlock(&wilc->vif_mutex);
- synchronize_srcu(&wilc->srcu);
- ifc_cnt++;
- }
-
wilc_wlan_cfg_deinit(wilc);
wlan_deinit_locks(wilc);
wiphy_unregister(wilc->wiphy);
@@ -989,13 +975,6 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
goto error;
}
- wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM,
- ndev->name);
- if (!wl->hif_workqueue) {
- ret = -ENOMEM;
- goto unregister_netdev;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
@@ -1008,12 +987,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
-unregister_netdev:
+error:
if (rtnl_locked)
cfg80211_unregister_netdevice(ndev);
else
unregister_netdev(ndev);
- error:
free_netdev(ndev);
return ERR_PTR(ret);
}
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 77b4cdff73..4cf8586ed5 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -192,11 +192,11 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
/* assert ENABLE: */
gpiod_set_value(gpios->enable, 1);
mdelay(5);
- /* assert RESET: */
- gpiod_set_value(gpios->reset, 1);
- } else {
/* deassert RESET: */
gpiod_set_value(gpios->reset, 0);
+ } else {
+ /* assert RESET: */
+ gpiod_set_value(gpios->reset, 1);
/* deassert ENABLE: */
gpiod_set_value(gpios->enable, 0);
}
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 180907319e..04df0f54aa 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -7304,6 +7304,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
rtl8xxxu_write32(priv, REG_USB_HIMR, 0);
+ cancel_work_sync(&priv->c2hcmd_work);
cancel_delayed_work_sync(&priv->ra_watchdog);
rtl8xxxu_free_rx_resources(priv);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 298663b035..0c1c1ff310 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -309,6 +309,13 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
ret = rtw_pwr_seq_parser(rtwdev, pwr_seq);
+ if (pwr_on && rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB) {
+ if (chip->id == RTW_CHIP_TYPE_8822C ||
+ chip->id == RTW_CHIP_TYPE_8822B ||
+ chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_write8_clr(rtwdev, REG_SYS_STATUS1 + 1, BIT(0));
+ }
+
if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_SDIO)
rtw_write32(rtwdev, REG_SDIO_HIMR, imr);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 4a33d2e47f..63673005c2 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -2027,8 +2027,6 @@ static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
rtw_phy_setup_phy_cond(rtwdev, hal->pkg_type);
rtw_phy_init_tx_power(rtwdev);
- if (rfe_def->agc_btg_tbl)
- rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
rtw_phy_tx_power_by_rate_config(hal);
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 128e75a81b..37ef80c909 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -1761,12 +1761,15 @@ static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
void rtw_phy_load_tables(struct rtw_dev *rtwdev)
{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
const struct rtw_chip_info *chip = rtwdev->chip;
u8 rf_path;
rtw_load_table(rtwdev, chip->mac_tbl);
rtw_load_table(rtwdev, chip->bb_tbl);
rtw_load_table(rtwdev, chip->agc_tbl);
+ if (rfe_def->agc_btg_tbl)
+ rtw_load_table(rtwdev, rfe_def->agc_btg_tbl);
rtw_load_rfk_table(rtwdev);
for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 429bb420b0..fe5d8e1883 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -773,9 +773,9 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
dm_info->cck_fa_cnt = cck_fa_cnt;
dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
if (cck_enable)
dm_info->total_fa_cnt += cck_fa_cnt;
- dm_info->total_fa_cnt = ofdm_fa_cnt;
crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
index 7a5cbdc31e..e2c7d9f876 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821cu.c
@@ -9,24 +9,36 @@
#include "usb.h"
static const struct usb_device_id rtw_8821cu_id_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8731, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xb82b, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc80c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc820, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc821, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82a, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
{ USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82b, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8821CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x8811, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* 8811CU */
- { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0x2006, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* TOTOLINK A650UA v3 */
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(RTW_USB_VENDOR_ID_REALTEK, 0xc82c, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331d, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* D-Link */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xc811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xd811, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&(rtw8821c_hw_spec) }, /* Edimax */
{},
};
MODULE_DEVICE_TABLE(usb, rtw_8821cu_id_table);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index e6ab1ac6d7..a018851109 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -33,6 +33,36 @@ static void rtw_usb_fill_tx_checksum(struct rtw_usb *rtwusb,
rtw_tx_fill_txdesc_checksum(rtwdev, &pkt_info, skb->data);
}
+static void rtw_usb_reg_sec(struct rtw_dev *rtwdev, u32 addr, __le32 *data)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ struct usb_device *udev = rtwusb->udev;
+ bool reg_on_section = false;
+ u16 t_reg = 0x4e0;
+ u8 t_len = 1;
+ int status;
+
+ /* There are three sections:
+ * 1. on (0x00~0xFF; 0x1000~0x10FF): this section is always powered on
+ * 2. off (< 0xFE00, excluding "on" section): this section could be
+ * powered off
+ * 3. local (>= 0xFE00): usb specific registers section
+ */
+ if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
+ reg_on_section = true;
+
+ if (!reg_on_section)
+ return;
+
+ status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ RTW_USB_CMD_REQ, RTW_USB_CMD_WRITE,
+ t_reg, 0, data, t_len, 500);
+
+ if (status != t_len && status != -ENODEV)
+ rtw_err(rtwdev, "%s: reg 0x%x, usb write %u fail, status: %d\n",
+ __func__, t_reg, t_len, status);
+}
+
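
rtw_usb_reg_sec() only issues its extra dummy control transfer when the register being accessed lies in the always-powered "on" section; addresses in the power-gated and USB-local sections are left alone. The classification is just two range checks, shown here as a tiny stand-alone helper with the boundaries copied from the comment above:

#include <stdint.h>
#include <stdio.h>

enum reg_section { REG_SECTION_ON, REG_SECTION_OFF, REG_SECTION_LOCAL };

/* 0x0000-0x00ff and 0x1000-0x10ff stay powered, >= 0xfe00 is USB-local,
 * everything in between may be power-gated.
 */
static enum reg_section classify_reg(uint32_t addr)
{
        if (addr <= 0xff || (addr >= 0x1000 && addr <= 0x10ff))
                return REG_SECTION_ON;
        if (addr >= 0xfe00)
                return REG_SECTION_LOCAL;
        return REG_SECTION_OFF;
}

int main(void)
{
        printf("0x10c0 -> %d, 0x4e0 -> %d, 0xfe10 -> %d\n",
               classify_reg(0x10c0), classify_reg(0x4e0),
               classify_reg(0xfe10));
        return 0;
}
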
static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
@@ -58,6 +88,11 @@ static u32 rtw_usb_read(struct rtw_dev *rtwdev, u32 addr, u16 len)
rtw_err(rtwdev, "read register 0x%x failed with %d\n",
addr, ret);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
+
return le32_to_cpu(*data);
}
@@ -102,6 +137,11 @@ static void rtw_usb_write(struct rtw_dev *rtwdev, u32 addr, u32 val, int len)
if (ret < 0 && ret != -ENODEV && count++ < 4)
rtw_err(rtwdev, "write register 0x%x failed with %d\n",
addr, ret);
+
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8822B ||
+ rtwdev->chip->id == RTW_CHIP_TYPE_8821C)
+ rtw_usb_reg_sec(rtwdev, addr, data);
}
static void rtw_usb_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index 537caf9d91..bb4446b88c 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -344,6 +344,7 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
const int pairwise_cipher_suite_count_offset = 8 / sizeof(u16);
const int pairwise_cipher_suite_size = 4 / sizeof(u16);
const int akm_suite_size = 4 / sizeof(u16);
+ int ret = -EINVAL;
const u16 *ptr;
if (unlikely(!skb))
@@ -352,22 +353,26 @@ static int wfx_set_mfp_ap(struct wfx_vif *wvif)
ptr = (u16 *)cfg80211_find_ie(WLAN_EID_RSN, skb->data + ieoffset,
skb->len - ieoffset);
if (unlikely(!ptr))
- return -EINVAL;
+ goto free_skb;
ptr += pairwise_cipher_suite_count_offset;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + pairwise_cipher_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
ptr += 1 + akm_suite_size * *ptr;
if (WARN_ON(ptr > (u16 *)skb_tail_pointer(skb)))
- return -EINVAL;
+ goto free_skb;
wfx_hif_set_mfp(wvif, *ptr & BIT(7), *ptr & BIT(6));
- return 0;
+ ret = 0;
+
+free_skb:
+ dev_kfree_skb(skb);
+ return ret;
}
int wfx_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
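
The wfx change converts four early returns into jumps to a single free_skb label so the probe-response skb is released on every exit path, not just on success. The same single-exit cleanup idiom in a self-contained example (the function and resource names are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_buffer(const char *src)
{
        int ret = -1;              /* assume failure until proven otherwise */
        char *copy = strdup(src);

        if (!copy)
                return -1;

        if (strlen(copy) < 4)      /* every failure path below frees copy */
                goto free_copy;
        if (copy[0] != 'R')
                goto free_copy;

        printf("parsed: %s\n", copy);
        ret = 0;

free_copy:
        free(copy);
        return ret;
}

int main(void)
{
        return parse_buffer("RSNE") ? 1 : 0;
}
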
diff --git a/drivers/ntb/core.c b/drivers/ntb/core.c
index 27dd93deff..d702bee780 100644
--- a/drivers/ntb/core.c
+++ b/drivers/ntb/core.c
@@ -100,6 +100,8 @@ EXPORT_SYMBOL(ntb_unregister_client);
int ntb_register_device(struct ntb_dev *ntb)
{
+ int ret;
+
if (!ntb)
return -EINVAL;
if (!ntb->pdev)
@@ -120,7 +122,11 @@ int ntb_register_device(struct ntb_dev *ntb)
ntb->ctx_ops = NULL;
spin_lock_init(&ntb->ctx_lock);
- return device_register(&ntb->dev);
+ ret = device_register(&ntb->dev);
+ if (ret)
+ put_device(&ntb->dev);
+
+ return ret;
}
EXPORT_SYMBOL(ntb_register_device);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 86149275cc..771eac1412 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4272,7 +4272,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->ops = ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect and keep alive */
+ set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4341,7 +4342,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
set->reserved_tags = NVME_AQ_DEPTH;
else if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect */
+ set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index fbaee5a7be..e4acccc315 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -19,13 +19,6 @@
#define NVMF_DEF_FAIL_FAST_TMO -1
/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS 1
-
-/*
* Define a host as seen by the target. We allocate one at boot, but also
* allow overriding it when creating controllers. This is both to provide
* persistence of the Host NQN over multiple boots, and to allow using
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index b922df99f9..33678d0af2 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
- int ret;
sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
if (!sm_np) {
@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
if (!fw)
return -EPROBE_DEFER;
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get efuse gate");
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable gate");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add disable callback");
- return ret;
- }
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
dev_err(dev, "failed to get max user");
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index ec030b1916..f157fb50be 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -37,10 +37,12 @@ static ssize_t bw_name_read(struct file *fp, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct icc_path *path = fp->private_data;
+ const char *name = icc_get_name(path);
char buf[64];
- int i;
+ int i = 0;
- i = scnprintf(buf, sizeof(buf), "%.62s\n", icc_get_name(path));
+ if (name)
+ i = scnprintf(buf, sizeof(buf), "%.62s\n", name);
return simple_read_from_buffer(userbuf, count, ppos, buf, i);
}
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index c2630db745..19b6708f60 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -692,8 +692,13 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >>
PCI_REBAR_CTRL_NBAR_SHIFT;
+ /*
+ * PCIe r6.0, sec 7.8.6.2 requires us to support at least one
+ * size in the range from 1 MB to 512 GB. Advertise support
+ * for 1 MB BAR size only.
+ */
for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL)
- dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0);
+ dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, BIT(4));
}
/*
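
Writing BIT(4) into PCI_REBAR_CAP advertises exactly one supported size: in the Resizable BAR capability the size bitmap starts at bit 4 and bit n corresponds to 2^(n-4) MB, so bit 4 is the 1 MB entry the comment refers to. A quick decoder for that bitmap (the mask below is a simplified stand-in for the kernel's PCI_REBAR_CAP_SIZES):

#include <stdint.h>
#include <stdio.h>

#define REBAR_CAP_SIZES  0xFFFFFFF0u   /* supported-size bits, bit 4 and up */

/* Print every BAR size advertised by a Resizable BAR capability value. */
static void print_rebar_sizes(uint32_t cap)
{
        cap &= REBAR_CAP_SIZES;
        for (int bit = 4; bit < 32; bit++) {
                if (!(cap & (1u << bit)))
                        continue;
                /* bit 4 = 1 MB, bit 5 = 2 MB, bit 6 = 4 MB, ... */
                printf("supports %llu MB\n", 1ULL << (bit - 4));
        }
}

int main(void)
{
        print_rebar_sizes(1u << 4);     /* the value the endpoint now reports */
        return 0;
}
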
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index cbc3f08817..e3dc27f2ac 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -53,6 +53,7 @@
#define PARF_SLV_ADDR_SPACE_SIZE 0x358
#define PARF_DEVICE_TYPE 0x1000
#define PARF_BDF_TO_SID_TABLE_N 0x2000
+#define PARF_BDF_TO_SID_CFG 0x2c00
/* ELBI registers */
#define ELBI_SYS_CTRL 0x04
@@ -120,6 +121,9 @@
/* PARF_DEVICE_TYPE register fields */
#define DEVICE_TYPE_RC 0x4
+/* PARF_BDF_TO_SID_CFG fields */
+#define BDF_TO_SID_BYPASS BIT(0)
+
/* ELBI_SYS_CTRL register fields */
#define ELBI_SYS_CTRL_LT_ENABLE BIT(0)
@@ -229,6 +233,7 @@ struct qcom_pcie_ops {
struct qcom_pcie_cfg {
const struct qcom_pcie_ops *ops;
+ bool no_l0s;
};
struct qcom_pcie {
@@ -272,6 +277,26 @@ static int qcom_pcie_start_link(struct dw_pcie *pci)
return 0;
}
+static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
+{
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
+ u32 val;
+
+ if (!pcie->cfg->no_l0s)
+ return;
+
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
+ writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+}
+
static void qcom_pcie_clear_hpc(struct dw_pcie *pci)
{
u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
@@ -961,6 +986,7 @@ err_disable_regulators:
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
+ qcom_pcie_clear_aspm_l0s(pcie->pci);
qcom_pcie_clear_hpc(pcie->pci);
return 0;
@@ -1008,11 +1034,17 @@ static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
int i, nr_map, size = 0;
u32 smmu_sid_base;
+ u32 val;
of_get_property(dev->of_node, "iommu-map", &size);
if (!size)
return 0;
+ /* Enable BDF to SID translation by disabling bypass mode (default) */
+ val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
+ val &= ~BDF_TO_SID_BYPASS;
+ writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);
+
map = kzalloc(size, GFP_KERNEL);
if (!map)
return -ENOMEM;
@@ -1358,6 +1390,11 @@ static const struct qcom_pcie_cfg cfg_2_9_0 = {
.ops = &ops_2_9_0,
};
+static const struct qcom_pcie_cfg cfg_sc8280xp = {
+ .ops = &ops_1_9_0,
+ .no_l0s = true,
+};
+
static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = qcom_pcie_link_up,
.start_link = qcom_pcie_start_link,
@@ -1629,11 +1666,11 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
- { .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_9_0},
{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
- { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 30c7dfeccb..11cc354a30 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -49,6 +49,7 @@
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
+#include <linux/sizes.h>
#include <asm/mshyperv.h>
/*
@@ -465,7 +466,7 @@ struct pci_eject_response {
u32 status;
} __packed;
-static int pci_ring_size = (4 * PAGE_SIZE);
+static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);
/*
* Driver specific state.
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index f9dd6622fe..e47a77f943 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -330,7 +330,7 @@ static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
readl(base + PCIE_RC_DL_MDIO_ADDR);
writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
- err = readw_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
+ err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
MDIO_WT_DONE(data), 10, 100);
return err;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 3f60128560..2b7bc5a731 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -1278,15 +1278,11 @@ static int pci_vntb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ntb_register_device(&ndev->ntb);
if (ret) {
dev_err(dev, "Failed to register NTB device\n");
- goto err_register_dev;
+ return ret;
}
dev_dbg(dev, "PCI Virtual NTB driver loaded\n");
return 0;
-
-err_register_dev:
- put_device(&ndev->ntb.dev);
- return -EINVAL;
}
static struct pci_device_id pci_vntb_table[] = {
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 0c361561b8..4f47a13cb5 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -661,7 +661,7 @@ done:
p2pdma = rcu_dereference(provider->p2pdma);
if (p2pdma)
xa_store(&p2pdma->map_types, map_types_idx(client),
- xa_mk_value(map_type), GFP_KERNEL);
+ xa_mk_value(map_type), GFP_ATOMIC);
rcu_read_unlock();
return map_type;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 51ec9e7e78..9c59bf03d6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -473,6 +473,13 @@ static void pci_device_remove(struct device *dev)
if (drv->remove) {
pm_runtime_get_sync(dev);
+ /*
+ * If the driver provides a .runtime_idle() callback and it has
+ * started to run already, it may continue to run in parallel
+ * with the code below, so wait until all of the runtime PM
+ * activity has completed.
+ */
+ pm_runtime_barrier(dev);
drv->remove(pci_dev);
pm_runtime_put_noidle(dev);
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 24ae29f0d3..1697cb8289 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -366,11 +366,6 @@ static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
return 0;
}
-static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
-{
- return dev->error_state == pci_channel_io_perm_failure;
-}
-
/* pci_dev priv_flags */
#define PCI_DEV_ADDED 0
#define PCI_DPC_RECOVERED 1
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 94111e4382..e5d7c12854 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -234,7 +234,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
for (i = 0; i < pdev->dpc_rp_log_size - 5; i++) {
pci_read_config_dword(pdev,
- cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix);
+ cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG + i * 4, &prefix);
pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix);
}
clear_status:
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 59c90d04a6..705893b5f7 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -13,6 +13,7 @@
#define dev_fmt(fmt) "AER: " fmt
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -85,6 +86,18 @@ static int report_error_detected(struct pci_dev *dev,
return 0;
}
+static int pci_pm_runtime_get_sync(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_get_sync(&pdev->dev);
+ return 0;
+}
+
+static int pci_pm_runtime_put(struct pci_dev *pdev, void *data)
+{
+ pm_runtime_put(&pdev->dev);
+ return 0;
+}
+
static int report_frozen_detected(struct pci_dev *dev, void *data)
{
return report_error_detected(dev, pci_channel_io_frozen, data);
@@ -207,6 +220,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
else
bridge = pci_upstream_bridge(dev);
+ pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
+
pci_dbg(bridge, "broadcast error_detected message\n");
if (state == pci_channel_io_frozen) {
pci_walk_bridge(bridge, report_frozen_detected, &status);
@@ -251,10 +266,15 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pcie_clear_device_status(dev);
pci_aer_clear_nonfatal_status(dev);
}
+
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_info(bridge, "device recovery successful\n");
return status;
failed:
+ pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
+
pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
/* TODO: Should kernel panic here? */
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a2bf6de114..fa770601c6 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5520,6 +5520,7 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_3WARE, 0x1004, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
@@ -6218,6 +6219,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
/*
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 1804794d0e..5a4adf6c04 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -1672,7 +1672,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
rc = switchtec_init_isr(stdev);
if (rc) {
dev_err(&stdev->dev, "failed to init isr.\n");
- goto err_put;
+ goto err_exit_pci;
}
iowrite32(SWITCHTEC_EVENT_CLEAR |
@@ -1693,6 +1693,8 @@ static int switchtec_pci_probe(struct pci_dev *pdev,
err_devadd:
stdev_kill(stdev);
+err_exit_pci:
+ switchtec_exit_pci(stdev);
err_put:
ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
put_device(&stdev->dev);
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index c584165b13..7e3aa7e234 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -2305,6 +2305,17 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset)
dev_dbg(cmn->dev, "ignoring external node %llx\n", reg);
continue;
}
+ /*
+ * AmpereOneX erratum AC04_MESH_1 makes some XPs report a bogus
+ * child count larger than the number of valid child pointers.
+ * A child offset of 0 can only occur on CMN-600; otherwise it
+ * would imply the root node being its own grandchild, which
+ * we can safely dismiss in general.
+ */
+ if (reg == 0 && cmn->part != PART_CMN600) {
+ dev_dbg(cmn->dev, "bogus child pointer?\n");
+ continue;
+ }
arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn);
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index bc0d414a6a..308c996964 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -59,7 +59,7 @@
#define CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK GENMASK_ULL(63, 59)
#define CXL_PMU_FILTER_CFG_REG(n, f) (0x400 + 4 * ((f) + (n) * 8))
-#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(15, 0)
+#define CXL_PMU_FILTER_CFG_VALUE_MSK GENMASK(31, 0)
#define CXL_PMU_COUNTER_REG(n) (0xc00 + 8 * (n))
@@ -314,9 +314,9 @@ static bool cxl_pmu_config1_get_edge(struct perf_event *event)
}
/*
- * CPMU specification allows for 8 filters, each with a 16 bit value...
- * So we need to find 8x16bits to store it in.
- * As the value used for disable is 0xffff, a separate enable switch
+ * CPMU specification allows for 8 filters, each with a 32 bit value...
+ * So we need to find 8x32bits to store it in.
+ * As the value used for disable is 0xffff_ffff, a separate enable switch
* is needed.
*/
@@ -642,7 +642,7 @@ static void cxl_pmu_event_start(struct perf_event *event, int flags)
if (cxl_pmu_config1_hdm_filter_en(event))
cfg = cxl_pmu_config2_get_hdm_decoder(event);
else
- cfg = GENMASK(15, 0); /* No filtering if 0xFFFF_FFFF */
+ cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */
writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0));
}
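For context, a minimal sketch (not part of the patch) of the GENMASK() usage the hunks above correct: GENMASK(h, l) from <linux/bits.h> builds a contiguous mask from bit h down to bit l, so GENMASK(31, 0) is the 32-bit all-ones value the driver writes to mean "no filtering". The field name below is hypothetical.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FILTER_VALUE_MSK	GENMASK(31, 0)	/* hypothetical 32-bit filter field */

static u64 encode_filter_value(u64 reg, u32 value)
{
	/* FIELD_PREP() masks and shifts the value into the field's position. */
	return (reg & ~FILTER_VALUE_MSK) | FIELD_PREP(FILTER_VALUE_MSK, value);
}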
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 16acd4dcdb..452aab49db 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -512,7 +512,7 @@ static void pmu_sbi_set_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) | BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_reset_scounteren(void *arg)
@@ -521,7 +521,7 @@ static void pmu_sbi_reset_scounteren(void *arg)
if (event->hw.idx != -1)
csr_write(CSR_SCOUNTEREN,
- csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+ csr_read(CSR_SCOUNTEREN) & ~BIT(pmu_sbi_csr_index(event)));
}
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -731,14 +731,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
/* compute hardware counter index */
hidx = info->csr - CSR_CYCLE;
/* check if the corresponding bit is set in sscountovf */
- if (!(overflow & (1 << hidx)))
+ if (!(overflow & BIT(hidx)))
continue;
/*
* Keep a track of overflowed counters so that they can be started
* with updated initial value.
*/
- overflowed_ctrs |= 1 << lidx;
+ overflowed_ctrs |= BIT(lidx);
hw_evt = &event->hw;
riscv_pmu_event_update(event);
perf_sample_data_init(&data, 0, hw_evt->last_period);
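For context, a minimal sketch (not part of the patch) of why the hunks above prefer BIT() over an open-coded shift: BIT(n) from <linux/bits.h> expands to (1UL << (n)), so the arithmetic is done in unsigned long, while (1 << n) is a signed 32-bit shift that misbehaves for bit 31 and cannot reach higher bits at all. The helper names are hypothetical.

#include <linux/bits.h>
#include <linux/types.h>

static unsigned long counter_mask_set(unsigned long mask, unsigned int idx)
{
	/* Valid for idx up to BITS_PER_LONG - 1. */
	return mask | BIT(idx);
}

static unsigned long counter_mask_clear(unsigned long mask, unsigned int idx)
{
	return mask & ~BIT(idx);
}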
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 142ebe0247..983a6e6173 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+int tegra_xusb_padctl_get_port_number(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+
+ if (!phy)
+ return -ENODEV;
+
+ lane = phy_get_drvdata(phy);
+
+ return lane->index;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
index a02f7c3269..09edcf47ef 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c
@@ -1198,7 +1198,6 @@ static const struct mtk_pin_reg_calc mt8186_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8186_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8186_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8186_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8186_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8186_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8186_pin_pu_range),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
index dee1b3aefd..bf5788d681 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c
@@ -1379,7 +1379,6 @@ static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = {
[PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_DI] = MTK_RANGE(mt8192_pin_di_range),
[PINCTRL_PIN_REG_DO] = MTK_RANGE(mt8192_pin_do_range),
- [PINCTRL_PIN_REG_SR] = MTK_RANGE(mt8192_pin_dir_range),
[PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt8192_pin_smt_range),
[PINCTRL_PIN_REG_IES] = MTK_RANGE(mt8192_pin_ies_range),
[PINCTRL_PIN_REG_PU] = MTK_RANGE(mt8192_pin_pu_range),
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 863732287b..445c61a4a7 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -1575,8 +1575,10 @@ static int nmk_pmx_set(struct pinctrl_dev *pctldev, unsigned function,
* Then mask the pins that need to be sleeping now when we're
* switching to the ALT C function.
*/
- for (i = 0; i < g->grp.npins; i++)
- slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(g->grp.pins[i]);
+ for (i = 0; i < g->grp.npins; i++) {
+ unsigned int bit = g->grp.pins[i] % NMK_GPIO_PER_CHIP;
+ slpm[g->grp.pins[i] / NMK_GPIO_PER_CHIP] &= ~BIT(bit);
+ }
nmk_gpio_glitch_slpm_init(slpm);
}
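For context, a minimal sketch (not part of the patch) of the indexing fix above: when a flat pin number selects both a per-chip word and a bit inside that word, the word index is pin / PINS_PER_CHIP and the bit index is pin % PINS_PER_CHIP; feeding the raw pin number to BIT(), as the old code did, produces an out-of-range shift for every chip after the first. PINS_PER_CHIP below is a hypothetical stand-in for NMK_GPIO_PER_CHIP.

#include <linux/bits.h>

#define PINS_PER_CHIP	32	/* hypothetical, mirrors NMK_GPIO_PER_CHIP */

static void mask_pin_in_slpm(unsigned long *slpm, unsigned int pin)
{
	unsigned int word = pin / PINS_PER_CHIP;
	unsigned int bit = pin % PINS_PER_CHIP;

	slpm[word] &= ~BIT(bit);
}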
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index 93e51abbf5..d1e92bbed3 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -731,10 +731,12 @@ static int sh_pfc_resume_noirq(struct device *dev)
sh_pfc_walk_regs(pfc, sh_pfc_restore_reg);
return 0;
}
+#define pm_psci_sleep_ptr(_ptr) pm_sleep_ptr(_ptr)
#else
static int sh_pfc_suspend_init(struct sh_pfc *pfc) { return 0; }
static int sh_pfc_suspend_noirq(struct device *dev) { return 0; }
static int sh_pfc_resume_noirq(struct device *dev) { return 0; }
+#define pm_psci_sleep_ptr(_ptr) PTR_IF(false, (_ptr))
#endif /* CONFIG_ARM_PSCI_FW */
static DEFINE_NOIRQ_DEV_PM_OPS(sh_pfc_pm, sh_pfc_suspend_noirq, sh_pfc_resume_noirq);
@@ -1415,7 +1417,7 @@ static struct platform_driver sh_pfc_driver = {
.driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(sh_pfc_of_table),
- .pm = pm_sleep_ptr(&sh_pfc_pm),
+ .pm = pm_psci_sleep_ptr(&sh_pfc_pm),
},
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index acdea6ac15..d2de526a3b 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -2384,6 +2384,14 @@ static const unsigned int scif_clk_mux[] = {
SCIF_CLK_MARK,
};
+static const unsigned int scif_clk2_pins[] = {
+ /* SCIF_CLK2 */
+ RCAR_GP_PIN(8, 11),
+};
+static const unsigned int scif_clk2_mux[] = {
+ SCIF_CLK2_MARK,
+};
+
/* - SSI ------------------------------------------------- */
static const unsigned int ssi_data_pins[] = {
/* SSI_SD */
@@ -2694,6 +2702,7 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif4_clk),
SH_PFC_PIN_GROUP(scif4_ctrl),
SH_PFC_PIN_GROUP(scif_clk),
+ SH_PFC_PIN_GROUP(scif_clk2),
SH_PFC_PIN_GROUP(ssi_data),
SH_PFC_PIN_GROUP(ssi_ctrl),
@@ -3015,6 +3024,10 @@ static const char * const scif_clk_groups[] = {
"scif_clk",
};
+static const char * const scif_clk2_groups[] = {
+ "scif_clk2",
+};
+
static const char * const ssi_groups[] = {
"ssi_data",
"ssi_ctrl",
@@ -3102,6 +3115,7 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(scif3),
SH_PFC_FUNCTION(scif4),
SH_PFC_FUNCTION(scif_clk),
+ SH_PFC_FUNCTION(scif_clk2),
SH_PFC_FUNCTION(ssi),
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 311abcac89..c2f6e20b45 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -96,7 +96,7 @@ struct intel_tpmi_pfs_entry {
*/
struct intel_tpmi_pm_feature {
struct intel_tpmi_pfs_entry pfs_header;
- unsigned int vsec_offset;
+ u64 vsec_offset;
struct intel_vsec_device *vsec_dev;
};
@@ -389,7 +389,7 @@ static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
read_blocked = feature_state.read_blocked ? 'Y' : 'N';
write_blocked = feature_state.write_blocked ? 'Y' : 'N';
}
- seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\t\t%c\t\t%c\n",
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%016llx\t%c\t%c\t\t%c\t\t%c\n",
pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled,
@@ -408,7 +408,8 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
struct intel_tpmi_pm_feature *pfs = s->private;
int count, ret = 0;
void __iomem *mem;
- u32 off, size;
+ u32 size;
+ u64 off;
u8 *buffer;
size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
@@ -424,7 +425,7 @@ static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
mutex_lock(&tpmi_dev_lock);
for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
- seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+ seq_printf(s, "TPMI Instance:%d offset:0x%llx\n", count, off);
mem = ioremap(off, size);
if (!mem) {
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 17cc4b45e0..a64f56ddd4 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -20,9 +20,11 @@
#define P2SBC_HIDE BIT(8)
#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1)
+#define P2SB_DEVFN_GOLDMONT PCI_DEVFN(13, 0)
+#define SPI_DEVFN_GOLDMONT PCI_DEVFN(13, 2)
static const struct x86_cpu_id p2sb_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, P2SB_DEVFN_GOLDMONT),
{}
};
@@ -98,21 +100,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
{
- unsigned int slot, fn;
-
- if (PCI_FUNC(devfn) == 0) {
- /*
- * When function number of the P2SB device is zero, scan it and
- * other function numbers, and if devices are available, cache
- * their BAR0s.
- */
- slot = PCI_SLOT(devfn);
- for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
- p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
- } else {
- /* Scan the P2SB device and cache its BAR0 */
- p2sb_scan_and_cache_devfn(bus, devfn);
- }
+ /* Scan the P2SB device and cache its BAR0 */
+ p2sb_scan_and_cache_devfn(bus, devfn);
+
+ /* On Goldmont p2sb_bar() also gets called for the SPI controller */
+ if (devfn == P2SB_DEVFN_GOLDMONT)
+ p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT);
if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
return -ENOENT;
diff --git a/drivers/platform/x86/x86-android-tablets/other.c b/drivers/platform/x86/x86-android-tablets/other.c
index bc6bbf7ec6..278402dcb8 100644
--- a/drivers/platform/x86/x86-android-tablets/other.c
+++ b/drivers/platform/x86/x86-android-tablets/other.c
@@ -68,7 +68,7 @@ static const struct x86_i2c_client_info acer_b1_750_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
+static struct gpiod_lookup_table acer_b1_750_nvt_ts_gpios = {
.dev_id = "i2c-NVT-ts",
.table = {
GPIO_LOOKUP("INT33FC:01", 26, "reset", GPIO_ACTIVE_LOW),
@@ -77,7 +77,7 @@ static struct gpiod_lookup_table acer_b1_750_goodix_gpios = {
};
static struct gpiod_lookup_table * const acer_b1_750_gpios[] = {
- &acer_b1_750_goodix_gpios,
+ &acer_b1_750_nvt_ts_gpios,
&int3496_reference_gpios,
NULL
};
diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c
index 0f0943e3ef..e4d5afab7b 100644
--- a/drivers/pmdomain/qcom/rpmhpd.c
+++ b/drivers/pmdomain/qcom/rpmhpd.c
@@ -217,7 +217,6 @@ static struct rpmhpd *sa8540p_rpmhpds[] = {
[SC8280XP_CX] = &cx,
[SC8280XP_CX_AO] = &cx_ao,
[SC8280XP_EBI] = &ebi,
- [SC8280XP_GFX] = &gfx,
[SC8280XP_LCX] = &lcx,
[SC8280XP_LMX] = &lmx,
[SC8280XP_MMCX] = &mmcx,
diff --git a/drivers/power/supply/mm8013.c b/drivers/power/supply/mm8013.c
index caa272b035..20c1651ca3 100644
--- a/drivers/power/supply/mm8013.c
+++ b/drivers/power/supply/mm8013.c
@@ -71,7 +71,6 @@ static int mm8013_checkdevice(struct mm8013_chip *chip)
static enum power_supply_property mm8013_battery_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
- POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_CHARGE_NOW,
@@ -103,16 +102,6 @@ static int mm8013_get_property(struct power_supply *psy,
val->intval = regval;
break;
- case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = regmap_read(chip->regmap, REG_FLAGS, &regval);
- if (ret < 0)
- return ret;
-
- if (regval & MM8013_FLAG_CHG_INH)
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
- else
- val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
- break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
ret = regmap_read(chip->regmap, REG_FULL_CHARGE_CAPACITY, &regval);
if (ret < 0)
@@ -187,6 +176,8 @@ static int mm8013_get_property(struct power_supply *psy,
if (regval & MM8013_FLAG_DSG)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ else if (regval & MM8013_FLAG_CHG_INH)
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
else if (regval & MM8013_FLAG_CHG)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
else if (regval & MM8013_FLAG_FC)
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 9193c3b8ed..ae7ee61197 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -219,7 +219,7 @@ static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
ret = freq_qos_add_request(&policy->constraints,
&dtpm_cpu->qos_req, FREQ_QOS_MAX,
pd->table[pd->nr_perf_states - 1].frequency);
- if (ret)
+ if (ret < 0)
goto out_dtpm_unregister;
cpufreq_cpu_put(policy);
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 2feed036c1..9d3e102f1a 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
@@ -759,6 +760,11 @@ static int rapl_config(struct rapl_package *rp)
default:
return -EINVAL;
}
+
+ /* defaults_msr can be NULL on unsupported platforms */
+ if (!rp->priv->defaults || !rp->priv->rpi)
+ return -ENODEV;
+
return 0;
}
@@ -1499,7 +1505,7 @@ static int rapl_detect_domains(struct rapl_package *rp)
}
/* called from CPU hotplug notifier, hotplug lock held */
-void rapl_remove_package(struct rapl_package *rp)
+void rapl_remove_package_cpuslocked(struct rapl_package *rp)
{
struct rapl_domain *rd, *rd_package = NULL;
@@ -1528,10 +1534,18 @@ void rapl_remove_package(struct rapl_package *rp)
list_del(&rp->plist);
kfree(rp);
}
+EXPORT_SYMBOL_GPL(rapl_remove_package_cpuslocked);
+
+void rapl_remove_package(struct rapl_package *rp)
+{
+ guard(cpus_read_lock)();
+ rapl_remove_package_cpuslocked(rp);
+}
EXPORT_SYMBOL_GPL(rapl_remove_package);
/* caller to ensure CPU hotplug lock is held */
-struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu)
{
struct rapl_package *rp;
int uid;
@@ -1549,10 +1563,17 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv,
return NULL;
}
+EXPORT_SYMBOL_GPL(rapl_find_package_domain_cpuslocked);
+
+struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+{
+ guard(cpus_read_lock)();
+ return rapl_find_package_domain_cpuslocked(id, priv, id_is_cpu);
+}
EXPORT_SYMBOL_GPL(rapl_find_package_domain);
/* called from CPU hotplug notifier, hotplug lock held */
-struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
struct rapl_package *rp;
int ret;
@@ -1598,6 +1619,13 @@ err_free_package:
kfree(rp);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(rapl_add_package_cpuslocked);
+
+struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
+{
+ guard(cpus_read_lock)();
+ return rapl_add_package_cpuslocked(id, priv, id_is_cpu);
+}
EXPORT_SYMBOL_GPL(rapl_add_package);
static void power_limit_state_save(void)
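For context, a minimal sketch (not part of the patch) of the scope-based locking used by the new wrappers above: guard(cpus_read_lock)() from <linux/cleanup.h> and <linux/cpu.h> takes the CPU hotplug read lock and releases it automatically when the enclosing scope ends, so every return path is covered without an explicit unlock. The function names below are hypothetical.

#include <linux/cleanup.h>
#include <linux/cpu.h>

static int do_work_cpuslocked(int arg)
{
	/* Caller guarantees CPU hotplug is excluded here. */
	return arg;
}

static int do_work(int arg)
{
	guard(cpus_read_lock)();	/* dropped automatically on return */
	return do_work_cpuslocked(arg);
}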
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index 250bd41a58..b4b6930cac 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -73,9 +73,9 @@ static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_package *rp;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, rapl_msr_priv, true);
+ rp = rapl_add_package_cpuslocked(cpu, rapl_msr_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -88,14 +88,14 @@ static int rapl_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, rapl_msr_priv, true);
if (!rp)
return 0;
cpumask_clear_cpu(cpu, &rp->cpumask);
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids)
- rapl_remove_package(rp);
+ rapl_remove_package_cpuslocked(rp);
else if (rp->lead_cpu == cpu)
rp->lead_cpu = lead_cpu;
return 0;
diff --git a/drivers/powercap/intel_rapl_tpmi.c b/drivers/powercap/intel_rapl_tpmi.c
index 891c90fefd..f6b7f08597 100644
--- a/drivers/powercap/intel_rapl_tpmi.c
+++ b/drivers/powercap/intel_rapl_tpmi.c
@@ -40,6 +40,7 @@ enum tpmi_rapl_register {
TPMI_RAPL_REG_ENERGY_STATUS,
TPMI_RAPL_REG_PERF_STATUS,
TPMI_RAPL_REG_POWER_INFO,
+ TPMI_RAPL_REG_DOMAIN_INFO,
TPMI_RAPL_REG_INTERRUPT,
TPMI_RAPL_REG_MAX = 15,
};
@@ -130,6 +131,12 @@ static void trp_release(struct tpmi_rapl_package *trp)
mutex_unlock(&tpmi_rapl_lock);
}
+/*
+ * Bit 0 of TPMI_RAPL_REG_DOMAIN_INFO indicates if the current package is a domain
+ * root or not. Only domain root packages can enumerate System (Psys) Domain.
+ */
+#define TPMI_RAPL_DOMAIN_ROOT BIT(0)
+
static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
{
u8 tpmi_domain_version;
@@ -139,6 +146,7 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
enum rapl_domain_reg_id reg_id;
int tpmi_domain_size, tpmi_domain_flags;
u64 tpmi_domain_header = readq(trp->base + offset);
+ u64 tpmi_domain_info;
/* Domain Parent bits are ignored for now */
tpmi_domain_version = tpmi_domain_header & 0xff;
@@ -169,6 +177,13 @@ static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
domain_type = RAPL_DOMAIN_PACKAGE;
break;
case TPMI_RAPL_DOMAIN_SYSTEM:
+ if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_DOMAIN_INFO))) {
+ pr_warn(FW_BUG "System domain must support Domain Info register\n");
+ return -ENODEV;
+ }
+ tpmi_domain_info = readq(trp->base + offset + TPMI_RAPL_REG_DOMAIN_INFO);
+ if (!(tpmi_domain_info & TPMI_RAPL_DOMAIN_ROOT))
+ return 0;
domain_type = RAPL_DOMAIN_PLATFORM;
break;
case TPMI_RAPL_DOMAIN_MEMORY:
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 07920e0347..584288d8c0 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -186,7 +186,7 @@ static int atmel_hlcdc_pwm_suspend(struct device *dev)
struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
/* Keep the periph clock enabled if the PWM is still running. */
- if (pwm_is_enabled(&atmel->chip.pwms[0]))
+ if (!pwm_is_enabled(&atmel->chip.pwms[0]))
clk_disable_unprepare(atmel->hlcdc->periph_clk);
return 0;
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index 116fa060e3..29dcf38f3b 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -288,9 +288,9 @@ static int img_pwm_probe(struct platform_device *pdev)
return PTR_ERR(imgchip->sys_clk);
}
- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");
+ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
if (IS_ERR(imgchip->pwm_clk)) {
- dev_err(&pdev->dev, "failed to get imgchip clock\n");
+ dev_err(&pdev->dev, "failed to get pwm clock\n");
return PTR_ERR(imgchip->pwm_clk);
}
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index dc92cea31c..d885b30a14 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -395,8 +395,17 @@ out:
static int sti_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
+ struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
+ struct sti_pwm_compat_data *cdata = pc->cdata;
+ struct device *dev = pc->dev;
int err;
+ if (pwm->hwpwm >= cdata->pwm_num_devs) {
+ dev_err(dev, "device %u is not valid for pwm mode\n",
+ pwm->hwpwm);
+ return -EINVAL;
+ }
+
if (state->polarity != PWM_POLARITY_NORMAL)
return -EINVAL;
@@ -646,7 +655,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
- pc->chip.npwm = pc->cdata->pwm_num_devs;
+ pc->chip.npwm = max(cdata->pwm_num_devs, cdata->cpt_num_devs);
for (i = 0; i < cdata->cpt_num_devs; i++) {
struct sti_cpt_ddata *ddata = &cdata->ddata[i];
diff --git a/drivers/regulator/max5970-regulator.c b/drivers/regulator/max5970-regulator.c
index 830a1c4cd7..8bbcd983a7 100644
--- a/drivers/regulator/max5970-regulator.c
+++ b/drivers/regulator/max5970-regulator.c
@@ -29,8 +29,8 @@ struct max5970_regulator {
};
enum max597x_regulator_id {
- MAX597X_SW0,
- MAX597X_SW1,
+ MAX597X_sw0,
+ MAX597X_sw1,
};
static int max5970_read_adc(struct regmap *regmap, int reg, long *val)
@@ -378,8 +378,8 @@ static int max597x_dt_parse(struct device_node *np,
}
static const struct regulator_desc regulators[] = {
- MAX597X_SWITCH(SW0, MAX5970_REG_CHXEN, 0, "vss1"),
- MAX597X_SWITCH(SW1, MAX5970_REG_CHXEN, 1, "vss2"),
+ MAX597X_SWITCH(sw0, MAX5970_REG_CHXEN, 0, "vss1"),
+ MAX597X_SWITCH(sw1, MAX5970_REG_CHXEN, 1, "vss2"),
};
static int max597x_regmap_read_clear(struct regmap *map, unsigned int reg,
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index 97f075ed68..cb1de24b98 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -210,6 +210,7 @@ static const struct of_device_id regulator_userspace_consumer_of_match[] = {
{ .compatible = "regulator-output", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_userspace_consumer_of_match);
static struct platform_driver regulator_userspace_consumer_driver = {
.probe = regulator_userspace_consumer_probe,
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 83d76915a6..25b66b113b 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
kfree(vdev);
+ of_reserved_mem_device_release(&rvdev->pdev->dev);
+ dma_release_coherent_memory(&rvdev->pdev->dev);
+
put_device(&rvdev->pdev->dev);
}
@@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev)
rproc_remove_subdev(rproc, &rvdev->subdev);
rproc_remove_rvdev(rvdev);
- of_reserved_mem_device_release(&pdev->dev);
- dma_release_coherent_memory(&pdev->dev);
-
put_device(&rproc->dev);
}
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 4f469f0bcf..10b442c6f6 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -120,7 +120,7 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
void *va;
dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
- va = ioremap_wc(mem->dma, mem->len);
+ va = (__force void *)ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va)) {
dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
&mem->dma, mem->len);
@@ -137,7 +137,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
- iounmap(mem->va);
+ iounmap((__force __iomem void *)mem->va);
return 0;
}
@@ -657,7 +657,7 @@ done:
* entire area by overwriting it with the initial values stored in rproc->clean_table.
*/
*table_sz = RSC_TBL_SIZE;
- return (struct resource_table *)ddata->rsc_va;
+ return (__force struct resource_table *)ddata->rsc_va;
}
static const struct rproc_ops st_rproc_ops = {
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 3814e0845e..b1e1d277d4 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1832,7 +1832,8 @@ config RTC_DRV_MT2712
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+ depends on MFD_MT6397 || COMPILE_TEST
+ select IRQ_DOMAIN
help
This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/lib_test.c b/drivers/rtc/lib_test.c
index d5caf36c56..225c859d6d 100644
--- a/drivers/rtc/lib_test.c
+++ b/drivers/rtc/lib_test.c
@@ -54,7 +54,7 @@ static void rtc_time64_to_tm_test_date_range(struct kunit *test)
days = div_s64(secs, 86400);
- #define FAIL_MSG "%d/%02d/%02d (%2d) : %ld", \
+ #define FAIL_MSG "%d/%02d/%02d (%2d) : %lld", \
year, month, mday, yday, days
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 833cfab7d8..0eea5afe9e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -8,9 +8,6 @@
* Copyright IBM Corp. 1999, 2009
*/
-#define KMSG_COMPONENT "dasd"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -3408,8 +3405,7 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
ret = ccw_device_set_online(cdev);
if (ret)
- pr_warn("%s: Setting the DASD online failed with rc=%d\n",
- dev_name(&cdev->dev), ret);
+ dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
}
/*
@@ -3496,8 +3492,11 @@ int dasd_generic_set_online(struct ccw_device *cdev,
{
struct dasd_discipline *discipline;
struct dasd_device *device;
+ struct device *dev;
int rc;
+ dev = &cdev->dev;
+
/* first online clears initial online feature flag */
dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
device = dasd_create_device(cdev);
@@ -3510,11 +3509,10 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Try to load the required module. */
rc = request_module(DASD_DIAG_MOD);
if (rc) {
- pr_warn("%s Setting the DASD online failed "
- "because the required module %s "
- "could not be loaded (rc=%d)\n",
- dev_name(&cdev->dev), DASD_DIAG_MOD,
- rc);
+ dev_warn(dev, "Setting the DASD online failed "
+ "because the required module %s "
+ "could not be loaded (rc=%d)\n",
+ DASD_DIAG_MOD, rc);
dasd_delete_device(device);
return -ENODEV;
}
@@ -3522,8 +3520,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
/* Module init could have failed, so check again here after
* request_module(). */
if (!dasd_diag_discipline_pointer) {
- pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
dasd_delete_device(device);
return -ENODEV;
}
@@ -3533,37 +3530,33 @@ int dasd_generic_set_online(struct ccw_device *cdev,
dasd_delete_device(device);
return -EINVAL;
}
+ device->base_discipline = base_discipline;
if (!try_module_get(discipline->owner)) {
- module_put(base_discipline->owner);
dasd_delete_device(device);
return -EINVAL;
}
- device->base_discipline = base_discipline;
device->discipline = discipline;
/* check_device will allocate block device if necessary */
rc = discipline->check_device(device);
if (rc) {
- pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
- dev_name(&cdev->dev), discipline->name, rc);
- module_put(discipline->owner);
- module_put(base_discipline->owner);
+ dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
+ discipline->name, rc);
dasd_delete_device(device);
return rc;
}
dasd_set_target_state(device, DASD_STATE_ONLINE);
if (device->state <= DASD_STATE_KNOWN) {
- pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
rc = -ENODEV;
dasd_set_target_state(device, DASD_STATE_NEW);
if (device->block)
dasd_free_block(device->block);
dasd_delete_device(device);
- } else
- pr_debug("dasd_generic device %s found\n",
- dev_name(&cdev->dev));
+ } else {
+ dev_dbg(dev, "dasd_generic device found\n");
+ }
wait_event(dasd_init_waitq, _wait_for_device(device));
@@ -3574,10 +3567,13 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
+ int max_count, open_count, rc;
struct dasd_device *device;
struct dasd_block *block;
- int max_count, open_count, rc;
unsigned long flags;
+ struct device *dev;
+
+ dev = &cdev->dev;
rc = 0;
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
@@ -3598,11 +3594,10 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
- pr_warn("%s: The DASD cannot be set offline with open count %i\n",
- dev_name(&cdev->dev), open_count);
+ dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
+ open_count);
else
- pr_warn("%s: The DASD cannot be set offline while it is in use\n",
- dev_name(&cdev->dev));
+ dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
rc = -EBUSY;
goto out_err;
}
@@ -3962,8 +3957,8 @@ static int dasd_handle_autoquiesce(struct dasd_device *device,
if (dasd_eer_enabled(device))
dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
- pr_info("%s: The DASD has been put in the quiesce state\n",
- dev_name(&device->cdev->dev));
+ dev_info(&device->cdev->dev,
+ "The DASD has been put in the quiesce state\n");
dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
index d3f3a611f9..38c176cf62 100644
--- a/drivers/s390/cio/vfio_ccw_chp.c
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -115,7 +115,7 @@ static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
/* Notify the guest if more CRWs are on our queue */
if (!list_empty(&private->crw) && private->crw_trigger)
- eventfd_signal(private->crw_trigger, 1);
+ eventfd_signal(private->crw_trigger);
return ret;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 43601816ea..bfb35cfce1 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -112,7 +112,7 @@ void vfio_ccw_sch_io_todo(struct work_struct *work)
private->state = VFIO_CCW_STATE_IDLE;
if (private->io_trigger)
- eventfd_signal(private->io_trigger, 1);
+ eventfd_signal(private->io_trigger);
}
void vfio_ccw_crw_todo(struct work_struct *work)
@@ -122,7 +122,7 @@ void vfio_ccw_crw_todo(struct work_struct *work)
private = container_of(work, struct vfio_ccw_private, crw_work);
if (!list_empty(&private->crw) && private->crw_trigger)
- eventfd_signal(private->crw_trigger, 1);
+ eventfd_signal(private->crw_trigger);
}
/*
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index cba4971618..ea532a8a4a 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -421,7 +421,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
case VFIO_IRQ_SET_DATA_NONE:
{
if (*ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
}
case VFIO_IRQ_SET_DATA_BOOL:
@@ -432,7 +432,7 @@ static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
return -EFAULT;
if (trigger && *ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
}
case VFIO_IRQ_SET_DATA_EVENTFD:
@@ -612,7 +612,7 @@ static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(private->req_trigger, 1);
+ eventfd_signal(private->req_trigger);
} else if (count == 0) {
dev_notice(dev,
"No device request channel registered, blocked until released by user\n");
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index d6ea2fd4c2..76429585c1 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -1862,7 +1862,7 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(matrix_mdev->req_trigger, 1);
+ eventfd_signal(matrix_mdev->req_trigger);
} else if (count == 0) {
dev_notice(dev,
"No device request registered, blocked until released by user\n");
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index dcd6c7299f..973aa93753 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -579,6 +579,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
{
if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
+ zcrypt_card_get(zc);
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
@@ -598,6 +599,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
module_put(mod);
}
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index 7bd2ba1ad4..f30fe324e6 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -20,7 +20,6 @@
struct bfa_s;
typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Interrupt message handlers
@@ -437,4 +436,12 @@ struct bfa_cb_pending_q_s {
(__qe)->data = (__data); \
} while (0)
+#define bfa_pending_q_init_status(__qe, __cbfn, __cbarg, __data) do { \
+ bfa_q_qe_init(&((__qe)->hcb_qe.qe)); \
+ (__qe)->hcb_qe.cbfn_status = (__cbfn); \
+ (__qe)->hcb_qe.cbarg = (__cbarg); \
+ (__qe)->hcb_qe.pre_rmv = BFA_TRUE; \
+ (__qe)->data = (__data); \
+} while (0)
+
#endif /* __BFA_H__ */
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 6846ca8f73..3438d0b8ba 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -1907,15 +1907,13 @@ bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
- bfa_cb_cbfn_status_t cbfn;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
if (hcb_qe->pre_rmv) {
/* qe is invalid after return, dequeue before cbfn() */
list_del(qe);
- cbfn = (bfa_cb_cbfn_status_t)(hcb_qe->cbfn);
- cbfn(hcb_qe->cbarg, hcb_qe->fw_status);
+ hcb_qe->cbfn_status(hcb_qe->cbarg, hcb_qe->fw_status);
} else
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 933a1c3890..5e568d6d7b 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -361,14 +361,18 @@ struct bfa_reqq_wait_s {
void *cbarg;
};
-typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+typedef void (*bfa_cb_cbfn_status_t) (void *cbarg, bfa_status_t status);
/*
* Generic BFA callback element.
*/
struct bfa_cb_qe_s {
struct list_head qe;
- bfa_cb_cbfn_t cbfn;
+ union {
+ bfa_cb_cbfn_status_t cbfn_status;
+ bfa_cb_cbfn_t cbfn;
+ };
bfa_boolean_t once;
bfa_boolean_t pre_rmv; /* set for stack based qe(s) */
bfa_status_t fw_status; /* to access fw status in comp proc */
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index d4ceca2d43..54bd11e6d5 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2135,8 +2135,7 @@ bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -2159,7 +2158,7 @@ bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_cb_pending_q_s cb_qe;
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
@@ -2443,8 +2442,7 @@ bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, &iocmd->stats);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, &iocmd->stats);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
@@ -2474,8 +2472,7 @@ bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
init_completion(&fcomp.comp);
- bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
- &fcomp, NULL);
+ bfa_pending_q_init_status(&cb_qe, bfad_hcb_comp, &fcomp, NULL);
spin_lock_irqsave(&bfad->bfad_lock, flags);
WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
index c38017b4af..e50e93e7fe 100644
--- a/drivers/scsi/csiostor/csio_defs.h
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -73,7 +73,21 @@ csio_list_deleted(struct list_head *list)
#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
/* State machine */
-typedef void (*csio_sm_state_t)(void *, uint32_t);
+struct csio_lnode;
+
+/* State machine events */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+typedef void (*csio_sm_state_t)(struct csio_lnode *ln, enum csio_ln_ev evt);
struct csio_sm {
struct list_head sm_list;
@@ -83,7 +97,7 @@ struct csio_sm {
static inline void
csio_set_state(void *smp, void *state)
{
- ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+ ((struct csio_sm *)smp)->sm_state = state;
}
static inline void
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index d5ac938970..5b3ffefae4 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1095,7 +1095,7 @@ csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
- return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+ return (csio_get_state(ln) == csio_lns_ready);
}
/*****************************************************************************/
@@ -1366,15 +1366,15 @@ csio_free_fcfinfo(struct kref *kref)
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ if (csio_get_state(ln) == csio_lns_uninit) {
strcpy(str, "UNINIT");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ if (csio_get_state(ln) == csio_lns_ready) {
strcpy(str, "READY");
return;
}
- if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ if (csio_get_state(ln) == csio_lns_offline) {
strcpy(str, "OFFLINE");
return;
}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 372a67d122..607698a0f0 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -53,19 +53,6 @@
extern int csio_fcoe_rnodes;
extern int csio_fdmi_enable;
-/* State machine evets */
-enum csio_ln_ev {
- CSIO_LNE_NONE = (uint32_t)0,
- CSIO_LNE_LINKUP,
- CSIO_LNE_FAB_INIT_DONE,
- CSIO_LNE_LINK_DOWN,
- CSIO_LNE_DOWN_LINK,
- CSIO_LNE_LOGO,
- CSIO_LNE_CLOSE,
- CSIO_LNE_MAX_EVENT,
-};
-
-
struct csio_fcf_info {
struct list_head list;
uint8_t priority;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index bbb7b2d9ff..1abc62b07d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1962,9 +1962,17 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
struct hisi_sas_internal_abort_data *timeout = data;
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) {
- down(&hisi_hba->sem);
+ /*
+		 * If the timeout occurs in the device-gone scenario, skip
+		 * taking the semaphore to avoid a circular dependency like:
+ * hisi_sas_dev_gone() -> down() -> ... ->
+ * hisi_sas_internal_abort_timeout() -> down().
+ */
+ if (!timeout->rst_ha_timeout)
+ down(&hisi_hba->sem);
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
- up(&hisi_hba->sem);
+ if (!timeout->rst_ha_timeout)
+ up(&hisi_hba->sem);
}
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index d7f51b84f3..445f4a220d 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -353,12 +353,13 @@ static void scsi_host_dev_release(struct device *dev)
if (shost->shost_state == SHOST_CREATED) {
/*
- * Free the shost_dev device name here if scsi_host_alloc()
- * and scsi_host_put() have been called but neither
+ * Free the shost_dev device name and remove the proc host dir
+ * here if scsi_host_{alloc,put}() have been called but neither
* scsi_host_add() nor scsi_remove_host() has been called.
* This avoids that the memory allocated for the shost_dev
- * name is leaked.
+ * name as well as the proc dir structure are leaked.
*/
+ scsi_proc_hostdir_rm(shost->hostt);
kfree(dev_name(&shost->shost_dev));
}
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a2204674b6..5c261005b7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1621,6 +1621,16 @@ out_err:
/* ---------- Domain revalidation ---------- */
+static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,
+ u8 *sas_addr,
+ enum sas_device_type *type)
+{
+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);
+ *type = to_dev_type(&disc_resp->disc);
+ if (*type == SAS_PHY_UNUSED)
+ memset(sas_addr, 0, SAS_ADDR_SIZE);
+}
+
static int sas_get_phy_discover(struct domain_device *dev,
int phy_id, struct smp_disc_resp *disc_resp)
{
@@ -1674,13 +1684,8 @@ int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
return -ENOMEM;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
- if (res == 0) {
- memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
- SAS_ADDR_SIZE);
- *type = to_dev_type(&disc_resp->disc);
- if (*type == 0)
- memset(sas_addr, 0, SAS_ADDR_SIZE);
- }
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);
kfree(disc_resp);
return res;
}
@@ -1940,6 +1945,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_device_type type = SAS_PHY_UNUSED;
+ struct smp_disc_resp *disc_resp;
u8 sas_addr[SAS_ADDR_SIZE];
char msg[80] = "";
int res;
@@ -1951,33 +1957,41 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(dev->sas_addr), phy_id, msg);
memset(sas_addr, 0, SAS_ADDR_SIZE);
- res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ res = sas_get_phy_discover(dev, phy_id, disc_resp);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return res;
+ goto out_free_resp;
case SMP_RESP_FUNC_ACC:
break;
case -ECOMM:
break;
default:
- return res;
+ goto out_free_resp;
}
+ if (res == 0)
+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);
+
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
/*
- * Even though the PHY is empty, for convenience we discover
- * the PHY to update the PHY info, like negotiated linkrate.
+ * Even though the PHY is empty, for convenience we update
+ * the PHY info, like negotiated linkrate.
*/
- sas_ex_phy_discover(dev, phy_id);
- return res;
+ if (res == 0)
+ sas_set_ex_phy(dev, phy_id, disc_resp);
+ goto out_free_resp;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
@@ -1989,7 +2003,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
action = ", needs recovery";
pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
- return res;
+ goto out_free_resp;
}
/* we always have to delete the old device when we went here */
@@ -1998,7 +2012,10 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
- return sas_discover_new(dev, phy_id);
+ res = sas_discover_new(dev, phy_id);
+out_free_resp:
+ kfree(disc_resp);
+ return res;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 595dca92e8..2919579fa0 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3169,10 +3169,10 @@ lpfc_bsg_diag_loopback_run(struct bsg_job *job)
}
cmdwqe = &cmdiocbq->wqe;
- memset(cmdwqe, 0, sizeof(union lpfc_wqe));
+ memset(cmdwqe, 0, sizeof(*cmdwqe));
if (phba->sli_rev < LPFC_SLI_REV4) {
rspwqe = &rspiocbq->wqe;
- memset(rspwqe, 0, sizeof(union lpfc_wqe));
+ memset(rspwqe, 0, sizeof(*rspwqe));
}
INIT_LIST_HEAD(&head);
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 425328d9c2..d41fea53e4 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1586,7 +1586,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
wqe = &nvmewqe->wqe;
/* Initialize WQE */
- memset(wqe, 0, sizeof(union lpfc_wqe));
+ memset(wqe, 0, sizeof(*wqe));
ctx_buf->iocbq->cmd_dmabuf = NULL;
spin_lock(&phba->sli4_hba.sgl_list_lock);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index a75f670bf5..aa29e250cf 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -7387,7 +7387,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
return -EFAULT;
}
- issue_diag_reset:
+ return 0;
+
+issue_diag_reset:
rc = _base_diag_reset(ioc);
return rc;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44449c70a3..76eeba435f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2741,7 +2741,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
return;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(fcport->vha);
+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
+ 0, WAIT_TARGET);
return;
}
}
@@ -2763,7 +2769,11 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
vha = fcport->vha;
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+ /* Will wait for wind down of adapter */
+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,
+ "%s pci offline detected (id %06x)\n", __func__,
+ fcport->d_id.b24);
+ qla_pci_set_eeh_busy(vha);
qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,
0, WAIT_TARGET);
return;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index deb642607d..2f49baf131 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -82,7 +82,7 @@ typedef union {
#include "qla_nvme.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
-#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."
+#define QLA2XXX_MANUFACTURER "Marvell"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 09cb941367..7309310d2a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -44,7 +44,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *);
extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);
extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);
-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a314cfc5b2..8377624d76 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1193,8 +1193,12 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
return rval;
done_free_sp:
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ /*
+	 * Use qla24xx_async_gnl_sp_done to purge all pending gnl requests;
+	 * kref_put is called behind the scenes.
+ */
+ sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;
+ qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);
fcport->flags &= ~(FCF_ASYNC_SENT);
done:
fcport->flags &= ~(FCF_ASYNC_ACTIVE);
@@ -2665,6 +2669,40 @@ exit:
return rval;
}
+static void qla_enable_fce_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->fce) {
+ ha->flags.fce_enabled = 1;
+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
+ rval = qla2x00_enable_fce_trace(vha,
+ ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8033,
+ "Unable to reinitialize FCE (%d).\n", rval);
+ ha->flags.fce_enabled = 0;
+ }
+ }
+}
+
+static void qla_enable_eft_trace(scsi_qla_host_t *vha)
+{
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->eft) {
+ memset(ha->eft, 0, EFT_SIZE);
+ rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
+
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0x8034,
+ "Unable to reinitialize EFT (%d).\n", rval);
+ }
+ }
+}
/*
* qla2x00_initialize_adapter
* Initialize board.
@@ -3668,9 +3706,8 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
}
static void
-qla2x00_init_fce_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3699,27 +3736,17 @@ qla2x00_init_fce_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
- ha->fce_mb, &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00bf,
- "Unable to initialize FCE (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
- ha->flags.fce_enabled = 1;
ha->fce_dma = tc_dma;
ha->fce = tc;
+ ha->fce_bufs = FCE_NUM_BUFFERS;
}
static void
-qla2x00_init_eft_trace(scsi_qla_host_t *vha)
+qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)
{
- int rval;
dma_addr_t tc_dma;
void *tc;
struct qla_hw_data *ha = vha->hw;
@@ -3744,14 +3771,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
return;
}
- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x00c2,
- "Unable to initialize EFT (%d).\n", rval);
- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
- return;
- }
-
ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
@@ -3759,13 +3778,6 @@ qla2x00_init_eft_trace(scsi_qla_host_t *vha)
ha->eft = tc;
}
-static void
-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
-{
- qla2x00_init_fce_trace(vha);
- qla2x00_init_eft_trace(vha);
-}
-
void
qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
{
@@ -3820,10 +3832,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
if (ha->tgt.atio_ring)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
- qla2x00_init_fce_trace(vha);
+ qla2x00_alloc_fce_trace(vha);
if (ha->fce)
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
- qla2x00_init_eft_trace(vha);
+ qla2x00_alloc_eft_trace(vha);
if (ha->eft)
eft_size = EFT_SIZE;
}
@@ -4253,7 +4265,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
unsigned long flags;
- uint16_t fw_major_version;
int done_once = 0;
if (IS_P3P_TYPE(ha)) {
@@ -4320,7 +4331,6 @@ execute_fw_with_lr:
goto failed;
enable_82xx_npiv:
- fw_major_version = ha->fw_major_version;
if (IS_P3P_TYPE(ha))
qla82xx_check_md_needed(vha);
else
@@ -4349,12 +4359,11 @@ enable_82xx_npiv:
if (rval != QLA_SUCCESS)
goto failed;
- if (!fw_major_version && !(IS_P3P_TYPE(ha)))
- qla2x00_alloc_offload_mem(vha);
-
if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
+ qla_enable_fce_trace(vha);
+ qla_enable_eft_trace(vha);
} else {
goto failed;
}
@@ -7487,12 +7496,12 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
int
qla2x00_abort_isp(scsi_qla_host_t *vha)
{
- int rval;
uint8_t status = 0;
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *vp, *tvp;
struct req_que *req = ha->req_q_map[0];
unsigned long flags;
+ fc_port_t *fcport;
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
@@ -7561,6 +7570,15 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
"ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");
return status;
}
+
+ /* User may have updated the [fcp|nvme] preference in flash */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
if (!qla2x00_restart_isp(vha)) {
clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
@@ -7581,31 +7599,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (IS_QLA81XX(ha) || IS_QLA8031(ha))
qla2x00_get_fw_version(vha);
- if (ha->fce) {
- ha->flags.fce_enabled = 1;
- memset(ha->fce, 0,
- fce_calc_size(ha->fce_bufs));
- rval = qla2x00_enable_fce_trace(vha,
- ha->fce_dma, ha->fce_bufs, ha->fce_mb,
- &ha->fce_bufs);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8033,
- "Unable to reinitialize FCE "
- "(%d).\n", rval);
- ha->flags.fce_enabled = 0;
- }
- }
- if (ha->eft) {
- memset(ha->eft, 0, EFT_SIZE);
- rval = qla2x00_enable_eft_trace(vha,
- ha->eft_dma, EFT_NUM_BUFFERS);
- if (rval) {
- ql_log(ql_log_warn, vha, 0x8034,
- "Unable to reinitialize EFT "
- "(%d).\n", rval);
- }
- }
} else { /* failed the ISP abort */
vha->flags.online = 1;
if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
@@ -7655,6 +7649,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
atomic_inc(&vp->vref_count);
spin_unlock_irqrestore(&ha->vport_slock, flags);
+ /* User may have updated the [fcp|nvme] preference in flash */
+ list_for_each_entry(fcport, &vp->vp_fcports, list) {
+ if (NVME_PRIORITY(ha, fcport))
+ fcport->do_prli_nvme = 1;
+ else
+ fcport->do_prli_nvme = 0;
+ }
+
qla2x00_vp_abort_isp(vp);
spin_lock_irqsave(&ha->vport_slock, flags);
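
The qla_init.c hunks above split the FCE/EFT handling into a one-time allocation step (qla2x00_alloc_fce_trace()/qla2x00_alloc_eft_trace()) and a cheap re-enable step (qla_enable_fce_trace()/qla_enable_eft_trace()) that runs after every firmware setup and ISP abort, instead of re-enabling inline in the abort path. A minimal standalone sketch of that alloc-once/re-arm-often split; the structure and the trace interface here are illustrative, not the driver's actual API:

/* Sketch: allocate the trace buffer once, re-arm it after every reset.
 * All names are placeholders. */
#include <stdlib.h>
#include <string.h>

struct trace_buf {
	void *mem;
	size_t size;
	int enabled;
};

/* One-time allocation, done at setup time (compare qla2x00_alloc_fce_trace). */
static int trace_alloc(struct trace_buf *tb, size_t size)
{
	tb->mem = malloc(size);
	if (!tb->mem)
		return -1;
	tb->size = size;
	tb->enabled = 0;
	return 0;
}

/* Cheap re-arm path, safe after every reset (compare qla_enable_fce_trace). */
static void trace_enable(struct trace_buf *tb)
{
	if (!tb->mem)
		return;			/* never allocated: nothing to enable */
	memset(tb->mem, 0, tb->size);	/* drop stale records */
	tb->enabled = 1;		/* in the driver this is where the firmware call goes */
}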
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index df90169f82..0b41e8a066 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2587,6 +2587,33 @@ void
qla2x00_sp_release(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
+ struct scsi_qla_host *vha = sp->vha;
+
+ switch (sp->type) {
+ case SRB_CT_PTHRU_CMD:
+ /* GPSC & GFPNID use fcport->ct_desc.ct_sns for both req & rsp */
+ if (sp->u.iocb_cmd.u.ctarg.req &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.req != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.req,
+ sp->u.iocb_cmd.u.ctarg.req_dma);
+ sp->u.iocb_cmd.u.ctarg.req = NULL;
+ }
+ if (sp->u.iocb_cmd.u.ctarg.rsp &&
+ (!sp->fcport ||
+ sp->u.iocb_cmd.u.ctarg.rsp != sp->fcport->ct_desc.ct_sns)) {
+ dma_free_coherent(&vha->hw->pdev->dev,
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
+ sp->u.iocb_cmd.u.ctarg.rsp,
+ sp->u.iocb_cmd.u.ctarg.rsp_dma);
+ sp->u.iocb_cmd.u.ctarg.rsp = NULL;
+ }
+ break;
+ default:
+ break;
+ }
sp->free(sp);
}
@@ -2610,7 +2637,8 @@ static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- kfree(sp->fcport);
+ if (sp->fcport)
+ qla2x00_free_fcport(sp->fcport);
if (elsio->u.els_logo.els_logo_pyld)
dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
@@ -2692,7 +2720,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
*/
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp) {
- kfree(fcport);
+ qla2x00_free_fcport(fcport);
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
return -ENOMEM;
@@ -2723,6 +2751,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_logo.els_logo_pyld) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -2747,6 +2776,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (rval != QLA_SUCCESS) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -3012,7 +3042,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
int
qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
- fc_port_t *fcport, bool wait)
+ fc_port_t *fcport)
{
srb_t *sp;
struct srb_iocb *elsio = NULL;
@@ -3027,8 +3057,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return -ENOMEM;
+ goto done;
}
fcport->flags |= FCF_ASYNC_SENT;
@@ -3037,9 +3066,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_io, vha, 0x3073,
"%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
- if (wait)
- sp->flags = SRB_WAKEUP_ON_COMP;
-
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
@@ -3055,7 +3081,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
resp_ptr = elsio->u.els_plogi.els_resp_pyld =
@@ -3064,7 +3090,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_plogi.els_resp_pyld) {
rval = QLA_FUNCTION_FAILED;
- goto out;
+ goto done_free_sp;
}
ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
@@ -3080,7 +3106,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
struct fc_els_flogi *p = ptr;
-
p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
}
@@ -3089,10 +3114,11 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
(uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
sizeof(*elsio->u.els_plogi.els_plogi_pyld));
- init_completion(&elsio->u.els_plogi.comp);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
- rval = QLA_FUNCTION_FAILED;
+ fcport->flags |= FCF_LOGIN_NEEDED;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ goto done_free_sp;
} else {
ql_dbg(ql_dbg_disc, vha, 0x3074,
"%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
@@ -3100,21 +3126,15 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
fcport->d_id.b24, vha->d_id.b24);
}
- if (wait) {
- wait_for_completion(&elsio->u.els_plogi.comp);
-
- if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
- rval = QLA_FUNCTION_FAILED;
- } else {
- goto done;
- }
+ return rval;
-out:
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+done_free_sp:
qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
done:
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
return rval;
}
@@ -3918,7 +3938,7 @@ qla2x00_start_sp(srb_t *sp)
return -EAGAIN;
}
- pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
+ pkt = qla2x00_alloc_iocbs_ready(sp->qpair, sp);
if (!pkt) {
rval = -EAGAIN;
ql_log(ql_log_warn, vha, 0x700c,
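
The qla24xx_els_dcmd2_iocb() rework above funnels every failure through the done_free_sp/done labels so the PLOGI payload buffers and the SRB reference are released exactly once and the fcport flags are always cleared on exit. A generic sketch of that goto-based unwind pattern (plain C, illustrative names):

#include <stdlib.h>

static int start_request(void)
{
	void *req = NULL, *rsp = NULL;
	int rc = -1;

	req = malloc(64);
	if (!req)
		goto done;		/* nothing to undo yet */

	rsp = malloc(64);
	if (!rsp)
		goto done_free;		/* undo only what exists so far */

	/* ... submit req/rsp; on submit failure: goto done_free ... */
	return 0;			/* success: resources now owned by the request */

done_free:
	free(rsp);			/* free(NULL) is a harmless no-op */
	free(req);
done:
	/* clear the "request pending" state exactly once, as the driver
	 * does with FCF_ASYNC_SENT/FCF_ASYNC_ACTIVE */
	return rc;
}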
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 21ec32b4fb..0cd6f3e148 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -194,7 +194,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
ha->flags.eeh_busy) {
ql_log(ql_log_warn, vha, 0xd035,
- "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
+ "Purge mbox: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n",
ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
rval = QLA_ABORTED;
goto premature_exit;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 03348f605c..931bdaeaee 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4602,6 +4602,7 @@ fail_free_init_cb:
ha->init_cb_dma = 0;
fail_free_vp_map:
kfree(ha->vp_map);
+ ha->vp_map = NULL;
fail:
ql_log(ql_log_fatal, NULL, 0x0030,
"Memory allocation failure.\n");
@@ -5583,7 +5584,7 @@ qla2x00_do_work(struct scsi_qla_host *vha)
break;
case QLA_EVT_ELS_PLOGI:
qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI,
- e->u.fcport.fcport, false);
+ e->u.fcport.fcport);
break;
case QLA_EVT_SA_REPLACE:
rc = qla24xx_issue_sa_replace_iocb(vha, e);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2ef2dbac0d..d7551b1443 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1062,6 +1062,16 @@ void qlt_free_session_done(struct work_struct *work)
"%s: sess %p logout completed\n", __func__, sess);
}
+ /* check for any straggling io left behind */
+ if (!(sess->flags & FCF_FCP2_DEVICE) &&
+ qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
+ ql_log(ql_log_warn, vha, 0x3027,
+ "IO not return. Resetting.\n");
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_chip_reset(vha);
+ }
+
if (sess->logo_ack_needed) {
sess->logo_ack_needed = 0;
qla24xx_async_notify_ack(vha, sess,
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 44680f65ea..ca99be7341 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1619,6 +1619,40 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
+int scsi_resume_device(struct scsi_device *sdev)
+{
+ struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
+
+ device_lock(dev);
+
+ /*
+ * Bail out if the device or its queue is not running. Otherwise,
+ * the resume may block waiting for commands to be executed, with us
+ * holding the device lock. This can result in a potential deadlock
+ * in the power management core code when system resume is ongoing.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING ||
+ blk_queue_pm_only(sdev->request_queue)) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
+ if (dev->driver && try_module_get(dev->driver->owner)) {
+ struct scsi_driver *drv = to_scsi_driver(dev->driver);
+
+ if (drv->resume)
+ ret = drv->resume(dev);
+ module_put(dev->driver->owner);
+ }
+
+unlock:
+ device_unlock(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(scsi_resume_device);
+
int scsi_rescan_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
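
scsi_resume_device() above refuses to touch a device whose state is not SDEV_RUNNING or whose queue is PM-gated, returning -EWOULDBLOCK rather than risking the deadlock described in its comment. A hedged sketch of how a caller might treat that return value; the calling function is hypothetical and the prototype placement is an assumption:

/* Hypothetical caller of the new helper. -EWOULDBLOCK means "not runnable
 * yet", not a hard failure. */
#include <linux/errno.h>

struct scsi_device;
int scsi_resume_device(struct scsi_device *sdev);

static int example_resume_one(struct scsi_device *sdev)
{
	int ret = scsi_resume_device(sdev);

	if (ret == -EWOULDBLOCK)
		return 0;	/* queue still PM-gated; try again on the next resume */
	return ret;		/* 0 on success, or the driver's ->resume() error */
}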
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index a12ff43ac8..f76dbeade0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3948,7 +3948,21 @@ static int sd_suspend_runtime(struct device *dev)
return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev, bool runtime)
+static int sd_resume(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+
+ sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+
+ if (opal_unlock_from_suspend(sdkp->opal_dev)) {
+ sd_printk(KERN_NOTICE, sdkp, "OPAL unlock failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int sd_resume_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret;
@@ -3964,7 +3978,7 @@ static int sd_resume(struct device *dev, bool runtime)
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
if (!ret) {
- opal_unlock_from_suspend(sdkp->opal_dev);
+ sd_resume(dev);
sdkp->suspended = false;
}
@@ -3983,7 +3997,7 @@ static int sd_resume_system(struct device *dev)
return 0;
}
- return sd_resume(dev, false);
+ return sd_resume_common(dev, false);
}
static int sd_resume_runtime(struct device *dev)
@@ -4010,7 +4024,7 @@ static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev, true);
+ return sd_resume_common(dev, true);
}
static const struct dev_pm_ops sd_pm_ops = {
@@ -4033,6 +4047,7 @@ static struct scsi_driver sd_template = {
.pm = &sd_pm_ops,
},
.rescan = sd_rescan,
+ .resume = sd_resume,
.init_command = sd_init_command,
.uninit_command = sd_uninit_command,
.done = sd_done,
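
After the sd.c hunks above, sd_resume() performs only the OPAL unlock and is reachable two ways: through sd_resume_common(), which still starts the disk first, and directly as sd_template.resume, which the new scsi_resume_device() invokes. A compressed, illustrative view of that call shape (placeholder bodies, not the real driver code):

/* Simplified shape of the two sd resume paths after this hunk. */
static int unlock_opal(void)		/* stands in for sd_resume(): OPAL unlock only */
{
	return 0;
}

static int resume_common(void)		/* stands in for sd_resume_common() */
{
	int ret = 0;			/* start the disk first (sd_start_stop_device) */

	if (!ret)
		ret = unlock_opal();	/* then run the same unlock step */
	return ret;
}

/* Path 1: PM callbacks (sd_resume_system/sd_resume_runtime) -> resume_common()
 * Path 2: scsi_resume_device() -> sd_template.resume == sd_resume() directly */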
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index d43873bb5f..01cbd46219 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
if (ret < 0)
goto err;
} else if (report_present) {
- ret = ida_simple_get(&ctrl->laddr_ida,
- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ ret = ida_alloc_max(&ctrl->laddr_ida,
+ SLIM_LA_MANAGER - 1, GFP_KERNEL);
if (ret < 0)
goto err;
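
The slimbus change above moves from the deprecated ida_simple_get() to ida_alloc_max(). Note that ida_simple_get()'s upper bound is exclusive while ida_alloc_max()'s is inclusive, so keeping SLIM_LA_MANAGER - 1 widens the allocatable range by one compared with the old call. A small, self-contained sketch of the modern API (illustrative IDA, not the SLIMbus one):

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Allocate the lowest free ID in [0, 15]; returns -ENOMEM/-ENOSPC on failure. */
static int example_get_id(void)
{
	return ida_alloc_max(&example_ida, 15, GFP_KERNEL);
}

/* Every successful ida_alloc_max() must be paired with ida_free(). */
static void example_put_id(int id)
{
	ida_free(&example_ida, id);
}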
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea..b811446e0f 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
struct qbman_eq_desc *ed;
int i, ret;
- ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
if (!ed)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 739e4eee6b..7e9074519a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
/* linked-list of CSCN handlers. */
struct list_head cgr_cbs;
/* list lock */
- spinlock_t cgr_lock;
+ raw_spinlock_t cgr_lock;
struct work_struct congestion_work;
struct work_struct mr_work;
char irqname[MAX_IRQNAME];
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
/* if the given mask is NULL, assume all CGRs can be seen */
qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
- spin_lock_init(&portal->cgr_lock);
+ raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
union qm_mc_result *mcr;
struct qman_cgr *cgr;
- spin_lock(&p->cgr_lock);
+ /*
+ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+ */
+ raw_spin_lock_irq(&p->cgr_lock);
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
list_for_each_entry(cgr, &p->cgr_cbs, node)
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
preempt_enable();
cgr->chan = p->config->channel;
- spin_lock(&p->cgr_lock);
+ raw_spin_lock_irq(&p->cgr_lock);
if (opts) {
struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
cgr->cb(p, cgr, 1);
out:
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
put_affine_portal();
return ret;
}
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
return -EINVAL;
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
/*
* If there are no other CGR objects for this CGRID in the list,
@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
/* add back to the list */
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
if (!p)
return -EINVAL;
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
ret = qm_modify_cgr(cgr, 0, opts);
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
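
The qman.c hunks convert cgr_lock to a raw_spinlock_t and take it with interrupts disabled everywhere, presumably so the lock can be taken from contexts where sleeping locks (which ordinary spinlocks become on PREEMPT_RT) are not allowed; the new FIXME flags the long QUERYCONGESTION hold time as a follow-up. The locking pattern in isolation (illustrative names):

/* Illustrative raw-spinlock usage mirroring the pattern above: the lock stays
 * a busy-wait lock even on PREEMPT_RT, and IRQs are off for the critical section. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);
static int example_counter;

static void example_update_from_task(void)
{
	raw_spin_lock_irq(&example_lock);	/* process context, IRQs known enabled */
	example_counter++;
	raw_spin_unlock_irq(&example_lock);
}

static void example_update_any_context(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);	/* safe regardless of IRQ state */
	example_counter++;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}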
diff --git a/drivers/soc/microchip/Kconfig b/drivers/soc/microchip/Kconfig
index eb656b3315..f19e74d342 100644
--- a/drivers/soc/microchip/Kconfig
+++ b/drivers/soc/microchip/Kconfig
@@ -1,5 +1,5 @@
config POLARFIRE_SOC_SYS_CTRL
- tristate "POLARFIRE_SOC_SYS_CTRL"
+ tristate "Microchip PolarFire SoC (MPFS) system controller support"
depends on POLARFIRE_SOC_MAILBOX
help
This driver adds support for the PolarFire SoC (MPFS) system controller.
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 57d47dcf11..1b7e921d45 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -766,6 +766,8 @@ static int llcc_update_act_ctrl(u32 sid,
ret = regmap_read_poll_timeout(drv_data->bcast_regmap, status_reg,
slice_status, !(slice_status & status),
0, LLCC_STATUS_READ_DELAY);
+ if (ret)
+ return ret;
if (drv_data->version >= LLCC_VERSION_4_1_0_0)
ret = regmap_write(drv_data->bcast_regmap, act_clear_reg,
diff --git a/drivers/soc/qcom/pmic_glink_altmode.c b/drivers/soc/qcom/pmic_glink_altmode.c
index 7ee52cf257..ca58bfa418 100644
--- a/drivers/soc/qcom/pmic_glink_altmode.c
+++ b/drivers/soc/qcom/pmic_glink_altmode.c
@@ -469,12 +469,6 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
alt_port->bridge.ops = DRM_BRIDGE_OP_HPD;
alt_port->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
- ret = devm_drm_bridge_add(dev, &alt_port->bridge);
- if (ret) {
- fwnode_handle_put(fwnode);
- return ret;
- }
-
alt_port->dp_alt.svid = USB_TYPEC_DP_SID;
alt_port->dp_alt.mode = USB_TYPEC_DP_MODE;
alt_port->dp_alt.active = 1;
@@ -525,6 +519,16 @@ static int pmic_glink_altmode_probe(struct auxiliary_device *adev,
}
}
+ for (port = 0; port < ARRAY_SIZE(altmode->ports); port++) {
+ alt_port = &altmode->ports[port];
+ if (!alt_port->altmode)
+ continue;
+
+ ret = devm_drm_bridge_add(dev, &alt_port->bridge);
+ if (ret)
+ return ret;
+ }
+
altmode->client = devm_pmic_glink_register_client(dev,
altmode->owner_id,
pmic_glink_altmode_callback,
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 51e05bec5b..9865964cf6 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -114,7 +114,7 @@ static const char *const pmic_models[] = {
[50] = "PM8350B",
[51] = "PMR735A",
[52] = "PMR735B",
- [55] = "PM2250",
+ [55] = "PM4125",
[58] = "PM8450",
[65] = "PM8010",
[69] = "PM8550VS",
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 731775d34d..1a8d03958d 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -1927,7 +1927,7 @@ static void cqspi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int cqspi_suspend(struct device *dev)
+static int cqspi_runtime_suspend(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
@@ -1936,7 +1936,7 @@ static int cqspi_suspend(struct device *dev)
return 0;
}
-static int cqspi_resume(struct device *dev)
+static int cqspi_runtime_resume(struct device *dev)
{
struct cqspi_st *cqspi = dev_get_drvdata(dev);
@@ -1949,8 +1949,24 @@ static int cqspi_resume(struct device *dev)
return 0;
}
-static DEFINE_RUNTIME_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend,
- cqspi_resume, NULL);
+static int cqspi_suspend(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_suspend(cqspi->host);
+}
+
+static int cqspi_resume(struct device *dev)
+{
+ struct cqspi_st *cqspi = dev_get_drvdata(dev);
+
+ return spi_controller_resume(cqspi->host);
+}
+
+static const struct dev_pm_ops cqspi_dev_pm_ops = {
+ RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
+};
static const struct cqspi_driver_platdata cdns_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 11991eb126..079035db7d 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -830,11 +830,11 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
- controller = spi_alloc_target(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_target(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
else
- controller = spi_alloc_host(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_host(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index e2d3e3ec13..0e479c5406 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -668,8 +668,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word
+ ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index 07d20ca116..4337ca51d7 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -85,6 +85,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8d5d170d49..109dac2e69 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -787,17 +787,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(host);
- cnt = mdata->xfer_len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ if (trans->tx_buf) {
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = mdata->xfer_len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
- remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(host);
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index 87d36948c6..c6bd86a533 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -100,15 +100,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
u32 mode)
{
- struct gb_channel *channel = NULL;
+ struct gb_channel *channel;
int i;
for (i = 0; i < light->channels_count; i++) {
channel = &light->channels[i];
- if (channel && channel->mode == mode)
- break;
+ if (channel->mode == mode)
+ return channel;
}
- return channel;
+ return NULL;
}
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
index 1fd39a2fca..95cca281e8 100644
--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
+++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
@@ -803,6 +803,7 @@ static int ipu_csc_scaler_release(struct file *file)
dev_dbg(priv->dev, "Releasing instance %p\n", ctx);
+ v4l2_ctrl_handler_free(&ctx->ctrl_hdlr);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index e530767e80..55cc44a401 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -1069,6 +1069,11 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Initialize subdev media entity */
+ imgu_sd->subdev.entity.ops = &imgu_media_ops;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
imgu_sd->subdev_pads);
if (r) {
@@ -1076,11 +1081,6 @@ static int imgu_v4l2_subdev_register(struct imgu_device *imgu,
"failed initialize subdev media entity (%d)\n", r);
return r;
}
- imgu_sd->subdev.entity.ops = &imgu_media_ops;
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
- MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
- }
/* Initialize subdev */
v4l2_subdev_init(&imgu_sd->subdev, &imgu_subdev_ops);
@@ -1177,15 +1177,15 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
}
/* Initialize media entities */
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
if (r) {
dev_err(dev, "failed initialize media entity (%d)\n", r);
mutex_destroy(&node->lock);
return r;
}
- node->vdev_pad.flags = node->output ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
- vdev->entity.ops = NULL;
/* Initialize vbq */
vbq->type = node->vdev_fmt.type;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index fc92972324..16c822637d 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -427,11 +427,11 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
unsigned int ctb_addr_x, ctb_addr_y;
struct cedrus_buffer *cedrus_buf;
dma_addr_t src_buf_addr;
- dma_addr_t src_buf_end_addr;
u32 chroma_log2_weight_denom;
u32 num_entry_point_offsets;
u32 output_pic_list_index;
u32 pic_order_cnt[2];
+ size_t slice_bytes;
u8 padding;
int count;
u32 reg;
@@ -443,6 +443,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
pred_weight_table = &slice_params->pred_weight_table;
num_entry_point_offsets = slice_params->num_entry_point_offsets;
cedrus_buf = vb2_to_cedrus_buffer(&run->dst->vb2_buf);
+ slice_bytes = vb2_get_plane_payload(&run->src->vb2_buf, 0);
/*
* If entry points offsets are present, we should get them
@@ -490,7 +491,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_H265_BITS_OFFSET, 0);
- reg = slice_params->bit_size;
+ reg = slice_bytes * 8;
cedrus_write(dev, VE_DEC_H265_BITS_LEN, reg);
/* Source beginning and end addresses. */
@@ -504,10 +505,7 @@ static int cedrus_h265_setup(struct cedrus_ctx *ctx, struct cedrus_run *run)
cedrus_write(dev, VE_DEC_H265_BITS_ADDR, reg);
- src_buf_end_addr = src_buf_addr +
- DIV_ROUND_UP(slice_params->bit_size, 8);
-
- reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_end_addr);
+ reg = VE_DEC_H265_BITS_END_ADDR_BASE(src_buf_addr + slice_bytes);
cedrus_write(dev, VE_DEC_H265_BITS_END_ADDR, reg);
/* Coding tree block address */
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 258aa0e37f..4c3684dd90 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -937,8 +937,9 @@ static int create_component(struct vchiq_mmal_instance *instance,
/* build component create message */
m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
m.u.component_create.client_component = component->client_component;
- strncpy(m.u.component_create.name, name,
- sizeof(m.u.component_create.name));
+ strscpy_pad(m.u.component_create.name, name,
+ sizeof(m.u.component_create.name));
+ m.u.component_create.pid = 0;
ret = send_synchronous_mmal_msg(instance, &m,
sizeof(m.u.component_create),
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 4b10921276..1892e49a8e 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -90,13 +90,14 @@ static int optee_register_device(const uuid_t *device_uuid, u32 func)
if (rc) {
pr_err("device registration failed, err: %d\n", rc);
put_device(&optee_device->dev);
+ return rc;
}
if (func == PTA_CMD_GET_DEVICES_SUPP)
device_create_file(&optee_device->dev,
&dev_attr_need_supplicant);
- return rc;
+ return 0;
}
static int __optee_enumerate_devices(u32 func)
diff --git a/drivers/thermal/devfreq_cooling.c b/drivers/thermal/devfreq_cooling.c
index 262e62ab6c..90b828bcca 100644
--- a/drivers/thermal/devfreq_cooling.c
+++ b/drivers/thermal/devfreq_cooling.c
@@ -201,7 +201,7 @@ static int devfreq_cooling_get_requested_power(struct thermal_cooling_device *cd
res = dfc->power_ops->get_real_power(df, power, freq, voltage);
if (!res) {
- state = dfc->capped_state;
+ state = dfc->max_state - dfc->capped_state;
/* Convert EM power into milli-Watts first */
dfc->res_util = dfc->em_pd->table[state].power;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 649f67fdf3..d75fae7b7e 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -176,14 +176,14 @@ static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
int *temp)
{
int cpu;
- int curr_temp;
+ int curr_temp, ret;
*temp = 0;
for_each_online_cpu(cpu) {
- curr_temp = intel_tcc_get_temp(cpu, false);
- if (curr_temp < 0)
- return curr_temp;
+ ret = intel_tcc_get_temp(cpu, &curr_temp, false);
+ if (ret < 0)
+ return ret;
if (!*temp || curr_temp > *temp)
*temp = curr_temp;
}
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
index 2f00fc3bf2..e964a93757 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c
@@ -27,9 +27,9 @@ static int rapl_mmio_cpu_online(unsigned int cpu)
if (topology_physical_package_id(cpu))
return 0;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
if (!rp) {
- rp = rapl_add_package(cpu, &rapl_mmio_priv, true);
+ rp = rapl_add_package_cpuslocked(cpu, &rapl_mmio_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
@@ -42,14 +42,14 @@ static int rapl_mmio_cpu_down_prep(unsigned int cpu)
struct rapl_package *rp;
int lead_cpu;
- rp = rapl_find_package_domain(cpu, &rapl_mmio_priv, true);
+ rp = rapl_find_package_domain_cpuslocked(cpu, &rapl_mmio_priv, true);
if (!rp)
return 0;
cpumask_clear_cpu(cpu, &rp->cpumask);
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids)
- rapl_remove_package(rp);
+ rapl_remove_package_cpuslocked(rp);
else if (rp->lead_cpu == cpu)
rp->lead_cpu = lead_cpu;
return 0;
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
index 2e5c741c41..5e8b7f34b3 100644
--- a/drivers/thermal/intel/intel_tcc.c
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -103,18 +103,19 @@ EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
/**
* intel_tcc_get_temp() - returns the current temperature
 * @cpu: cpu that the MSR should be run on, negative value means any cpu.
+ * @temp: pointer to the memory for saving cpu temperature.
* @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
*
* Get the current temperature returned by the CPU core/package level
* thermal sensor, in degrees C.
*
- * Return: Temperature in degrees C on success, negative error code otherwise.
+ * Return: 0 on success, negative error code otherwise.
*/
-int intel_tcc_get_temp(int cpu, bool pkg)
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg)
{
u32 low, high;
u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
- int tjmax, temp, err;
+ int tjmax, err;
tjmax = intel_tcc_get_tjmax(cpu);
if (tjmax < 0)
@@ -131,9 +132,8 @@ int intel_tcc_get_temp(int cpu, bool pkg)
if (!(low & BIT(31)))
return -ENODATA;
- temp = tjmax - ((low >> 16) & 0x7f);
+ *temp = tjmax - ((low >> 16) & 0x7f);
- /* Do not allow negative CPU temperature */
- return temp >= 0 ? temp : -ENODATA;
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC);
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 11a7f8108b..61c3d450ee 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -108,11 +108,11 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
struct zone_device *zonedev = thermal_zone_device_priv(tzd);
- int val;
+ int val, ret;
- val = intel_tcc_get_temp(zonedev->cpu, true);
- if (val < 0)
- return val;
+ ret = intel_tcc_get_temp(zonedev->cpu, &val, true);
+ if (ret < 0)
+ return ret;
*temp = val * 1000;
pr_debug("sys_get_curr_temp %d\n", *temp);
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index 8b0edb2048..9ee2e72834 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = {
.adcpnp = mt7986_adcpnp,
.sensor_mux_values = mt7986_mux_values,
.version = MTK_THERMAL_V3,
+ .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1,
+ .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3),
+ .apmixed_buffer_ctl_set = BIT(0),
};
static bool mtk_thermal_temp_is_valid(int temp)
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 98d9c80bd4..fd4bd650c7 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -719,8 +719,10 @@ static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td
lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
lvts_td->calib_len + len, GFP_KERNEL);
- if (!lvts_td->calib)
+ if (!lvts_td->calib) {
+ kfree(efuse);
return -ENOMEM;
+ }
memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index ccc2eea7f9..404f01cca4 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -57,6 +57,9 @@
#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
* Control Register
*/
+#define NUM_TTRCR_V1 4
+#define NUM_TTRCR_MAX 16
+
#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
* Register n
*/
@@ -71,6 +74,7 @@ struct qoriq_sensor {
struct qoriq_tmu_data {
int ver;
+ u32 ttrcr[NUM_TTRCR_MAX];
struct regmap *regmap;
struct clk *clk;
struct qoriq_sensor sensor[SITES_MAX];
@@ -182,17 +186,17 @@ static int qoriq_tmu_calibration(struct device *dev,
struct qoriq_tmu_data *data)
{
int i, val, len;
- u32 range[4];
const u32 *calibration;
struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
- if (len < 0 || len > 4) {
+ if (len < 0 || (data->ver == TMU_VER1 && len > NUM_TTRCR_V1) ||
+ (data->ver > TMU_VER1 && len > NUM_TTRCR_MAX)) {
dev_err(dev, "invalid range data.\n");
return len;
}
- val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
+ val = of_property_read_u32_array(np, "fsl,tmu-range", data->ttrcr, len);
if (val != 0) {
dev_err(dev, "failed to read range data.\n");
return val;
@@ -200,7 +204,7 @@ static int qoriq_tmu_calibration(struct device *dev,
/* Init temperature range registers */
for (i = 0; i < len; i++)
- regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), data->ttrcr[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 44e9b09de4..a3c68c808e 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1265,6 +1265,9 @@ int tb_port_update_credits(struct tb_port *port)
ret = tb_port_do_update_credits(port);
if (ret)
return ret;
+
+ if (!port->dual_link_port)
+ return 0;
return tb_port_do_update_credits(port->dual_link_port);
}
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 23366f868a..dab94835b6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -753,6 +753,7 @@ static void exar_pci_remove(struct pci_dev *pcidev)
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
+ /* Ensure that every init quirk is properly torn down */
if (priv->board->exit)
priv->board->exit(pcidev);
}
@@ -767,10 +768,6 @@ static int __maybe_unused exar_suspend(struct device *dev)
if (priv->line[i] >= 0)
serial8250_suspend_port(priv->line[i]);
- /* Ensure that every init quirk is properly torn down */
- if (priv->board->exit)
- priv->board->exit(pcidev);
-
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8ca061d3bb..1d65055dde 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1329,9 +1329,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
inb_p(ICP);
}
- if (uart_console(port))
- console_lock();
-
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
@@ -1371,9 +1368,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
- if (uart_console(port))
- console_unlock();
-
port->irq = (irq > 0) ? irq : 0;
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 6d0cfb2e86..71d0cbd748 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -2345,9 +2345,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
lpuart32_write(&sport->port, bd, UARTBAUD);
lpuart32_serial_setbrg(sport, baud);
- lpuart32_write(&sport->port, modem, UARTMODIR);
- lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* disable CTS before enabling UARTCTRL_TE to avoid pending idle preamble */
+ lpuart32_write(&sport->port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
/* restore control register */
+ lpuart32_write(&sport->port, ctrl, UARTCTRL);
+ /* re-enable the CTS if needed */
+ lpuart32_write(&sport->port, modem, UARTMODIR);
if ((ctrl & (UARTCTRL_PE | UARTCTRL_M)) == UARTCTRL_PE)
sport->is_cs7 = true;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 81557a58f8..5545bf4a79 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -462,8 +462,7 @@ static void imx_uart_stop_tx(struct uart_port *port)
}
}
-/* called with port.lock taken and irqs off */
-static void imx_uart_stop_rx(struct uart_port *port)
+static void imx_uart_stop_rx_with_loopback_ctrl(struct uart_port *port, bool loopback)
{
struct imx_port *sport = (struct imx_port *)port;
u32 ucr1, ucr2, ucr4, uts;
@@ -485,7 +484,7 @@ static void imx_uart_stop_rx(struct uart_port *port)
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
- sport->have_rtscts && !sport->have_rtsgpio) {
+ sport->have_rtscts && !sport->have_rtsgpio && loopback) {
uts = imx_uart_readl(sport, imx_uart_uts_reg(sport));
uts |= UTS_LOOP;
imx_uart_writel(sport, uts, imx_uart_uts_reg(sport));
@@ -498,6 +497,16 @@ static void imx_uart_stop_rx(struct uart_port *port)
}
/* called with port.lock taken and irqs off */
+static void imx_uart_stop_rx(struct uart_port *port)
+{
+ /*
+ * Stop RX and enable loopback in order to make sure the RS485 bus
+ * is not blocked. See the comment in imx_uart_probe().
+ */
+ imx_uart_stop_rx_with_loopback_ctrl(port, true);
+}
+
+/* called with port.lock taken and irqs off */
static void imx_uart_enable_ms(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -682,9 +691,14 @@ static void imx_uart_start_tx(struct uart_port *port)
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
+ /*
+ * Since we are about to transmit, we cannot stop RX with
+ * loopback enabled because that would simply loop our
+ * transmitted data back to RX.
+ */
if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
!port->rs485_rx_during_tx_gpio)
- imx_uart_stop_rx(port);
+ imx_uart_stop_rx_with_loopback_ctrl(port, false);
sport->tx_state = WAIT_AFTER_RTS;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 07f7b48bad..1a4bb55652 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1461,7 +1461,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (!ret)
return 0;
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ dev_err(dev, "Unable to request IRQ %i\n", irq);
out_uart:
for (i = 0; i < devtype->nr; i++) {
@@ -1635,13 +1635,16 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
static int max310x_i2c_probe(struct i2c_client *client)
{
- const struct max310x_devtype *devtype =
- device_get_match_data(&client->dev);
+ const struct max310x_devtype *devtype;
struct i2c_client *port_client;
struct regmap *regmaps[4];
unsigned int i;
u8 port_addr;
+ devtype = device_get_match_data(&client->dev);
+ if (!devtype)
+ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
+
if (client->addr < devtype->slave_addr.min ||
client->addr > devtype->slave_addr.max)
return dev_err_probe(&client->dev, -EINVAL,
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 7e78f97e8f..5499096440 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -851,19 +851,21 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
}
static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
- unsigned int remaining)
+ unsigned int chunk)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
- unsigned int tx_bytes;
+ unsigned int tx_bytes, c, remaining = chunk;
u8 buf[BYTES_PER_FIFO_WORD];
while (remaining) {
memset(buf, 0, sizeof(buf));
tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
- memcpy(buf, &xmit->buf[xmit->tail], tx_bytes);
- uart_xmit_advance(uport, tx_bytes);
+ for (c = 0; c < tx_bytes ; c++) {
+ buf[c] = xmit->buf[xmit->tail];
+ uart_xmit_advance(uport, 1);
+ }
iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
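
The qcom_geni_serial change above copies the TX chunk one byte at a time, advancing the circular-buffer tail after each byte, so a FIFO word that straddles the wrap point of xmit->buf picks up the right bytes; a single linear memcpy() from xmit->tail cannot do that. A standalone illustration of the wrap-around behavior (plain C, runnable):

#include <stdio.h>

#define BUF_SZ 8	/* power of two, like the UART circular buffer */

int main(void)
{
	/* logically "ABCDEFGH", stored wrapped: the head wrapped past the end */
	char ring[] = "EFGHABCD";
	unsigned int tail = 4;			/* next byte to transmit is 'A' */
	char out[BUF_SZ + 1] = { 0 };

	for (unsigned int i = 0; i < BUF_SZ; i++) {
		out[i] = ring[tail];			/* one byte at a time ... */
		tail = (tail + 1) & (BUF_SZ - 1);	/* ... advancing and wrapping the tail */
	}

	printf("%s\n", out);	/* prints "ABCDEFGH", in transmit order */
	return 0;
}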
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 3bd552841c..06d140c9f5 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -987,11 +987,10 @@ static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
if ((ufstat & info->tx_fifomask) != 0 ||
(ufstat & info->tx_fifofull))
return 0;
-
- return 1;
+ return TIOCSER_TEMT;
}
- return s3c24xx_serial_txempty_nofifo(port);
+ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
}
/* no modem control lines */
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 39fa9ae509..e383d9a10a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2603,7 +2603,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
port->type = PORT_UNKNOWN;
flags |= UART_CONFIG_TYPE;
}
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
port->ops->config_port(port, flags);
+ if (uart_console(port))
+ console_unlock();
}
if (port->type != PORT_UNKNOWN) {
@@ -2611,6 +2616,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_report_port(drv, port);
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
+
/* Power up port for set_mctrl() */
uart_change_pm(state, UART_PM_STATE_ON);
@@ -2627,6 +2636,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_rs485_config(port);
+ if (uart_console(port))
+ console_unlock();
+
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 88975a4df3..72b6f4f326 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -46,8 +46,31 @@ out:
return 0;
}
+static int serial_port_runtime_suspend(struct device *dev)
+{
+ struct serial_port_device *port_dev = to_serial_base_port_device(dev);
+ struct uart_port *port = port_dev->port;
+ unsigned long flags;
+ bool busy;
+
+ if (port->flags & UPF_DEAD)
+ return 0;
+
+ uart_port_lock_irqsave(port, &flags);
+ busy = __serial_port_busy(port);
+ if (busy)
+ port->ops->start_tx(port);
+ uart_port_unlock_irqrestore(port, flags);
+
+ if (busy)
+ pm_runtime_mark_last_busy(dev);
+
+ return busy ? -EBUSY : 0;
+}
+
static DEFINE_RUNTIME_DEV_PM_OPS(serial_port_pm,
- NULL, serial_port_runtime_resume, NULL);
+ serial_port_runtime_suspend,
+ serial_port_runtime_resume, NULL);
static int serial_port_probe(struct device *dev)
{
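
The new serial_port_runtime_suspend() above vetoes runtime suspend with -EBUSY while the port still has TX work pending, kicks start_tx() so that work is not stranded, and pushes the next suspend attempt out with pm_runtime_mark_last_busy(). A generic sketch of a runtime-PM suspend callback that refuses to suspend a busy device (illustrative driver data):

#include <linux/pm_runtime.h>

struct example_dev {
	bool work_pending;	/* stand-in for "TX buffer not empty" */
};

static int example_runtime_suspend(struct device *dev)
{
	struct example_dev *ed = dev_get_drvdata(dev);

	if (ed->work_pending) {
		pm_runtime_mark_last_busy(dev);	/* delay the next suspend attempt */
		return -EBUSY;			/* PM core keeps the device active */
	}
	return 0;
}

static DEFINE_RUNTIME_DEV_PM_OPS(example_pm_ops,
				 example_runtime_suspend, NULL, NULL);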
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 156efda7c8..52e6ca1ba2 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -381,7 +381,7 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
- memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
+ memmove(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
memset32(&ln[cols - nr], ' ', nr);
}
}
@@ -2469,7 +2469,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
return;
case EScsiignore:
- if (c >= 20 && c <= 0x3f)
+ if (c >= 0x20 && c <= 0x3f)
return;
vc->vc_state = ESnormal;
return;
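
The vt.c fix above replaces memcpy() with memmove() in vc_uniscr_delete() because the source and destination regions overlap when the tail of the line is shifted left; memcpy() on overlapping buffers is undefined behavior. A runnable userspace illustration of the same copy:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[] = "abcdefgh";		/* one screen line of cells */
	unsigned int x = 1, nr = 2, cols = 8;	/* delete 2 cells at column 1 */

	/* overlapping copy: the tail of the line slides left over itself */
	memmove(&line[x], &line[x + nr], cols - x - nr);
	memset(&line[cols - nr], ' ', nr);	/* blank the freed cells on the right */

	printf("\"%s\"\n", line);		/* prints "adefgh  " */
	return 0;
}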
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 0f4b3f16d3..0d38864553 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1397,8 +1397,10 @@ static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) &&
- !strcmp(clki->name, "core_clk_unipro")) {
- if (is_scale_up)
+ !strcmp(clki->name, "core_clk_unipro")) {
+ if (!clki->max_freq)
+ cycles_in_1us = 150; /* default for backwards compatibility */
+ else if (is_scale_up)
cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
else
cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c553decb54..c8262e2f29 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -485,6 +485,7 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
+ int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -499,7 +500,10 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
- set_bit(WDM_RESPONDING, &desc->flags);
+ used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+ if (used)
+ goto out;
+
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
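
The cdc-wdm fix above uses test_and_set_bit() so that only the path that actually flips WDM_RESPONDING submits the response URB; a concurrent caller that finds the bit already set backs off instead of double-submitting. The same idiom in isolation (illustrative flag word and submit hook):

/* Illustrative "only one submitter" guard built on test_and_set_bit():
 * the returned old value tells us whether somebody else already owns the work. */
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_BUSY	0

static unsigned long example_flags;

static int example_submit_once(int (*submit)(void))
{
	if (test_and_set_bit(EXAMPLE_BUSY, &example_flags))
		return 0;		/* already in flight; nothing to do */

	if (submit()) {			/* failed: let the next caller retry */
		clear_bit(EXAMPLE_BUSY, &example_flags);
		return -EIO;
	}
	return 0;			/* the completion handler clears EXAMPLE_BUSY */
}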
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4854d883e6..80145bd4b5 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -123,7 +123,6 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_STEP 25
#define HUB_DEBOUNCE_STABLE 100
-static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
@@ -685,14 +684,14 @@ static void kick_hub_wq(struct usb_hub *hub)
*/
intf = to_usb_interface(hub->intfdev);
usb_autopm_get_interface_no_resume(intf);
- kref_get(&hub->kref);
+ hub_get(hub);
if (queue_work(hub_wq, &hub->events))
return;
/* the work has already been scheduled */
usb_autopm_put_interface_async(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
void usb_kick_hub_wq(struct usb_device *hdev)
@@ -1060,7 +1059,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
goto init2;
goto init3;
}
- kref_get(&hub->kref);
+ hub_get(hub);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
@@ -1308,7 +1307,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
device_unlock(&hdev->dev);
}
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
/* Implement the continuations for the delays above */
@@ -1724,6 +1723,16 @@ static void hub_release(struct kref *kref)
kfree(hub);
}
+void hub_get(struct usb_hub *hub)
+{
+ kref_get(&hub->kref);
+}
+
+void hub_put(struct usb_hub *hub)
+{
+ kref_put(&hub->kref, hub_release);
+}
+
static unsigned highspeed_hubs;
static void hub_disconnect(struct usb_interface *intf)
@@ -1772,7 +1781,7 @@ static void hub_disconnect(struct usb_interface *intf)
onboard_hub_destroy_pdevs(&hub->onboard_hub_devs);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
}
static bool hub_descriptor_is_sane(struct usb_host_interface *desc)
@@ -5894,7 +5903,7 @@ out_hdev_lock:
/* Balance the stuff in kick_hub_wq() and allow autosuspend */
usb_autopm_put_interface(intf);
- kref_put(&hub->kref, hub_release);
+ hub_put(hub);
kcov_remote_stop();
}
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 43ce21c96a..183b69dc29 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -129,6 +129,8 @@ extern void usb_hub_remove_port_device(struct usb_hub *hub,
extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set);
extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);
+extern void hub_get(struct usb_hub *hub);
+extern void hub_put(struct usb_hub *hub);
extern int hub_port_debounce(struct usb_hub *hub, int port1,
bool must_be_connected);
extern int usb_clear_port_feature(struct usb_device *hdev,
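
hub_get()/hub_put() above wrap the hub's existing kref so code outside hub.c (port.c below, for example) can pin the hub without seeing hub_release(). The general shape of such a wrapper, reduced to its essentials (illustrative object, not the USB core code):

#include <linux/container_of.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	struct kref kref;
	/* ... payload ... */
};

static void example_release(struct kref *kref)
{
	struct example_obj *obj = container_of(kref, struct example_obj, kref);

	kfree(obj);
}

/* External users only ever see the typed helpers, never the release function. */
static void example_get(struct example_obj *obj)
{
	kref_get(&obj->kref);
}

static void example_put(struct example_obj *obj)
{
	kref_put(&obj->kref, example_release);
}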
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index c628c1abc9..a5776531ba 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -55,11 +55,22 @@ static ssize_t disable_show(struct device *dev,
u16 portstatus, unused;
bool disabled;
int rc;
+ struct kernfs_node *kn;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -69,9 +80,13 @@ static ssize_t disable_show(struct device *dev,
usb_hub_port_status(hub, port1, &portstatus, &unused);
disabled = !usb_port_is_power_on(hub, portstatus);
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
if (rc)
return rc;
@@ -89,15 +104,26 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
int port1 = port_dev->portnum;
bool disabled;
int rc;
+ struct kernfs_node *kn;
rc = kstrtobool(buf, &disabled);
if (rc)
return rc;
+ hub_get(hub);
rc = usb_autopm_get_interface(intf);
if (rc < 0)
- return rc;
+ goto out_hub_get;
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister hdev.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (!kn) {
+ rc = -ENODEV;
+ goto out_autopm;
+ }
usb_lock_device(hdev);
if (hub->disconnected) {
rc = -ENODEV;
@@ -118,9 +144,13 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr,
if (!rc)
rc = count;
-out_hdev_lock:
+ out_hdev_lock:
usb_unlock_device(hdev);
+ sysfs_unbreak_active_protection(kn);
+ out_autopm:
usb_autopm_put_interface(intf);
+ out_hub_get:
+ hub_put(hub);
return rc;
}
@@ -573,7 +603,7 @@ static int match_location(struct usb_device *peer_hdev, void *p)
struct usb_hub *peer_hub = usb_hub_to_struct_hub(peer_hdev);
struct usb_device *hdev = to_usb_device(port_dev->dev.parent->parent);
- if (!peer_hub)
+ if (!peer_hub || port_dev->connect_type == USB_PORT_NOT_USED)
return 0;
hcd = bus_to_hcd(hdev->bus);
@@ -584,7 +614,8 @@ static int match_location(struct usb_device *peer_hdev, void *p)
for (port1 = 1; port1 <= peer_hdev->maxchild; port1++) {
peer = peer_hub->ports[port1 - 1];
- if (peer && peer->location == port_dev->location) {
+ if (peer && peer->connect_type != USB_PORT_NOT_USED &&
+ peer->location == port_dev->location) {
link_peers_report(port_dev, peer);
return 1; /* done */
}
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5d21718afb..f91f543ec6 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -1168,14 +1168,24 @@ static ssize_t interface_authorized_store(struct device *dev,
{
struct usb_interface *intf = to_usb_interface(dev);
bool val;
+ struct kernfs_node *kn;
if (kstrtobool(buf, &val) != 0)
return -EINVAL;
- if (val)
+ if (val) {
usb_authorize_interface(intf);
- else
- usb_deauthorize_interface(intf);
+ } else {
+ /*
+ * Prevent deadlock if another process is concurrently
+ * trying to unregister intf.
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ if (kn) {
+ usb_deauthorize_interface(intf);
+ sysfs_unbreak_active_protection(kn);
+ }
+ }
return count;
}
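
Both the port disable attribute and interface_authorized_store() above follow the same recipe: break the attribute's sysfs active protection before doing work that may unregister the device the attribute belongs to, then restore it, so the store() callback and the unregister path cannot deadlock on each other. A skeleton of that pattern (illustrative attribute callback):

/* Skeleton of the break/unbreak pattern used above: active protection is
 * released around code that may tear down the device owning this attribute. */
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct kernfs_node *kn;

	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	if (!kn)
		return -ENODEV;		/* attribute is already being removed */

	/* ... work that may unregister 'dev' (and with it this attribute) ... */

	sysfs_unbreak_active_protection(kn);
	return count;
}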
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index c92a1da46a..a141f83aba 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -729,8 +729,14 @@ struct dwc2_dregs_backup {
* struct dwc2_hregs_backup - Holds host registers state before
* entering partial power down
* @hcfg: Backup of HCFG register
+ * @hflbaddr: Backup of HFLBADDR register
* @haintmsk: Backup of HAINTMSK register
+ * @hcchar: Backup of HCCHAR register
+ * @hcsplt: Backup of HCSPLT register
* @hcintmsk: Backup of HCINTMSK register
+ * @hctsiz: Backup of HCTSIZ register
+ * @hdma: Backup of HCDMA register
+ * @hcdmab: Backup of HCDMAB register
* @hprt0: Backup of HPTR0 register
* @hfir: Backup of HFIR register
* @hptxfsiz: Backup of HPTXFSIZ register
@@ -738,8 +744,14 @@ struct dwc2_dregs_backup {
*/
struct dwc2_hregs_backup {
u32 hcfg;
+ u32 hflbaddr;
u32 haintmsk;
+ u32 hcchar[MAX_EPS_CHANNELS];
+ u32 hcsplt[MAX_EPS_CHANNELS];
u32 hcintmsk[MAX_EPS_CHANNELS];
+ u32 hctsiz[MAX_EPS_CHANNELS];
+ u32 hcidma[MAX_EPS_CHANNELS];
+ u32 hcidmab[MAX_EPS_CHANNELS];
u32 hprt0;
u32 hfir;
u32 hptxfsiz;
@@ -1086,6 +1098,7 @@ struct dwc2_hsotg {
bool needs_byte_swap;
/* DWC OTG HW Release versions */
+#define DWC2_CORE_REV_4_30a 0x4f54430a
#define DWC2_CORE_REV_2_71a 0x4f54271a
#define DWC2_CORE_REV_2_72a 0x4f54272a
#define DWC2_CORE_REV_2_80a 0x4f54280a
@@ -1323,6 +1336,7 @@ int dwc2_backup_global_registers(struct dwc2_hsotg *hsotg);
int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);
void dwc2_enable_acg(struct dwc2_hsotg *hsotg);
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);
/* This function should be called on every hardware interrupt. */
irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index 158ede7538..26d752a4c3 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -297,7 +297,8 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
}
@@ -322,10 +323,11 @@ static void dwc2_handle_session_req_intr(struct dwc2_hsotg *hsotg)
* @hsotg: Programming view of DWC_otg controller
*
*/
-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)
{
u32 glpmcfg;
- u32 i = 0;
+ u32 pcgctl;
+ u32 dctl;
if (hsotg->lx_state != DWC2_L1) {
dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");
@@ -334,37 +336,57 @@ static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)
glpmcfg = dwc2_readl(hsotg, GLPMCFG);
if (dwc2_is_device_mode(hsotg)) {
- dev_dbg(hsotg->dev, "Exit from L1 state\n");
+ dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);
glpmcfg &= ~GLPMCFG_ENBLSLPM;
- glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;
+ glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;
dwc2_writel(hsotg, glpmcfg, GLPMCFG);
- do {
- glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ pcgctl = dwc2_readl(hsotg, PCGCTL);
+ pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
+ dwc2_writel(hsotg, pcgctl, PCGCTL);
- if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |
- GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))
- break;
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_ENBESL) {
+ glpmcfg |= GLPMCFG_RSTRSLPSTS;
+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);
+ }
+
+ if (remotewakeup) {
+ if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);
+ goto fail;
+ return;
+ }
+
+ dctl = dwc2_readl(hsotg, DCTL);
+ dctl |= DCTL_RMTWKUPSIG;
+ dwc2_writel(hsotg, dctl, DCTL);
- udelay(1);
- } while (++i < 200);
+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {
+ dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);
+ goto fail;
+ return;
+ }
+ }
- if (i == 200) {
- dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");
+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);
+ if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||
+ glpmcfg & GLPMCFG_L1RESUMEOK) {
+ goto fail;
return;
}
- dwc2_gadget_init_lpm(hsotg);
+
+ /* Inform gadget to exit from L1 */
+ call_gadget(hsotg, resume);
+ /* Change to L0 state */
+ hsotg->lx_state = DWC2_L0;
+ hsotg->bus_suspended = false;
+fail: dwc2_gadget_init_lpm(hsotg);
} else {
/* TODO */
dev_err(hsotg->dev, "Host side LPM is not supported.\n");
return;
}
-
- /* Change to L0 state */
- hsotg->lx_state = DWC2_L0;
-
- /* Inform gadget to exit from L1 */
- call_gadget(hsotg, resume);
}
/*
@@ -385,7 +407,7 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);
if (hsotg->lx_state == DWC2_L1) {
- dwc2_wakeup_from_lpm_l1(hsotg);
+ dwc2_wakeup_from_lpm_l1(hsotg, false);
return;
}
@@ -408,7 +430,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
/* Exit gadget mode clock gating. */
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_gadget_exit_clock_gating(hsotg, 0);
} else {
/* Change to L0 state */
@@ -425,7 +448,8 @@ static void dwc2_handle_wakeup_detected_intr(struct dwc2_hsotg *hsotg)
}
if (hsotg->params.power_down ==
- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
dwc2_host_exit_clock_gating(hsotg, 1);
/*
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index b517a7216d..b2f6da5b65 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1415,6 +1415,10 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
ep->name, req, req->length, req->buf, req->no_interrupt,
req->zero, req->short_not_ok);
+ if (hs->lx_state == DWC2_L1) {
+ dwc2_wakeup_from_lpm_l1(hs, true);
+ }
+
/* Prevent new request submission when controller is suspended */
if (hs->lx_state != DWC2_L0) {
dev_dbg(hs->dev, "%s: submit request only in active state\n",
@@ -3727,6 +3731,12 @@ irq_retry:
if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)
dwc2_exit_partial_power_down(hsotg, 0, true);
+ /* Exit gadget mode clock gating. */
+ if (hsotg->params.power_down ==
+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&
+ !hsotg->params.no_clock_gating)
+ dwc2_gadget_exit_clock_gating(hsotg, 0);
+
hsotg->lx_state = DWC2_L0;
}
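[Editorial note, not part of the patch] Together with the core_intr.c change above, dwc2_wakeup_from_lpm_l1() is now also reachable from the request-queue path, so a request submitted while the link sits in LPM L1 first triggers remote wakeup instead of being rejected by the existing "submit request only in active state" check. Rough shape of the added check in dwc2_hsotg_ep_queue(), names as in the diff:

	/* Leave L1 via remote wakeup before the existing L0-only check. */
	if (hs->lx_state == DWC2_L1)
		dwc2_wakeup_from_lpm_l1(hs, true);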
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 35c7a4df8e..dd5b1c5691 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2701,8 +2701,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the periodic ready schedule to the
@@ -2735,8 +2738,11 @@ enum dwc2_transaction_type dwc2_hcd_select_transactions(
hsotg->available_host_channels--;
}
- if (dwc2_assign_and_init_hc(hsotg, qh))
+ if (dwc2_assign_and_init_hc(hsotg, qh)) {
+ if (hsotg->params.uframe_sched)
+ hsotg->available_host_channels++;
break;
+ }
/*
* Move the QH from the non-periodic inactive schedule to the
@@ -4143,6 +4149,8 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
urb->actual_length);
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+ if (!hsotg->params.dma_desc_enable)
+ urb->start_frame = qtd->qh->start_active_frame;
urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
@@ -4649,7 +4657,7 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
}
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
@@ -5406,9 +5414,16 @@ int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg)
/* Backup Host regs */
hr = &hsotg->hr_backup;
hr->hcfg = dwc2_readl(hsotg, HCFG);
+ hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);
hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));
+ hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));
hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));
+ hr->hctsiz[i] = dwc2_readl(hsotg, HCTSIZ(i));
+ hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));
+ hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));
+ }
hr->hprt0 = dwc2_read_hprt0(hsotg);
hr->hfir = dwc2_readl(hsotg, HFIR);
@@ -5442,10 +5457,17 @@ int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg)
hr->valid = false;
dwc2_writel(hsotg, hr->hcfg, HCFG);
+ dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);
dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);
- for (i = 0; i < hsotg->params.host_channels; ++i)
+ for (i = 0; i < hsotg->params.host_channels; ++i) {
+ dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));
+ dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));
dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));
+ dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));
+ dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));
+ dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));
+ }
dwc2_writel(hsotg, hr->hprt0, HPRT0);
dwc2_writel(hsotg, hr->hfir, HFIR);
@@ -5610,10 +5632,12 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
dwc2_writel(hsotg, hr->hcfg, HCFG);
/* De-assert Wakeup Logic */
- gpwrdn = dwc2_readl(hsotg, GPWRDN);
- gpwrdn &= ~GPWRDN_PMUACTV;
- dwc2_writel(hsotg, gpwrdn, GPWRDN);
- udelay(10);
+ if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
hprt0 = hr->hprt0;
hprt0 |= HPRT0_PWR;
@@ -5638,6 +5662,13 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
hprt0 |= HPRT0_RES;
dwc2_writel(hsotg, hprt0, HPRT0);
+ /* De-assert Wakeup Logic */
+ if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {
+ gpwrdn = dwc2_readl(hsotg, GPWRDN);
+ gpwrdn &= ~GPWRDN_PMUACTV;
+ dwc2_writel(hsotg, gpwrdn, GPWRDN);
+ udelay(10);
+ }
/* Wait for Resume time and then program HPRT again */
mdelay(100);
hprt0 &= ~HPRT0_RES;
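[Editorial note, not part of the patch] The two scheduling hunks in hcd.c above fix a host-channel accounting leak: a channel reserved for a QH is now returned when dwc2_assign_and_init_hc() fails. The pattern is reserve-then-undo, sketched with the field names from the diff:

	if (hsotg->params.uframe_sched)
		hsotg->available_host_channels--;		/* reserve a channel */

	if (dwc2_assign_and_init_hc(hsotg, qh)) {
		if (hsotg->params.uframe_sched)
			hsotg->available_host_channels++;	/* give it back on failure */
		break;
	}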
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 6b4d825e97..79582b102c 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -559,7 +559,7 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
idx = qh->td_last;
inc = qh->host_interval;
hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
- cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+ cur_idx = idx;
next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
/*
@@ -866,6 +866,8 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
{
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
+ u16 frame_desc_idx;
+ struct urb *usb_urb = qtd->urb->priv;
u16 remain = 0;
int rc = 0;
@@ -878,8 +880,11 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
DMA_FROM_DEVICE);
dma_desc = &qh->desc_list[idx];
+ frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);
- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
+ frame_desc = &qtd->urb->iso_descs[frame_desc_idx];
+ if (idx == qtd->isoc_td_first)
+ usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in)
remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
@@ -900,7 +905,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
frame_desc->status = 0;
}
- if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+ if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {
/*
* urb->status is not used for isoc transfers here. The
* individual frame_desc status are used instead.
@@ -1005,11 +1010,11 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
return;
idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
chan->speed);
- if (!rc)
+ if (rc == 0)
continue;
- if (rc == DWC2_CMPL_DONE)
- break;
+ if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)
+ goto stop_scan;
/* rc == DWC2_CMPL_STOP */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 13abdd5f67..12f8c7f86d 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -698,7 +698,7 @@
#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25)
#define TXSTS_QTOP_TOKEN_SHIFT 25
#define TXSTS_QTOP_TERMINATE BIT(24)
-#define TXSTS_QSPCAVAIL_MASK (0xff << 16)
+#define TXSTS_QSPCAVAIL_MASK (0x7f << 16)
#define TXSTS_QSPCAVAIL_SHIFT 16
#define TXSTS_FSPCAVAIL_MASK (0xffff << 0)
#define TXSTS_FSPCAVAIL_SHIFT 0
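[Editorial note, not part of the patch] The hw.h change narrows the request-queue-space mask from 8 to 7 bits to match the width of the QSPCAVAIL field. Callers extract the field the same way as before, for example:

	u32 txsts = dwc2_readl(hsotg, GNPTXSTS);
	u32 qspcavail = (txsts & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT;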
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index b1d48019e9..7b84416dfc 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -331,7 +331,7 @@ static void dwc2_driver_remove(struct platform_device *dev)
/* Exit clock gating when driver is removed. */
if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&
- hsotg->bus_suspended) {
+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {
if (dwc2_is_device_mode(hsotg))
dwc2_gadget_exit_clock_gating(hsotg, 0);
else
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f50b5575d5..8de1ab8517 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1507,6 +1507,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
else
dwc->sysdev = dwc->dev;
+ dwc->sys_wakeup = device_may_wakeup(dwc->sysdev);
+
ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name);
if (ret >= 0) {
dwc->usb_psy = power_supply_get_by_name(usb_psy_name);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index efe6caf4d0..80265ef608 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1127,6 +1127,7 @@ struct dwc3_scratchpad_array {
* 3 - Reserved
* @dis_metastability_quirk: set to disable metastability quirk.
* @dis_split_quirk: set to disable split boundary.
+ * @sys_wakeup: set if the device may do system wakeup.
* @wakeup_configured: set if the device is configured for remote wakeup.
* @suspended: set to track suspend event due to U3/L2.
* @imod_interval: set the interrupt moderation interval in 250ns
@@ -1350,6 +1351,7 @@ struct dwc3 {
unsigned dis_split_quirk:1;
unsigned async_callbacks:1;
+ unsigned sys_wakeup:1;
unsigned wakeup_configured:1;
unsigned suspended:1;
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 90a587bc29..ea6e29091c 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -267,21 +267,15 @@ err_pm_disable:
return ret;
}
-static int dwc3_ti_remove_core(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
- return 0;
-}
-
static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
u32 reg;
- device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+ pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, false);
+ of_platform_depopulate(dev);
/* Clear mode valid bit */
reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
@@ -289,7 +283,6 @@ static void dwc3_ti_remove(struct platform_device *pdev)
dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
pm_runtime_put_sync(dev);
- clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 39564e17f3..497deed38c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -51,7 +51,6 @@
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTLS 0x7f6f
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
-#define PCI_DEVICE_ID_INTEL_ARLH 0x7ec1
#define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_DEVICE_ID_AMD_MR 0x163a
@@ -423,7 +422,6 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, MTLP, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTL, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) },
- { PCI_DEVICE_DATA(INTEL, ARLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 28f49400f3..07820b1a88 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2968,6 +2968,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
spin_unlock_irqrestore(&dwc->lock, flags);
+ if (dwc->sys_wakeup)
+ device_wakeup_enable(dwc->sysdev);
+
return 0;
}
@@ -2983,6 +2986,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
spin_lock_irqsave(&dwc->lock, flags);
dwc->gadget_driver = NULL;
dwc->max_cfg_eps = 0;
@@ -4664,6 +4670,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
else
dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+ /* No system wakeup if no gadget driver bound */
+ if (dwc->sys_wakeup)
+ device_wakeup_disable(dwc->sysdev);
+
return 0;
err5:
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 4323091532..f6a020d77f 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -123,6 +123,14 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ if (dwc->sys_wakeup) {
+ /* Restore wakeup setting if switched from device */
+ device_wakeup_enable(dwc->sysdev);
+
+ /* Pass on wakeup setting to the new xhci platform device */
+ device_init_wakeup(&xhci->dev, true);
+ }
+
return 0;
err:
platform_device_put(xhci);
@@ -131,6 +139,9 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
+ if (dwc->sys_wakeup)
+ device_init_wakeup(&dwc->xhci->dev, false);
+
platform_device_unregister(dwc->xhci);
dwc->xhci = NULL;
}
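[Editorial note, not part of the patch] The dwc3 hunks above record the platform wakeup capability once at probe time (dwc->sys_wakeup) and then keep wakeup strictly paired with gadget driver binding, while the host path forwards the setting to the xhci child via device_init_wakeup(). The gadget-side pairing, using the names from the diff:

	/* gadget bind */
	if (dwc->sys_wakeup)
		device_wakeup_enable(dwc->sysdev);

	/* gadget unbind */
	if (dwc->sys_wakeup)
		device_wakeup_disable(dwc->sysdev);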
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index efe3e3b857..fdd0fc7b8f 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
- eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ eventfd_signal(io_data->ffs->ffs_eventfd);
if (io_data->read)
kfree(io_data->to_free);
@@ -2738,7 +2738,7 @@ static void __ffs_event_add(struct ffs_data *ffs,
ffs->ev.types[ffs->ev.count++] = type;
wake_up_locked(&ffs->ev.waitq);
if (ffs->ffs_eventfd)
- eventfd_signal(ffs->ffs_eventfd, 1);
+ eventfd_signal(ffs->ffs_eventfd);
}
static void ffs_event_add(struct ffs_data *ffs,
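[Editorial note, not part of the patch] The eventfd_signal() conversions here (and in the vdpa/vfio hunks further down) only drop the explicit count argument; eventfd_signal() now always signals an increment of one. Callers reduce to:

	if (ctx)
		eventfd_signal(ctx);	/* was: eventfd_signal(ctx, 1); */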
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 5712883a75..f3456b8bf4 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1333,7 +1333,7 @@ parse_ntb:
if (to_process == 1 &&
(*(unsigned char *)(ntb_ptr + block_len) == 0x00)) {
to_process--;
- } else if (to_process > 0) {
+ } else if ((to_process > 0) && (block_len != 0)) {
ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
goto parse_ntb;
}
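[Editorial note, not part of the patch] The f_ncm.c change guards the NTB parsing loop against a zero block length; without it a malformed NTB whose block length decodes to 0 would advance ntb_ptr by 0 and loop on parse_ntb forever. Only the block_len test is from the patch itself:

	} else if ((to_process > 0) && (block_len != 0)) {
		ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
		goto parse_ntb;	/* advance only when progress was made */
	}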
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d59f94464b..8ac29f7230 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -292,7 +292,9 @@ int usb_ep_queue(struct usb_ep *ep,
{
int ret = 0;
- if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+ if (!ep->enabled && ep->address) {
+ pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",
+ ep->address, ep->name);
ret = -ESHUTDOWN;
goto out;
}
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 12e76bb62c..19bbc38f3d 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
goto err_req;
}
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ ret = net2272_probe_fin(dev, irqflags);
if (ret)
goto err_io;
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index cb85168fd0..7aa46d426f 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
- int err = 0, usb3;
- unsigned int i;
+ int err = 0, usb3_companion_port;
+ unsigned int i, j;
xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
sizeof(*xudc->utmi_phy), GFP_KERNEL);
@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
dev_err_probe(xudc->dev, err,
- "failed to get usb2-%d PHY\n", i);
+ "failed to get PHY for phy-name usb2-%d\n", i);
goto clean_up;
} else if (xudc->utmi_phy[i]) {
/* Get usb-phy, if utmi phy is available */
@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
}
/* Get USB3 phy */
- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
- if (usb3 < 0)
+ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3_companion_port < 0)
continue;
- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
- if (IS_ERR(xudc->usb3_phy[i])) {
- err = PTR_ERR(xudc->usb3_phy[i]);
- dev_err_probe(xudc->dev, err,
- "failed to get usb3-%d PHY\n", usb3);
- goto clean_up;
- } else if (xudc->usb3_phy[i])
- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
+ for (j = 0; j < xudc->soc->num_phys; j++) {
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get PHY for phy-name usb3-%d\n", j);
+ goto clean_up;
+ } else if (xudc->usb3_phy[i]) {
+ int usb2_port =
+ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
+ int usb3_port =
+ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
+ if (usb3_port == usb3_companion_port) {
+ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
+ usb2_port, usb3_port, i);
+ break;
+ }
+ }
+ }
}
return err;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9673354d70..2647245d5b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -326,7 +326,13 @@ static unsigned int xhci_ring_expansion_needed(struct xhci_hcd *xhci, struct xhc
/* how many trbs will be queued past the enqueue segment? */
trbs_past_seg = enq_used + num_trbs - (TRBS_PER_SEGMENT - 1);
- if (trbs_past_seg <= 0)
+ /*
+ * Consider expanding the ring already if num_trbs fills the current
+ * segment (i.e. trbs_past_seg == 0), not only when num_trbs goes into
+ * the next segment. Avoids confusing full ring with special empty ring
+ * case below
+ */
+ if (trbs_past_seg < 0)
return 0;
/* Empty ring special case, enqueue stuck on link trb while dequeue advanced */
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 884b0898d9..943b87d15f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1202,6 +1202,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
+ if (!temp)
+ return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
diff --git a/drivers/usb/misc/usb-ljca.c b/drivers/usb/misc/usb-ljca.c
index 35770e608c..2d30fc1be3 100644
--- a/drivers/usb/misc/usb-ljca.c
+++ b/drivers/usb/misc/usb-ljca.c
@@ -518,8 +518,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
int ret;
client = kzalloc(sizeof *client, GFP_KERNEL);
- if (!client)
+ if (!client) {
+ kfree(data);
return -ENOMEM;
+ }
client->type = type;
client->id = id;
@@ -535,8 +537,10 @@ static int ljca_new_client_device(struct ljca_adapter *adap, u8 type, u8 id,
auxdev->dev.release = ljca_auxdev_release;
ret = auxiliary_device_init(auxdev);
- if (ret)
+ if (ret) {
+ kfree(data);
goto err_free;
+ }
ljca_auxdev_acpi_bind(adap, auxdev, adr, id);
@@ -590,12 +594,8 @@ static int ljca_enumerate_gpio(struct ljca_adapter *adap)
valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins);
bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num);
- ret = ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
+ return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",
gpio_info, LJCA_GPIO_ACPI_ADR);
- if (ret)
- kfree(gpio_info);
-
- return ret;
}
static int ljca_enumerate_i2c(struct ljca_adapter *adap)
@@ -629,10 +629,8 @@ static int ljca_enumerate_i2c(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i,
"ljca-i2c", i2c_info,
LJCA_I2C1_ACPI_ADR + i);
- if (ret) {
- kfree(i2c_info);
+ if (ret)
return ret;
- }
}
return 0;
@@ -669,10 +667,8 @@ static int ljca_enumerate_spi(struct ljca_adapter *adap)
ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i,
"ljca-spi", spi_info,
LJCA_SPI1_ACPI_ADR + i);
- if (ret) {
- kfree(spi_info);
+ if (ret)
return ret;
- }
}
return 0;
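[Editorial note, not part of the patch] The usb-ljca.c hunks move ownership of the client data buffer into ljca_new_client_device(), which now frees it on its own failure paths; callers therefore stop calling kfree() after a failed registration, avoiding a double free. Resulting caller shape, names as in the diff:

	ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i, "ljca-i2c",
				     i2c_info, LJCA_I2C1_ACPI_ADR + i);
	if (ret)
		return ret;	/* i2c_info already freed by the callee */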
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 923e0ed854..21fd266092 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
+ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
@@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
@@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 13a5678383..22d01a0f10 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /* GMC devices */
+ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21a2b5a25f..5ee60ba2a7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1606,3 +1606,9 @@
#define UBLOX_VID 0x1546
#define UBLOX_C099F9P_ZED_PID 0x0502
#define UBLOX_C099F9P_ODIN_PID 0x0503
+
+/*
+ * GMC devices
+ */
+#define GMC_VID 0x1cd7
+#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2ae124c49d..55a65d941c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
+/* MeiG Smart Technology products */
+#define MEIGSMART_VENDOR_ID 0x2dee
+/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+#define MEIGSMART_PRODUCT_SLM320 0x4d41
+
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 4e0eef1440..300aeef160 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1105,7 +1105,7 @@ static void isd200_dump_driveid(struct us_data *us, u16 *id)
static int isd200_get_inquiry_data( struct us_data *us )
{
struct isd200_info *info = (struct isd200_info *)us->extra;
- int retStatus = ISD200_GOOD;
+ int retStatus;
u16 *id = info->id;
usb_stor_dbg(us, "Entering isd200_get_inquiry_data\n");
@@ -1137,6 +1137,13 @@ static int isd200_get_inquiry_data( struct us_data *us )
isd200_fix_driveid(id);
isd200_dump_driveid(us, id);
+ /* Prevent division by 0 in isd200_scsi_to_ata() */
+ if (id[ATA_ID_HEADS] == 0 || id[ATA_ID_SECTORS] == 0) {
+ usb_stor_dbg(us, " Invalid ATA Identify data\n");
+ retStatus = ISD200_ERROR;
+ goto Done;
+ }
+
memset(&info->InquiryData, 0, sizeof(info->InquiryData));
/* Standard IDE interface only supports disks */
@@ -1202,6 +1209,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
}
}
+ Done:
usb_stor_dbg(us, "Leaving isd200_get_inquiry_data %08X\n", retStatus);
return(retStatus);
@@ -1481,22 +1489,27 @@ static int isd200_init_info(struct us_data *us)
static int isd200_Initialization(struct us_data *us)
{
+ int rc = 0;
+
usb_stor_dbg(us, "ISD200 Initialization...\n");
/* Initialize ISD200 info struct */
- if (isd200_init_info(us) == ISD200_ERROR) {
+ if (isd200_init_info(us) < 0) {
usb_stor_dbg(us, "ERROR Initializing ISD200 Info struct\n");
+ rc = -ENOMEM;
} else {
/* Get device specific data */
- if (isd200_get_inquiry_data(us) != ISD200_GOOD)
+ if (isd200_get_inquiry_data(us) != ISD200_GOOD) {
usb_stor_dbg(us, "ISD200 Initialization Failure\n");
- else
+ rc = -EINVAL;
+ } else {
usb_stor_dbg(us, "ISD200 Initialization complete\n");
+ }
}
- return 0;
+ return rc;
}
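[Editorial note, not part of the patch] Two separate fixes land in isd200.c: isd200_get_inquiry_data() now rejects ATA identify data with zero heads or sectors before it can reach the geometry math in isd200_scsi_to_ata() (division by zero), and isd200_Initialization() propagates a real errno instead of always returning 0. Condensed:

	if (isd200_init_info(us) < 0)
		rc = -ENOMEM;
	else if (isd200_get_inquiry_data(us) != ISD200_GOOD)
		rc = -EINVAL;
	return rc;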
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 299a6767b7..d002eead62 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -533,7 +533,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
* daft to me.
*/
-static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
+static int uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
{
struct uas_dev_info *devinfo = cmnd->device->hostdata;
struct urb *urb;
@@ -541,30 +541,28 @@ static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
if (!urb)
- return NULL;
+ return -ENOMEM;
usb_anchor_urb(urb, &devinfo->sense_urbs);
err = usb_submit_urb(urb, gfp);
if (err) {
usb_unanchor_urb(urb);
uas_log_cmd_state(cmnd, "sense submit err", err);
usb_free_urb(urb);
- return NULL;
}
- return urb;
+ return err;
}
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
struct uas_dev_info *devinfo)
{
struct uas_cmd_info *cmdinfo = scsi_cmd_priv(cmnd);
- struct urb *urb;
int err;
lockdep_assert_held(&devinfo->lock);
if (cmdinfo->state & SUBMIT_STATUS_URB) {
- urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
- if (!urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ err = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
+ if (err)
+ return err;
cmdinfo->state &= ~SUBMIT_STATUS_URB;
}
@@ -572,7 +570,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_FROM_DEVICE);
if (!cmdinfo->data_in_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_IN_URB;
}
@@ -582,7 +580,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_in_urb);
uas_log_cmd_state(cmnd, "data in submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
cmdinfo->state |= DATA_IN_URB_INFLIGHT;
@@ -592,7 +590,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
cmnd, DMA_TO_DEVICE);
if (!cmdinfo->data_out_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
}
@@ -602,7 +600,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->data_out_urb);
uas_log_cmd_state(cmnd, "data out submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
@@ -611,7 +609,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (cmdinfo->state & ALLOC_CMD_URB) {
cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
if (!cmdinfo->cmd_urb)
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return -ENOMEM;
cmdinfo->state &= ~ALLOC_CMD_URB;
}
@@ -621,7 +619,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
if (err) {
usb_unanchor_urb(cmdinfo->cmd_urb);
uas_log_cmd_state(cmnd, "cmd submit err", err);
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ return err;
}
cmdinfo->cmd_urb = NULL;
cmdinfo->state &= ~SUBMIT_CMD_URB;
@@ -698,7 +696,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd)
* of queueing, no matter how fatal the error
*/
if (err == -ENODEV) {
- set_host_byte(cmnd, DID_ERROR);
+ set_host_byte(cmnd, DID_NO_CONNECT);
scsi_done(cmnd);
goto zombie;
}
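[Editorial note, not part of the patch] The uas.c rework makes uas_submit_sense_urb() and uas_submit_urbs() return negative errnos instead of collapsing every failure to SCSI_MLQUEUE_DEVICE_BUSY, and a dead device (-ENODEV) is now completed with DID_NO_CONNECT rather than being requeued indefinitely. Caller-side handling, mirroring the hunk above:

	if (err == -ENODEV) {
		set_host_byte(cmnd, DID_NO_CONNECT);
		scsi_done(cmnd);
	}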
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f81bec0c7b..f8ea3054be 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -559,16 +559,21 @@ static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR_RO(hpd);
-static struct attribute *dp_altmode_attrs[] = {
+static struct attribute *displayport_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
&dev_attr_hpd.attr,
NULL
};
-static const struct attribute_group dp_altmode_group = {
+static const struct attribute_group displayport_group = {
.name = "displayport",
- .attrs = dp_altmode_attrs,
+ .attrs = displayport_attrs,
+};
+
+static const struct attribute_group *displayport_groups[] = {
+ &displayport_group,
+ NULL,
};
int dp_altmode_probe(struct typec_altmode *alt)
@@ -576,7 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
const struct typec_altmode *port = typec_altmode_get_partner(alt);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
- int ret;
/* FIXME: Port can only be DFP_U. */
@@ -587,10 +591,6 @@ int dp_altmode_probe(struct typec_altmode *alt)
DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
- ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
- if (ret)
- return ret;
-
dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
if (!dp)
return -ENOMEM;
@@ -624,7 +624,6 @@ void dp_altmode_remove(struct typec_altmode *alt)
{
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
- sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
if (dp->connector_fwnode) {
@@ -649,6 +648,7 @@ static struct typec_altmode_driver dp_altmode_driver = {
.driver = {
.name = "typec_displayport",
.owner = THIS_MODULE,
+ .dev_groups = displayport_groups,
},
};
module_typec_altmode_driver(dp_altmode_driver);
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 2da19feacd..2373cd39b4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1310,6 +1310,7 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
{
struct typec_port *port = to_typec_port(dev);
struct usb_power_delivery *pd;
+ int ret;
if (!port->ops || !port->ops->pd_set)
return -EOPNOTSUPP;
@@ -1318,7 +1319,11 @@ static ssize_t select_usb_power_delivery_store(struct device *dev,
if (!pd)
return -EINVAL;
- return port->ops->pd_set(port, pd);
+ ret = port->ops->pd_set(port, pd);
+ if (ret)
+ return ret;
+
+ return size;
}
static ssize_t select_usb_power_delivery_show(struct device *dev,
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a8e03cfde9..c5776b3c96 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -4859,8 +4859,11 @@ static void run_state_machine(struct tcpm_port *port)
break;
case PORT_RESET:
tcpm_reset_port(port);
- tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
- TYPEC_CC_RD : tcpm_rp_cc(port));
+ if (port->self_powered)
+ tcpm_set_cc(port, TYPEC_CC_OPEN);
+ else
+ tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
+ TYPEC_CC_RD : tcpm_rp_cc(port));
tcpm_set_state(port, PORT_RESET_WAIT_OFF,
PD_T_ERROR_RECOVERY);
break;
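[Editorial note, not part of the patch] For self-powered TCPM ports, PORT_RESET now opens both CC lines instead of presenting Rd/Rp, so the port looks properly disconnected to the partner while the PORT_RESET_WAIT_OFF timer runs. The change itself is exactly the added branch:

	if (port->self_powered)
		tcpm_set_cc(port, TYPEC_CC_OPEN);	/* appear disconnected during reset */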
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 8f9dff993b..70d9f4eebf 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -138,8 +138,12 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
- if (cci & UCSI_CCI_NOT_SUPPORTED)
+ if (cci & UCSI_CCI_NOT_SUPPORTED) {
+ if (ucsi_acknowledge_command(ucsi) < 0)
+ dev_err(ucsi->dev,
+ "ACK of unsupported command failed\n");
return -EOPNOTSUPP;
+ }
if (cci & UCSI_CCI_ERROR) {
if (cmd == UCSI_GET_ERROR_STATUS)
@@ -933,11 +937,11 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
- clear_bit(EVENT_PENDING, &con->ucsi->flags);
-
mutex_lock(&ucsi->ppm_lock);
+ clear_bit(EVENT_PENDING, &con->ucsi->flags);
ret = ucsi_acknowledge_connector_change(ucsi);
mutex_unlock(&ucsi->ppm_lock);
+
if (ret)
dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
@@ -978,13 +982,47 @@ static int ucsi_reset_connector(struct ucsi_connector *con, bool hard)
static int ucsi_reset_ppm(struct ucsi *ucsi)
{
- u64 command = UCSI_PPM_RESET;
+ u64 command;
unsigned long tmo;
u32 cci;
int ret;
mutex_lock(&ucsi->ppm_lock);
+ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+
+ /*
+ * If UCSI_CCI_RESET_COMPLETE is already set we must clear
+ * the flag before we start another reset. Send a
+ * UCSI_SET_NOTIFICATION_ENABLE command to achieve this.
+ * Ignore a timeout and try the reset anyway if this fails.
+ */
+ if (cci & UCSI_CCI_RESET_COMPLETE) {
+ command = UCSI_SET_NOTIFICATION_ENABLE;
+ ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
+ sizeof(command));
+ if (ret < 0)
+ goto out;
+
+ tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
+ do {
+ ret = ucsi->ops->read(ucsi, UCSI_CCI,
+ &cci, sizeof(cci));
+ if (ret < 0)
+ goto out;
+ if (cci & UCSI_CCI_COMMAND_COMPLETE)
+ break;
+ if (time_is_before_jiffies(tmo))
+ break;
+ msleep(20);
+ } while (1);
+
+ WARN_ON(cci & UCSI_CCI_RESET_COMPLETE);
+ }
+
+ command = UCSI_PPM_RESET;
ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL, &command,
sizeof(command));
if (ret < 0)
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 474315a72c..13ec976b1c 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -221,12 +221,12 @@ struct ucsi_cable_property {
#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
u8 latency;
} __packed;
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 928eacbeb2..7b3ac133ef 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -23,10 +23,11 @@ struct ucsi_acpi {
void *base;
struct completion complete;
unsigned long flags;
+#define UCSI_ACPI_SUPPRESS_EVENT 0
+#define UCSI_ACPI_COMMAND_PENDING 1
+#define UCSI_ACPI_ACK_PENDING 2
guid_t guid;
u64 cmd;
- bool dell_quirk_probed;
- bool dell_quirk_active;
};
static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)
@@ -79,9 +80,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
int ret;
if (ack)
- set_bit(ACK_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- set_bit(COMMAND_PENDING, &ua->flags);
+ set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);
if (ret)
@@ -92,9 +93,9 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
out_clear_bit:
if (ack)
- clear_bit(ACK_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);
else
- clear_bit(COMMAND_PENDING, &ua->flags);
+ clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);
return ret;
}
@@ -129,51 +130,40 @@ static const struct ucsi_operations ucsi_zenbook_ops = {
};
/*
- * Some Dell laptops expect that an ACK command with the
- * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)
- * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.
- * If this is not done events are not delivered to OSPM and
- * subsequent commands will timeout.
+ * Some Dell laptops don't like ACK commands with the
+ * UCSI_ACK_CONNECTOR_CHANGE but not the UCSI_ACK_COMMAND_COMPLETE
+ * bit set. To work around this send a dummy command and bundle the
+ * UCSI_ACK_CONNECTOR_CHANGE with the UCSI_ACK_COMMAND_COMPLETE
+ * for the dummy command.
*/
static int
ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- u64 cmd = *(u64 *)val, ack = 0;
+ u64 cmd = *(u64 *)val;
+ u64 dummycmd = UCSI_GET_CAPABILITY;
int ret;
- if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&
- cmd & UCSI_ACK_CONNECTOR_CHANGE)
- ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;
-
- ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);
- if (ret != 0)
- return ret;
- if (ack == 0)
- return ret;
-
- if (!ua->dell_quirk_probed) {
- ua->dell_quirk_probed = true;
-
- cmd = UCSI_GET_CAPABILITY;
- ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,
- sizeof(cmd));
- if (ret == 0)
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,
- &ack, sizeof(ack));
- if (ret != -ETIMEDOUT)
+ if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {
+ cmd |= UCSI_ACK_COMMAND_COMPLETE;
+
+ /*
+ * The UCSI core thinks it is sending a connector change ack
+ * and will accept new connector change events. We don't want
+ * this to happen for the dummy command as its response will
+ * still report the very event that the core is trying to clear.
+ */
+ set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,
+ sizeof(dummycmd));
+ clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);
+
+ if (ret < 0)
return ret;
-
- ua->dell_quirk_active = true;
- dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");
- dev_err(ua->dev, "Firmware bug: Enabling workaround\n");
}
- if (!ua->dell_quirk_active)
- return ret;
-
- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));
+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
}
static const struct ucsi_operations ucsi_dell_ops = {
@@ -209,13 +199,14 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
if (ret)
return;
- if (UCSI_CCI_CONNECTOR(cci))
+ if (UCSI_CCI_CONNECTOR(cci) &&
+ !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))
ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))
complete(&ua->complete);
if (cci & UCSI_CCI_COMMAND_COMPLETE &&
- test_bit(COMMAND_PENDING, &ua->flags))
+ test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))
complete(&ua->complete);
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 4853141cd1..894622b655 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -254,6 +254,20 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
static void pmic_glink_ucsi_register(struct work_struct *work)
{
struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
+ int orientation;
+ int i;
+
+ for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
+ if (!ucsi->port_orientation[i])
+ continue;
+ orientation = gpiod_get_value(ucsi->port_orientation[i]);
+
+ if (orientation >= 0) {
+ typec_switch_set(ucsi->port_switch[i],
+ orientation ? TYPEC_ORIENTATION_REVERSE
+ : TYPEC_ORIENTATION_NORMAL);
+ }
+ }
ucsi_register(ucsi->ucsi);
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 26ba7da6b4..7795d2b7fc 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -145,8 +145,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
-#define MLX5_CVQ_MAX_ENT 16
-
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -2147,9 +2145,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
- if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ if (!is_index_valid(mvdev, idx))
return;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->vring.vring.num = num;
+ return;
+ }
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -2819,7 +2824,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
u16 idx = cvq->vring.last_avail_idx;
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
- MLX5_CVQ_MAX_ENT, false,
+ cvq->vring.vring.num, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index be2925d0d2..18584ce70b 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -160,7 +160,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
}
}
- vdpasim->running = true;
+ vdpasim->running = false;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -483,6 +483,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
mutex_lock(&vdpasim->mutex);
vdpasim->status = status;
+ vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
mutex_unlock(&vdpasim->mutex);
}
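[Editorial note, not part of the patch] vdpa_sim now derives its running flag from the driver status instead of forcing it on during reset, so the simulated queues only run once the guest driver sets DRIVER_OK:

	vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;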
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 0ddd4b8abe..6cb5ce4a8b 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -493,7 +493,7 @@ static void vduse_vq_kick(struct vduse_virtqueue *vq)
goto unlock;
if (vq->kickfd)
- eventfd_signal(vq->kickfd, 1);
+ eventfd_signal(vq->kickfd);
else
vq->kicked = true;
unlock:
@@ -911,7 +911,7 @@ static int vduse_kickfd_setup(struct vduse_dev *dev,
eventfd_ctx_put(vq->kickfd);
vq->kickfd = ctx;
if (vq->ready && vq->kicked && vq->kickfd) {
- eventfd_signal(vq->kickfd, 1);
+ eventfd_signal(vq->kickfd);
vq->kicked = false;
}
spin_unlock(&vq->kick_lock);
@@ -960,7 +960,7 @@ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
spin_lock_irq(&vq->irq_lock);
if (vq->ready && vq->cb.trigger) {
- eventfd_signal(vq->cb.trigger, 1);
+ eventfd_signal(vq->cb.trigger);
signal = true;
}
spin_unlock_irq(&vq->irq_lock);
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index c51229fccb..82b2afa9b7 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -54,7 +54,7 @@ static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
{
struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
- eventfd_signal(mc_irq->trigger, 1);
+ eventfd_signal(mc_irq->trigger);
return IRQ_HANDLED;
}
@@ -141,13 +141,14 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
irq = &vdev->mc_irqs[index];
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_fsl_mc_irq_handler(hwirq, irq);
+ if (irq->trigger)
+ eventfd_signal(irq->trigger);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
u8 trigger = *(u8 *)data;
- if (trigger)
- vfio_fsl_mc_irq_handler(hwirq, irq);
+ if (trigger && irq->trigger)
+ eventfd_signal(irq->trigger);
}
return 0;
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index 79fe2e66bb..6b94cc0bf4 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -92,8 +92,10 @@ static void pds_vfio_put_lm_file(struct pds_vfio_lm_file *lm_file)
{
mutex_lock(&lm_file->lock);
+ lm_file->disabled = true;
lm_file->size = 0;
lm_file->alloc_size = 0;
+ lm_file->filep->f_pos = 0;
/* Free scatter list of file pages */
sg_free_table(&lm_file->sg_table);
@@ -183,6 +185,12 @@ static ssize_t pds_vfio_save_read(struct file *filp, char __user *buf,
pos = &filp->f_pos;
mutex_lock(&lm_file->lock);
+
+ if (lm_file->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
if (*pos > lm_file->size) {
done = -EINVAL;
goto out_unlock;
@@ -283,6 +291,11 @@ static ssize_t pds_vfio_restore_write(struct file *filp, const char __user *buf,
mutex_lock(&lm_file->lock);
+ if (lm_file->disabled) {
+ done = -ENODEV;
+ goto out_unlock;
+ }
+
while (len) {
size_t page_offset;
struct page *page;
diff --git a/drivers/vfio/pci/pds/lm.h b/drivers/vfio/pci/pds/lm.h
index 13be893198..9511b1afc6 100644
--- a/drivers/vfio/pci/pds/lm.h
+++ b/drivers/vfio/pci/pds/lm.h
@@ -27,6 +27,7 @@ struct pds_vfio_lm_file {
struct scatterlist *last_offset_sg; /* Iterator */
unsigned int sg_last_entry;
unsigned long last_offset;
+ bool disabled;
};
struct pds_vfio_pci_device;
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index 4c351c59d0..a286ebcc71 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -32,9 +32,9 @@ again:
mutex_lock(&pds_vfio->reset_mutex);
if (pds_vfio->deferred_reset) {
pds_vfio->deferred_reset = false;
+ pds_vfio_put_restore_file(pds_vfio);
+ pds_vfio_put_save_file(pds_vfio);
if (pds_vfio->state == VFIO_DEVICE_STATE_ERROR) {
- pds_vfio_put_restore_file(pds_vfio);
- pds_vfio_put_save_file(pds_vfio);
pds_vfio_dirty_disable(pds_vfio, false);
}
pds_vfio->state = pds_vfio->deferred_reset_state;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 1929103ee5..1cbc990d42 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -443,7 +443,7 @@ static int vfio_pci_core_runtime_resume(struct device *dev)
*/
down_write(&vdev->memory_lock);
if (vdev->pm_wake_eventfd_ctx) {
- eventfd_signal(vdev->pm_wake_eventfd_ctx, 1);
+ eventfd_signal(vdev->pm_wake_eventfd_ctx);
__vfio_pci_runtime_pm_exit(vdev);
}
up_write(&vdev->memory_lock);
@@ -1883,7 +1883,7 @@ void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count)
pci_notice_ratelimited(pdev,
"Relaying device request to user (#%u)\n",
count);
- eventfd_signal(vdev->req_trigger, 1);
+ eventfd_signal(vdev->req_trigger);
} else if (count == 0) {
pci_warn(pdev,
"No device request channel registered, blocked until released by user\n");
@@ -2302,7 +2302,7 @@ pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
mutex_lock(&vdev->igate);
if (vdev->err_trigger)
- eventfd_signal(vdev->err_trigger, 1);
+ eventfd_signal(vdev->err_trigger);
mutex_unlock(&vdev->igate);
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index cbb4bcbfbf..fb5392b749 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -90,22 +90,28 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
struct vfio_pci_irq_ctx *ctx;
+ struct eventfd_ctx *trigger;
ctx = vfio_irq_ctx_get(vdev, 0);
if (WARN_ON_ONCE(!ctx))
return;
- eventfd_signal(ctx->trigger, 1);
+
+ trigger = READ_ONCE(ctx->trigger);
+ if (likely(trigger))
+ eventfd_signal(trigger);
}
}
/* Returns true if the INTx vfio_pci_irq_ctx.masked value is changed. */
-bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+static bool __vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
unsigned long flags;
bool masked_changed = false;
+ lockdep_assert_held(&vdev->igate);
+
spin_lock_irqsave(&vdev->irqlock, flags);
/*
@@ -143,6 +149,17 @@ out_unlock:
return masked_changed;
}
+bool vfio_pci_intx_mask(struct vfio_pci_core_device *vdev)
+{
+ bool mask_changed;
+
+ mutex_lock(&vdev->igate);
+ mask_changed = __vfio_pci_intx_mask(vdev);
+ mutex_unlock(&vdev->igate);
+
+ return mask_changed;
+}
+
/*
* If this is triggered by an eventfd, we can't call eventfd_signal
* or else we'll deadlock on the eventfd wait queue. Return >0 when
@@ -194,12 +211,21 @@ out_unlock:
return ret;
}
-void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+static void __vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
{
+ lockdep_assert_held(&vdev->igate);
+
if (vfio_pci_intx_unmask_handler(vdev, NULL) > 0)
vfio_send_intx_eventfd(vdev, NULL);
}
+void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev)
+{
+ mutex_lock(&vdev->igate);
+ __vfio_pci_intx_unmask(vdev);
+ mutex_unlock(&vdev->igate);
+}
+
static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
{
struct vfio_pci_core_device *vdev = dev_id;
@@ -231,97 +257,100 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
return ret;
}
-static int vfio_intx_enable(struct vfio_pci_core_device *vdev)
+static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *trigger)
{
+ struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
+ unsigned long irqflags;
+ char *name;
+ int ret;
if (!is_irq_none(vdev))
return -EINVAL;
- if (!vdev->pdev->irq)
+ if (!pdev->irq)
return -ENODEV;
+ name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
+ if (!name)
+ return -ENOMEM;
+
ctx = vfio_irq_ctx_alloc(vdev, 0);
if (!ctx)
return -ENOMEM;
+ ctx->name = name;
+ ctx->trigger = trigger;
+
/*
- * If the virtual interrupt is masked, restore it. Devices
- * supporting DisINTx can be masked at the hardware level
- * here, non-PCI-2.3 devices will have to wait until the
- * interrupt is enabled.
+ * Fill the initial masked state based on virq_disabled. After
+ * enable, changing the DisINTx bit in vconfig directly changes INTx
+ * masking. igate prevents races during setup, once running masked
+ * is protected via irqlock.
+ *
+ * Devices supporting DisINTx also reflect the current mask state in
+ * the physical DisINTx bit, which is not affected during IRQ setup.
+ *
+ * Devices without DisINTx support require an exclusive interrupt.
+ * IRQ masking is performed at the IRQ chip. Again, igate protects
+ * against races during setup and IRQ handlers and irqfds are not
+ * yet active, therefore masked is stable and can be used to
+ * conditionally auto-enable the IRQ.
+ *
+ * irq_type must be stable while the IRQ handler is registered,
+ * therefore it must be set before request_irq().
*/
ctx->masked = vdev->virq_disabled;
- if (vdev->pci_2_3)
- pci_intx(vdev->pdev, !ctx->masked);
+ if (vdev->pci_2_3) {
+ pci_intx(pdev, !ctx->masked);
+ irqflags = IRQF_SHARED;
+ } else {
+ irqflags = ctx->masked ? IRQF_NO_AUTOEN : 0;
+ }
vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;
+ ret = request_irq(pdev->irq, vfio_intx_handler,
+ irqflags, ctx->name, vdev);
+ if (ret) {
+ vdev->irq_type = VFIO_PCI_NUM_IRQS;
+ kfree(name);
+ vfio_irq_ctx_free(vdev, ctx, 0);
+ return ret;
+ }
+
return 0;
}
-static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev, int fd)
+static int vfio_intx_set_signal(struct vfio_pci_core_device *vdev,
+ struct eventfd_ctx *trigger)
{
struct pci_dev *pdev = vdev->pdev;
- unsigned long irqflags = IRQF_SHARED;
struct vfio_pci_irq_ctx *ctx;
- struct eventfd_ctx *trigger;
- unsigned long flags;
- int ret;
+ struct eventfd_ctx *old;
ctx = vfio_irq_ctx_get(vdev, 0);
if (WARN_ON_ONCE(!ctx))
return -EINVAL;
- if (ctx->trigger) {
- free_irq(pdev->irq, vdev);
- kfree(ctx->name);
- eventfd_ctx_put(ctx->trigger);
- ctx->trigger = NULL;
- }
-
- if (fd < 0) /* Disable only */
- return 0;
+ old = ctx->trigger;
- ctx->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)",
- pci_name(pdev));
- if (!ctx->name)
- return -ENOMEM;
+ WRITE_ONCE(ctx->trigger, trigger);
- trigger = eventfd_ctx_fdget(fd);
- if (IS_ERR(trigger)) {
- kfree(ctx->name);
- return PTR_ERR(trigger);
+ /* Releasing an old ctx requires synchronizing in-flight users */
+ if (old) {
+ synchronize_irq(pdev->irq);
+ vfio_virqfd_flush_thread(&ctx->unmask);
+ eventfd_ctx_put(old);
}
- ctx->trigger = trigger;
-
- if (!vdev->pci_2_3)
- irqflags = 0;
-
- ret = request_irq(pdev->irq, vfio_intx_handler,
- irqflags, ctx->name, vdev);
- if (ret) {
- ctx->trigger = NULL;
- kfree(ctx->name);
- eventfd_ctx_put(trigger);
- return ret;
- }
-
- /*
- * INTx disable will stick across the new irq setup,
- * disable_irq won't.
- */
- spin_lock_irqsave(&vdev->irqlock, flags);
- if (!vdev->pci_2_3 && ctx->masked)
- disable_irq_nosync(pdev->irq);
- spin_unlock_irqrestore(&vdev->irqlock, flags);
-
return 0;
}
static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
struct vfio_pci_irq_ctx *ctx;
ctx = vfio_irq_ctx_get(vdev, 0);
@@ -329,10 +358,13 @@ static void vfio_intx_disable(struct vfio_pci_core_device *vdev)
if (ctx) {
vfio_virqfd_disable(&ctx->unmask);
vfio_virqfd_disable(&ctx->mask);
+ free_irq(pdev->irq, vdev);
+ if (ctx->trigger)
+ eventfd_ctx_put(ctx->trigger);
+ kfree(ctx->name);
+ vfio_irq_ctx_free(vdev, ctx, 0);
}
- vfio_intx_set_signal(vdev, -1);
vdev->irq_type = VFIO_PCI_NUM_IRQS;
- vfio_irq_ctx_free(vdev, ctx, 0);
}
/*
@@ -342,7 +374,7 @@ static irqreturn_t vfio_msihandler(int irq, void *arg)
{
struct eventfd_ctx *trigger = arg;
- eventfd_signal(trigger, 1);
+ eventfd_signal(trigger);
return IRQ_HANDLED;
}
@@ -560,11 +592,11 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_pci_intx_unmask(vdev);
+ __vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t unmask = *(uint8_t *)data;
if (unmask)
- vfio_pci_intx_unmask(vdev);
+ __vfio_pci_intx_unmask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
struct vfio_pci_irq_ctx *ctx = vfio_irq_ctx_get(vdev, 0);
int32_t fd = *(int32_t *)data;
@@ -591,11 +623,11 @@ static int vfio_pci_set_intx_mask(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- vfio_pci_intx_mask(vdev);
+ __vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t mask = *(uint8_t *)data;
if (mask)
- vfio_pci_intx_mask(vdev);
+ __vfio_pci_intx_mask(vdev);
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
return -ENOTTY; /* XXX implement me */
}
@@ -616,19 +648,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_core_device *vdev,
return -EINVAL;
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
+ struct eventfd_ctx *trigger = NULL;
int32_t fd = *(int32_t *)data;
int ret;
- if (is_intx(vdev))
- return vfio_intx_set_signal(vdev, fd);
+ if (fd >= 0) {
+ trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(trigger))
+ return PTR_ERR(trigger);
+ }
- ret = vfio_intx_enable(vdev);
- if (ret)
- return ret;
+ if (is_intx(vdev))
+ ret = vfio_intx_set_signal(vdev, trigger);
+ else
+ ret = vfio_intx_enable(vdev, trigger);
- ret = vfio_intx_set_signal(vdev, fd);
- if (ret)
- vfio_intx_disable(vdev);
+ if (ret && trigger)
+ eventfd_ctx_put(trigger);
return ret;
}
@@ -689,11 +725,11 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_core_device *vdev,
if (!ctx)
continue;
if (flags & VFIO_IRQ_SET_DATA_NONE) {
- eventfd_signal(ctx->trigger, 1);
+ eventfd_signal(ctx->trigger);
} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
uint8_t *bools = data;
if (bools[i - start])
- eventfd_signal(ctx->trigger, 1);
+ eventfd_signal(ctx->trigger);
}
}
return 0;
@@ -707,7 +743,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
if (flags & VFIO_IRQ_SET_DATA_NONE) {
if (*ctx) {
if (count) {
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
} else {
eventfd_ctx_put(*ctx);
*ctx = NULL;
@@ -722,7 +758,7 @@ static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
trigger = *(uint8_t *)data;
if (trigger && *ctx)
- eventfd_signal(*ctx, 1);
+ eventfd_signal(*ctx);
return 0;
} else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
diff --git a/drivers/vfio/platform/vfio_platform_irq.c b/drivers/vfio/platform/vfio_platform_irq.c
index 665197caed..ef41ecef83 100644
--- a/drivers/vfio/platform/vfio_platform_irq.c
+++ b/drivers/vfio/platform/vfio_platform_irq.c
@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
return 0;
}
+/*
+ * The trigger eventfd is guaranteed valid in the interrupt path
+ * and protected by the igate mutex when triggered via ioctl.
+ */
+static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
+{
+ if (likely(irq_ctx->trigger))
+ eventfd_signal(irq_ctx->trigger);
+}
+
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
spin_unlock_irqrestore(&irq_ctx->lock, flags);
if (ret == IRQ_HANDLED)
- eventfd_signal(irq_ctx->trigger, 1);
+ vfio_send_eventfd(irq_ctx);
return ret;
}
@@ -164,52 +174,40 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
struct vfio_platform_irq *irq_ctx = dev_id;
- eventfd_signal(irq_ctx->trigger, 1);
+ vfio_send_eventfd(irq_ctx);
return IRQ_HANDLED;
}
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
- int fd, irq_handler_t handler)
+ int fd)
{
struct vfio_platform_irq *irq = &vdev->irqs[index];
struct eventfd_ctx *trigger;
- int ret;
if (irq->trigger) {
- irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
- free_irq(irq->hwirq, irq);
- kfree(irq->name);
+ disable_irq(irq->hwirq);
eventfd_ctx_put(irq->trigger);
irq->trigger = NULL;
}
if (fd < 0) /* Disable only */
return 0;
- irq->name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-irq[%d](%s)",
- irq->hwirq, vdev->name);
- if (!irq->name)
- return -ENOMEM;
trigger = eventfd_ctx_fdget(fd);
- if (IS_ERR(trigger)) {
- kfree(irq->name);
+ if (IS_ERR(trigger))
return PTR_ERR(trigger);
- }
irq->trigger = trigger;
- irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
- ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
- if (ret) {
- kfree(irq->name);
- eventfd_ctx_put(trigger);
- irq->trigger = NULL;
- return ret;
- }
-
- if (!irq->masked)
- enable_irq(irq->hwirq);
+ /*
+ * irq->masked effectively provides nested disables within the overall
+ * enable relative to trigger. Specifically, request_irq() is called
+ * with NO_AUTOEN, therefore the IRQ is initially disabled. The user
+ * may only further disable the IRQ with a MASK operation because
+ * irq->masked is initially false.
+ */
+ enable_irq(irq->hwirq);
return 0;
}
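/*
 * Editor's sketch (not part of this patch): how IRQF_NO_AUTOEN interacts
 * with the disable-depth counting that the comment above relies on. The
 * numbers refer to the IRQ core's internal disable depth; my_hwirq,
 * my_plat_handler and "my-irq" are illustrative.
 */
static int my_plat_setup(unsigned int my_hwirq, irq_handler_t my_plat_handler,
			 void *dev)
{
	int ret;

	ret = request_irq(my_hwirq, my_plat_handler, IRQF_NO_AUTOEN,
			  "my-irq", dev);
	if (ret)
		return ret;
	/* registered but not firing yet: disable depth is 1 */
	disable_irq(my_hwirq);	/* MASK from user space: depth 2 */
	enable_irq(my_hwirq);	/* UNMASK: depth back to 1 */
	enable_irq(my_hwirq);	/* trigger attached: depth 0, IRQ live */
	return 0;
}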
@@ -228,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
handler = vfio_irq_handler;
if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
- return vfio_set_trigger(vdev, index, -1, handler);
+ return vfio_set_trigger(vdev, index, -1);
if (start != 0 || count != 1)
return -EINVAL;
@@ -236,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
int32_t fd = *(int32_t *)data;
- return vfio_set_trigger(vdev, index, fd, handler);
+ return vfio_set_trigger(vdev, index, fd);
}
if (flags & VFIO_IRQ_SET_DATA_NONE) {
@@ -260,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
unsigned start, unsigned count, uint32_t flags,
void *data) = NULL;
+ /*
+ * For compatibility, errors from request_irq() are local to the
+ * SET_IRQS path and reflected in the name pointer. This allows,
+ * for example, polling mode fallback for an exclusive IRQ failure.
+ */
+ if (IS_ERR(vdev->irqs[index].name))
+ return PTR_ERR(vdev->irqs[index].name);
+
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
case VFIO_IRQ_SET_ACTION_MASK:
func = vfio_platform_set_irq_mask;
@@ -280,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
- int cnt = 0, i;
+ int cnt = 0, i, ret = 0;
while (vdev->get_irq(vdev, cnt) >= 0)
cnt++;
@@ -292,37 +298,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)
for (i = 0; i < cnt; i++) {
int hwirq = vdev->get_irq(vdev, i);
+ irq_handler_t handler = vfio_irq_handler;
- if (hwirq < 0)
+ if (hwirq < 0) {
+ ret = -EINVAL;
goto err;
+ }
spin_lock_init(&vdev->irqs[i].lock);
vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;
- if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
+ if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
| VFIO_IRQ_INFO_AUTOMASKED;
+ handler = vfio_automasked_irq_handler;
+ }
vdev->irqs[i].count = 1;
vdev->irqs[i].hwirq = hwirq;
vdev->irqs[i].masked = false;
+ vdev->irqs[i].name = kasprintf(GFP_KERNEL_ACCOUNT,
+ "vfio-irq[%d](%s)", hwirq,
+ vdev->name);
+ if (!vdev->irqs[i].name) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
+ vdev->irqs[i].name, &vdev->irqs[i]);
+ if (ret) {
+ kfree(vdev->irqs[i].name);
+ vdev->irqs[i].name = ERR_PTR(ret);
+ }
}
vdev->num_irqs = cnt;
return 0;
err:
+ for (--i; i >= 0; i--) {
+ if (!IS_ERR(vdev->irqs[i].name)) {
+ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
+ kfree(vdev->irqs[i].name);
+ }
+ }
kfree(vdev->irqs);
- return -EINVAL;
+ return ret;
}
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
int i;
- for (i = 0; i < vdev->num_irqs; i++)
- vfio_set_trigger(vdev, i, -1, NULL);
+ for (i = 0; i < vdev->num_irqs; i++) {
+ vfio_virqfd_disable(&vdev->irqs[i].mask);
+ vfio_virqfd_disable(&vdev->irqs[i].unmask);
+ if (!IS_ERR(vdev->irqs[i].name)) {
+ free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
+ if (vdev->irqs[i].trigger)
+ eventfd_ctx_put(vdev->irqs[i].trigger);
+ kfree(vdev->irqs[i].name);
+ }
+ }
vdev->num_irqs = 0;
kfree(vdev->irqs);
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 29c564b7a6..5322691338 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work)
virqfd->thread(virqfd->opaque, virqfd->data);
}
+static void virqfd_flush_inject(struct work_struct *work)
+{
+ struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);
+
+ flush_work(&virqfd->inject);
+}
+
int vfio_virqfd_enable(void *opaque,
int (*handler)(void *, void *),
void (*thread)(void *, void *),
@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,
INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
INIT_WORK(&virqfd->inject, virqfd_inject);
+ INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);
irqfd = fdget(fd);
if (!irqfd.file) {
@@ -213,3 +221,16 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);
+
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&virqfd_lock, flags);
+ if (*pvirqfd && (*pvirqfd)->thread)
+ queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
+ spin_unlock_irqrestore(&virqfd_lock, flags);
+
+ flush_workqueue(vfio_irqfd_cleanup_wq);
+}
+EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);
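/*
 * Editor's sketch (not part of this patch): the indirection used by
 * vfio_virqfd_flush_thread() above. A dedicated flush work item calls
 * flush_work() on the inject work, so flushing the cleanup workqueue
 * afterwards guarantees any queued injection has completed. my_virqfd,
 * my_flush_inject and my_wq are illustrative names.
 */
struct my_virqfd {
	struct work_struct inject;
	struct work_struct flush_inject;
};

static void my_flush_inject(struct work_struct *work)
{
	struct my_virqfd *v = container_of(work, struct my_virqfd,
					   flush_inject);

	flush_work(&v->inject);		/* wait for a pending injection */
}

static void my_flush_thread(struct my_virqfd *v, struct workqueue_struct *my_wq)
{
	queue_work(my_wq, &v->flush_inject);
	flush_workqueue(my_wq);		/* the injection has now completed */
}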
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index da7ec77cda..173beda74b 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -178,7 +178,7 @@ static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
if (call_ctx)
- eventfd_signal(call_ctx, 1);
+ eventfd_signal(call_ctx);
return IRQ_HANDLED;
}
@@ -189,7 +189,7 @@ static irqreturn_t vhost_vdpa_config_cb(void *private)
struct eventfd_ctx *config_ctx = v->config_ctx;
if (config_ctx)
- eventfd_signal(config_ctx, 1);
+ eventfd_signal(config_ctx);
return IRQ_HANDLED;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e0c181ad17..045f666b4f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2248,7 +2248,7 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
len -= l;
if (!len) {
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
return 0;
}
}
@@ -2271,7 +2271,7 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
log_used(vq, (used - (void __user *)vq->used),
sizeof vq->used->flags);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
@@ -2289,7 +2289,7 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq)
log_used(vq, (used - (void __user *)vq->used),
sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return 0;
}
@@ -2715,7 +2715,7 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
log_used(vq, offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
+ eventfd_signal(vq->log_ctx);
}
return r;
}
@@ -2763,7 +2763,7 @@ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
/* Signal the Guest tell them we used something up. */
if (vq->call_ctx.ctx && vhost_notify(dev, vq))
- eventfd_signal(vq->call_ctx.ctx, 1);
+ eventfd_signal(vq->call_ctx.ctx);
}
EXPORT_SYMBOL_GPL(vhost_signal);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index f60d5f7bef..9e942fcda5 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -249,7 +249,7 @@ void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
- eventfd_signal((vq)->error_ctx, 1);\
+ eventfd_signal((vq)->error_ctx);\
} while (0)
enum {
diff --git a/drivers/video/backlight/da9052_bl.c b/drivers/video/backlight/da9052_bl.c
index 1cdc854331..b8ff704651 100644
--- a/drivers/video/backlight/da9052_bl.c
+++ b/drivers/video/backlight/da9052_bl.c
@@ -117,6 +117,7 @@ static int da9052_backlight_probe(struct platform_device *pdev)
wleds->led_reg = platform_get_device_id(pdev)->driver_data;
wleds->state = DA9052_WLEDS_OFF;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.max_brightness = DA9052_MAX_BRIGHTNESS;
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
index 9c980f2571..014877b5a9 100644
--- a/drivers/video/backlight/ktz8866.c
+++ b/drivers/video/backlight/ktz8866.c
@@ -97,20 +97,20 @@ static void ktz8866_init(struct ktz8866 *ktz)
{
unsigned int val = 0;
- if (of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
+ if (!of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
ktz8866_write(ktz, BL_EN, BIT(val) - 1);
else
/* Enable all 6 current sinks if the number of current sinks isn't specified. */
ktz8866_write(ktz, BL_EN, BIT(6) - 1);
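/*
 * Editor's sketch (not part of this patch): of_property_read_u32() returns
 * 0 on success, so a "property present" test must be negated, which is what
 * the three fixes in this hunk restore. my_read_prop, "my,prop" and the
 * fallback value are assumptions for illustration only.
 */
static u32 my_read_prop(struct device_node *np)
{
	u32 val;

	if (!of_property_read_u32(np, "my,prop", &val))
		return val;	/* property present, val is valid */

	return 0;		/* property absent, fall back to a default */
}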
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
if (val <= 128)
ktz8866_write(ktz, BL_CFG2, BIT(7) | (ilog2(val) << 3) | PWM_HYST);
else
ktz8866_write(ktz, BL_CFG2, BIT(7) | ((5 + val / 64) << 3) | PWM_HYST);
}
- if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
+ if (!of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
if (val == 0)
ktz8866_write(ktz, BL_DIMMING, 0);
else {
diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
index 8fcb62be59..7115d7bb2a 100644
--- a/drivers/video/backlight/lm3630a_bl.c
+++ b/drivers/video/backlight/lm3630a_bl.c
@@ -233,7 +233,7 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -244,11 +244,8 @@ static int lm3630a_bank_a_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_A);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -310,7 +307,7 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
if (rval < 0)
goto out_i2c_err;
brightness |= rval;
- goto out;
+ return brightness;
}
/* disable sleep */
@@ -321,11 +318,8 @@ static int lm3630a_bank_b_get_brightness(struct backlight_device *bl)
rval = lm3630a_read(pchip, REG_BRT_B);
if (rval < 0)
goto out_i2c_err;
- brightness = rval;
+ return rval;
-out:
- bl->props.brightness = brightness;
- return bl->props.brightness;
out_i2c_err:
dev_err(pchip->dev, "i2c failed to access register\n");
return 0;
@@ -343,6 +337,7 @@ static int lm3630a_backlight_register(struct lm3630a_chip *pchip)
struct backlight_properties props;
const char *label;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
if (pdata->leda_ctrl != LM3630A_LEDA_DISABLE) {
props.brightness = pdata->leda_init_brt;
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index 5246c17149..564f62acd7 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -338,6 +338,7 @@ static int lm3639_probe(struct i2c_client *client)
}
/* backlight */
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
props.brightness = pdata->init_brt_led;
props.max_brightness = pdata->max_brt_led;
diff --git a/drivers/video/backlight/lp8788_bl.c b/drivers/video/backlight/lp8788_bl.c
index d1a14b0db2..31f97230ee 100644
--- a/drivers/video/backlight/lp8788_bl.c
+++ b/drivers/video/backlight/lp8788_bl.c
@@ -191,6 +191,7 @@ static int lp8788_backlight_register(struct lp8788_bl *bl)
int init_brt;
char *name;
+ memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = MAX_BRIGHTNESS;
diff --git a/drivers/virt/acrn/ioeventfd.c b/drivers/virt/acrn/ioeventfd.c
index ac4037e9f9..4e845c6ca0 100644
--- a/drivers/virt/acrn/ioeventfd.c
+++ b/drivers/virt/acrn/ioeventfd.c
@@ -223,7 +223,7 @@ static int acrn_ioeventfd_handler(struct acrn_ioreq_client *client,
mutex_lock(&client->vm->ioeventfds_lock);
p = hsm_ioeventfd_match(client->vm, addr, val, size, req->type);
if (p)
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
mutex_unlock(&client->vm->ioeventfds_lock);
return 0;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 3893dc29eb..71dee622b7 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -489,13 +489,19 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
int virtio_device_freeze(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
virtio_config_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
- if (drv && drv->freeze)
- return drv->freeze(dev);
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_enable(dev);
+ return ret;
+ }
+ }
return 0;
}
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 49299b1f9e..6f7e5010a6 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1340,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->do_unmap) {
+ if (vq->use_dma_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1481,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
@@ -1615,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index 49b38ecc09..e4b344db38 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -494,8 +494,13 @@ static int starfive_wdt_probe(struct platform_device *pdev)
if (ret)
goto err_exit;
- if (!early_enable)
- pm_runtime_put_sync(&pdev->dev);
+ if (!early_enable) {
+ if (pm_runtime_enabled(&pdev->dev)) {
+ ret = pm_runtime_put_sync(&pdev->dev);
+ if (ret)
+ goto err_exit;
+ }
+ }
return 0;
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index d9fd50df98..5404e03876 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -20,6 +20,8 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
+#define DEFAULT_TIMEOUT 10
+
/* IWDG registers */
#define IWDG_KR 0x00 /* Key register */
#define IWDG_PR 0x04 /* Prescaler Register */
@@ -248,6 +250,7 @@ static int stm32_iwdg_probe(struct platform_device *pdev)
wdd->parent = dev;
wdd->info = &stm32_iwdg_info;
wdd->ops = &stm32_iwdg_ops;
+ wdd->timeout = DEFAULT_TIMEOUT;
wdd->min_timeout = DIV_ROUND_UP((RLR_MIN + 1) * PR_MIN, wdt->rate);
wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * wdt->data->max_prescaler *
1000) / wdt->rate;
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 3b9f080109..27553673e4 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1190,7 +1190,7 @@ int xen_pirq_from_irq(unsigned irq)
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
- struct xenbus_device *dev)
+ struct xenbus_device *dev, bool shared)
{
int ret = -ENOMEM;
struct irq_info *info;
@@ -1224,7 +1224,8 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
*/
bind_evtchn_to_cpu(info, 0, false);
} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
- info->refcnt++;
+ if (shared && !WARN_ON(info->refcnt < 0))
+ info->refcnt++;
}
ret = info->irq;
@@ -1237,13 +1238,13 @@ out:
int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
@@ -1295,7 +1296,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
evtchn_port_t remote_port,
- struct irq_chip *chip)
+ struct irq_chip *chip,
+ bool shared)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -1307,14 +1309,14 @@ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
&bind_interdomain);
return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
- chip, dev);
+ chip, dev, shared);
}
int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
evtchn_port_t remote_port)
{
return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
- &xen_lateeoi_chip);
+ &xen_lateeoi_chip, false);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
@@ -1430,7 +1432,8 @@ static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
{
int irq, retval;
- irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1471,7 +1474,8 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
+ irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 59717628ca..f6a2216c2c 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -85,6 +85,7 @@ struct user_evtchn {
struct per_user_data *user;
evtchn_port_t port;
bool enabled;
+ bool unbinding;
};
static void evtchn_free_ring(evtchn_port_t *ring)
@@ -164,6 +165,10 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
struct per_user_data *u = evtchn->user;
unsigned int prod, cons;
+ /* Handler might be called when tearing down the IRQ. */
+ if (evtchn->unbinding)
+ return IRQ_HANDLED;
+
WARN(!evtchn->enabled,
"Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
@@ -421,6 +426,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
BUG_ON(irq < 0);
+ evtchn->unbinding = true;
unbind_from_irqhandler(irq, evtchn);
del_evtchn(u, evtchn);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 0eb337a8ec..35b6e30602 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1147,7 +1147,7 @@ static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
ioreq->size == kioeventfd->addr_len &&
(ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
- eventfd_signal(kioeventfd->eventfd, 1);
+ eventfd_signal(kioeventfd->eventfd);
state = STATE_IORESP_READY;
break;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index c097da6e9c..7761f25a77 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -474,16 +474,6 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
continue;
}
- /* Don't expose silly rename entries to userspace. */
- if (nlen > 6 &&
- dire->u.name[0] == '.' &&
- ctx->actor != afs_lookup_filldir &&
- ctx->actor != afs_lookup_one_filldir &&
- memcmp(dire->u.name, ".__afs", 6) == 0) {
- ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
- continue;
- }
-
/* found the next entry */
if (!dir_emit(ctx, dire->u.name, nlen,
ntohl(dire->u.vnode),
diff --git a/fs/aio.c b/fs/aio.c
index 3235d4e6cc..be12d7c049 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -590,8 +590,8 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
- struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
- struct kioctx *ctx = req->ki_ctx;
+ struct aio_kiocb *req;
+ struct kioctx *ctx;
unsigned long flags;
/*
@@ -601,9 +601,13 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
if (!(iocb->ki_flags & IOCB_AIO_RW))
return;
+ req = container_of(iocb, struct aio_kiocb, rw);
+
if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
return;
+ ctx = req->ki_ctx;
+
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&req->ki_list, &ctx->active_reqs);
req->ki_cancel = cancel;
@@ -1173,7 +1177,7 @@ static void aio_complete(struct aio_kiocb *iocb)
* from IRQ context.
*/
if (iocb->ki_eventfd)
- eventfd_signal(iocb->ki_eventfd, 1);
+ eventfd_signal(iocb->ki_eventfd);
/*
* We have to order our ring_info tail store above and test
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index da594e0067..816ecc3375 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -2094,7 +2094,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* isn't monotonically increasing before FILTER_SNAPSHOTS, and
* that's what we check against in extents mode:
*/
- if (k.k->p.inode > end.inode)
+ if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+ ? bkey_gt(k.k->p, end)
+ : k.k->p.inode > end.inode))
goto end;
if (iter->update_path &&
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 4bb88aefed..64000c8da5 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -392,10 +392,9 @@ static long bch2_ioctl_data(struct bch_fs *c,
goto err;
}
- fd_install(fd, file);
-
get_task_struct(ctx->thread);
wake_up_process(ctx->thread);
+ fd_install(fd, file);
return fd;
err:
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 9ce29681ee..ac90faccdc 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -224,6 +224,7 @@
x(BCH_ERR_invalid, invalid_bkey) \
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
x(EIO, btree_node_read_err) \
+ x(EIO, sb_not_downgraded) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \
diff --git a/fs/bcachefs/mean_and_variance.h b/fs/bcachefs/mean_and_variance.h
index 647505010b..056e797383 100644
--- a/fs/bcachefs/mean_and_variance.h
+++ b/fs/bcachefs/mean_and_variance.h
@@ -14,7 +14,7 @@
* type
*/
-#ifdef __SIZEOF_INT128__
+#if defined(__SIZEOF_INT128__) && defined(__KERNEL__) && !defined(CONFIG_PARISC)
typedef struct {
unsigned __int128 v;
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 4c98d8cc2a..c925dc5742 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -966,6 +966,18 @@ int bch2_write_super(struct bch_fs *c)
if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
goto out;
+ if (le16_to_cpu(c->disk_sb.sb->version) > bcachefs_metadata_version_current) {
+ struct printbuf buf = PRINTBUF;
+ prt_printf(&buf, "attempting to write superblock that wasn't version downgraded (");
+ bch2_version_to_text(&buf, le16_to_cpu(c->disk_sb.sb->version));
+ prt_str(&buf, " > ");
+ bch2_version_to_text(&buf, bcachefs_metadata_version_current);
+ prt_str(&buf, ")");
+ bch2_fs_fatal_error(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return -BCH_ERR_sb_not_downgraded;
+ }
+
for_each_online_member(ca, c, i) {
__set_bit(ca->dev_idx, sb_written.d);
ca->sb_write_error = 0;
@@ -1073,13 +1085,22 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
/*
* Downgrade, if superblock is at a higher version than currently
* supported:
+ *
+ * c->sb will be checked before we write the superblock, so update it as
+ * well:
*/
- if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
+ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
- if (c->sb.version > bcachefs_metadata_version_current)
+ c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
+ }
+ if (c->sb.version > bcachefs_metadata_version_current) {
c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
- if (c->sb.version_min > bcachefs_metadata_version_current)
+ c->sb.version = bcachefs_metadata_version_current;
+ }
+ if (c->sb.version_min > bcachefs_metadata_version_current) {
c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
+ c->sb.version_min = bcachefs_metadata_version_current;
+ }
c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
return ret;
}
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index aca24186d6..52bab8cf78 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1562,7 +1562,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* needing to allocate extents from the block group.
*/
used = btrfs_space_info_used(space_info, true);
- if (space_info->total_bytes - block_group->length < used) {
+ if (space_info->total_bytes - block_group->length < used &&
+ block_group->zone_unusable < block_group->length) {
/*
* Add a reference for the list, compensate for the ref
* drop under the "next" label for the
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index ceb5f586a2..1043a81423 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -494,7 +494,7 @@ struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
block_rsv = get_block_rsv(trans, root);
- if (unlikely(block_rsv->size == 0))
+ if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
goto try_reserve;
again:
ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
diff --git a/fs/btrfs/block-rsv.h b/fs/btrfs/block-rsv.h
index b0bd12b865..43a9a6b5a7 100644
--- a/fs/btrfs/block-rsv.h
+++ b/fs/btrfs/block-rsv.h
@@ -101,4 +101,36 @@ static inline bool btrfs_block_rsv_full(const struct btrfs_block_rsv *rsv)
return data_race(rsv->full);
}
+/*
+ * Get the reserved amount of a block reserve in a context where getting a stale
+ * value is acceptable, instead of accessing it directly and triggering a data
+ * race warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_reserved(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->reserved;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
+/*
+ * Get the size of a block reserve in a context where getting a stale value is
+ * acceptable, instead of accessing it directly and triggering a data race
+ * warning from KCSAN.
+ */
+static inline u64 btrfs_block_rsv_size(struct btrfs_block_rsv *rsv)
+{
+ u64 ret;
+
+ spin_lock(&rsv->lock);
+ ret = rsv->size;
+ spin_unlock(&rsv->lock);
+
+ return ret;
+}
+
#endif /* BTRFS_BLOCK_RSV_H */
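/*
 * Editor's sketch (not part of this patch): the same locked-getter pattern
 * as above, applied to a hypothetical counter protected by a spinlock, so
 * that other contexts read a consistent (possibly stale) value without
 * KCSAN flagging a data race. my_counter and my_counter_bytes are
 * illustrative names.
 */
struct my_counter {
	spinlock_t lock;
	u64 bytes;
};

static inline u64 my_counter_bytes(struct my_counter *c)
{
	u64 ret;

	spin_lock(&c->lock);
	ret = c->bytes;		/* a momentarily stale value is acceptable */
	spin_unlock(&c->lock);

	return ret;
}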
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eade0432bd..87082f9732 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2734,16 +2734,34 @@ static int fiemap_process_hole(struct btrfs_inode *inode,
* it beyond i_size.
*/
while (cur_offset < end && cur_offset < i_size) {
+ struct extent_state *cached_state = NULL;
u64 delalloc_start;
u64 delalloc_end;
u64 prealloc_start;
+ u64 lockstart;
+ u64 lockend;
u64 prealloc_len = 0;
bool delalloc;
+ lockstart = round_down(cur_offset, inode->root->fs_info->sectorsize);
+ lockend = round_up(end, inode->root->fs_info->sectorsize);
+
+ /*
+ * We are only locking for the delalloc range because that's the
+ * only thing that can change here. With fiemap we have a lock
+ * on the inode, so no buffered or direct writes can happen.
+ *
+ * However, mmaps and normal page writeback will cause this to
+ * change arbitrarily. We have to lock the extent lock here to
+ * make sure that nobody messes with the tree while we're doing
+ * btrfs_find_delalloc_in_range.
+ */
+ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
delalloc_cached_state,
&delalloc_start,
&delalloc_end);
+ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
if (!delalloc)
break;
@@ -2911,15 +2929,15 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
const u64 ino = btrfs_ino(inode);
- struct extent_state *cached_state = NULL;
struct extent_state *delalloc_cached_state = NULL;
struct btrfs_path *path;
struct fiemap_cache cache = { 0 };
struct btrfs_backref_share_check_ctx *backref_ctx;
u64 last_extent_end;
u64 prev_extent_end;
- u64 lockstart;
- u64 lockend;
+ u64 range_start;
+ u64 range_end;
+ const u64 sectorsize = inode->root->fs_info->sectorsize;
bool stopped = false;
int ret;
@@ -2930,12 +2948,11 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto out;
}
- lockstart = round_down(start, inode->root->fs_info->sectorsize);
- lockend = round_up(start + len, inode->root->fs_info->sectorsize);
- prev_extent_end = lockstart;
+ range_start = round_down(start, sectorsize);
+ range_end = round_up(start + len, sectorsize);
+ prev_extent_end = range_start;
btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
- lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
if (ret < 0)
@@ -2943,7 +2960,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
btrfs_release_path(path);
path->reada = READA_FORWARD;
- ret = fiemap_search_slot(inode, path, lockstart);
+ ret = fiemap_search_slot(inode, path, range_start);
if (ret < 0) {
goto out_unlock;
} else if (ret > 0) {
@@ -2955,7 +2972,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
goto check_eof_delalloc;
}
- while (prev_extent_end < lockend) {
+ while (prev_extent_end < range_end) {
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_file_extent_item *ei;
struct btrfs_key key;
@@ -2978,19 +2995,19 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
* The first iteration can leave us at an extent item that ends
* before our range's start. Move to the next item.
*/
- if (extent_end <= lockstart)
+ if (extent_end <= range_start)
goto next_item;
backref_ctx->curr_leaf_bytenr = leaf->start;
/* We have an implicit hole (NO_HOLES feature enabled). */
if (prev_extent_end < key.offset) {
- const u64 range_end = min(key.offset, lockend) - 1;
+ const u64 hole_end = min(key.offset, range_end) - 1;
ret = fiemap_process_hole(inode, fieinfo, &cache,
&delalloc_cached_state,
backref_ctx, 0, 0, 0,
- prev_extent_end, range_end);
+ prev_extent_end, hole_end);
if (ret < 0) {
goto out_unlock;
} else if (ret > 0) {
@@ -3000,7 +3017,7 @@ int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
}
/* We've reached the end of the fiemap range, stop. */
- if (key.offset >= lockend) {
+ if (key.offset >= range_end) {
stopped = true;
break;
}
@@ -3094,29 +3111,41 @@ check_eof_delalloc:
btrfs_free_path(path);
path = NULL;
- if (!stopped && prev_extent_end < lockend) {
+ if (!stopped && prev_extent_end < range_end) {
ret = fiemap_process_hole(inode, fieinfo, &cache,
&delalloc_cached_state, backref_ctx,
- 0, 0, 0, prev_extent_end, lockend - 1);
+ 0, 0, 0, prev_extent_end, range_end - 1);
if (ret < 0)
goto out_unlock;
- prev_extent_end = lockend;
+ prev_extent_end = range_end;
}
if (cache.cached && cache.offset + cache.len >= last_extent_end) {
const u64 i_size = i_size_read(&inode->vfs_inode);
if (prev_extent_end < i_size) {
+ struct extent_state *cached_state = NULL;
u64 delalloc_start;
u64 delalloc_end;
+ u64 lockstart;
+ u64 lockend;
bool delalloc;
+ lockstart = round_down(prev_extent_end, sectorsize);
+ lockend = round_up(i_size, sectorsize);
+
+ /*
+ * See the comment in fiemap_process_hole as to why
+ * we're doing the locking here.
+ */
+ lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
delalloc = btrfs_find_delalloc_in_range(inode,
prev_extent_end,
i_size - 1,
&delalloc_cached_state,
&delalloc_start,
&delalloc_end);
+ unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
if (!delalloc)
cache.flags |= FIEMAP_EXTENT_LAST;
} else {
@@ -3127,7 +3156,6 @@ check_eof_delalloc:
ret = emit_last_fiemap_cache(fieinfo, &cache);
out_unlock:
- unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
out:
free_extent_state(delalloc_cached_state);
@@ -4024,6 +4052,19 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
goto done;
+ /*
+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above
+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have
+ * started and finished reading the same eb. In this case, UPTODATE
+ * will now be set, and we shouldn't read it in again.
+ */
+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {
+ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
+ return 0;
+ }
+
clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
eb->read_mirror = 0;
check_buffer_tree_ref(eb);
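/*
 * Editor's sketch (not part of this patch): the recheck-after-claim pattern
 * added above, for a hypothetical cached object. MY_UPTODATE, MY_READING,
 * struct my_obj, my_wait_for_read() and my_do_read() are assumptions for
 * illustration; the bit helpers are the real kernel primitives.
 */
#define MY_UPTODATE	0
#define MY_READING	1

struct my_obj {
	unsigned long flags;
};

static int my_read_object(struct my_obj *obj)
{
	if (test_bit(MY_UPTODATE, &obj->flags))
		return 0;			/* already populated */

	if (test_and_set_bit(MY_READING, &obj->flags))
		return my_wait_for_read(obj);	/* another reader owns it */

	if (test_bit(MY_UPTODATE, &obj->flags)) {
		/* a racing reader finished between the two checks above */
		clear_bit(MY_READING, &obj->flags);
		smp_mb__after_atomic();
		wake_up_bit(&obj->flags, MY_READING);
		return 0;
	}

	return my_do_read(obj);			/* we own the read */
}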
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index bbfa44b89b..1dcf5bb8df 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2959,11 +2959,6 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
ctx.roots = NULL;
}
- /* Free the reserved data space */
- btrfs_qgroup_free_refroot(fs_info,
- record->data_rsv_refroot,
- record->data_rsv,
- BTRFS_QGROUP_RSV_DATA);
/*
* Use BTRFS_SEQ_LAST as time_seq to do special search,
* which doesn't lock tree or delayed_refs and search
@@ -2987,6 +2982,11 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
record->old_roots = NULL;
new_roots = NULL;
}
+ /* Free the reserved data space */
+ btrfs_qgroup_free_refroot(fs_info,
+ record->data_rsv_refroot,
+ record->data_rsv,
+ BTRFS_QGROUP_RSV_DATA);
cleanup:
ulist_free(record->old_roots);
ulist_free(new_roots);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 443d2519f0..258b3e5585 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2809,7 +2809,17 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
gen = btrfs_get_last_trans_committed(fs_info);
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
- bytenr = btrfs_sb_offset(i);
+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);
+ if (ret == -ENOENT)
+ break;
+
+ if (ret) {
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.super_errors++;
+ spin_unlock(&sctx->stat_lock);
+ continue;
+ }
+
if (bytenr + BTRFS_SUPER_INFO_SIZE >
scrub_dev->commit_total_bytes)
break;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 571bb13587..3b54eb5834 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -856,7 +856,7 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
struct btrfs_space_info *space_info)
{
- u64 global_rsv_size = fs_info->global_block_rsv.reserved;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv);
u64 ordered, delalloc;
u64 thresh;
u64 used;
@@ -956,8 +956,8 @@ static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
if (ordered >= delalloc)
- used += fs_info->delayed_refs_rsv.reserved +
- fs_info->delayed_block_rsv.reserved;
+ used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv);
else
used += space_info->bytes_may_use - global_rsv_size;
@@ -1173,7 +1173,7 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
enum btrfs_flush_state flush;
u64 delalloc_size = 0;
u64 to_reclaim, block_rsv_size;
- u64 global_rsv_size = global_rsv->reserved;
+ const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv);
loops++;
@@ -1185,9 +1185,9 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
* assume it's tied up in delalloc reservations.
*/
block_rsv_size = global_rsv_size +
- delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved +
- trans_rsv->reserved;
+ btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv) +
+ btrfs_block_rsv_reserved(trans_rsv);
if (block_rsv_size < space_info->bytes_may_use)
delalloc_size = space_info->bytes_may_use - block_rsv_size;
@@ -1207,16 +1207,16 @@ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
to_reclaim = delalloc_size;
flush = FLUSH_DELALLOC;
} else if (space_info->bytes_pinned >
- (delayed_block_rsv->reserved +
- delayed_refs_rsv->reserved)) {
+ (btrfs_block_rsv_reserved(delayed_block_rsv) +
+ btrfs_block_rsv_reserved(delayed_refs_rsv))) {
to_reclaim = space_info->bytes_pinned;
flush = COMMIT_TRANS;
- } else if (delayed_block_rsv->reserved >
- delayed_refs_rsv->reserved) {
- to_reclaim = delayed_block_rsv->reserved;
+ } else if (btrfs_block_rsv_reserved(delayed_block_rsv) >
+ btrfs_block_rsv_reserved(delayed_refs_rsv)) {
+ to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv);
flush = FLUSH_DELAYED_ITEMS_NR;
} else {
- to_reclaim = delayed_refs_rsv->reserved;
+ to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv);
flush = FLUSH_DELAYED_REFS_NR;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f627674b37..fd30dc3d59 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -683,6 +683,16 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
device->bdev = bdev_handle->bdev;
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
+ if (device->devt != device->bdev->bd_dev) {
+ btrfs_warn(NULL,
+ "device %s maj:min changed from %d:%d to %d:%d",
+ device->name->str, MAJOR(device->devt),
+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),
+ MINOR(device->bdev->bd_dev));
+
+ device->devt = device->bdev->bd_dev;
+ }
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1290,6 +1300,47 @@ int btrfs_forget_devices(dev_t devt)
return ret;
}
+static bool btrfs_skip_registration(struct btrfs_super_block *disk_super,
+ const char *path, dev_t devt,
+ bool mount_arg_dev)
+{
+ struct btrfs_fs_devices *fs_devices;
+
+ /*
+ * Do not skip device registration for mounted devices with matching
+ * maj:min but different paths. Booting without initrd relies on
+ * /dev/root initially, later replaced with the actual root device.
+ * A successful scan ensures grub2-probe selects the correct device.
+ */
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ struct btrfs_device *device;
+
+ mutex_lock(&fs_devices->device_list_mutex);
+
+ if (!fs_devices->opened) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+ continue;
+ }
+
+ list_for_each_entry(device, &fs_devices->devices, dev_list) {
+ if (device->bdev && (device->bdev->bd_dev == devt) &&
+ strcmp(device->name->str, path) != 0) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+
+ /* Do not skip registration. */
+ return false;
+ }
+ }
+ mutex_unlock(&fs_devices->device_list_mutex);
+ }
+
+ if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
+ !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING))
+ return true;
+
+ return false;
+}
+
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
@@ -1346,18 +1397,14 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, blk_mode_t flags,
goto error_bdev_put;
}
- if (!mount_arg_dev && btrfs_super_num_devices(disk_super) == 1 &&
- !(btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING)) {
- dev_t devt;
+ if (btrfs_skip_registration(disk_super, path, bdev_handle->bdev->bd_dev,
+ mount_arg_dev)) {
+ pr_debug("BTRFS: skip registering single non-seed device %s (%d:%d)\n",
+ path, MAJOR(bdev_handle->bdev->bd_dev),
+ MINOR(bdev_handle->bdev->bd_dev));
- ret = lookup_bdev(path, &devt);
- if (ret)
- btrfs_warn(NULL, "lookup bdev failed for path %s: %d",
- path, ret);
- else
- btrfs_free_stale_devices(devt, NULL);
+ btrfs_free_stale_devices(bdev_handle->bdev->bd_dev, NULL);
- pr_debug("BTRFS: skip registering single non-seed device %s\n", path);
device = NULL;
goto free_disk_super;
}
@@ -1392,7 +1439,7 @@ static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
if (in_range(physical_start, *start, len) ||
in_range(*start, physical_start,
- physical_end - physical_start)) {
+ physical_end + 1 - physical_start)) {
*start = physical_end + 1;
return true;
}
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 3779e76a15..524532f992 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1661,6 +1661,15 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
}
out:
+ /* Reject non SINGLE data profiles without RST */
+ if ((map->type & BTRFS_BLOCK_GROUP_DATA) &&
+ (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
+ !fs_info->stripe_root) {
+ btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
+ btrfs_bg_type_to_raid_name(map->type));
+ return -EINVAL;
+ }
+
if (cache->alloc_offset > cache->zone_capacity) {
btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ad1f46c66f..7fb4aae974 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2156,6 +2156,30 @@ retry:
ceph_cap_string(cap->implemented),
ceph_cap_string(revoking));
+ /* completed revocation? going down and there are no caps? */
+ if (revoking) {
+ if ((revoking & cap_used) == 0) {
+ doutc(cl, "completed revocation of %s\n",
+ ceph_cap_string(cap->implemented & ~cap->issued));
+ goto ack;
+ }
+
+ /*
+ * If the "i_wrbuffer_ref" was increased by mmap or generic
+ * cache write just before ceph_check_caps() is called,
+ * the Fb capability revoking will fail this time. Then we
+ * must wait for the BDI's delayed work to flush the dirty
+ * pages and to release the "i_wrbuffer_ref", which will cost
+ * at most 5 seconds. That means the MDS needs to wait at
+ * most 5 seconds to finish the Fb capability's revocation.
+ *
+ * Let's queue a writeback for it.
+ */
+ if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
+ (revoking & CEPH_CAP_FILE_BUFFER))
+ queue_writeback = true;
+ }
+
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
@@ -2183,30 +2207,6 @@ retry:
}
}
- /* completed revocation? going down and there are no caps? */
- if (revoking) {
- if ((revoking & cap_used) == 0) {
- doutc(cl, "completed revocation of %s\n",
- ceph_cap_string(cap->implemented & ~cap->issued));
- goto ack;
- }
-
- /*
- * If the "i_wrbuffer_ref" was increased by mmap or generic
- * cache write just before the ceph_check_caps() is called,
- * the Fb capability revoking will fail this time. Then we
- * must wait for the BDI's delayed work to flush the dirty
- * pages and to release the "i_wrbuffer_ref", which will cost
- * at most 5 seconds. That means the MDS needs to wait at
- * most 5 seconds to finished the Fb capability's revocation.
- *
- * Let's queue a writeback for it.
- */
- if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
- (revoking & CEPH_CAP_FILE_BUFFER))
- queue_writeback = true;
- }
-
/* want more caps from mds? */
if (want & ~cap->mds_wanted) {
if (want & ~(cap->mds_wanted | cap->issued))
@@ -4772,7 +4772,22 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
if (__ceph_caps_dirty(ci)) {
struct ceph_mds_client *mdsc =
ceph_inode_to_fs_client(inode)->mdsc;
- __cap_delay_requeue_front(mdsc, ci);
+
+ doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add_tail(&ci->i_cap_delay_list,
+ &mdsc->cap_unlink_delay_list);
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+
+ /*
+ * Fire the work immediately, because the MDS may be
+ * waiting for caps release.
+ */
+ ceph_queue_cap_unlink_work(mdsc);
}
}
spin_unlock(&ci->i_ceph_lock);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 3b5aae29e9..523debc6f2 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1135,7 +1135,12 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
}
idx = 0;
- left = ret > 0 ? ret : 0;
+ if (ret <= 0)
+ left = 0;
+ else if (off + ret > i_size)
+ left = i_size - off;
+ else
+ left = ret;
while (left > 0) {
size_t plen, copied;
@@ -1164,15 +1169,13 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
}
if (ret > 0) {
- if (off > *ki_pos) {
- if (off >= i_size) {
- *retry_op = CHECK_EOF;
- ret = i_size - *ki_pos;
- *ki_pos = i_size;
- } else {
- ret = off - *ki_pos;
- *ki_pos = off;
- }
+ if (off >= i_size) {
+ *retry_op = CHECK_EOF;
+ ret = i_size - *ki_pos;
+ *ki_pos = i_size;
+ } else {
+ ret = off - *ki_pos;
+ *ki_pos = off;
}
if (last_objver)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 2eb66dd7d0..950360b075 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2470,6 +2470,50 @@ void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
}
}
+void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc)
+{
+ struct ceph_client *cl = mdsc->fsc->client;
+ if (mdsc->stopping)
+ return;
+
+ if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_unlink_work)) {
+ doutc(cl, "caps unlink work queued\n");
+ } else {
+ doutc(cl, "failed to queue caps unlink work\n");
+ }
+}
+
+static void ceph_cap_unlink_work(struct work_struct *work)
+{
+ struct ceph_mds_client *mdsc =
+ container_of(work, struct ceph_mds_client, cap_unlink_work);
+ struct ceph_client *cl = mdsc->fsc->client;
+
+ doutc(cl, "begin\n");
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ while (!list_empty(&mdsc->cap_unlink_delay_list)) {
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+
+ ci = list_first_entry(&mdsc->cap_unlink_delay_list,
+ struct ceph_inode_info,
+ i_cap_delay_list);
+ list_del_init(&ci->i_cap_delay_list);
+
+ inode = igrab(&ci->netfs.inode);
+ if (inode) {
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+ doutc(cl, "on %p %llx.%llx\n", inode,
+ ceph_vinop(inode));
+ ceph_check_caps(ci, CHECK_CAPS_FLUSH);
+ iput(inode);
+ spin_lock(&mdsc->cap_unlink_delay_lock);
+ }
+ }
+ spin_unlock(&mdsc->cap_unlink_delay_lock);
+ doutc(cl, "done\n");
+}
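/*
 * Editor's sketch (not part of this patch): the drain loop used by
 * ceph_cap_unlink_work() above, reduced to the lock/igrab choreography.
 * The spinlock is dropped while an inode is processed so the per-inode
 * work may sleep or take other locks, then re-taken before the list is
 * examined again. struct my_item, my_lock, my_list and my_process() are
 * illustrative; igrab()/iput() are the real inode reference helpers.
 */
struct my_item {
	struct list_head node;
	struct inode *inode;
};

static void my_drain(spinlock_t *my_lock, struct list_head *my_list)
{
	spin_lock(my_lock);
	while (!list_empty(my_list)) {
		struct my_item *it = list_first_entry(my_list,
						      struct my_item, node);
		struct inode *inode;

		list_del_init(&it->node);
		inode = igrab(it->inode);	/* may fail during eviction */
		if (inode) {
			spin_unlock(my_lock);
			my_process(inode);	/* may sleep, take other locks */
			iput(inode);
			spin_lock(my_lock);
		}
	}
	spin_unlock(my_lock);
}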
+
/*
* requests
*/
@@ -5345,6 +5389,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->cap_delay_list);
INIT_LIST_HEAD(&mdsc->cap_wait_list);
spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
+ spin_lock_init(&mdsc->cap_unlink_delay_lock);
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
@@ -5353,6 +5399,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
+ INIT_WORK(&mdsc->cap_unlink_work, ceph_cap_unlink_work);
err = ceph_metric_init(&mdsc->metric);
if (err)
goto err_mdsmap;
@@ -5626,6 +5673,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
ceph_cleanup_global_and_empty_realms(mdsc);
cancel_work_sync(&mdsc->cap_reclaim_work);
+ cancel_work_sync(&mdsc->cap_unlink_work);
cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
doutc(cl, "done\n");
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 40560af388..03f8ff0087 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -462,6 +462,8 @@ struct ceph_mds_client {
unsigned long last_renew_caps; /* last time we renewed our caps */
struct list_head cap_delay_list; /* caps with delayed release */
spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
+ spinlock_t cap_unlink_delay_lock; /* protects cap_unlink_delay_list */
struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
@@ -475,6 +477,8 @@ struct ceph_mds_client {
struct work_struct cap_reclaim_work;
atomic_t cap_reclaim_pending;
+ struct work_struct cap_unlink_work;
+
/*
* Cap reservations
*
@@ -574,6 +578,7 @@ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc);
extern void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr);
+extern void ceph_queue_cap_unlink_work(struct ceph_mds_client *mdsc);
extern int ceph_iterate_session_caps(struct ceph_mds_session *session,
int (*cb)(struct inode *, int mds, void *),
void *arg);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 034a617cb1..a40da00654 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -751,13 +751,28 @@ static void __debugfs_file_removed(struct dentry *dentry)
if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)
return;
- /* if we hit zero, just wait for all to finish */
- if (!refcount_dec_and_test(&fsd->active_users)) {
- wait_for_completion(&fsd->active_users_drained);
+ /* if this was the last reference, we're done */
+ if (refcount_dec_and_test(&fsd->active_users))
return;
- }
- /* if we didn't hit zero, try to cancel any we can */
+ /*
+ * If there's still a reference, the code that obtained it can
+ * be in different states:
+ * - The common case of not using cancellations, or already
+ * after debugfs_leave_cancellation(), where we just need
+ * to wait for debugfs_file_put() which signals the completion;
+ * - inside a cancellation section, i.e. between
+ * debugfs_enter_cancellation() and debugfs_leave_cancellation(),
+ * in which case we need to trigger the ->cancel() function,
+ * and then wait for debugfs_file_put() just like in the
+ * previous case;
+ * - before debugfs_enter_cancellation() (but obviously after
+ * debugfs_file_get()), in which case we may not see the
+ * cancellation in the list on the first round of the loop,
+ * but debugfs_enter_cancellation() signals the completion
+ * after adding it, so this code gets woken up to call the
+ * ->cancel() function.
+ */
while (refcount_read(&fsd->active_users)) {
struct debugfs_cancellation *c;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 695e691b38..9f9b684488 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -806,7 +806,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
struct dlm_callback *cb;
- int rv, copy_lvb = 0;
+ int rv, ret, copy_lvb = 0;
int old_mode, new_mode;
if (count == sizeof(struct dlm_device_version)) {
@@ -906,9 +906,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
}
- rv = copy_result_to_user(lkb->lkb_ua,
- test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- cb->flags, cb->mode, copy_lvb, buf, count);
+ ret = copy_result_to_user(lkb->lkb_ua,
+ test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
+ cb->flags, cb->mode, copy_lvb, buf, count);
kref_put(&cb->ref, dlm_release_callback);
@@ -916,7 +916,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
if (rv == DLM_DEQUEUE_CALLBACK_LAST)
dlm_put_lkb(lkb);
- return rv;
+ return ret;
}
static __poll_t device_poll(struct file *file, poll_table *wait)
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index c98aeda8ab..3d9721b3fa 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -447,5 +447,6 @@ const struct file_operations erofs_file_fops = {
.llseek = generic_file_llseek,
.read_iter = erofs_file_read_iter,
.mmap = erofs_file_mmap,
+ .get_unmapped_area = thp_get_unmapped_area,
.splice_read = filemap_splice_read,
};
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 87ff35bff8..afc37c9029 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -3,6 +3,7 @@
* Copyright (C) 2022, Alibaba Cloud
* Copyright (C) 2022, Bytedance Inc. All rights reserved.
*/
+#include <linux/pseudo_fs.h>
#include <linux/fscache.h>
#include "internal.h"
@@ -12,6 +13,18 @@ static LIST_HEAD(erofs_domain_list);
static LIST_HEAD(erofs_domain_cookies_list);
static struct vfsmount *erofs_pseudo_mnt;
+static int erofs_anon_init_fs_context(struct fs_context *fc)
+{
+ return init_pseudo(fc, EROFS_SUPER_MAGIC) ? 0 : -ENOMEM;
+}
+
+static struct file_system_type erofs_anon_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "pseudo_erofs",
+ .init_fs_context = erofs_anon_init_fs_context,
+ .kill_sb = kill_anon_super,
+};
+
struct erofs_fscache_request {
struct erofs_fscache_request *primary;
struct netfs_cache_resources cache_resources;
@@ -381,11 +394,12 @@ static int erofs_fscache_init_domain(struct super_block *sb)
goto out;
if (!erofs_pseudo_mnt) {
- erofs_pseudo_mnt = kern_mount(&erofs_fs_type);
- if (IS_ERR(erofs_pseudo_mnt)) {
- err = PTR_ERR(erofs_pseudo_mnt);
+ struct vfsmount *mnt = kern_mount(&erofs_anon_fs_type);
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
goto out;
}
+ erofs_pseudo_mnt = mnt;
}
domain->volume = sbi->volume;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index b0409badb0..410f5af623 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -385,7 +385,6 @@ struct erofs_map_dev {
unsigned int m_deviceid;
};
-extern struct file_system_type erofs_fs_type;
extern const struct super_operations erofs_sops;
extern const struct address_space_operations erofs_raw_access_aops;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 3789d62245..8a82d5cec1 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -573,13 +573,6 @@ static const struct export_operations erofs_export_ops = {
.get_parent = erofs_get_parent,
};
-static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
-{
- static const struct tree_descr empty_descr = {""};
-
- return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
-}
-
static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
@@ -706,11 +699,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
return 0;
}
-static int erofs_fc_anon_get_tree(struct fs_context *fc)
-{
- return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
-}
-
static int erofs_fc_get_tree(struct fs_context *fc)
{
struct erofs_fs_context *ctx = fc->fs_private;
@@ -783,20 +771,10 @@ static const struct fs_context_operations erofs_context_ops = {
.free = erofs_fc_free,
};
-static const struct fs_context_operations erofs_anon_context_ops = {
- .get_tree = erofs_fc_anon_get_tree,
-};
-
static int erofs_init_fs_context(struct fs_context *fc)
{
struct erofs_fs_context *ctx;
- /* pseudo mount for anon inodes */
- if (fc->sb_flags & SB_KERNMOUNT) {
- fc->ops = &erofs_anon_context_ops;
- return 0;
- }
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -818,12 +796,6 @@ static void erofs_kill_sb(struct super_block *sb)
{
struct erofs_sb_info *sbi;
- /* pseudo mount for anon inodes */
- if (sb->s_flags & SB_KERNMOUNT) {
- kill_anon_super(sb);
- return;
- }
-
if (erofs_is_fscache_mode(sb))
kill_anon_super(sb);
else
@@ -862,7 +834,7 @@ static void erofs_put_super(struct super_block *sb)
erofs_fscache_unregister_fs(sb);
}
-struct file_system_type erofs_fs_type = {
+static struct file_system_type erofs_fs_type = {
.owner = THIS_MODULE,
.name = "erofs",
.init_fs_context = erofs_init_fs_context,
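The erofs change above follows the stock pseudo-filesystem idiom: rather than special-casing SB_KERNMOUNT inside the real erofs type, a dedicated internal type is mounted once and its superblock backs the anonymous fscache inodes. A minimal sketch of that idiom outside erofs, with placeholder names and magic number:

    #include <linux/fs.h>
    #include <linux/pseudo_fs.h>
    #include <linux/mount.h>
    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/err.h>

    #define MY_ANON_MAGIC 0x4d59414e          /* placeholder magic value */

    static int my_anon_init_fs_context(struct fs_context *fc)
    {
        return init_pseudo(fc, MY_ANON_MAGIC) ? 0 : -ENOMEM;
    }

    static struct file_system_type my_anon_fs_type = {
        .owner           = THIS_MODULE,
        .name            = "my_anon",
        .init_fs_context = my_anon_init_fs_context,
        .kill_sb         = kill_anon_super,
    };

    static struct vfsmount *my_anon_mnt;

    static int __init my_anon_init(void)
    {
        my_anon_mnt = kern_mount(&my_anon_fs_type);
        return PTR_ERR_OR_ZERO(my_anon_mnt);  /* superblock now usable for anon inodes */
    }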
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 33a918f956..d2f7d2d8a3 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -72,22 +72,19 @@ __u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask)
}
/**
- * eventfd_signal - Adds @n to the eventfd counter.
+ * eventfd_signal - Increment the event counter
* @ctx: [in] Pointer to the eventfd context.
- * @n: [in] Value of the counter to be added to the eventfd internal counter.
- * The value cannot be negative.
*
* This function is supposed to be called by the kernel in paths that do not
* allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
* value, and we signal this as overflow condition by returning a EPOLLERR
* to poll(2).
*
- * Returns the amount by which the counter was incremented. This will be less
- * than @n if the counter has overflowed.
+ * Returns the amount by which the counter was incremented.
*/
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+__u64 eventfd_signal(struct eventfd_ctx *ctx)
{
- return eventfd_signal_mask(ctx, n, 0);
+ return eventfd_signal_mask(ctx, 1, 0);
}
EXPORT_SYMBOL_GPL(eventfd_signal);
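The change above is purely a signature change: every in-kernel caller now bumps the counter by exactly one, and callers needing anything else use eventfd_signal_mask() directly. A sketch of the call-site impact (the irq handler and the way the context is obtained are hypothetical):

    #include <linux/eventfd.h>
    #include <linux/interrupt.h>

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
        struct eventfd_ctx *ctx = data;   /* hypothetical: set up at request_irq() time */

        eventfd_signal(ctx);              /* was: eventfd_signal(ctx, 1); */
        return IRQ_HANDLED;
    }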
diff --git a/fs/exec.c b/fs/exec.c
index 6d9ed2d765..7a1861a718 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -894,6 +894,7 @@ int transfer_args_to_stack(struct linux_binprm *bprm,
goto out;
}
+ bprm->exec += *sp_location - MAX_ARG_PAGES * PAGE_SIZE;
*sp_location = sp;
out:
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 677a9ad45d..f38bdd46e4 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -674,7 +674,7 @@ struct ext2_inode_info {
struct inode vfs_inode;
struct list_head i_orphan; /* unlinked but open inodes */
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
};
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 01f9addc8b..6d8587505c 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -320,7 +320,7 @@ static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, siz
static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
static int ext2_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
-static struct dquot **ext2_get_dquots(struct inode *inode)
+static struct dquot __rcu **ext2_get_dquots(struct inode *inode)
{
return EXT2_I(inode)->i_dquot;
}
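The __rcu annotation (repeated below for ext4, f2fs and jfs) is a sparse-level change: the i_dquot[] entries are managed under the quota core's RCU/SRCU scheme, so plain loads now warn and reads are expected to go through an RCU accessor. A sketch of the access pattern the annotation implies, using a hypothetical ext2 helper; the real quota code pairs these loads with its own dquot_srcu machinery, the plain-RCU form here only illustrates the annotation:

    #include <linux/fs.h>
    #include <linux/rcupdate.h>
    #include <linux/quota.h>
    #include "ext2.h"

    /* hypothetical helper, for illustration only */
    static int ext2_count_attached_dquots(struct inode *inode)
    {
        struct dquot __rcu **dquots = EXT2_I(inode)->i_dquot;
        int type, attached = 0;

        rcu_read_lock();
        for (type = 0; type < MAXQUOTAS; type++)
            if (rcu_dereference(dquots[type]))   /* sparse-clean load of an __rcu pointer */
                attached++;
        rcu_read_unlock();

        return attached;
    }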
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a5d7848723..3205d46bc9 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1156,7 +1156,7 @@ struct ext4_inode_info {
tid_t i_datasync_tid;
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 7497a789d0..38ec0fdb33 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -5173,10 +5173,16 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
.fe_len = ac->ac_orig_goal_len,
};
loff_t orig_goal_end = extent_logical_end(sbi, &ex);
+ loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex);
- /* we can't allocate as much as normalizer wants.
- * so, found space must get proper lstart
- * to cover original request */
+ /*
+ * We can't allocate as much as the normalizer wants, so we try
+ * to get a proper lstart to cover the original request, except
+ * when the goal doesn't cover the original request, as below:
+ *
+ * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048
+ * best_ex:0/200(200) -> adjusted: 1848/2048(200)
+ */
BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
@@ -5188,7 +5194,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
* 1. Check if best ex can be kept at end of goal (before
* cr_best_avail trimmed it) and still cover original start
* 2. Else, check if best ex can be kept at start of goal and
- * still cover original start
+ * still cover original end
* 3. Else, keep the best ex at start of original request.
*/
ex.fe_len = ac->ac_b_ex.fe_len;
@@ -5198,7 +5204,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
goto adjust_bex;
ex.fe_logical = ac->ac_g_ex.fe_logical;
- if (ac->ac_o_ex.fe_logical < extent_logical_end(sbi, &ex))
+ if (o_ex_end <= extent_logical_end(sbi, &ex))
goto adjust_bex;
ex.fe_logical = ac->ac_o_ex.fe_logical;
@@ -5206,7 +5212,6 @@ adjust_bex:
ac->ac_b_ex.fe_logical = ex.fe_logical;
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
- BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end);
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e168a9f596..9a39596d2a 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1592,7 +1592,8 @@ exit_journal:
int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
int gdb_num_end = ((group + flex_gd->count - 1) /
EXT4_DESC_PER_BLOCK(sb));
- int meta_bg = ext4_has_feature_meta_bg(sb);
+ int meta_bg = ext4_has_feature_meta_bg(sb) &&
+ gdb_num >= le32_to_cpu(es->s_first_meta_bg);
sector_t padding_blocks = meta_bg ? 0 : sbi->s_sbh->b_blocknr -
ext4_group_first_block_no(sb, 0);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c5fcf377ab..3f1fa5d182 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1600,7 +1600,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
unsigned int flags);
-static struct dquot **ext4_get_dquots(struct inode *inode)
+static struct dquot __rcu **ext4_get_dquots(struct inode *inode)
{
return EXT4_I(inode)->i_dquot;
}
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index b0597a539f..9afc8d24dc 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1587,8 +1587,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
*/
if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
f2fs_sb_has_compression(sbi))
- invalidate_mapping_pages(META_MAPPING(sbi),
- MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
+ f2fs_bug_on(sbi,
+ invalidate_inode_pages2_range(META_MAPPING(sbi),
+ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1));
f2fs_release_ino_entry(sbi, false);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 62119f3f72..52f407b978 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1371,8 +1371,6 @@ unlock_continue:
add_compr_block_stat(inode, cc->valid_nr_cpages);
set_inode_flag(cc->inode, FI_APPEND_WRITE);
- if (cc->cluster_idx == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
if (quota_inode)
@@ -1420,6 +1418,8 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
struct f2fs_sb_info *sbi = bio->bi_private;
struct compress_io_ctx *cic =
(struct compress_io_ctx *)page_private(page);
+ enum count_type type = WB_DATA_TYPE(page,
+ f2fs_is_compressed_page(page));
int i;
if (unlikely(bio->bi_status))
@@ -1427,7 +1427,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
f2fs_compress_free_page(page);
- dec_page_count(sbi, F2FS_WB_DATA);
+ dec_page_count(sbi, type);
if (atomic_dec_return(&cic->pending_pages))
return;
@@ -1443,12 +1443,14 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
}
static int f2fs_write_raw_pages(struct compress_ctx *cc,
- int *submitted,
+ int *submitted_p,
struct writeback_control *wbc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
- int _submitted, compr_blocks, ret, i;
+ struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ int submitted, compr_blocks, i;
+ int ret = 0;
compr_blocks = f2fs_compressed_blocks(cc);
@@ -1463,6 +1465,10 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
if (compr_blocks < 0)
return compr_blocks;
+ /* overwrite compressed cluster w/ normal cluster */
+ if (compr_blocks > 0)
+ f2fs_lock_op(sbi);
+
for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
@@ -1487,7 +1493,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(cc->rpages[i]))
goto continue_unlock;
- ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
+ ret = f2fs_write_single_data_page(cc->rpages[i], &submitted,
NULL, NULL, wbc, io_type,
compr_blocks, false);
if (ret) {
@@ -1495,26 +1501,29 @@ continue_unlock:
unlock_page(cc->rpages[i]);
ret = 0;
} else if (ret == -EAGAIN) {
+ ret = 0;
/*
* for quota file, just redirty left pages to
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
if (IS_NOQUOTA(cc->inode))
- return 0;
- ret = 0;
+ goto out;
f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
goto retry_write;
}
- return ret;
+ goto out;
}
- *submitted += _submitted;
+ *submitted_p += submitted;
}
- f2fs_balance_fs(F2FS_M_SB(mapping), true);
+out:
+ if (compr_blocks > 0)
+ f2fs_unlock_op(sbi);
- return 0;
+ f2fs_balance_fs(sbi, true);
+ return ret;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -1808,16 +1817,18 @@ void f2fs_put_page_dic(struct page *page, bool in_task)
* check whether cluster blocks are contiguous, and add extent cache entry
* only if cluster blocks are logically and physically contiguous.
*/
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn)
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+ unsigned int ofs_in_node)
{
- bool compressed = f2fs_data_blkaddr(dn) == COMPRESS_ADDR;
+ bool compressed = data_blkaddr(dn->inode, dn->node_page,
+ ofs_in_node) == COMPRESS_ADDR;
int i = compressed ? 1 : 0;
block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + i);
+ ofs_in_node + i);
for (i += 1; i < F2FS_I(dn->inode)->i_cluster_size; i++) {
block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + i);
+ ofs_in_node + i);
if (!__is_valid_data_blkaddr(blkaddr))
break;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index bc3f05d43b..c611d064ae 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -48,7 +48,7 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-static bool __is_cp_guaranteed(struct page *page)
+bool f2fs_is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
@@ -65,8 +65,6 @@ static bool __is_cp_guaranteed(struct page *page)
S_ISDIR(inode->i_mode))
return true;
- if (f2fs_is_compressed_page(page))
- return false;
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
page_private_gcing(page))
return true;
@@ -338,7 +336,7 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
- enum count_type type = WB_DATA_TYPE(page);
+ enum count_type type = WB_DATA_TYPE(page, false);
if (page_private_dummy(page)) {
clear_page_private_dummy(page);
@@ -762,7 +760,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(page) : WB_DATA_TYPE(fio->page));
+ __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -973,7 +971,7 @@ alloc_new:
if (fio->io_wbc)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
- inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -1007,11 +1005,12 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
+ enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
f2fs_down_write(&io->io_rwsem);
-
+next:
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
wait_for_completion_io(&io->zone_wait);
@@ -1021,7 +1020,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
}
#endif
-next:
if (fio->in_list) {
spin_lock(&io->io_lock);
if (list_empty(&io->io_list)) {
@@ -1046,7 +1044,8 @@ next:
/* set submitted = true as a return value */
fio->submitted = 1;
- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+ type = WB_DATA_TYPE(bio_page, fio->compressed_page);
+ inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
@@ -1059,7 +1058,8 @@ alloc_new:
if (F2FS_IO_ALIGNED(sbi) &&
(fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+ dec_page_count(sbi, WB_DATA_TYPE(bio_page,
+ fio->compressed_page));
fio->retry = 1;
goto skip;
}
@@ -1080,10 +1080,6 @@ alloc_new:
io->last_block_in_bio = fio->new_blkaddr;
trace_f2fs_submit_page_write(fio->page, fio);
-skip:
- if (fio->in_list)
- goto next;
-out:
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1096,6 +1092,10 @@ out:
__submit_merged_bio(io);
}
#endif
+skip:
+ if (fio->in_list)
+ goto next;
+out:
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
!f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
@@ -1179,18 +1179,12 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
return 0;
}
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- struct f2fs_node *rn = F2FS_NODE(dn->node_page);
- __le32 *addr_array;
- int base = 0;
-
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
- base = get_extra_isize(dn->inode);
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ dn->data_blkaddr = blkaddr;
+ addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
@@ -1199,18 +1193,17 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
* ->node_page
* update block addresses in the node page
*/
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
- __set_data_blkaddr(dn);
+ __set_data_blkaddr(dn, blkaddr);
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- dn->data_blkaddr = blkaddr;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, blkaddr);
f2fs_update_read_extent_cache(dn);
}
@@ -1225,7 +1218,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
+ if (unlikely(err))
return err;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
@@ -1237,8 +1231,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
block_t blkaddr = f2fs_data_blkaddr(dn);
if (blkaddr == NULL_ADDR) {
- dn->data_blkaddr = NEW_ADDR;
- __set_data_blkaddr(dn);
+ __set_data_blkaddr(dn, NEW_ADDR);
count--;
}
}
@@ -1483,7 +1476,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (dn->data_blkaddr == NULL_ADDR) {
- err = inc_valid_block_count(sbi, dn->inode, &count);
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
if (unlikely(err))
return err;
}
@@ -1492,11 +1485,9 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
old_blkaddr = dn->data_blkaddr;
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(sbi),
- old_blkaddr, old_blkaddr);
- f2fs_invalidate_compress_page(sbi, old_blkaddr);
- }
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
}
@@ -2811,8 +2802,6 @@ got_it:
f2fs_outplace_write_data(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2850,7 +2839,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.encrypted_page = NULL,
.submitted = 0,
.compr_blocks = compr_blocks,
- .need_lock = LOCK_RETRY,
+ .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
.post_read = f2fs_post_read_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
@@ -2895,9 +2884,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
zero_user_segment(page, offset, PAGE_SIZE);
write:
- if (f2fs_is_drop_cache(inode))
- goto out;
-
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || quota_inode) {
/*
@@ -2934,6 +2920,7 @@ write:
if (err == -EAGAIN) {
err = f2fs_do_write_data_page(&fio);
if (err == -EAGAIN) {
+ f2fs_bug_on(sbi, compr_blocks);
fio.need_lock = LOCK_REQ;
err = f2fs_do_write_data_page(&fio);
}
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 042593aed1..1b937f7d04 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -830,13 +830,14 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
return err;
}
-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
+ struct f2fs_filename *fname)
{
struct page *page;
int err = 0;
f2fs_down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
+ page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 9043cedfa1..3b6133c865 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -75,6 +75,11 @@ struct f2fs_fault_info {
extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & BIT(type))
+
+/* maximum retry count for injected failure */
+#define DEFAULT_FAILURE_RETRY_COUNT 8
+#else
+#define DEFAULT_FAILURE_RETRY_COUNT 1
#endif
/*
@@ -774,8 +779,6 @@ enum {
FI_UPDATE_WRITE, /* inode has in-place-update data */
FI_NEED_IPU, /* used for ipu per file */
FI_ATOMIC_FILE, /* indicate atomic file */
- FI_FIRST_BLOCK_WRITTEN, /* indicate #0 data block was written */
- FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_SKIP_WRITES, /* should skip data page writeback */
@@ -824,7 +827,7 @@ struct f2fs_inode_info {
spinlock_t i_size_lock; /* protect last_disk_size */
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
/* quota space reservation, managed internally by quota code */
qsize_t i_reserved_quota;
@@ -1075,7 +1078,8 @@ struct f2fs_sm_info {
* f2fs monitors the number of several block types such as on-writeback,
* dirty dentry blocks, dirty node blocks, and dirty meta blocks.
*/
-#define WB_DATA_TYPE(p) (__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
+#define WB_DATA_TYPE(p, f) \
+ (f || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
enum count_type {
F2FS_DIRTY_DENTS,
F2FS_DIRTY_DATA,
@@ -2246,7 +2250,7 @@ static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
- struct inode *inode, blkcnt_t *count)
+ struct inode *inode, blkcnt_t *count, bool partial)
{
blkcnt_t diff = 0, release = 0;
block_t avail_user_block_count;
@@ -2286,6 +2290,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
avail_user_block_count = 0;
}
if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ if (!partial) {
+ spin_unlock(&sbi->stat_lock);
+ goto enospc;
+ }
+
diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
diff = *count;
@@ -3016,6 +3025,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode,
case FI_INLINE_DOTS:
case FI_PIN_FILE:
case FI_COMPRESS_RELEASED:
+ case FI_ATOMIC_COMMITTED:
f2fs_mark_inode_dirty_sync(inode, true);
}
}
@@ -3272,22 +3282,13 @@ static inline bool f2fs_is_cow_file(struct inode *inode)
return is_inode_flag_set(inode, FI_COW_FILE);
}
-static inline bool f2fs_is_first_block_written(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
-}
-
-static inline bool f2fs_is_drop_cache(struct inode *inode)
-{
- return is_inode_flag_set(inode, FI_DROP_CACHE);
-}
-
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct page *node_page);
static inline void *inline_data_addr(struct inode *inode, struct page *page)
{
- struct f2fs_inode *ri = F2FS_INODE(page);
- int extra_size = get_extra_isize(inode);
+ __le32 *addr = get_dnode_addr(inode, page);
- return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
+ return (void *)(addr + DEF_INLINE_RESERVED_SIZE);
}
static inline int f2fs_has_inline_dentry(struct inode *inode)
@@ -3432,6 +3433,17 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
return F2FS_I(inode)->i_inline_xattr_size;
}
+static inline __le32 *get_dnode_addr(struct inode *inode,
+ struct page *node_page)
+{
+ int base = 0;
+
+ if (IS_INODE(node_page) && f2fs_has_extra_attr(inode))
+ base = get_extra_isize(inode);
+
+ return blkaddr_in_node(F2FS_NODE(node_page)) + base;
+}
+
#define f2fs_get_inode_mode(i) \
((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
@@ -3457,11 +3469,9 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
- if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type))
f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
blkaddr, type);
- f2fs_bug_on(sbi, 1);
- }
}
static inline bool __is_valid_data_blkaddr(block_t blkaddr)
@@ -3563,7 +3573,8 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
struct inode *dir, struct inode *inode);
-int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
+int f2fs_do_tmpfile(struct inode *inode, struct inode *dir,
+ struct f2fs_filename *fname);
bool f2fs_empty_dir(struct inode *dir);
static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
@@ -3797,6 +3808,7 @@ void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi);
*/
int __init f2fs_init_bioset(void);
void f2fs_destroy_bioset(void);
+bool f2fs_is_cp_guaranteed(struct page *page);
int f2fs_init_bio_entry_cache(void);
void f2fs_destroy_bio_entry_cache(void);
void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
@@ -3815,7 +3827,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio);
struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
block_t blk_addr, sector_t *sector);
int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
@@ -4280,7 +4292,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed,
bool in_task);
void f2fs_put_page_dic(struct page *page, bool in_task);
-unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn);
+unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn,
+ unsigned int ofs_in_node);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
@@ -4337,7 +4350,8 @@ static inline void f2fs_put_page_dic(struct page *page, bool in_task)
{
WARN_ON_ONCE(1);
}
-static inline unsigned int f2fs_cluster_blocks_are_contiguous(struct dnode_of_data *dn) { return 0; }
+static inline unsigned int f2fs_cluster_blocks_are_contiguous(
+ struct dnode_of_data *dn, unsigned int ofs_in_node) { return 0; }
static inline bool f2fs_sanity_check_cluster(struct dnode_of_data *dn) { return false; }
static inline int f2fs_init_compress_inode(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi) { }
@@ -4394,15 +4408,24 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- if (!f2fs_compressed_file(inode))
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+
+ if (!f2fs_compressed_file(inode)) {
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return true;
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+ }
+ if (f2fs_is_mmap_file(inode) ||
+ (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return false;
+ }
fi->i_flags &= ~F2FS_COMPR_FL;
stat_dec_compr_inode(inode);
clear_inode_flag(inode, FI_COMPRESSED_FILE);
f2fs_mark_inode_dirty_sync(inode, true);
+
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return true;
}
@@ -4606,6 +4629,39 @@ static inline bool f2fs_is_readonly(struct f2fs_sb_info *sbi)
return f2fs_sb_has_readonly(sbi) || f2fs_readonly(sbi->sb);
}
+static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi,
+ block_t blkaddr, unsigned int cnt)
+{
+ bool need_submit = false;
+ int i = 0;
+
+ do {
+ struct page *page;
+
+ page = find_get_page(META_MAPPING(sbi), blkaddr + i);
+ if (page) {
+ if (PageWriteback(page))
+ need_submit = true;
+ f2fs_put_page(page, 0);
+ }
+ } while (++i < cnt && !need_submit);
+
+ if (need_submit)
+ f2fs_submit_merged_write_cond(sbi, sbi->meta_inode,
+ NULL, 0, DATA);
+
+ truncate_inode_pages_range(META_MAPPING(sbi),
+ F2FS_BLK_TO_BYTES((loff_t)blkaddr),
+ F2FS_BLK_END_BYTES((loff_t)(blkaddr + cnt - 1)));
+}
+
+static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1);
+ f2fs_invalidate_compress_page(sbi, blkaddr);
+}
+
#define EFSBADCRC EBADMSG /* Bad CRC detected */
#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index a05781e708..caeae900f7 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -557,20 +557,14 @@ static int f2fs_file_open(struct inode *inode, struct file *filp)
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- struct f2fs_node *raw_node;
int nr_free = 0, ofs = dn->ofs_in_node, len = count;
__le32 *addr;
- int base = 0;
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
- base = get_extra_isize(dn->inode);
-
- raw_node = F2FS_NODE(dn->node_page);
- addr = blkaddr_in_node(raw_node) + base + ofs;
+ addr = get_dnode_addr(dn->inode, dn->node_page) + ofs;
/* Assumption: truncation starts with cluster */
for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
@@ -588,8 +582,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
if (blkaddr == NULL_ADDR)
continue;
- dn->data_blkaddr = NULL_ADDR;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
if (__is_valid_data_blkaddr(blkaddr)) {
if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
@@ -599,9 +592,6 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
valid_blocks++;
}
- if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
-
f2fs_invalidate_blocks(sbi, blkaddr);
if (!released || blkaddr != COMPRESS_ADDR)
@@ -1488,8 +1478,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
}
f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
}
f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
@@ -3469,8 +3458,7 @@ static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
if (blkaddr != NEW_ADDR)
continue;
- dn->data_blkaddr = NULL_ADDR;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, NULL_ADDR);
}
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
@@ -3595,10 +3583,10 @@ out:
return ret;
}
-static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
+ unsigned int *reserved_blocks)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
- unsigned int reserved_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
block_t blkaddr;
int i;
@@ -3621,41 +3609,53 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
blkcnt_t reserved;
int ret;
- for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
- blkaddr = f2fs_data_blkaddr(dn);
+ for (i = 0; i < cluster_size; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
if (i == 0) {
- if (blkaddr == COMPRESS_ADDR)
- continue;
- dn->ofs_in_node += cluster_size;
- goto next;
+ if (blkaddr != COMPRESS_ADDR) {
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+ continue;
}
- if (__is_valid_data_blkaddr(blkaddr)) {
+ /*
+ * the compressed cluster was not released because it failed
+ * in release_compress_blocks(), so NEW_ADDR is a possible
+ * case.
+ */
+ if (blkaddr == NEW_ADDR ||
+ __is_valid_data_blkaddr(blkaddr)) {
compr_blocks++;
continue;
}
-
- dn->data_blkaddr = NEW_ADDR;
- f2fs_set_data_blkaddr(dn);
}
reserved = cluster_size - compr_blocks;
- ret = inc_valid_block_count(sbi, dn->inode, &reserved);
- if (ret)
+
+ /* for the case where all blocks in the cluster were reserved */
+ if (reserved == 1)
+ goto next;
+
+ ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
+ if (unlikely(ret))
return ret;
- if (reserved != cluster_size - compr_blocks)
- return -ENOSPC;
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ if (f2fs_data_blkaddr(dn) == NULL_ADDR)
+ f2fs_set_data_blkaddr(dn, NEW_ADDR);
+ }
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
- reserved_blocks += reserved;
+ *reserved_blocks += reserved;
next:
count -= cluster_size;
}
- return reserved_blocks;
+ return 0;
}
static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
@@ -3679,9 +3679,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (ret)
return ret;
- if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
- goto out;
-
f2fs_balance_fs(sbi, true);
inode_lock(inode);
@@ -3691,6 +3688,9 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
goto unlock_inode;
}
+ if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
+ goto unlock_inode;
+
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
filemap_invalidate_lock(inode->i_mapping);
@@ -3716,7 +3716,7 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
count = round_up(count, F2FS_I(inode)->i_cluster_size);
- ret = reserve_compress_blocks(&dn, count);
+ ret = reserve_compress_blocks(&dn, count, &reserved_blocks);
f2fs_put_dnode(&dn);
@@ -3724,23 +3724,21 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
break;
page_idx += count;
- reserved_blocks += ret;
}
filemap_invalidate_unlock(inode->i_mapping);
f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- if (ret >= 0) {
+ if (!ret) {
clear_inode_flag(inode, FI_COMPRESS_RELEASED);
inode_set_ctime_current(inode);
f2fs_mark_inode_dirty_sync(inode, true);
}
unlock_inode:
inode_unlock(inode);
-out:
mnt_drop_write_file(filp);
- if (ret >= 0) {
+ if (!ret) {
ret = put_user(reserved_blocks, (u64 __user *)arg);
} else if (reserved_blocks &&
atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
@@ -3989,16 +3987,20 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
sizeof(option)))
return -EFAULT;
- if (!f2fs_compressed_file(inode) ||
- option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
- option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
- option.algorithm >= COMPRESS_MAX)
+ if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
+ option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
+ option.algorithm >= COMPRESS_MAX)
return -EINVAL;
file_start_write(filp);
inode_lock(inode);
f2fs_down_write(&F2FS_I(inode)->i_sem);
+ if (!f2fs_compressed_file(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
ret = -EBUSY;
goto out;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index f550cdeaa6..405a6077bd 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1380,9 +1380,8 @@ static int move_data_block(struct inode *inode, block_t bidx,
memcpy(page_address(fio.encrypted_page),
page_address(mpage), PAGE_SIZE);
f2fs_put_page(mpage, 1);
- invalidate_mapping_pages(META_MAPPING(fio.sbi),
- fio.old_blkaddr, fio.old_blkaddr);
- f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);
+
+ f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -1405,8 +1404,6 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_update_data_blkaddr(&dn, newaddr);
set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
recover_block:
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 560bfcad1a..b31410c4af 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -61,49 +61,31 @@ void f2fs_set_inode_flags(struct inode *inode)
S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}
-static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+static void __get_inode_rdev(struct inode *inode, struct page *node_page)
{
- int extra_size = get_extra_isize(inode);
+ __le32 *addr = get_dnode_addr(inode, node_page);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
- if (ri->i_addr[extra_size])
- inode->i_rdev = old_decode_dev(
- le32_to_cpu(ri->i_addr[extra_size]));
+ if (addr[0])
+ inode->i_rdev = old_decode_dev(le32_to_cpu(addr[0]));
else
- inode->i_rdev = new_decode_dev(
- le32_to_cpu(ri->i_addr[extra_size + 1]));
+ inode->i_rdev = new_decode_dev(le32_to_cpu(addr[1]));
}
}
-static int __written_first_block(struct f2fs_sb_info *sbi,
- struct f2fs_inode *ri)
+static void __set_inode_rdev(struct inode *inode, struct page *node_page)
{
- block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
-
- if (!__is_valid_data_blkaddr(addr))
- return 1;
- if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) {
- f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
- return -EFSCORRUPTED;
- }
- return 0;
-}
-
-static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
-{
- int extra_size = get_extra_isize(inode);
+ __le32 *addr = get_dnode_addr(inode, node_page);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
if (old_valid_dev(inode->i_rdev)) {
- ri->i_addr[extra_size] =
- cpu_to_le32(old_encode_dev(inode->i_rdev));
- ri->i_addr[extra_size + 1] = 0;
+ addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+ addr[1] = 0;
} else {
- ri->i_addr[extra_size] = 0;
- ri->i_addr[extra_size + 1] =
- cpu_to_le32(new_encode_dev(inode->i_rdev));
- ri->i_addr[extra_size + 2] = 0;
+ addr[0] = 0;
+ addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+ addr[2] = 0;
}
}
}
@@ -398,7 +380,6 @@ static int do_read_inode(struct inode *inode)
struct page *node_page;
struct f2fs_inode *ri;
projid_t i_projid;
- int err;
/* Check if ino is within scope */
if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -478,17 +459,7 @@ static int do_read_inode(struct inode *inode)
}
/* get rdev by using inline_info */
- __get_inode_rdev(inode, ri);
-
- if (S_ISREG(inode->i_mode)) {
- err = __written_first_block(sbi, ri);
- if (err < 0) {
- f2fs_put_page(node_page, 1);
- return err;
- }
- if (!err)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
- }
+ __get_inode_rdev(inode, node_page);
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
@@ -761,7 +732,7 @@ void f2fs_update_inode(struct inode *inode, struct page *node_page)
}
}
- __set_inode_rdev(inode, ri);
+ __set_inode_rdev(inode, node_page);
/* deleted inode */
if (inode->i_nlink == 0)
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7f71bae2c8..9cdf3f36d1 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -853,7 +853,7 @@ out:
static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
struct file *file, umode_t mode, bool is_whiteout,
- struct inode **new_inode)
+ struct inode **new_inode, struct f2fs_filename *fname)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct inode *inode;
@@ -881,7 +881,7 @@ static int __f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (err)
goto out;
- err = f2fs_do_tmpfile(inode, dir);
+ err = f2fs_do_tmpfile(inode, dir, fname);
if (err)
goto release_out;
@@ -932,22 +932,24 @@ static int f2fs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL);
+ err = __f2fs_tmpfile(idmap, dir, file, mode, false, NULL, NULL);
return finish_open_simple(file, err);
}
static int f2fs_create_whiteout(struct mnt_idmap *idmap,
- struct inode *dir, struct inode **whiteout)
+ struct inode *dir, struct inode **whiteout,
+ struct f2fs_filename *fname)
{
- return __f2fs_tmpfile(idmap, dir, NULL,
- S_IFCHR | WHITEOUT_MODE, true, whiteout);
+ return __f2fs_tmpfile(idmap, dir, NULL, S_IFCHR | WHITEOUT_MODE,
+ true, whiteout, fname);
}
int f2fs_get_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
struct inode **new_inode)
{
- return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG, false, new_inode);
+ return __f2fs_tmpfile(idmap, dir, NULL, S_IFREG,
+ false, new_inode, NULL);
}
static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
@@ -990,7 +992,14 @@ static int f2fs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
}
if (flags & RENAME_WHITEOUT) {
- err = f2fs_create_whiteout(idmap, old_dir, &whiteout);
+ struct f2fs_filename fname;
+
+ err = f2fs_setup_filename(old_dir, &old_dentry->d_name,
+ 0, &fname);
+ if (err)
+ return err;
+
+ err = f2fs_create_whiteout(idmap, old_dir, &whiteout, &fname);
if (err)
return err;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9b546fd210..2ea9c99e7d 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -852,21 +852,29 @@ int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
f2fs_sb_has_readonly(sbi)) {
- unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
+ unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ unsigned int ofs_in_node = dn->ofs_in_node;
+ pgoff_t fofs = index;
+ unsigned int c_len;
block_t blkaddr;
+ /* should align fofs and ofs_in_node to cluster_size */
+ if (fofs % cluster_size) {
+ fofs = round_down(fofs, cluster_size);
+ ofs_in_node = round_down(ofs_in_node, cluster_size);
+ }
+
+ c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
if (!c_len)
goto out;
- blkaddr = f2fs_data_blkaddr(dn);
+ blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
if (blkaddr == COMPRESS_ADDR)
blkaddr = data_blkaddr(dn->inode, dn->node_page,
- dn->ofs_in_node + 1);
+ ofs_in_node + 1);
f2fs_update_read_extent_tree_range_compressed(dn->inode,
- index, blkaddr,
- F2FS_I(dn->inode)->i_cluster_size,
- c_len);
+ fofs, blkaddr, cluster_size, c_len);
}
out:
return 0;
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index d0f24ccbd1..aad1d1a9b3 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -611,6 +611,19 @@ truncate_out:
return 0;
}
+static int f2fs_reserve_new_block_retry(struct dnode_of_data *dn)
+{
+ int i, err = 0;
+
+ for (i = DEFAULT_FAILURE_RETRY_COUNT; i > 0; i--) {
+ err = f2fs_reserve_new_block(dn);
+ if (!err)
+ break;
+ }
+
+ return err;
+}
+
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page)
{
@@ -712,14 +725,8 @@ retry_dn:
*/
if (dest == NEW_ADDR) {
f2fs_truncate_data_blocks_range(&dn, 1);
- do {
- err = f2fs_reserve_new_block(&dn);
- if (err == -ENOSPC) {
- f2fs_bug_on(sbi, 1);
- break;
- }
- } while (err &&
- IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+
+ err = f2fs_reserve_new_block_retry(&dn);
if (err)
goto err;
continue;
@@ -727,16 +734,8 @@ retry_dn:
/* dest is valid block, try to recover from src to dest */
if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
-
if (src == NULL_ADDR) {
- do {
- err = f2fs_reserve_new_block(&dn);
- if (err == -ENOSPC) {
- f2fs_bug_on(sbi, 1);
- break;
- }
- } while (err &&
- IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION));
+ err = f2fs_reserve_new_block_retry(&dn);
if (err)
goto err;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 727d016318..7a0143825e 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -192,6 +192,9 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
if (!f2fs_is_atomic_file(inode))
return;
+ if (clean)
+ truncate_inode_pages_final(inode->i_mapping);
+
release_atomic_write_cnt(inode);
clear_inode_flag(inode, FI_ATOMIC_COMMITTED);
clear_inode_flag(inode, FI_ATOMIC_REPLACE);
@@ -201,7 +204,6 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
F2FS_I(inode)->atomic_write_task = NULL;
if (clean) {
- truncate_inode_pages_final(inode->i_mapping);
f2fs_i_size_write(inode, fi->original_i_size);
fi->original_i_size = 0;
}
@@ -248,7 +250,7 @@ retry:
} else {
blkcnt_t count = 1;
- err = inc_valid_block_count(sbi, inode, &count);
+ err = inc_valid_block_count(sbi, inode, &count, true);
if (err) {
f2fs_put_dnode(&dn);
return err;
@@ -2495,8 +2497,7 @@ void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
return;
- invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
- f2fs_invalidate_compress_page(sbi, addr);
+ f2fs_invalidate_internal_cache(sbi, addr);
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
@@ -3490,12 +3491,12 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
- if (IS_DATASEG(type))
+ if (IS_DATASEG(curseg->seg_type))
atomic64_inc(&sbi->allocated_data_blocks);
up_write(&sit_i->sentry_lock);
- if (page && IS_NODESEG(type)) {
+ if (page && IS_NODESEG(curseg->seg_type)) {
fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
f2fs_inode_chksum_set(sbi, page);
@@ -3557,11 +3558,8 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio);
- if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(fio->sbi),
- fio->old_blkaddr, fio->old_blkaddr);
- f2fs_invalidate_compress_page(fio->sbi, fio->old_blkaddr);
- }
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -3655,8 +3653,7 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
}
if (fio->post_read)
- invalidate_mapping_pages(META_MAPPING(sbi),
- fio->new_blkaddr, fio->new_blkaddr);
+ f2fs_truncate_meta_inode_pages(sbi, fio->new_blkaddr, 1);
stat_inc_inplace_blocks(fio->sbi);
@@ -3757,9 +3754,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
update_sit_entry(sbi, new_blkaddr, 1);
}
if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(sbi),
- old_blkaddr, old_blkaddr);
- f2fs_invalidate_compress_page(sbi, old_blkaddr);
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr);
if (!from_gc)
update_segment_mtime(sbi, old_blkaddr, 0);
update_sit_entry(sbi, old_blkaddr, -1);
@@ -3848,7 +3843,7 @@ void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
for (i = 0; i < len; i++)
f2fs_wait_on_block_writeback(inode, blkaddr + i);
- invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
+ f2fs_truncate_meta_inode_pages(sbi, blkaddr, len);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 8129be788b..c77a562831 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -573,23 +573,22 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
unsigned int node_blocks, unsigned int dent_blocks)
{
- unsigned int segno, left_blocks;
+ unsigned segno, left_blocks;
int i;
- /* check current node segment */
+ /* check current node sections in the worst case. */
for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
segno = CURSEG_I(sbi, i)->segno;
- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
-
+ left_blocks = CAP_BLKS_PER_SEC(sbi) -
+ get_ckpt_valid_blocks(sbi, segno, true);
if (node_blocks > left_blocks)
return false;
}
- /* check current data segment */
+ /* check current data section for dentry blocks. */
segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
- left_blocks = f2fs_usable_blks_in_seg(sbi, segno) -
- get_seg_entry(sbi, segno)->ckpt_valid_blocks;
+ left_blocks = CAP_BLKS_PER_SEC(sbi) -
+ get_ckpt_valid_blocks(sbi, segno, true);
if (dent_blocks > left_blocks)
return false;
return true;
@@ -638,7 +637,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
if (free_secs > upper_secs)
return false;
- else if (free_secs <= lower_secs)
+ if (free_secs <= lower_secs)
return true;
return !curseg_space;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 5dfbc6b4c0..4151293fc4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -663,7 +663,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
- unsigned int level;
+ int level;
int len = 4;
if (strlen(str) == len) {
@@ -677,9 +677,15 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
return -EINVAL;
}
- if (kstrtouint(str + 1, 10, &level))
+ if (kstrtoint(str + 1, 10, &level))
return -EINVAL;
+ /* f2fs does not support negative compress levels for now */
+ if (level < 0) {
+ f2fs_info(sbi, "do not support negative compress level: %d", level);
+ return -ERANGE;
+ }
+
if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
f2fs_info(sbi, "invalid zstd compress level: %d", level);
return -EINVAL;
@@ -2776,7 +2782,7 @@ int f2fs_dquot_initialize(struct inode *inode)
return dquot_initialize(inode);
}
-static struct dquot **f2fs_get_dquots(struct inode *inode)
+static struct dquot __rcu **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
}
@@ -3938,11 +3944,6 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
return 0;
zone_sectors = bdev_zone_sectors(bdev);
- if (!is_power_of_2(zone_sectors)) {
- f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n");
- return -EINVAL;
- }
-
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
SECTOR_TO_BLOCK(zone_sectors))
return -EINVAL;
diff --git a/fs/fat/nfs.c b/fs/fat/nfs.c
index c52e63e10d..509eea96a4 100644
--- a/fs/fat/nfs.c
+++ b/fs/fat/nfs.c
@@ -130,6 +130,12 @@ fat_encode_fh_nostale(struct inode *inode, __u32 *fh, int *lenp,
fid->parent_i_gen = parent->i_generation;
type = FILEID_FAT_WITH_PARENT;
*lenp = FAT_FID_SIZE_WITH_PARENT;
+ } else {
+ /*
+ * We need to initialize this field because the fh is actually
+ * 12 bytes long
+ */
+ fid->parent_i_pos_hi = 0;
}
return type;
diff --git a/fs/fcntl.c b/fs/fcntl.c
index c80a6acad7..3ff707bf27 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -268,7 +268,7 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
}
#endif
-static bool rw_hint_valid(enum rw_hint hint)
+static bool rw_hint_valid(u64 hint)
{
switch (hint) {
case RWH_WRITE_LIFE_NOT_SET:
@@ -288,19 +288,17 @@ static long fcntl_rw_hint(struct file *file, unsigned int cmd,
{
struct inode *inode = file_inode(file);
u64 __user *argp = (u64 __user *)arg;
- enum rw_hint hint;
- u64 h;
+ u64 hint;
switch (cmd) {
case F_GET_RW_HINT:
- h = inode->i_write_hint;
- if (copy_to_user(argp, &h, sizeof(*argp)))
+ hint = inode->i_write_hint;
+ if (copy_to_user(argp, &hint, sizeof(*argp)))
return -EFAULT;
return 0;
case F_SET_RW_HINT:
- if (copy_from_user(&h, argp, sizeof(h)))
+ if (copy_from_user(&hint, argp, sizeof(hint)))
return -EFAULT;
- hint = (enum rw_hint) h;
if (!rw_hint_valid(hint))
return -EINVAL;
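From userspace nothing changes here: the hint is still exchanged through a pointer to a 64-bit value, and out-of-range values are rejected with EINVAL by rw_hint_valid(). A small hypothetical test program, with the uapi constants repeated as fallback defines in case the libc headers predate them:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef F_GET_RW_HINT                    /* values mirror include/uapi/linux/fcntl.h */
    #define F_GET_RW_HINT   (1024 + 11)
    #define F_SET_RW_HINT   (1024 + 12)
    #endif
    #ifndef RWH_WRITE_LIFE_SHORT
    #define RWH_WRITE_LIFE_SHORT    2
    #endif

    int main(void)
    {
        uint64_t hint = RWH_WRITE_LIFE_SHORT;
        int fd = open("testfile", O_CREAT | O_RDWR, 0644);

        if (fd < 0)
            return 1;
        if (fcntl(fd, F_SET_RW_HINT, &hint))  /* kernel copies a full u64 */
            perror("F_SET_RW_HINT");
        hint = ~0ULL;                         /* out of range: rejected with EINVAL */
        if (fcntl(fd, F_SET_RW_HINT, &hint))
            perror("F_SET_RW_HINT (invalid)");
        if (!fcntl(fd, F_GET_RW_HINT, &hint))
            printf("write hint: %llu\n", (unsigned long long)hint);
        return 0;
    }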
diff --git a/fs/fhandle.c b/fs/fhandle.c
index 18b3ba8dc8..57a12614ad 100644
--- a/fs/fhandle.c
+++ b/fs/fhandle.c
@@ -36,7 +36,7 @@ static long do_sys_name_to_handle(const struct path *path,
if (f_handle.handle_bytes > MAX_HANDLE_SZ)
return -EINVAL;
- handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
+ handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes,
GFP_KERNEL);
if (!handle)
return -ENOMEM;
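The switch to kzalloc() matters because the buffer is copied back to userspace for as many bytes as the filesystem claims to have encoded, which can exceed what it actually wrote (the fs/fat/nfs.c hunk above fixes one such case by initializing parent_i_pos_hi). The userspace side of the interface, as a small hypothetical test assuming the glibc wrappers and MAX_HANDLE_SZ from <fcntl.h>:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        struct file_handle *fh;
        int mount_id;

        if (argc < 2)
            return 1;

        fh = calloc(1, sizeof(*fh) + MAX_HANDLE_SZ);
        if (!fh)
            return 1;
        fh->handle_bytes = MAX_HANDLE_SZ;     /* kernel rewrites this to the encoded size */

        if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) == 0)
            printf("type %d, %u handle bytes\n",
                   fh->handle_type, fh->handle_bytes);
        free(fh);
        return 0;
    }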
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index d19cbf34c6..9307bb4393 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -391,6 +391,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
err = -EIO;
if (fuse_invalid_attr(&outarg->attr))
goto out_put_forget;
+ if (outarg->nodeid == FUSE_ROOT_ID && outarg->generation != 0) {
+ pr_warn_once("root generation should be zero\n");
+ outarg->generation = 0;
+ }
*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
&outarg->attr, ATTR_TIMEOUT(outarg),
@@ -1210,7 +1214,7 @@ static int fuse_do_statx(struct inode *inode, struct file *file,
if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
inode_wrong_type(inode, sx->mode)))) {
- make_bad_inode(inode);
+ fuse_make_bad(inode);
return -EIO;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index a660f1f215..cc9651a013 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2467,7 +2467,8 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
return fuse_dax_mmap(file, vma);
if (ff->open_flags & FOPEN_DIRECT_IO) {
- /* Can't provide the coherency needed for MAP_SHARED
+ /*
+ * Can't provide the coherency needed for MAP_SHARED
* if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
*/
if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
@@ -2475,7 +2476,10 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
invalidate_inode_pages2(file->f_mapping);
- return generic_file_mmap(file, vma);
+ if (!(vma->vm_flags & VM_MAYSHARE)) {
+ /* MAP_PRIVATE */
+ return generic_file_mmap(file, vma);
+ }
}
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 1df83eebda..b5c241e169 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -939,7 +939,6 @@ static inline bool fuse_stale_inode(const struct inode *inode, int generation,
static inline void fuse_make_bad(struct inode *inode)
{
- remove_inode_hash(inode);
set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state);
}
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 2a6d44f917..b676c72c62 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -469,8 +469,11 @@ retry:
} else if (fuse_stale_inode(inode, generation, attr)) {
/* nodeid was reused, any I/O on the old inode should fail */
fuse_make_bad(inode);
- iput(inode);
- goto retry;
+ if (inode != d_inode(sb->s_root)) {
+ remove_inode_hash(inode);
+ iput(inode);
+ goto retry;
+ }
}
fi = get_fuse_inode(inode);
spin_lock(&fi->lock);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index f72df2babe..fc5c647123 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1843,16 +1843,10 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
if (unlikely(error)) {
/*
* Let the filesystem know what portion of the current page
- * failed to map. If the page hasn't been added to ioend, it
- * won't be affected by I/O completion and we must unlock it
- * now.
+ * failed to map.
*/
if (wpc->ops->discard_folio)
wpc->ops->discard_folio(folio, pos);
- if (!count) {
- folio_unlock(folio);
- goto done;
- }
}
/*
@@ -1861,6 +1855,16 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* all the dirty bits in the folio here.
*/
iomap_clear_range_dirty(folio, 0, folio_size(folio));
+
+ /*
+ * If the page hasn't been added to the ioend, it won't be affected by
+ * I/O completion and we must unlock it now.
+ */
+ if (error && !count) {
+ folio_unlock(folio);
+ goto done;
+ }
+
folio_start_writeback(folio);
folio_unlock(folio);
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h
index dd4264aa9b..10934f9a11 100644
--- a/fs/jfs/jfs_incore.h
+++ b/fs/jfs/jfs_incore.h
@@ -92,7 +92,7 @@ struct jfs_inode_info {
} link;
} u;
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
u32 dev; /* will die when we get wide dev_t */
struct inode vfs_inode;
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 8d8e556bd6..ff135a43b5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -824,7 +824,7 @@ out:
return len - towrite;
}
-static struct dquot **jfs_get_dquots(struct inode *inode)
+static struct dquot __rcu **jfs_get_dquots(struct inode *inode)
{
return JFS_IP(inode)->i_dquot;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 5918c67dae..b6f801e73b 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -668,10 +668,17 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
LIST_HEAD(mds_list);
nfs_init_cinfo_from_dreq(&cinfo, dreq);
+ nfs_commit_begin(cinfo.mds);
nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
- if (res < 0) /* res == -ENOMEM */
- nfs_direct_write_reschedule(dreq);
+ if (res < 0) { /* res == -ENOMEM */
+ spin_lock(&dreq->lock);
+ if (dreq->flags == 0)
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ spin_unlock(&dreq->lock);
+ }
+ if (nfs_commit_end(cinfo.mds))
+ nfs_direct_write_complete(dreq);
}
static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
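Note: the nfs_direct_commit_schedule() hunk brackets the commit scan with nfs_commit_begin()/nfs_commit_end() (the former is made non-static by the fs/nfs/write.c hunk further down), so the request cannot complete while the commit list is still being built; an -ENOMEM result now only flags the request for rescheduling instead of rescheduling it inline. Consolidated from the diff:

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_commit_begin(cinfo.mds);		/* hold rpcs_out across the scan */
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) {				/* res == -ENOMEM */
		spin_lock(&dreq->lock);
		if (dreq->flags == 0)
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		spin_unlock(&dreq->lock);
	}
	if (nfs_commit_end(cinfo.mds))		/* drops the count taken above */
		nfs_direct_write_complete(dreq);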
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index ef817a0475..3e724cb7ef 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -2016,7 +2016,7 @@ static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
mirror = flseg->mirror_array[idx];
mirror_ds = mirror->mirror_ds;
- if (!mirror_ds)
+ if (IS_ERR_OR_NULL(mirror_ds))
continue;
ds = mirror->mirror_ds->ds;
if (!ds)
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index b05717fe0d..60a3c28784 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -307,11 +307,11 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
struct inode *inode = sreq->rreq->inode;
struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
struct page *page;
+ unsigned long idx;
int err;
pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
pgoff_t last = ((sreq->start + sreq->len -
sreq->transferred - 1) >> PAGE_SHIFT);
- XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
nfs_pageio_init_read(&pgio, inode, false,
&nfs_async_read_completion_ops);
@@ -322,19 +322,14 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
pgio.pg_netfs = netfs; /* used in completion */
- xas_lock(&xas);
- xas_for_each(&xas, page, last) {
+ xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
- xas_pause(&xas);
- xas_unlock(&xas);
err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
if (err < 0) {
netfs->error = err;
goto out;
}
- xas_lock(&xas);
}
- xas_unlock(&xas);
out:
nfs_pageio_complete_read(&pgio);
nfs_netfs_put(netfs);
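Note: the nfs_netfs_issue_read() hunk replaces the hand-rolled xas_lock()/xas_pause() walk with xa_for_each_range(), which takes and drops the RCU read lock around each lookup internally, so the loop body is free to sleep in nfs_read_add_folio(). The resulting loop, as it appears after the patch:

	xa_for_each_range(&sreq->rreq->mapping->i_pages, idx, page, start, last) {
		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
		if (err < 0) {
			netfs->error = err;
			goto out;
		}
	}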
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index b59876b01a..0282d93c8b 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -55,11 +55,14 @@ int nfs42_proc_removexattr(struct inode *inode, const char *name);
* They would be 7 bytes long in the eventual buffer ("user.x\0"), and
* 8 bytes long XDR-encoded.
*
- * Include the trailing eof word as well.
+ * Include the trailing eof word as well and make the result a multiple
+ * of 4 bytes.
*/
static inline u32 nfs42_listxattr_xdrsize(u32 buflen)
{
- return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
+ u32 size = 8 * buflen / (XATTR_USER_PREFIX_LEN + 2) + 4;
+
+ return (size + 3) & ~3;
}
#endif /* CONFIG_NFS_V4_2 */
#endif /* __LINUX_FS_NFS_NFS4_2_H */
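Note: `(size + 3) & ~3` is the usual round-up-to-a-multiple-of-4 idiom, matching XDR's 4-byte word alignment. A minimal stand-alone illustration of the same arithmetic (plain userspace C, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Round v up to the next multiple of 4 (the XDR word size). */
	static uint32_t xdr_align4(uint32_t v)
	{
		return (v + 3) & ~3u;
	}

	int main(void)
	{
		/* prints "12 12 16": 9 -> 12, 12 -> 12, 13 -> 16 */
		printf("%u %u %u\n", xdr_align4(9), xdr_align4(12), xdr_align4(13));
		return 0;
	}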
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 23819a7565..b2ff8c7a21 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -10615,29 +10615,33 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
{
ssize_t error, error2, error3;
+ size_t left = size;
- error = generic_listxattr(dentry, list, size);
+ error = generic_listxattr(dentry, list, left);
if (error < 0)
return error;
if (list) {
list += error;
- size -= error;
+ left -= error;
}
- error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
+ error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, left);
if (error2 < 0)
return error2;
if (list) {
list += error2;
- size -= error2;
+ left -= error2;
}
- error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size);
+ error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, left);
if (error3 < 0)
return error3;
- return error + error2 + error3;
+ error += error2 + error3;
+ if (size && error > size)
+ return -ERANGE;
+ return error;
}
static void nfs4_enable_swap(struct inode *inode)
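Note: the nfs4_listxattr() hunk keeps the caller's size in a separate `left` counter that the three helpers consume, sums their contributions into `error`, and finally reports -ERANGE when a non-zero buffer size turns out to be too small for the combined listing; previously the total could silently exceed the buffer the caller had sized for. In schematic form (the label and user-xattr helpers follow the same pattern as the first call):

	ssize_t error, error2, error3;
	size_t left = size;			/* space remaining in the caller's buffer */

	error = generic_listxattr(dentry, list, left);
	if (error < 0)
		return error;
	if (list) {
		list += error;			/* advance past what was written */
		left -= error;
	}

	/* nfs4_listxattr_nfs4_label() and nfs4_listxattr_nfs4_user() consume
	 * `left` the same way, each advancing list and shrinking left. */

	error += error2 + error3;		/* total bytes used (or needed) */
	if (size && error > size)
		return -ERANGE;			/* buffer too small for the combined listing */
	return error;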
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 7600100ba2..432612d224 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -175,10 +175,10 @@ static int __init root_nfs_cat(char *dest, const char *src,
size_t len = strlen(dest);
if (len && dest[len - 1] != ',')
- if (strlcat(dest, ",", destlen) > destlen)
+ if (strlcat(dest, ",", destlen) >= destlen)
return -1;
- if (strlcat(dest, src, destlen) > destlen)
+ if (strlcat(dest, src, destlen) >= destlen)
return -1;
return 0;
}
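Note: strlcat() returns the total length of the string it tried to create, and the destination can hold at most destlen - 1 characters plus the terminating NUL, so truncation has occurred whenever the return value is greater than or equal to destlen; the old `> destlen` comparison missed the case where the result would exactly fill the buffer. A small stand-alone demonstration (plain userspace C with a minimal strlcat stand-in, since glibc only gained strlcat recently):

	#include <stdio.h>
	#include <string.h>

	/*
	 * Minimal strlcat stand-in for the demo; like the kernel/BSD version
	 * it returns the total length of the string it tried to create.
	 */
	static size_t demo_strlcat(char *dst, const char *src, size_t size)
	{
		size_t dlen = strlen(dst);
		size_t slen = strlen(src);

		if (dlen < size)
			snprintf(dst + dlen, size - dlen, "%s", src);
		return dlen + slen;
	}

	int main(void)
	{
		char dest[8] = "abc";

		/* "abc" + ",defg" needs 9 bytes including the NUL but only 8
		 * are available: the result (8) is >= sizeof(dest), which the
		 * fixed test reports as truncation while "> destlen" would not. */
		size_t n = demo_strlcat(dest, ",defg", sizeof(dest));

		printf("%zu -> %s\n", n, n >= sizeof(dest) ? "truncated" : "fits");
		return 0;
	}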
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index afd23910f3..88e061bd71 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -919,6 +919,8 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ char servername[48];
+
dprintk("%s: DS %s: trying address %s\n",
__func__, ds->ds_remotestr, da->da_remotestr);
@@ -929,6 +931,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.dstaddr = (struct sockaddr *)&da->da_addr,
.addrlen = da->da_addrlen,
.servername = clp->cl_hostname,
+ .xprtsec = clp->cl_xprtsec,
};
struct nfs4_add_xprt_data xprtdata = {
.clp = clp,
@@ -938,10 +941,45 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
.data = &xprtdata,
};
- if (da->da_transport != clp->cl_proto)
+ if (da->da_transport != clp->cl_proto &&
+ clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
continue;
+ if (da->da_transport == XPRT_TRANSPORT_TCP &&
+ mds_srv->nfs_client->cl_proto ==
+ XPRT_TRANSPORT_TCP_TLS) {
+ struct sockaddr *addr =
+ (struct sockaddr *)&da->da_addr;
+ struct sockaddr_in *sin =
+ (struct sockaddr_in *)&da->da_addr;
+ struct sockaddr_in6 *sin6 =
+ (struct sockaddr_in6 *)&da->da_addr;
+
+ /* for NFS with TLS we need to supply a correct
+ * servername of the trunked transport, not the
+ * servername of the main transport stored in
+ * clp->cl_hostname. And set the protocol to
+ * indicate to use TLS
+ */
+ servername[0] = '\0';
+ switch(addr->sa_family) {
+ case AF_INET:
+ snprintf(servername, sizeof(servername),
+ "%pI4", &sin->sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ snprintf(servername, sizeof(servername),
+ "%pI6", &sin6->sin6_addr);
+ break;
+ default:
+ /* do not consider this address */
+ continue;
+ }
+ xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
+ xprt_args.servername = servername;
+ }
if (da->da_addr.ss_family != clp->cl_addr.ss_family)
continue;
+
/**
* Test this address for session trunking and
* add as an alias
@@ -953,6 +991,10 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
if (xprtdata.cred)
put_cred(xprtdata.cred);
} else {
+ if (da->da_transport == XPRT_TRANSPORT_TCP &&
+ mds_srv->nfs_client->cl_proto ==
+ XPRT_TRANSPORT_TCP_TLS)
+ da->da_transport = XPRT_TRANSPORT_TCP_TLS;
clp = nfs4_set_ds_client(mds_srv,
&da->da_addr,
da->da_addrlen,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 7dc21a48e3..a142287d86 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -305,6 +305,8 @@ int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
if (IS_ERR(new)) {
error = PTR_ERR(new);
+ if (nfs_netfs_folio_unlock(folio))
+ folio_unlock(folio);
goto out;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9e345d3c30..5c2ff4a31a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1663,7 +1663,7 @@ static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
!atomic_read(&cinfo->rpcs_out));
}
-static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
+void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
atomic_inc(&cinfo->rpcs_out);
}
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index fbc0ccb404..4c7a296c41 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -843,7 +843,7 @@ DECLARE_EVENT_CLASS(nfsd_clid_class,
__array(unsigned char, addr, sizeof(struct sockaddr_in6))
__field(unsigned long, flavor)
__array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
- __string_len(name, name, clp->cl_name.len)
+ __string_len(name, clp->cl_name.data, clp->cl_name.len)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 13592e82ea..65659fa037 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -724,7 +724,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
dat = nilfs_bmap_get_dat(btree);
ret = nilfs_dat_translate(dat, ptr, &blocknr);
if (ret < 0)
- goto out;
+ goto dat_error;
ptr = blocknr;
}
cnt = 1;
@@ -743,7 +743,7 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
if (dat) {
ret = nilfs_dat_translate(dat, ptr2, &blocknr);
if (ret < 0)
- goto out;
+ goto dat_error;
ptr2 = blocknr;
}
if (ptr2 != ptr + cnt || ++cnt == maxblocks)
@@ -781,6 +781,11 @@ static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
out:
nilfs_btree_free_path(path);
return ret;
+
+ dat_error:
+ if (ret == -ENOENT)
+ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
+ goto out;
}
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
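Note: both nilfs2 lookup hunks (here and in fs/nilfs2/direct.c just below) route DAT translation failures through a dat_error label that turns -ENOENT into -EINVAL, so the bmap layer sees metadata corruption rather than a plausible-looking "no such block". The shape of the resulting error path in the btree case:

	if (dat) {
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto dat_error;
		ptr = blocknr;
	}
	/* ... contiguous-block scan continues ... */
 out:
	nilfs_btree_free_path(path);
	return ret;

 dat_error:
	if (ret == -ENOENT)
		ret = -EINVAL;	/* notify bmap layer of metadata corruption */
	goto out;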
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 4c85914f2a..893ab36824 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -66,7 +66,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
dat = nilfs_bmap_get_dat(direct);
ret = nilfs_dat_translate(dat, ptr, &blocknr);
if (ret < 0)
- return ret;
+ goto dat_error;
ptr = blocknr;
}
@@ -79,7 +79,7 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
if (dat) {
ret = nilfs_dat_translate(dat, ptr2, &blocknr);
if (ret < 0)
- return ret;
+ goto dat_error;
ptr2 = blocknr;
}
if (ptr2 != ptr + cnt)
@@ -87,6 +87,11 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
}
*ptrp = ptr;
return cnt;
+
+ dat_error:
+ if (ret == -ENOENT)
+ ret = -EINVAL; /* Notify bmap layer of metadata corruption */
+ return ret;
}
static __u64
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index f861f3a0bf..da97149f83 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -112,7 +112,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
"%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
__func__, inode->i_ino,
(unsigned long long)blkoff);
- err = 0;
+ err = -EAGAIN;
}
nilfs_transaction_abort(inode->i_sb);
goto out;
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index 82b28fdacc..accf03d476 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -65,7 +65,7 @@ struct ocfs2_inode_info
tid_t i_sync_tid;
tid_t i_datasync_tid;
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
};
/*
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 6b90642490..1259fe02cd 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -122,7 +122,7 @@ static int ocfs2_susp_quotas(struct ocfs2_super *osb, int unsuspend);
static int ocfs2_enable_quotas(struct ocfs2_super *osb);
static void ocfs2_disable_quotas(struct ocfs2_super *osb);
-static struct dquot **ocfs2_get_dquots(struct inode *inode)
+static struct dquot __rcu **ocfs2_get_dquots(struct inode *inode)
{
return OCFS2_I(inode)->i_dquot;
}
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index 3fe2dde159..488f920f79 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -280,12 +280,20 @@ static int ovl_mount_dir_check(struct fs_context *fc, const struct path *path,
{
struct ovl_fs_context *ctx = fc->fs_private;
- if (ovl_dentry_weird(path->dentry))
- return invalfc(fc, "filesystem on %s not supported", name);
-
if (!d_is_dir(path->dentry))
return invalfc(fc, "%s is not a directory", name);
+ /*
+ * Root dentries of case-insensitive capable filesystems might
+ * not have the dentry operations set, but still be incompatible
+ * with overlayfs. Check explicitly to prevent post-mount
+ * failures.
+ */
+ if (sb_has_encoding(path->mnt->mnt_sb))
+ return invalfc(fc, "case-insensitive capable filesystem on %s not supported", name);
+
+ if (ovl_dentry_weird(path->dentry))
+ return invalfc(fc, "filesystem on %s not supported", name);
/*
* Check whether upper path is read-only here to report failures
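Note: after this hunk, ovl_mount_dir_check() validates layer directories in a stricter order: the directory check comes first, then an explicit rejection of filesystems that carry an encoding (case-insensitive capable) via sb_has_encoding(), and only then the generic ovl_dentry_weird() test, which the root dentry of such a filesystem may still pass because its dentry operations are not set yet. Consolidated from the diff:

	if (!d_is_dir(path->dentry))
		return invalfc(fc, "%s is not a directory", name);

	/*
	 * Case-insensitive capable filesystems may look fine at mount time
	 * (the root dentry has no d_ops yet) but break later; reject up front.
	 */
	if (sb_has_encoding(path->mnt->mnt_sb))
		return invalfc(fc, "case-insensitive capable filesystem on %s not supported", name);

	if (ovl_dentry_weird(path->dentry))
		return invalfc(fc, "filesystem on %s not supported", name);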
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index d41c20d1b5..4ca72b08d9 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -182,25 +182,21 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = d_inode(dentry)->i_private;
struct pstore_record *record = p->record;
- int rc = 0;
if (!record->psi->erase)
return -EPERM;
/* Make sure we can't race while removing this file. */
- mutex_lock(&records_list_lock);
- if (!list_empty(&p->list))
- list_del_init(&p->list);
- else
- rc = -ENOENT;
- p->dentry = NULL;
- mutex_unlock(&records_list_lock);
- if (rc)
- return rc;
-
- mutex_lock(&record->psi->read_mutex);
- record->psi->erase(record);
- mutex_unlock(&record->psi->read_mutex);
+ scoped_guard(mutex, &records_list_lock) {
+ if (!list_empty(&p->list))
+ list_del_init(&p->list);
+ else
+ return -ENOENT;
+ p->dentry = NULL;
+ }
+
+ scoped_guard(mutex, &record->psi->read_mutex)
+ record->psi->erase(record);
return simple_unlink(dir, dentry);
}
@@ -292,19 +288,16 @@ static struct dentry *psinfo_lock_root(void)
{
struct dentry *root;
- mutex_lock(&pstore_sb_lock);
+ guard(mutex)(&pstore_sb_lock);
/*
* Having no backend is fine -- no records appear.
* Not being mounted is fine -- nothing to do.
*/
- if (!psinfo || !pstore_sb) {
- mutex_unlock(&pstore_sb_lock);
+ if (!psinfo || !pstore_sb)
return NULL;
- }
root = pstore_sb->s_root;
inode_lock(d_inode(root));
- mutex_unlock(&pstore_sb_lock);
return root;
}
@@ -313,29 +306,25 @@ int pstore_put_backend_records(struct pstore_info *psi)
{
struct pstore_private *pos, *tmp;
struct dentry *root;
- int rc = 0;
root = psinfo_lock_root();
if (!root)
return 0;
- mutex_lock(&records_list_lock);
- list_for_each_entry_safe(pos, tmp, &records_list, list) {
- if (pos->record->psi == psi) {
- list_del_init(&pos->list);
- rc = simple_unlink(d_inode(root), pos->dentry);
- if (WARN_ON(rc))
- break;
- d_drop(pos->dentry);
- dput(pos->dentry);
- pos->dentry = NULL;
+ scoped_guard(mutex, &records_list_lock) {
+ list_for_each_entry_safe(pos, tmp, &records_list, list) {
+ if (pos->record->psi == psi) {
+ list_del_init(&pos->list);
+ d_invalidate(pos->dentry);
+ simple_unlink(d_inode(root), pos->dentry);
+ pos->dentry = NULL;
+ }
}
}
- mutex_unlock(&records_list_lock);
inode_unlock(d_inode(root));
- return rc;
+ return 0;
}
/*
@@ -355,20 +344,20 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
if (WARN_ON(!inode_is_locked(d_inode(root))))
return -EINVAL;
- rc = -EEXIST;
+ guard(mutex)(&records_list_lock);
+
/* Skip records that are already present in the filesystem. */
- mutex_lock(&records_list_lock);
list_for_each_entry(pos, &records_list, list) {
if (pos->record->type == record->type &&
pos->record->id == record->id &&
pos->record->psi == record->psi)
- goto fail;
+ return -EEXIST;
}
rc = -ENOMEM;
inode = pstore_get_inode(root->d_sb);
if (!inode)
- goto fail;
+ return -ENOMEM;
inode->i_mode = S_IFREG | 0444;
inode->i_fop = &pstore_file_operations;
scnprintf(name, sizeof(name), "%s-%s-%llu%s",
@@ -396,7 +385,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
d_add(dentry, inode);
list_add(&private->list, &records_list);
- mutex_unlock(&records_list_lock);
return 0;
@@ -404,8 +392,6 @@ fail_private:
free_pstore_private(private);
fail_inode:
iput(inode);
-fail:
- mutex_unlock(&records_list_lock);
return rc;
}
@@ -451,9 +437,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root)
return -ENOMEM;
- mutex_lock(&pstore_sb_lock);
- pstore_sb = sb;
- mutex_unlock(&pstore_sb_lock);
+ scoped_guard(mutex, &pstore_sb_lock)
+ pstore_sb = sb;
pstore_get_records(0);
@@ -468,17 +453,14 @@ static struct dentry *pstore_mount(struct file_system_type *fs_type,
static void pstore_kill_sb(struct super_block *sb)
{
- mutex_lock(&pstore_sb_lock);
+ guard(mutex)(&pstore_sb_lock);
WARN_ON(pstore_sb && pstore_sb != sb);
kill_litter_super(sb);
pstore_sb = NULL;
- mutex_lock(&records_list_lock);
+ guard(mutex)(&records_list_lock);
INIT_LIST_HEAD(&records_list);
- mutex_unlock(&records_list_lock);
-
- mutex_unlock(&pstore_sb_lock);
}
static struct file_system_type pstore_fs_type = {
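Note: the pstore conversion leans on the scope-based lock guards from <linux/cleanup.h>: guard(mutex)(&m) keeps the mutex held until the enclosing scope is left, and scoped_guard(mutex, &m) { ... } holds it for just the braced statement, so early returns no longer need matching mutex_unlock() calls. A kernel-style sketch of the idiom (illustrative only; demo_lock, demo_count and the demo functions are not part of the patch):

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static int demo_count;			/* illustrative only */

	static int demo_add(int v)
	{
		guard(mutex)(&demo_lock);	/* dropped automatically at any return */

		if (v < 0)
			return -EINVAL;		/* no explicit mutex_unlock() needed */
		demo_count += v;
		return 0;
	}

	static void demo_reset(void)
	{
		scoped_guard(mutex, &demo_lock)	/* held only for this statement */
			demo_count = 0;
	}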
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 58b5de081b..6f2fa7889d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -399,15 +399,17 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* Dirtify all the dquots - this can block when journalling */
-static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
+static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots)
{
int ret, err, cnt;
+ struct dquot *dquot;
ret = err = 0;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquot[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot)
/* Even in case of error we have to continue */
- ret = mark_dquot_dirty(dquot[cnt]);
+ ret = mark_dquot_dirty(dquot);
if (!err)
err = ret;
}
@@ -998,14 +1000,14 @@ out:
}
EXPORT_SYMBOL(dqget);
-static inline struct dquot **i_dquot(struct inode *inode)
+static inline struct dquot __rcu **i_dquot(struct inode *inode)
{
return inode->i_sb->s_op->get_dquots(inode);
}
static int dqinit_needed(struct inode *inode, int type)
{
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@@ -1095,14 +1097,16 @@ static void remove_dquot_ref(struct super_block *sb, int type)
*/
spin_lock(&dq_data_lock);
if (!IS_NOQUOTA(inode)) {
- struct dquot **dquots = i_dquot(inode);
- struct dquot *dquot = dquots[type];
+ struct dquot __rcu **dquots = i_dquot(inode);
+ struct dquot *dquot = srcu_dereference_check(
+ dquots[type], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
#ifdef CONFIG_QUOTA_DEBUG
if (unlikely(inode_get_rsv_space(inode) > 0))
reserved = 1;
#endif
- dquots[type] = NULL;
+ rcu_assign_pointer(dquots[type], NULL);
if (dquot)
dqput(dquot);
}
@@ -1455,7 +1459,8 @@ static int inode_quota_active(const struct inode *inode)
static int __dquot_initialize(struct inode *inode, int type)
{
int cnt, init_needed = 0;
- struct dquot **dquots, *got[MAXQUOTAS] = {};
+ struct dquot __rcu **dquots;
+ struct dquot *got[MAXQUOTAS] = {};
struct super_block *sb = inode->i_sb;
qsize_t rsv;
int ret = 0;
@@ -1530,7 +1535,7 @@ static int __dquot_initialize(struct inode *inode, int type)
if (!got[cnt])
continue;
if (!dquots[cnt]) {
- dquots[cnt] = got[cnt];
+ rcu_assign_pointer(dquots[cnt], got[cnt]);
got[cnt] = NULL;
/*
* Make quota reservation system happy if someone
@@ -1538,12 +1543,16 @@ static int __dquot_initialize(struct inode *inode, int type)
*/
rsv = inode_get_rsv_space(inode);
if (unlikely(rsv)) {
+ struct dquot *dquot = srcu_dereference_check(
+ dquots[cnt], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
+
spin_lock(&inode->i_lock);
/* Get reservation again under proper lock */
rsv = __inode_get_rsv_space(inode);
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
+ dquot->dq_dqb.dqb_rsvspace += rsv;
+ spin_unlock(&dquot->dq_dqb_lock);
spin_unlock(&inode->i_lock);
}
}
@@ -1565,7 +1574,7 @@ EXPORT_SYMBOL(dquot_initialize);
bool dquot_initialize_needed(struct inode *inode)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
int i;
if (!inode_quota_active(inode))
@@ -1590,13 +1599,14 @@ EXPORT_SYMBOL(dquot_initialize_needed);
static void __dquot_drop(struct inode *inode)
{
int cnt;
- struct dquot **dquots = i_dquot(inode);
+ struct dquot __rcu **dquots = i_dquot(inode);
struct dquot *put[MAXQUOTAS];
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- put[cnt] = dquots[cnt];
- dquots[cnt] = NULL;
+ put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu,
+ lockdep_is_held(&dq_data_lock));
+ rcu_assign_pointer(dquots[cnt], NULL);
}
spin_unlock(&dq_data_lock);
dqput_all(put);
@@ -1604,7 +1614,7 @@ static void __dquot_drop(struct inode *inode)
void dquot_drop(struct inode *inode)
{
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
int cnt;
if (IS_NOQUOTA(inode))
@@ -1677,7 +1687,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
if (!inode_quota_active(inode)) {
if (reserve) {
@@ -1697,27 +1708,26 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
if (reserve) {
- ret = dquot_add_space(dquots[cnt], 0, number, flags,
- &warn[cnt]);
+ ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]);
} else {
- ret = dquot_add_space(dquots[cnt], number, 0, flags,
- &warn[cnt]);
+ ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]);
}
if (ret) {
/* Back out changes we already did */
for (cnt--; cnt >= 0; cnt--) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
if (reserve)
- dquot_free_reserved_space(dquots[cnt],
- number);
+ dquot_free_reserved_space(dquot, number);
else
- dquot_decr_space(dquots[cnt], number);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ dquot_decr_space(dquot, number);
+ spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
goto out_flush_warn;
@@ -1747,7 +1757,8 @@ int dquot_alloc_inode(struct inode *inode)
{
int cnt, ret = 0, index;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
+ struct dquot *dquot;
if (!inode_quota_active(inode))
return 0;
@@ -1758,17 +1769,19 @@ int dquot_alloc_inode(struct inode *inode)
index = srcu_read_lock(&dquot_srcu);
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
+ ret = dquot_add_inodes(dquot, 1, &warn[cnt]);
if (ret) {
for (cnt--; cnt >= 0; cnt--) {
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
/* Back out changes we already did */
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- dquot_decr_inodes(dquots[cnt], 1);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ spin_lock(&dquot->dq_dqb_lock);
+ dquot_decr_inodes(dquot, 1);
+ spin_unlock(&dquot->dq_dqb_lock);
}
goto warn_put_all;
}
@@ -1789,7 +1802,8 @@ EXPORT_SYMBOL(dquot_alloc_inode);
*/
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int cnt, index;
if (!inode_quota_active(inode)) {
@@ -1805,9 +1819,8 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquots[cnt]) {
- struct dquot *dquot = dquots[cnt];
-
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
number = dquot->dq_dqb.dqb_rsvspace;
@@ -1831,7 +1844,8 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
*/
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int cnt, index;
if (!inode_quota_active(inode)) {
@@ -1847,9 +1861,8 @@ void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
spin_lock(&inode->i_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (dquots[cnt]) {
- struct dquot *dquot = dquots[cnt];
-
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (dquot) {
spin_lock(&dquot->dq_dqb_lock);
if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
number = dquot->dq_dqb.dqb_curspace;
@@ -1875,7 +1888,8 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot **dquots;
+ struct dquot __rcu **dquots;
+ struct dquot *dquot;
int reserve = flags & DQUOT_SPACE_RESERVE, index;
if (!inode_quota_active(inode)) {
@@ -1896,17 +1910,18 @@ void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
int wtype;
warn[cnt].w_type = QUOTA_NL_NOWARN;
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- wtype = info_bdq_free(dquots[cnt], number);
+ spin_lock(&dquot->dq_dqb_lock);
+ wtype = info_bdq_free(dquot, number);
if (wtype != QUOTA_NL_NOWARN)
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
+ prepare_warning(&warn[cnt], dquot, wtype);
if (reserve)
- dquot_free_reserved_space(dquots[cnt], number);
+ dquot_free_reserved_space(dquot, number);
else
- dquot_decr_space(dquots[cnt], number);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ dquot_decr_space(dquot, number);
+ spin_unlock(&dquot->dq_dqb_lock);
}
if (reserve)
*inode_reserved_space(inode) -= number;
@@ -1930,7 +1945,8 @@ void dquot_free_inode(struct inode *inode)
{
unsigned int cnt;
struct dquot_warn warn[MAXQUOTAS];
- struct dquot * const *dquots;
+ struct dquot __rcu * const *dquots;
+ struct dquot *dquot;
int index;
if (!inode_quota_active(inode))
@@ -1941,16 +1957,16 @@ void dquot_free_inode(struct inode *inode)
spin_lock(&inode->i_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
int wtype;
-
warn[cnt].w_type = QUOTA_NL_NOWARN;
- if (!dquots[cnt])
+ dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
+ if (!dquot)
continue;
- spin_lock(&dquots[cnt]->dq_dqb_lock);
- wtype = info_idq_free(dquots[cnt], 1);
+ spin_lock(&dquot->dq_dqb_lock);
+ wtype = info_idq_free(dquot, 1);
if (wtype != QUOTA_NL_NOWARN)
- prepare_warning(&warn[cnt], dquots[cnt], wtype);
- dquot_decr_inodes(dquots[cnt], 1);
- spin_unlock(&dquots[cnt]->dq_dqb_lock);
+ prepare_warning(&warn[cnt], dquot, wtype);
+ dquot_decr_inodes(dquot, 1);
+ spin_unlock(&dquot->dq_dqb_lock);
}
spin_unlock(&inode->i_lock);
mark_all_dquot_dirty(dquots);
@@ -1976,8 +1992,9 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
qsize_t cur_space;
qsize_t rsv_space = 0;
qsize_t inode_usage = 1;
+ struct dquot __rcu **dquots;
struct dquot *transfer_from[MAXQUOTAS] = {};
- int cnt, ret = 0;
+ int cnt, index, ret = 0;
char is_valid[MAXQUOTAS] = {};
struct dquot_warn warn_to[MAXQUOTAS];
struct dquot_warn warn_from_inodes[MAXQUOTAS];
@@ -2008,6 +2025,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
}
cur_space = __inode_get_bytes(inode);
rsv_space = __inode_get_rsv_space(inode);
+ dquots = i_dquot(inode);
/*
* Build the transfer_from list, check limits, and update usage in
* the target structures.
@@ -2022,7 +2040,8 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
if (!sb_has_quota_active(inode->i_sb, cnt))
continue;
is_valid[cnt] = 1;
- transfer_from[cnt] = i_dquot(inode)[cnt];
+ transfer_from[cnt] = srcu_dereference_check(dquots[cnt],
+ &dquot_srcu, lockdep_is_held(&dq_data_lock));
ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
&warn_to[cnt]);
if (ret)
@@ -2061,13 +2080,21 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
rsv_space);
spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
}
- i_dquot(inode)[cnt] = transfer_to[cnt];
+ rcu_assign_pointer(dquots[cnt], transfer_to[cnt]);
}
spin_unlock(&inode->i_lock);
spin_unlock(&dq_data_lock);
- mark_all_dquot_dirty(transfer_from);
- mark_all_dquot_dirty(transfer_to);
+ /*
+ * These arrays are local and we hold dquot references so we don't need
+ * the srcu protection but still take dquot_srcu to avoid warning in
+ * mark_all_dquot_dirty().
+ */
+ index = srcu_read_lock(&dquot_srcu);
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_from);
+ mark_all_dquot_dirty((struct dquot __rcu **)transfer_to);
+ srcu_read_unlock(&dquot_srcu, index);
+
flush_warnings(warn_to);
flush_warnings(warn_from_inodes);
flush_warnings(warn_from_space);
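Note: throughout this file (and in the jfs, ocfs2 and reiserfs i_dquot declarations elsewhere in this patch) the per-inode dquot slots become __rcu pointers: readers fetch them with srcu_dereference() inside a dquot_srcu read-side section, and writers publish or clear them with rcu_assign_pointer() while holding the existing serializing locks. A schematic of the access pattern used above, not a compilable excerpt:

	/* reader side */
	index = srcu_read_lock(&dquot_srcu);
	dquot = srcu_dereference(dquots[cnt], &dquot_srcu);
	if (dquot) {
		/* ... charge or uncharge against dquot ... */
	}
	srcu_read_unlock(&dquot_srcu, index);

	/* writer side (slot updates stay serialized by dq_data_lock / i_lock) */
	spin_lock(&dq_data_lock);
	old = srcu_dereference_check(dquots[cnt], &dquot_srcu,
				     lockdep_is_held(&dq_data_lock));
	rcu_assign_pointer(dquots[cnt], new);
	spin_unlock(&dq_data_lock);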
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 725667880e..b655491645 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -97,7 +97,7 @@ struct reiserfs_inode_info {
struct rw_semaphore i_xattr_sem;
#endif
#ifdef CONFIG_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
struct inode vfs_inode;
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 67b5510bed..7b3d5aeb2a 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -802,7 +802,7 @@ static ssize_t reiserfs_quota_write(struct super_block *, int, const char *,
static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t,
loff_t);
-static struct dquot **reiserfs_get_dquots(struct inode *inode)
+static struct dquot __rcu **reiserfs_get_dquots(struct inode *inode)
{
return REISERFS_I(inode)->i_dquot;
}
diff --git a/fs/select.c b/fs/select.c
index 0ee55af1a5..d4d881d439 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -476,7 +476,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in,
wait->_key |= POLLOUT_SET;
}
-static int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
+static noinline_for_stack int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time)
{
ktime_t expire, *to = NULL;
struct poll_wqueues table;
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index 5730c65ffb..15e1215bc4 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -233,7 +233,8 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
.tcon = tcon,
.path = path,
.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
- .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
+ .desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
+ FILE_READ_EA,
.disposition = FILE_OPEN,
.fid = pfid,
};
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index 60027f5aeb..04a6351a92 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -488,6 +488,8 @@ skip_rdma:
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->ses_status);
}
+ if (ses->expired_pwd)
+ seq_puts(m, "password no longer valid ");
spin_unlock(&ses->ses_lock);
seq_printf(m, "\n\tSecurity type: %s ",
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 462554917e..35a12413bb 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -339,6 +339,9 @@ struct smb_version_operations {
/* informational QFS call */
void (*qfs_tcon)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *);
+ /* query for server interfaces */
+ int (*query_server_interfaces)(const unsigned int, struct cifs_tcon *,
+ bool);
/* check if a path is accessible or not */
int (*is_path_accessible)(const unsigned int, struct cifs_tcon *,
struct cifs_sb_info *, const char *);
@@ -1052,6 +1055,7 @@ struct cifs_ses {
enum securityEnum sectype; /* what security flavor was specified? */
bool sign; /* is signing required? */
bool domainAuto:1;
+ bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */
unsigned int flags;
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
@@ -1562,6 +1566,7 @@ struct cifsInodeInfo {
spinlock_t deferred_lock; /* protection on deferred list */
bool lease_granted; /* Flag to indicate whether lease or oplock is granted. */
char *symlink_target;
+ __u32 reparse_tag;
};
static inline struct cifsInodeInfo *
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 9516f57323..13131957d9 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -144,7 +144,8 @@ extern int cifs_reconnect(struct TCP_Server_Info *server,
extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
-extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
+ bool from_readdir);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
@@ -201,7 +202,8 @@ extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
struct cifs_sb_info *cifs_sb);
extern void cifs_dir_info_to_fattr(struct cifs_fattr *, FILE_DIRECTORY_INFO *,
struct cifs_sb_info *);
-extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr);
+extern int cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
+ bool from_readdir);
extern struct inode *cifs_iget(struct super_block *sb,
struct cifs_fattr *fattr);
@@ -652,7 +654,7 @@ cifs_chan_is_iface_active(struct cifs_ses *ses,
struct TCP_Server_Info *server);
void
cifs_disable_secondary_channels(struct cifs_ses *ses);
-int
+void
cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server);
int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon, bool in_mount);
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index c3d805ecb7..bc8d09bab1 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -123,12 +123,16 @@ static void smb2_query_server_interfaces(struct work_struct *work)
struct cifs_tcon *tcon = container_of(work,
struct cifs_tcon,
query_interfaces.work);
+ struct TCP_Server_Info *server = tcon->ses->server;
/*
* query server network interfaces, in case they change
*/
+ if (!server->ops->query_server_interfaces)
+ return;
+
xid = get_xid();
- rc = SMB3_request_interfaces(xid, tcon, false);
+ rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
if (rc) {
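Note: smb2_query_server_interfaces() now goes through the query_server_interfaces operation added to struct smb_version_operations above and bails out when the dialect does not provide one (presumably the SMB1 ops table), instead of calling SMB3_request_interfaces() unconditionally. Consolidated from the diff:

	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->query_server_interfaces)
		return;		/* dialect has no interface query */

	xid = get_xid();
	rc = server->ops->query_server_interfaces(xid, tcon, false);
	free_xid(xid);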
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 4cbb5487bd..c711d5eb29 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len
continue;
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len
continue;
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int le
xas_for_each(&xas, folio, end) {
if (!folio_test_writeback(folio)) {
WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
- len, start, folio_index(folio), end);
+ len, start, folio->index, end);
continue;
}
@@ -329,7 +329,7 @@ int cifs_posix_open(const char *full_path, struct inode **pinode,
}
} else {
cifs_revalidate_mapping(*pinode);
- rc = cifs_fattr_to_inode(*pinode, &fattr);
+ rc = cifs_fattr_to_inode(*pinode, &fattr, false);
}
posix_open_ret:
@@ -2622,20 +2622,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
* dirty pages if possible, but don't sleep while doing so.
*/
static void cifs_extend_writeback(struct address_space *mapping,
+ struct xa_state *xas,
long *_count,
loff_t start,
int max_pages,
- size_t max_len,
- unsigned int *_len)
+ loff_t max_len,
+ size_t *_len)
{
struct folio_batch batch;
struct folio *folio;
- unsigned int psize, nr_pages;
- size_t len = *_len;
- pgoff_t index = (start + len) / PAGE_SIZE;
+ unsigned int nr_pages;
+ pgoff_t index = (start + *_len) / PAGE_SIZE;
+ size_t len;
bool stop = true;
unsigned int i;
- XA_STATE(xas, &mapping->i_pages, index);
folio_batch_init(&batch);
@@ -2646,54 +2646,64 @@ static void cifs_extend_writeback(struct address_space *mapping,
*/
rcu_read_lock();
- xas_for_each(&xas, folio, ULONG_MAX) {
+ xas_for_each(xas, folio, ULONG_MAX) {
stop = true;
- if (xas_retry(&xas, folio))
+ if (xas_retry(xas, folio))
continue;
if (xa_is_value(folio))
break;
- if (folio_index(folio) != index)
+ if (folio->index != index) {
+ xas_reset(xas);
break;
+ }
+
if (!folio_try_get_rcu(folio)) {
- xas_reset(&xas);
+ xas_reset(xas);
continue;
}
nr_pages = folio_nr_pages(folio);
- if (nr_pages > max_pages)
+ if (nr_pages > max_pages) {
+ xas_reset(xas);
break;
+ }
/* Has the page moved or been split? */
- if (unlikely(folio != xas_reload(&xas))) {
+ if (unlikely(folio != xas_reload(xas))) {
folio_put(folio);
+ xas_reset(xas);
break;
}
if (!folio_trylock(folio)) {
folio_put(folio);
+ xas_reset(xas);
break;
}
- if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+ if (!folio_test_dirty(folio) ||
+ folio_test_writeback(folio)) {
folio_unlock(folio);
folio_put(folio);
+ xas_reset(xas);
break;
}
max_pages -= nr_pages;
- psize = folio_size(folio);
- len += psize;
+ len = folio_size(folio);
stop = false;
- if (max_pages <= 0 || len >= max_len || *_count <= 0)
- stop = true;
index += nr_pages;
+ *_count -= nr_pages;
+ *_len += len;
+ if (max_pages <= 0 || *_len >= max_len || *_count <= 0)
+ stop = true;
+
if (!folio_batch_add(&batch, folio))
break;
if (stop)
break;
}
- if (!stop)
- xas_pause(&xas);
+ xas_pause(xas);
rcu_read_unlock();
/* Now, if we obtained any pages, we can shift them to being
@@ -2709,18 +2719,13 @@ static void cifs_extend_writeback(struct address_space *mapping,
*/
if (!folio_clear_dirty_for_io(folio))
WARN_ON(1);
- if (folio_start_writeback(folio))
- WARN_ON(1);
-
- *_count -= folio_nr_pages(folio);
+ folio_start_writeback(folio);
folio_unlock(folio);
}
folio_batch_release(&batch);
cond_resched();
} while (!stop);
-
- *_len = len;
}
/*
@@ -2728,8 +2733,10 @@ static void cifs_extend_writeback(struct address_space *mapping,
*/
static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
struct writeback_control *wbc,
+ struct xa_state *xas,
struct folio *folio,
- loff_t start, loff_t end)
+ unsigned long long start,
+ unsigned long long end)
{
struct inode *inode = mapping->host;
struct TCP_Server_Info *server;
@@ -2738,18 +2745,18 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
struct cifsFileInfo *cfile = NULL;
- unsigned int xid, wsize, len;
- loff_t i_size = i_size_read(inode);
- size_t max_len;
+ unsigned long long i_size = i_size_read(inode), max_len;
+ unsigned int xid, wsize;
+ size_t len = folio_size(folio);
long count = wbc->nr_to_write;
int rc;
/* The folio should be locked, dirty and not undergoing writeback. */
- if (folio_start_writeback(folio))
- WARN_ON(1);
+ if (!folio_clear_dirty_for_io(folio))
+ WARN_ON_ONCE(1);
+ folio_start_writeback(folio);
count -= folio_nr_pages(folio);
- len = folio_size(folio);
xid = get_xid();
server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
@@ -2779,9 +2786,10 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
wdata->server = server;
cfile = NULL;
- /* Find all consecutive lockable dirty pages, stopping when we find a
- * page that is not immediately lockable, is not dirty or is missing,
- * or we reach the end of the range.
+ /* Find all consecutive lockable dirty pages that have contiguous
+ * written regions, stopping when we find a page that is not
+ * immediately lockable, is not dirty or is missing, or we reach the
+ * end of the range.
*/
if (start < i_size) {
/* Trim the write to the EOF; the extra data is ignored. Also
@@ -2801,19 +2809,18 @@ static ssize_t cifs_write_back_from_locked_folio(struct address_space *mapping,
max_pages -= folio_nr_pages(folio);
if (max_pages > 0)
- cifs_extend_writeback(mapping, &count, start,
+ cifs_extend_writeback(mapping, xas, &count, start,
max_pages, max_len, &len);
}
- len = min_t(loff_t, len, max_len);
}
-
- wdata->bytes = len;
+ len = min_t(unsigned long long, len, i_size - start);
/* We now have a contiguous set of dirty pages, each with writeback
* set; the first page is still locked at this point, but all the rest
* have been unlocked.
*/
folio_unlock(folio);
+ wdata->bytes = len;
if (start < i_size) {
iov_iter_xarray(&wdata->iter, ITER_SOURCE, &mapping->i_pages,
@@ -2864,102 +2871,118 @@ err_xid:
/*
* write a region of pages back to the server
*/
-static int cifs_writepages_region(struct address_space *mapping,
- struct writeback_control *wbc,
- loff_t start, loff_t end, loff_t *_next)
+static ssize_t cifs_writepages_begin(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct xa_state *xas,
+ unsigned long long *_start,
+ unsigned long long end)
{
- struct folio_batch fbatch;
+ struct folio *folio;
+ unsigned long long start = *_start;
+ ssize_t ret;
int skips = 0;
- folio_batch_init(&fbatch);
- do {
- int nr;
- pgoff_t index = start / PAGE_SIZE;
+search_again:
+ /* Find the first dirty page. */
+ rcu_read_lock();
- nr = filemap_get_folios_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, &fbatch);
- if (!nr)
+ for (;;) {
+ folio = xas_find_marked(xas, end / PAGE_SIZE, PAGECACHE_TAG_DIRTY);
+ if (xas_retry(xas, folio) || xa_is_value(folio))
+ continue;
+ if (!folio)
break;
- for (int i = 0; i < nr; i++) {
- ssize_t ret;
- struct folio *folio = fbatch.folios[i];
+ if (!folio_try_get_rcu(folio)) {
+ xas_reset(xas);
+ continue;
+ }
-redo_folio:
- start = folio_pos(folio); /* May regress with THPs */
+ if (unlikely(folio != xas_reload(xas))) {
+ folio_put(folio);
+ xas_reset(xas);
+ continue;
+ }
- /* At this point we hold neither the i_pages lock nor the
- * page lock: the page may be truncated or invalidated
- * (changing page->mapping to NULL), or even swizzled
- * back from swapper_space to tmpfs file mapping
- */
- if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = folio_lock_killable(folio);
- if (ret < 0)
- goto write_error;
- } else {
- if (!folio_trylock(folio))
- goto skip_write;
- }
+ xas_pause(xas);
+ break;
+ }
+ rcu_read_unlock();
+ if (!folio)
+ return 0;
- if (folio_mapping(folio) != mapping ||
- !folio_test_dirty(folio)) {
- start += folio_size(folio);
- folio_unlock(folio);
- continue;
- }
+ start = folio_pos(folio); /* May regress with THPs */
- if (folio_test_writeback(folio) ||
- folio_test_fscache(folio)) {
- folio_unlock(folio);
- if (wbc->sync_mode == WB_SYNC_NONE)
- goto skip_write;
+ /* At this point we hold neither the i_pages lock nor the page lock:
+ * the page may be truncated or invalidated (changing page->mapping to
+ * NULL), or even swizzled back from swapper_space to tmpfs file
+ * mapping
+ */
+lock_again:
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ ret = folio_lock_killable(folio);
+ if (ret < 0)
+ return ret;
+ } else {
+ if (!folio_trylock(folio))
+ goto search_again;
+ }
+
+ if (folio->mapping != mapping ||
+ !folio_test_dirty(folio)) {
+ start += folio_size(folio);
+ folio_unlock(folio);
+ goto search_again;
+ }
- folio_wait_writeback(folio);
+ if (folio_test_writeback(folio) ||
+ folio_test_fscache(folio)) {
+ folio_unlock(folio);
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ folio_wait_writeback(folio);
#ifdef CONFIG_CIFS_FSCACHE
- folio_wait_fscache(folio);
+ folio_wait_fscache(folio);
#endif
- goto redo_folio;
- }
-
- if (!folio_clear_dirty_for_io(folio))
- /* We hold the page lock - it should've been dirty. */
- WARN_ON(1);
-
- ret = cifs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
- if (ret < 0)
- goto write_error;
-
- start += ret;
- continue;
-
-write_error:
- folio_batch_release(&fbatch);
- *_next = start;
- return ret;
+ goto lock_again;
+ }
-skip_write:
- /*
- * Too many skipped writes, or need to reschedule?
- * Treat it as a write error without an error code.
- */
+ start += folio_size(folio);
+ if (wbc->sync_mode == WB_SYNC_NONE) {
if (skips >= 5 || need_resched()) {
ret = 0;
- goto write_error;
+ goto out;
}
-
- /* Otherwise, just skip that folio and go on to the next */
skips++;
- start += folio_size(folio);
- continue;
}
+ goto search_again;
+ }
- folio_batch_release(&fbatch);
- cond_resched();
- } while (wbc->nr_to_write > 0);
+ ret = cifs_write_back_from_locked_folio(mapping, wbc, xas, folio, start, end);
+out:
+ if (ret > 0)
+ *_start = start + ret;
+ return ret;
+}
- *_next = start;
- return 0;
+/*
+ * Write a region of pages back to the server
+ */
+static int cifs_writepages_region(struct address_space *mapping,
+ struct writeback_control *wbc,
+ unsigned long long *_start,
+ unsigned long long end)
+{
+ ssize_t ret;
+
+ XA_STATE(xas, &mapping->i_pages, *_start / PAGE_SIZE);
+
+ do {
+ ret = cifs_writepages_begin(mapping, wbc, &xas, _start, end);
+ if (ret > 0 && wbc->nr_to_write > 0)
+ cond_resched();
+ } while (ret > 0 && wbc->nr_to_write > 0);
+
+ return ret > 0 ? 0 : ret;
}
/*
@@ -2968,7 +2991,7 @@ skip_write:
static int cifs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
- loff_t start, next;
+ loff_t start, end;
int ret;
/* We have to be careful as we can end up racing with setattr()
@@ -2976,28 +2999,34 @@ static int cifs_writepages(struct address_space *mapping,
* to prevent it.
*/
- if (wbc->range_cyclic) {
+ if (wbc->range_cyclic && mapping->writeback_index) {
start = mapping->writeback_index * PAGE_SIZE;
- ret = cifs_writepages_region(mapping, wbc, start, LLONG_MAX, &next);
- if (ret == 0) {
- mapping->writeback_index = next / PAGE_SIZE;
- if (start > 0 && wbc->nr_to_write > 0) {
- ret = cifs_writepages_region(mapping, wbc, 0,
- start, &next);
- if (ret == 0)
- mapping->writeback_index =
- next / PAGE_SIZE;
- }
+ ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
+ if (ret < 0)
+ goto out;
+
+ if (wbc->nr_to_write <= 0) {
+ mapping->writeback_index = start / PAGE_SIZE;
+ goto out;
}
+
+ start = 0;
+ end = mapping->writeback_index * PAGE_SIZE;
+ mapping->writeback_index = 0;
+ ret = cifs_writepages_region(mapping, wbc, &start, end);
+ if (ret == 0)
+ mapping->writeback_index = start / PAGE_SIZE;
} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
- ret = cifs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next);
+ start = 0;
+ ret = cifs_writepages_region(mapping, wbc, &start, LLONG_MAX);
if (wbc->nr_to_write > 0 && ret == 0)
- mapping->writeback_index = next / PAGE_SIZE;
+ mapping->writeback_index = start / PAGE_SIZE;
} else {
- ret = cifs_writepages_region(mapping, wbc,
- wbc->range_start, wbc->range_end, &next);
+ start = wbc->range_start;
+ ret = cifs_writepages_region(mapping, wbc, &start, wbc->range_end);
}
+out:
return ret;
}
@@ -4737,12 +4766,14 @@ static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
refreshing the inode only on increases in the file size
but this is tricky to do without racing with writebehind
page caching in the current Linux kernel design */
-bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
+bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
+ bool from_readdir)
{
if (!cifsInode)
return true;
- if (is_inode_writable(cifsInode)) {
+ if (is_inode_writable(cifsInode) ||
+ ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
/* This inode is open for write at least once */
struct cifs_sb_info *cifs_sb;
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 6ecbf48d0f..e4a6b240d2 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -771,7 +771,7 @@ static void smb3_fs_context_free(struct fs_context *fc)
*/
static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
struct smb3_fs_context *new_ctx,
- struct smb3_fs_context *old_ctx)
+ struct smb3_fs_context *old_ctx, bool need_recon)
{
if (new_ctx->posix_paths != old_ctx->posix_paths) {
cifs_errorf(fc, "can not change posixpaths during remount\n");
@@ -797,8 +797,15 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
}
if (new_ctx->password &&
(!old_ctx->password || strcmp(new_ctx->password, old_ctx->password))) {
- cifs_errorf(fc, "can not change password during remount\n");
- return -EINVAL;
+ if (need_recon == false) {
+ cifs_errorf(fc,
+ "can not change password of active session during remount\n");
+ return -EINVAL;
+ } else if (old_ctx->sectype == Kerberos) {
+ cifs_errorf(fc,
+ "can not change password for Kerberos via remount\n");
+ return -EINVAL;
+ }
}
if (new_ctx->domainname &&
(!old_ctx->domainname || strcmp(new_ctx->domainname, old_ctx->domainname))) {
@@ -842,9 +849,14 @@ static int smb3_reconfigure(struct fs_context *fc)
struct smb3_fs_context *ctx = smb3_fc2context(fc);
struct dentry *root = fc->root;
struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
+ struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses;
+ bool need_recon = false;
int rc;
- rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx);
+ if (ses->expired_pwd)
+ need_recon = true;
+
+ rc = smb3_verify_reconfigure_ctx(fc, ctx, cifs_sb->ctx, need_recon);
if (rc)
return rc;
@@ -857,7 +869,12 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, UNC);
STEAL_STRING(cifs_sb, ctx, source);
STEAL_STRING(cifs_sb, ctx, username);
- STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
+ if (need_recon == false)
+ STEAL_STRING_SENSITIVE(cifs_sb, ctx, password);
+ else {
+ kfree_sensitive(ses->password);
+ ses->password = kstrdup(ctx->password, GFP_KERNEL);
+ }
STEAL_STRING(cifs_sb, ctx, domainname);
STEAL_STRING(cifs_sb, ctx, nodename);
STEAL_STRING(cifs_sb, ctx, iocharset);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index eb54e48937..cb9e719e67 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -147,7 +147,8 @@ cifs_nlink_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
/* populate an inode with info from a cifs_fattr struct */
int
-cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
+cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr,
+ bool from_readdir)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -182,6 +183,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
inode->i_mode = fattr->cf_mode;
cifs_i->cifsAttrs = fattr->cf_cifsattrs;
+ cifs_i->reparse_tag = fattr->cf_cifstag;
if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
cifs_i->time = 0;
@@ -198,7 +200,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
* Can't safely change the file size here if the client is writing to
* it due to potential races.
*/
- if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
+ if (is_size_safe_to_change(cifs_i, fattr->cf_eof, from_readdir)) {
i_size_write(inode, fattr->cf_eof);
/*
@@ -209,7 +211,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
}
- if (S_ISLNK(fattr->cf_mode)) {
+ if (S_ISLNK(fattr->cf_mode) && fattr->cf_symlink_target) {
kfree(cifs_i->symlink_target);
cifs_i->symlink_target = fattr->cf_symlink_target;
fattr->cf_symlink_target = NULL;
@@ -367,7 +369,7 @@ static int update_inode_info(struct super_block *sb,
CIFS_I(*inode)->time = 0; /* force reval */
return -ESTALE;
}
- return cifs_fattr_to_inode(*inode, fattr);
+ return cifs_fattr_to_inode(*inode, fattr, false);
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
@@ -402,7 +404,7 @@ cifs_get_file_info_unix(struct file *filp)
} else
goto cifs_gfiunix_out;
- rc = cifs_fattr_to_inode(inode, &fattr);
+ rc = cifs_fattr_to_inode(inode, &fattr, false);
cifs_gfiunix_out:
free_xid(xid);
@@ -927,7 +929,7 @@ cifs_get_file_info(struct file *filp)
fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
/* if filetype is different, return error */
- rc = cifs_fattr_to_inode(inode, &fattr);
+ rc = cifs_fattr_to_inode(inode, &fattr, false);
cgfi_exit:
cifs_free_open_info(&data);
free_xid(xid);
@@ -1103,6 +1105,7 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
cifs_open_info_to_fattr(fattr, data, sb);
out:
+ fattr->cf_cifstag = data->reparse.tag;
free_rsp_buf(rsp_buftype, rsp_iov.iov_base);
return rc;
}
@@ -1465,7 +1468,7 @@ retry_iget5_locked:
}
/* can't fail - see cifs_find_inode() */
- cifs_fattr_to_inode(inode, fattr);
+ cifs_fattr_to_inode(inode, fattr, false);
if (sb->s_flags & SB_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index e23cd216bf..56033e4e4b 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -56,6 +56,23 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
#endif /* DEBUG2 */
/*
+ * Match a reparse point inode if reparse tag and ctime haven't changed.
+ *
+ * Windows Server updates ctime of reparse points when their data have changed.
+ * The server doesn't allow changing reparse tags from existing reparse points,
+ * though it's worth checking.
+ */
+static inline bool reparse_inode_match(struct inode *inode,
+ struct cifs_fattr *fattr)
+{
+ struct timespec64 ctime = inode_get_ctime(inode);
+
+ return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) &&
+ CIFS_I(inode)->reparse_tag == fattr->cf_cifstag &&
+ timespec64_equal(&ctime, &fattr->cf_ctime);
+}
+
+/*
* Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
*
* Find the dentry that matches "name". If there isn't one, create one. If it's
@@ -71,6 +88,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
struct super_block *sb = parent->d_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ int rc;
cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
@@ -82,9 +100,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
* We'll end up doing an on the wire call either way and
* this spares us an invalidation.
*/
- if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
- return;
retry:
+ if ((fattr->cf_cifsattrs & ATTR_REPARSE) ||
+ (fattr->cf_flags & CIFS_FATTR_NEED_REVAL))
+ return;
+
dentry = d_alloc_parallel(parent, name, &wq);
}
if (IS_ERR(dentry))
@@ -104,12 +124,34 @@ retry:
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
- /* update inode in place
- * if both i_ino and i_mode didn't change */
- if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
- cifs_fattr_to_inode(inode, fattr) == 0) {
- dput(dentry);
- return;
+ /*
+ * Update inode in place if both i_ino and i_mode didn't
+ * change.
+ */
+ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ /*
+ * Query dir responses don't provide enough
+ * information about reparse points other than
+ * their reparse tags. Save an invalidation by
+ * not clobbering the existing mode, size and
+ * symlink target (if any) when reparse tag and
+ * ctime haven't changed.
+ */
+ rc = 0;
+ if (fattr->cf_cifsattrs & ATTR_REPARSE) {
+ if (likely(reparse_inode_match(inode, fattr))) {
+ fattr->cf_mode = inode->i_mode;
+ fattr->cf_eof = CIFS_I(inode)->server_eof;
+ fattr->cf_symlink_target = NULL;
+ } else {
+ CIFS_I(inode)->time = 0;
+ rc = -ESTALE;
+ }
+ }
+ if (!rc && !cifs_fattr_to_inode(inode, fattr, true)) {
+ dput(dentry);
+ return;
+ }
}
}
d_invalidate(dentry);
@@ -127,29 +169,6 @@ retry:
dput(dentry);
}
-static bool reparse_file_needs_reval(const struct cifs_fattr *fattr)
-{
- if (!(fattr->cf_cifsattrs & ATTR_REPARSE))
- return false;
- /*
- * The DFS tags should be only intepreted by server side as per
- * MS-FSCC 2.1.2.1, but let's include them anyway.
- *
- * Besides, if cf_cifstag is unset (0), then we still need it to be
- * revalidated to know exactly what reparse point it is.
- */
- switch (fattr->cf_cifstag) {
- case IO_REPARSE_TAG_DFS:
- case IO_REPARSE_TAG_DFSR:
- case IO_REPARSE_TAG_SYMLINK:
- case IO_REPARSE_TAG_NFS:
- case IO_REPARSE_TAG_MOUNT_POINT:
- case 0:
- return true;
- }
- return false;
-}
-
static void
cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
{
@@ -181,14 +200,6 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
}
out_reparse:
- /*
- * We need to revalidate it further to make a decision about whether it
- * is a symbolic link, DFS referral or a reparse point with a direct
- * access like junctions, deduplicated files, NFS symlinks.
- */
- if (reparse_file_needs_reval(fattr))
- fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
-
/* non-unix readdir doesn't provide nlink */
fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
@@ -269,9 +280,6 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
fattr->cf_dtype = DT_REG;
}
- if (reparse_file_needs_reval(fattr))
- fattr->cf_flags |= CIFS_FATTR_NEED_REVAL;
-
sid_to_id(cifs_sb, &parsed.owner, fattr, SIDOWNER);
sid_to_id(cifs_sb, &parsed.group, fattr, SIDGROUP);
}
@@ -333,38 +341,6 @@ cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
cifs_fill_common_info(fattr, cifs_sb);
}
-/* BB eventually need to add the following helper function to
- resolve NT_STATUS_STOPPED_ON_SYMLINK return code when
- we try to do FindFirst on (NTFS) directory symlinks */
-/*
-int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
- unsigned int xid)
-{
- __u16 fid;
- int len;
- int oplock = 0;
- int rc;
- struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
- char *tmpbuffer;
-
- rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
- OPEN_REPARSE_POINT, &fid, &oplock, NULL,
- cifs_sb->local_nls,
- cifs_remap(cifs_sb);
- if (!rc) {
- tmpbuffer = kmalloc(maxpath);
- rc = CIFSSMBQueryReparseLinkInfo(xid, ptcon, full_path,
- tmpbuffer,
- maxpath -1,
- fid,
- cifs_sb->local_nls);
- if (CIFSSMBClose(xid, ptcon, fid)) {
- cifs_dbg(FYI, "Error closing temporary reparsepoint open\n");
- }
- }
-}
- */
-
static int
_initiate_cifs_search(const unsigned int xid, struct file *file,
const char *full_path)
@@ -433,13 +409,10 @@ ffirst_retry:
&cifsFile->fid, search_flags,
&cifsFile->srch_inf);
- if (rc == 0)
+ if (rc == 0) {
cifsFile->invalidHandle = false;
- /* BB add following call to handle readdir on new NTFS symlink errors
- else if STATUS_STOPPED_ON_SYMLINK
- call get_symlink_reparse_path and retry with new path */
- else if ((rc == -EOPNOTSUPP) &&
- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
+ } else if ((rc == -EOPNOTSUPP) &&
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) {
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
goto ffirst_retry;
}
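
The readdir change above avoids clobbering cached reparse-point state by comparing the only two cheap fields a query-dir response reliably provides: the reparse tag and ctime. Below is a minimal userspace sketch of that comparison; ATTR_REPARSE (0x0400) and the symlink tag 0xA000000C are the usual SMB constants, while the structs and function names are stand-ins, not kernel types.

/* Minimal userspace sketch of the "reparse tag + ctime" match used above.
 * The cifs_cached / dir_entry structs are illustrative stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define ATTR_REPARSE 0x0400

struct cifs_cached {            /* what the client already has for the inode */
	uint32_t attrs;         /* DOS attributes, e.g. ATTR_REPARSE */
	uint32_t reparse_tag;
	struct timespec ctime;
};

struct dir_entry {              /* what a query-dir response reports */
	uint32_t attrs;
	uint32_t reparse_tag;
	struct timespec ctime;
};

static bool reparse_entry_matches(const struct cifs_cached *c,
				  const struct dir_entry *e)
{
	return (c->attrs & ATTR_REPARSE) &&
	       c->reparse_tag == e->reparse_tag &&
	       c->ctime.tv_sec == e->ctime.tv_sec &&
	       c->ctime.tv_nsec == e->ctime.tv_nsec;
}

int main(void)
{
	struct cifs_cached c = { ATTR_REPARSE, 0xA000000C, { 100, 0 } };
	struct dir_entry e  = { ATTR_REPARSE, 0xA000000C, { 100, 0 } };

	/* Match: keep cached mode/size/symlink target, skip revalidation. */
	printf("match: %d\n", reparse_entry_matches(&c, &e));

	e.ctime.tv_sec = 200;   /* server touched the reparse data */
	printf("match: %d\n", reparse_entry_matches(&c, &e));
	return 0;
}
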
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index 94c5d50aa3..5de32640f0 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -230,7 +230,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
- cifs_dbg(VFS, "server %s does not advertise interfaces\n",
+ cifs_dbg(ONCE, "server %s does not advertise interfaces\n",
ses->server->hostname);
break;
}
@@ -361,10 +361,9 @@ done:
/*
* update the iface for the channel if necessary.
- * will return 0 when iface is updated, 1 if removed, 2 otherwise
* Must be called with chan_lock held.
*/
-int
+void
cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
{
unsigned int chan_index;
@@ -373,20 +372,19 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
struct cifs_server_iface *old_iface = NULL;
struct cifs_server_iface *last_iface = NULL;
struct sockaddr_storage ss;
- int rc = 0;
spin_lock(&ses->chan_lock);
chan_index = cifs_ses_get_chan_index(ses, server);
if (chan_index == CIFS_INVAL_CHAN_INDEX) {
spin_unlock(&ses->chan_lock);
- return 0;
+ return;
}
if (ses->chans[chan_index].iface) {
old_iface = ses->chans[chan_index].iface;
if (old_iface->is_active) {
spin_unlock(&ses->chan_lock);
- return 1;
+ return;
}
}
spin_unlock(&ses->chan_lock);
@@ -398,8 +396,8 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
spin_lock(&ses->iface_lock);
if (!ses->iface_count) {
spin_unlock(&ses->iface_lock);
- cifs_dbg(VFS, "server %s does not advertise interfaces\n", ses->server->hostname);
- return 0;
+ cifs_dbg(ONCE, "server %s does not advertise interfaces\n", ses->server->hostname);
+ return;
}
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
@@ -439,7 +437,6 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
}
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
- rc = 1;
iface = NULL;
cifs_dbg(FYI, "unable to find a suitable iface\n");
}
@@ -454,7 +451,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
}
spin_unlock(&ses->iface_lock);
- return 0;
+ return;
}
/* now drop the ref to the current iface */
@@ -472,28 +469,24 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
kref_put(&old_iface->refcount, release_iface);
} else if (!chan_index) {
/* special case: update interface for primary channel */
- if (iface) {
- cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
- &iface->sockaddr);
- iface->num_channels++;
- iface->weight_fulfilled++;
- }
+ cifs_dbg(FYI, "referencing primary channel iface: %pIS\n",
+ &iface->sockaddr);
+ iface->num_channels++;
+ iface->weight_fulfilled++;
}
spin_unlock(&ses->iface_lock);
- if (iface) {
- spin_lock(&ses->chan_lock);
- chan_index = cifs_ses_get_chan_index(ses, server);
- if (chan_index == CIFS_INVAL_CHAN_INDEX) {
- spin_unlock(&ses->chan_lock);
- return 0;
- }
-
- ses->chans[chan_index].iface = iface;
+ spin_lock(&ses->chan_lock);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+ if (chan_index == CIFS_INVAL_CHAN_INDEX) {
spin_unlock(&ses->chan_lock);
+ return;
}
- return rc;
+ ses->chans[chan_index].iface = iface;
+ spin_unlock(&ses->chan_lock);
+
+ return;
}
/*
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index ba734395b0..4852afe392 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -5429,6 +5429,7 @@ struct smb_version_operations smb30_operations = {
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
.qfs_tcon = smb3_qfs_tcon,
+ .query_server_interfaces = SMB3_request_interfaces,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
@@ -5543,6 +5544,7 @@ struct smb_version_operations smb311_operations = {
.tree_connect = SMB2_tcon,
.tree_disconnect = SMB2_tdis,
.qfs_tcon = smb3_qfs_tcon,
+ .query_server_interfaces = SMB3_request_interfaces,
.is_path_accessible = smb2_is_path_accessible,
.can_echo = smb2_can_echo,
.echo = SMB2_echo,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 9d34a55fdb..4d7d0bdf7a 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -409,14 +409,15 @@ skip_sess_setup:
spin_unlock(&ses->ses_lock);
if (!rc &&
- (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
+ server->ops->query_server_interfaces) {
mutex_unlock(&ses->session_mutex);
/*
* query server network interfaces, in case they change
*/
xid = get_xid();
- rc = SMB3_request_interfaces(xid, tcon, false);
+ rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
@@ -1536,6 +1537,11 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
+ if (rc == 0)
+ sess_data->ses->expired_pwd = false;
+ else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED))
+ sess_data->ses->expired_pwd = true;
+
memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
return rc;
diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
index 03dded29a9..727cb49926 100644
--- a/fs/smb/server/smb2misc.c
+++ b/fs/smb/server/smb2misc.c
@@ -101,13 +101,17 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
*len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength);
break;
case SMB2_TREE_CONNECT:
- *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset);
+ *off = max_t(unsigned short int,
+ le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset),
+ offsetof(struct smb2_tree_connect_req, Buffer));
*len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength);
break;
case SMB2_CREATE:
{
unsigned short int name_off =
- le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset);
+ max_t(unsigned short int,
+ le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset),
+ offsetof(struct smb2_create_req, Buffer));
unsigned short int name_len =
le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength);
@@ -128,11 +132,15 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
break;
}
case SMB2_QUERY_INFO:
- *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset);
+ *off = max_t(unsigned int,
+ le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset),
+ offsetof(struct smb2_query_info_req, Buffer));
*len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength);
break;
case SMB2_SET_INFO:
- *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset);
+ *off = max_t(unsigned int,
+ le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset),
+ offsetof(struct smb2_set_info_req, Buffer));
*len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength);
break;
case SMB2_READ:
@@ -142,7 +150,7 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
case SMB2_WRITE:
if (((struct smb2_write_req *)hdr)->DataOffset ||
((struct smb2_write_req *)hdr)->Length) {
- *off = max_t(unsigned int,
+ *off = max_t(unsigned short int,
le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset),
offsetof(struct smb2_write_req, Buffer));
*len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length);
@@ -153,7 +161,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
*len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength);
break;
case SMB2_QUERY_DIRECTORY:
- *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset);
+ *off = max_t(unsigned short int,
+ le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset),
+ offsetof(struct smb2_query_directory_req, Buffer));
*len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength);
break;
case SMB2_LOCK:
@@ -168,7 +178,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len,
break;
}
case SMB2_IOCTL:
- *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset);
+ *off = max_t(unsigned int,
+ le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset),
+ offsetof(struct smb2_ioctl_req, Buffer));
*len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount);
break;
default:
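
The ksmbd hunks above all apply the same hardening: a client-supplied data offset is clamped with max_t() to at least offsetof(struct ..., Buffer), so a malformed request cannot point the parser back into the fixed-size header. A small userspace sketch of the idea follows; the request layout is made up, not the SMB2 wire format.

/* Userspace sketch of the offset clamp pattern used above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fake_req {
	uint32_t fixed_field;
	uint16_t data_offset;   /* attacker-controlled */
	uint16_t data_length;   /* attacker-controlled */
	uint8_t  buffer[];      /* variable payload starts here */
};

#define MAX_T(a, b) ((a) > (b) ? (a) : (b))

static size_t clamp_offset(uint16_t wire_offset)
{
	/* Same idea as max_t(..., le16_to_cpu(off), offsetof(req, Buffer)) */
	return MAX_T((size_t)wire_offset, offsetof(struct fake_req, buffer));
}

int main(void)
{
	printf("offset 0  -> %zu\n", clamp_offset(0));   /* clamped to header end */
	printf("offset 64 -> %zu\n", clamp_offset(64));  /* kept as-is */
	return 0;
}
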
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 0c97d3c860..88db6e207e 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1951,7 +1951,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
WORK_BUFFERS(work, req, rsp);
- treename = smb_strndup_from_utf16(req->Buffer,
+ treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset),
le16_to_cpu(req->PathLength), true,
conn->local_nls);
if (IS_ERR(treename)) {
@@ -2704,7 +2704,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out2;
}
- name = smb2_get_name(req->Buffer,
+ name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
le16_to_cpu(req->NameLength),
work->conn->local_nls);
if (IS_ERR(name)) {
@@ -3828,11 +3828,16 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv)
}
ksmbd_kstat.kstat = &kstat;
- if (priv->info_level != FILE_NAMES_INFORMATION)
- ksmbd_vfs_fill_dentry_attrs(priv->work,
- idmap,
- dent,
- &ksmbd_kstat);
+ if (priv->info_level != FILE_NAMES_INFORMATION) {
+ rc = ksmbd_vfs_fill_dentry_attrs(priv->work,
+ idmap,
+ dent,
+ &ksmbd_kstat);
+ if (rc) {
+ dput(dent);
+ continue;
+ }
+ }
rc = smb2_populate_readdir_entry(priv->work->conn,
priv->info_level,
@@ -4075,7 +4080,7 @@ int smb2_query_dir(struct ksmbd_work *work)
}
srch_flag = req->Flags;
- srch_ptr = smb_strndup_from_utf16(req->Buffer,
+ srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset),
le16_to_cpu(req->FileNameLength), 1,
conn->local_nls);
if (IS_ERR(srch_ptr)) {
@@ -4335,7 +4340,8 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
sizeof(struct smb2_ea_info_req))
return -EINVAL;
- ea_req = (struct smb2_ea_info_req *)req->Buffer;
+ ea_req = (struct smb2_ea_info_req *)((char *)req +
+ le16_to_cpu(req->InputBufferOffset));
} else {
/* need to send all EAs, if no specific EA is requested*/
if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY)
@@ -4480,6 +4486,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
struct smb2_file_basic_info *basic_info;
struct kstat stat;
u64 time;
+ int ret;
if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
pr_err("no right to read the attributes : 0x%x\n",
@@ -4487,9 +4494,12 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
return -EACCES;
}
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
+
basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
- file_inode(fp->filp), &stat);
basic_info->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
basic_info->LastAccessTime = cpu_to_le64(time);
@@ -4504,27 +4514,31 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
return 0;
}
-static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
- struct ksmbd_file *fp, void *rsp_org)
+static int get_file_standard_info(struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp, void *rsp_org)
{
struct smb2_file_standard_info *sinfo;
unsigned int delete_pending;
- struct inode *inode;
struct kstat stat;
+ int ret;
- inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
delete_pending = ksmbd_inode_pending_delete(fp);
- sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
+ sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
sinfo->DeletePending = delete_pending;
sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_standard_info));
+
+ return 0;
}
static void get_file_alignment_info(struct smb2_query_info_rsp *rsp,
@@ -4546,11 +4560,11 @@ static int get_file_all_info(struct ksmbd_work *work,
struct ksmbd_conn *conn = work->conn;
struct smb2_file_all_info *file_info;
unsigned int delete_pending;
- struct inode *inode;
struct kstat stat;
int conv_len;
char *filename;
u64 time;
+ int ret;
if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
ksmbd_debug(SMB, "no right to read the attributes : 0x%x\n",
@@ -4562,8 +4576,10 @@ static int get_file_all_info(struct ksmbd_work *work,
if (IS_ERR(filename))
return PTR_ERR(filename);
- inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
ksmbd_debug(SMB, "filename = %s\n", filename);
delete_pending = ksmbd_inode_pending_delete(fp);
@@ -4579,7 +4595,7 @@ static int get_file_all_info(struct ksmbd_work *work,
file_info->Attributes = fp->f_ci->m_fattr;
file_info->Pad1 = 0;
file_info->AllocationSize =
- cpu_to_le64(inode->i_blocks << 9);
+ cpu_to_le64(stat.blocks << 9);
file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
file_info->NumberOfLinks =
cpu_to_le32(get_nlink(&stat) - delete_pending);
@@ -4623,10 +4639,10 @@ static void get_file_alternate_info(struct ksmbd_work *work,
cpu_to_le32(sizeof(struct smb2_file_alt_name_info) + conv_len);
}
-static void get_file_stream_info(struct ksmbd_work *work,
- struct smb2_query_info_rsp *rsp,
- struct ksmbd_file *fp,
- void *rsp_org)
+static int get_file_stream_info(struct ksmbd_work *work,
+ struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp,
+ void *rsp_org)
{
struct ksmbd_conn *conn = work->conn;
struct smb2_file_stream_info *file_info;
@@ -4637,9 +4653,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
int nbytes = 0, streamlen, stream_name_len, next, idx = 0;
int buf_free_len;
struct smb2_query_info_req *req = ksmbd_req_buf_next(work);
+ int ret;
+
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
- file_inode(fp->filp), &stat);
file_info = (struct smb2_file_stream_info *)rsp->Buffer;
buf_free_len =
@@ -4720,29 +4740,37 @@ out:
kvfree(xattr_list);
rsp->OutputBufferLength = cpu_to_le32(nbytes);
+
+ return 0;
}
-static void get_file_internal_info(struct smb2_query_info_rsp *rsp,
- struct ksmbd_file *fp, void *rsp_org)
+static int get_file_internal_info(struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp, void *rsp_org)
{
struct smb2_file_internal_info *file_info;
struct kstat stat;
+ int ret;
+
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
- file_inode(fp->filp), &stat);
file_info = (struct smb2_file_internal_info *)rsp->Buffer;
file_info->IndexNumber = cpu_to_le64(stat.ino);
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_internal_info));
+
+ return 0;
}
static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
struct ksmbd_file *fp, void *rsp_org)
{
struct smb2_file_ntwrk_info *file_info;
- struct inode *inode;
struct kstat stat;
u64 time;
+ int ret;
if (!(fp->daccess & FILE_READ_ATTRIBUTES_LE)) {
pr_err("no right to read the attributes : 0x%x\n",
@@ -4750,10 +4778,12 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
return -EACCES;
}
- file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
- inode = file_inode(fp->filp);
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS, inode, &stat);
+ file_info = (struct smb2_file_ntwrk_info *)rsp->Buffer;
file_info->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
@@ -4763,8 +4793,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
time = ksmbd_UnixTimeToNT(stat.ctime);
file_info->ChangeTime = cpu_to_le64(time);
file_info->Attributes = fp->f_ci->m_fattr;
- file_info->AllocationSize =
- cpu_to_le64(inode->i_blocks << 9);
+ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
file_info->Reserved = cpu_to_le32(0);
rsp->OutputBufferLength =
@@ -4804,14 +4833,17 @@ static void get_file_mode_info(struct smb2_query_info_rsp *rsp,
cpu_to_le32(sizeof(struct smb2_file_mode_info));
}
-static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
- struct ksmbd_file *fp, void *rsp_org)
+static int get_file_compression_info(struct smb2_query_info_rsp *rsp,
+ struct ksmbd_file *fp, void *rsp_org)
{
struct smb2_file_comp_info *file_info;
struct kstat stat;
+ int ret;
- generic_fillattr(file_mnt_idmap(fp->filp), STATX_BASIC_STATS,
- file_inode(fp->filp), &stat);
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
file_info = (struct smb2_file_comp_info *)rsp->Buffer;
file_info->CompressedFileSize = cpu_to_le64(stat.blocks << 9);
@@ -4823,6 +4855,8 @@ static void get_file_compression_info(struct smb2_query_info_rsp *rsp,
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_comp_info));
+
+ return 0;
}
static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
@@ -4844,7 +4878,7 @@ static int get_file_attribute_tag_info(struct smb2_query_info_rsp *rsp,
return 0;
}
-static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
+static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
struct ksmbd_file *fp, void *rsp_org)
{
struct smb311_posix_qinfo *file_info;
@@ -4852,24 +4886,31 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
struct mnt_idmap *idmap = file_mnt_idmap(fp->filp);
vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
+ struct kstat stat;
u64 time;
int out_buf_len = sizeof(struct smb311_posix_qinfo) + 32;
+ int ret;
+
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret)
+ return ret;
file_info = (struct smb311_posix_qinfo *)rsp->Buffer;
file_info->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
+ time = ksmbd_UnixTimeToNT(stat.atime);
file_info->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
+ time = ksmbd_UnixTimeToNT(stat.mtime);
file_info->LastWriteTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
+ time = ksmbd_UnixTimeToNT(stat.ctime);
file_info->ChangeTime = cpu_to_le64(time);
file_info->DosAttributes = fp->f_ci->m_fattr;
- file_info->Inode = cpu_to_le64(inode->i_ino);
- file_info->EndOfFile = cpu_to_le64(inode->i_size);
- file_info->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
- file_info->HardLinks = cpu_to_le32(inode->i_nlink);
- file_info->Mode = cpu_to_le32(inode->i_mode & 0777);
- file_info->DeviceId = cpu_to_le32(inode->i_rdev);
+ file_info->Inode = cpu_to_le64(stat.ino);
+ file_info->EndOfFile = cpu_to_le64(stat.size);
+ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ file_info->HardLinks = cpu_to_le32(stat.nlink);
+ file_info->Mode = cpu_to_le32(stat.mode & 0777);
+ file_info->DeviceId = cpu_to_le32(stat.rdev);
/*
* Sids(32) contain two sids(Domain sid(16), UNIX group sid(16)).
@@ -4882,6 +4923,8 @@ static void find_file_posix_info(struct smb2_query_info_rsp *rsp,
SIDUNIX_GROUP, (struct smb_sid *)&file_info->Sids[16]);
rsp->OutputBufferLength = cpu_to_le32(out_buf_len);
+
+ return 0;
}
static int smb2_get_info_file(struct ksmbd_work *work,
@@ -4930,7 +4973,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
break;
case FILE_STANDARD_INFORMATION:
- get_file_standard_info(rsp, fp, work->response_buf);
+ rc = get_file_standard_info(rsp, fp, work->response_buf);
break;
case FILE_ALIGNMENT_INFORMATION:
@@ -4946,11 +4989,11 @@ static int smb2_get_info_file(struct ksmbd_work *work,
break;
case FILE_STREAM_INFORMATION:
- get_file_stream_info(work, rsp, fp, work->response_buf);
+ rc = get_file_stream_info(work, rsp, fp, work->response_buf);
break;
case FILE_INTERNAL_INFORMATION:
- get_file_internal_info(rsp, fp, work->response_buf);
+ rc = get_file_internal_info(rsp, fp, work->response_buf);
break;
case FILE_NETWORK_OPEN_INFORMATION:
@@ -4974,7 +5017,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
break;
case FILE_COMPRESSION_INFORMATION:
- get_file_compression_info(rsp, fp, work->response_buf);
+ rc = get_file_compression_info(rsp, fp, work->response_buf);
break;
case FILE_ATTRIBUTE_TAG_INFORMATION:
@@ -4985,7 +5028,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
rc = -EOPNOTSUPP;
} else {
- find_file_posix_info(rsp, fp, work->response_buf);
+ rc = find_file_posix_info(rsp, fp, work->response_buf);
}
break;
default:
@@ -5398,7 +5441,6 @@ int smb2_close(struct ksmbd_work *work)
struct smb2_close_rsp *rsp;
struct ksmbd_conn *conn = work->conn;
struct ksmbd_file *fp;
- struct inode *inode;
u64 time;
int err = 0;
@@ -5453,24 +5495,33 @@ int smb2_close(struct ksmbd_work *work)
rsp->Reserved = 0;
if (req->Flags == SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB) {
+ struct kstat stat;
+ int ret;
+
fp = ksmbd_lookup_fd_fast(work, volatile_id);
if (!fp) {
err = -ENOENT;
goto out;
}
- inode = file_inode(fp->filp);
+ ret = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (ret) {
+ ksmbd_fd_put(work, fp);
+ goto out;
+ }
+
rsp->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
- rsp->AllocationSize = S_ISDIR(inode->i_mode) ? 0 :
- cpu_to_le64(inode->i_blocks << 9);
- rsp->EndOfFile = cpu_to_le64(inode->i_size);
+ rsp->AllocationSize = S_ISDIR(stat.mode) ? 0 :
+ cpu_to_le64(stat.blocks << 9);
+ rsp->EndOfFile = cpu_to_le64(stat.size);
rsp->Attributes = fp->f_ci->m_fattr;
rsp->CreationTime = cpu_to_le64(fp->create_time);
- time = ksmbd_UnixTimeToNT(inode_get_atime(inode));
+ time = ksmbd_UnixTimeToNT(stat.atime);
rsp->LastAccessTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode_get_mtime(inode));
+ time = ksmbd_UnixTimeToNT(stat.mtime);
rsp->LastWriteTime = cpu_to_le64(time);
- time = ksmbd_UnixTimeToNT(inode_get_ctime(inode));
+ time = ksmbd_UnixTimeToNT(stat.ctime);
rsp->ChangeTime = cpu_to_le64(time);
ksmbd_fd_put(work, fp);
} else {
@@ -5759,15 +5810,21 @@ static int set_file_allocation_info(struct ksmbd_work *work,
loff_t alloc_blks;
struct inode *inode;
+ struct kstat stat;
int rc;
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
+ rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
+ AT_STATX_SYNC_AS_STAT);
+ if (rc)
+ return rc;
+
alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
inode = file_inode(fp->filp);
- if (alloc_blks > inode->i_blocks) {
+ if (alloc_blks > stat.blocks) {
smb_break_all_levII_oplock(work, fp, 1);
rc = vfs_fallocate(fp->filp, FALLOC_FL_KEEP_SIZE, 0,
alloc_blks * 512);
@@ -5775,7 +5832,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
pr_err("vfs_fallocate is failed : %d\n", rc);
return rc;
}
- } else if (alloc_blks < inode->i_blocks) {
+ } else if (alloc_blks < stat.blocks) {
loff_t size;
/*
@@ -5930,6 +5987,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
struct ksmbd_share_config *share)
{
unsigned int buf_len = le32_to_cpu(req->BufferLength);
+ char *buffer = (char *)req + le16_to_cpu(req->BufferOffset);
switch (req->FileInfoClass) {
case FILE_BASIC_INFORMATION:
@@ -5937,7 +5995,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
if (buf_len < sizeof(struct smb2_file_basic_info))
return -EINVAL;
- return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+ return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share);
}
case FILE_ALLOCATION_INFORMATION:
{
@@ -5945,7 +6003,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EINVAL;
return set_file_allocation_info(work, fp,
- (struct smb2_file_alloc_info *)req->Buffer);
+ (struct smb2_file_alloc_info *)buffer);
}
case FILE_END_OF_FILE_INFORMATION:
{
@@ -5953,7 +6011,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EINVAL;
return set_end_of_file_info(work, fp,
- (struct smb2_file_eof_info *)req->Buffer);
+ (struct smb2_file_eof_info *)buffer);
}
case FILE_RENAME_INFORMATION:
{
@@ -5961,7 +6019,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EINVAL;
return set_rename_info(work, fp,
- (struct smb2_file_rename_info *)req->Buffer,
+ (struct smb2_file_rename_info *)buffer,
buf_len);
}
case FILE_LINK_INFORMATION:
@@ -5970,7 +6028,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EINVAL;
return smb2_create_link(work, work->tcon->share_conf,
- (struct smb2_file_link_info *)req->Buffer,
+ (struct smb2_file_link_info *)buffer,
buf_len, fp->filp,
work->conn->local_nls);
}
@@ -5980,7 +6038,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EINVAL;
return set_file_disposition_info(fp,
- (struct smb2_file_disposition_info *)req->Buffer);
+ (struct smb2_file_disposition_info *)buffer);
}
case FILE_FULL_EA_INFORMATION:
{
@@ -5993,7 +6051,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
if (buf_len < sizeof(struct smb2_ea_info))
return -EINVAL;
- return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+ return smb2_set_ea((struct smb2_ea_info *)buffer,
buf_len, &fp->filp->f_path, true);
}
case FILE_POSITION_INFORMATION:
@@ -6001,14 +6059,14 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
if (buf_len < sizeof(struct smb2_file_pos_info))
return -EINVAL;
- return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+ return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer);
}
case FILE_MODE_INFORMATION:
{
if (buf_len < sizeof(struct smb2_file_mode_info))
return -EINVAL;
- return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+ return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer);
}
}
@@ -6089,7 +6147,7 @@ int smb2_set_info(struct ksmbd_work *work)
}
rc = smb2_set_info_sec(fp,
le32_to_cpu(req->AdditionalInformation),
- req->Buffer,
+ (char *)req + le16_to_cpu(req->BufferOffset),
le32_to_cpu(req->BufferLength));
ksmbd_revert_fsids(work);
break;
@@ -7535,7 +7593,7 @@ static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id,
struct smb2_ioctl_rsp *rsp)
{
struct ksmbd_rpc_command *rpc_resp;
- char *data_buf = (char *)&req->Buffer[0];
+ char *data_buf = (char *)req + le32_to_cpu(req->InputOffset);
int nbytes = 0;
rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf,
@@ -7648,6 +7706,7 @@ int smb2_ioctl(struct ksmbd_work *work)
u64 id = KSMBD_NO_FID;
struct ksmbd_conn *conn = work->conn;
int ret = 0;
+ char *buffer;
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
@@ -7670,6 +7729,8 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
}
+ buffer = (char *)req + le32_to_cpu(req->InputOffset);
+
cnt_code = le32_to_cpu(req->CtlCode);
ret = smb2_calc_max_out_buf_len(work, 48,
le32_to_cpu(req->MaxOutputResponse));
@@ -7727,7 +7788,7 @@ int smb2_ioctl(struct ksmbd_work *work)
}
ret = fsctl_validate_negotiate_info(conn,
- (struct validate_negotiate_info_req *)&req->Buffer[0],
+ (struct validate_negotiate_info_req *)buffer,
(struct validate_negotiate_info_rsp *)&rsp->Buffer[0],
in_buf_len);
if (ret < 0)
@@ -7780,7 +7841,7 @@ int smb2_ioctl(struct ksmbd_work *work)
rsp->VolatileFileId = req->VolatileFileId;
rsp->PersistentFileId = req->PersistentFileId;
fsctl_copychunk(work,
- (struct copychunk_ioctl_req *)&req->Buffer[0],
+ (struct copychunk_ioctl_req *)buffer,
le32_to_cpu(req->CtlCode),
le32_to_cpu(req->InputCount),
req->VolatileFileId,
@@ -7793,8 +7854,7 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
}
- ret = fsctl_set_sparse(work, id,
- (struct file_sparse *)&req->Buffer[0]);
+ ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer);
if (ret < 0)
goto out;
break;
@@ -7817,7 +7877,7 @@ int smb2_ioctl(struct ksmbd_work *work)
}
zero_data =
- (struct file_zero_data_information *)&req->Buffer[0];
+ (struct file_zero_data_information *)buffer;
off = le64_to_cpu(zero_data->FileOffset);
bfz = le64_to_cpu(zero_data->BeyondFinalZero);
@@ -7848,7 +7908,7 @@ int smb2_ioctl(struct ksmbd_work *work)
}
ret = fsctl_query_allocated_ranges(work, id,
- (struct file_allocated_range_buffer *)&req->Buffer[0],
+ (struct file_allocated_range_buffer *)buffer,
(struct file_allocated_range_buffer *)&rsp->Buffer[0],
out_buf_len /
sizeof(struct file_allocated_range_buffer), &nbytes);
@@ -7892,7 +7952,7 @@ int smb2_ioctl(struct ksmbd_work *work)
goto out;
}
- dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0];
+ dup_ext = (struct duplicate_extents_to_file *)buffer;
fp_in = ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle,
dup_ext->PersistentFileHandle);
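
The get_file_*_info() and smb2_close() hunks above switch from generic_fillattr() on the cached inode to vfs_getattr(), so the underlying filesystem reports fresh attributes and any failure is propagated instead of ignored. A rough userspace analogue is sketched below, statx(2) being the syscall counterpart of vfs_getattr(); the fill_file_info() helper is hypothetical.

/* Hedged userspace analogue: fetch fresh attributes and propagate errors. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int fill_file_info(int fd, unsigned long long *alloc_size,
			  unsigned long long *end_of_file)
{
	struct statx stx;

	/* AT_STATX_SYNC_AS_STAT is the default (0) sync behaviour. */
	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) < 0)
		return -errno;          /* caller maps this to an SMB status */

	*alloc_size  = (unsigned long long)stx.stx_blocks << 9;
	*end_of_file = S_ISDIR(stx.stx_mode) ? 0 : stx.stx_size;
	return 0;
}

int main(void)
{
	unsigned long long alloc, eof;
	int fd = open(".", O_RDONLY);

	if (fd < 0 || fill_file_info(fd, &alloc, &eof))
		return 1;
	printf("alloc=%llu eof=%llu\n", alloc, eof);
	close(fd);
	return 0;
}
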
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 7c98bf6997..fcaf373cc0 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -457,10 +457,13 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
}
ksmbd_kstat.kstat = &kstat;
- ksmbd_vfs_fill_dentry_attrs(work,
- idmap,
- dentry,
- &ksmbd_kstat);
+ rc = ksmbd_vfs_fill_dentry_attrs(work,
+ idmap,
+ dentry,
+ &ksmbd_kstat);
+ if (rc)
+ break;
+
rc = fn(conn, info_level, d_info, &ksmbd_kstat);
if (rc)
break;
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 4277750a6d..a8936aba77 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1669,11 +1669,19 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
struct dentry *dentry,
struct ksmbd_kstat *ksmbd_kstat)
{
+ struct ksmbd_share_config *share_conf = work->tcon->share_conf;
u64 time;
int rc;
+ struct path path = {
+ .mnt = share_conf->vfs_path.mnt,
+ .dentry = dentry,
+ };
- generic_fillattr(idmap, STATX_BASIC_STATS, d_inode(dentry),
- ksmbd_kstat->kstat);
+ rc = vfs_getattr(&path, ksmbd_kstat->kstat,
+ STATX_BASIC_STATS | STATX_BTIME,
+ AT_STATX_SYNC_AS_STAT);
+ if (rc)
+ return rc;
time = ksmbd_UnixTimeToNT(ksmbd_kstat->kstat->ctime);
ksmbd_kstat->create_time = time;
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 2d2b39f843..abf4a77584 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -261,9 +261,6 @@ static int write_begin_slow(struct address_space *mapping,
return err;
}
}
-
- SetPageUptodate(page);
- ClearPageError(page);
}
if (PagePrivate(page))
@@ -462,9 +459,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
return err;
}
}
-
- SetPageUptodate(page);
- ClearPageError(page);
}
err = allocate_budget(c, page, ui, appending);
@@ -474,10 +468,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
* If we skipped reading the page because we were going to
* write all of it, then it is not up to date.
*/
- if (skipped_read) {
+ if (skipped_read)
ClearPageChecked(page);
- ClearPageUptodate(page);
- }
/*
* Budgeting failed which means it would have to force
* write-back but didn't, because we set the @fast flag in the
@@ -568,6 +560,9 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
goto out;
}
+ if (len == PAGE_SIZE)
+ SetPageUptodate(page);
+
if (!PagePrivate(page)) {
attach_page_private(page, (void *)1);
atomic_long_inc(&c->dirty_pg_cnt);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 9ef461aa9b..99aea53601 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -558,6 +558,37 @@ struct drm_bridge_funcs {
struct drm_connector *connector);
/**
+ * @edid_read:
+ *
+ * Read the EDID data of the connected display.
+ *
+ * The @edid_read callback is the preferred way of reporting mode
+ * information for a display connected to the bridge output. Bridges
+ * that support reading EDID shall implement this callback and leave
+ * the @get_modes callback unimplemented.
+ *
+ * The caller of this operation shall first verify the output
+ * connection status and refrain from reading EDID from a disconnected
+ * output.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of EDID
+ * retrieval, and shall not be stored internally by bridge drivers for
+ * future usage.
+ *
+ * RETURNS:
+ *
+ * An edid structure newly allocated with drm_edid_alloc() or returned
+ * from drm_edid_read() family of functions on success, or NULL
+ * otherwise. The caller is responsible for freeing the returned edid
+ * structure with drm_edid_free().
+ */
+ const struct drm_edid *(*edid_read)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
* @get_edid:
*
* Read and parse the EDID data of the connected display.
@@ -888,6 +919,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
+const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector);
struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector);
void drm_bridge_hpd_enable(struct drm_bridge *bridge,
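
The new @edid_read hook above returns a struct drm_edid rather than a raw struct edid. A hedged in-tree sketch of how a bridge driver might wire it up is shown below; my_bridge and bridge_to_my_bridge() are hypothetical, while drm_edid_read_ddc() and DRM_BRIDGE_OP_EDID are existing DRM helpers.

/* Sketch only: assumes a bridge with a DDC i2c adapter for its output. */
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>

struct my_bridge {
	struct drm_bridge bridge;
	struct i2c_adapter *ddc;        /* DDC bus of the downstream display */
};

static inline struct my_bridge *bridge_to_my_bridge(struct drm_bridge *b)
{
	return container_of(b, struct my_bridge, bridge);
}

static const struct drm_edid *
my_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
	struct my_bridge *mb = bridge_to_my_bridge(bridge);

	/* Caller has already verified the output connection status. */
	return drm_edid_read_ddc(connector, mb->ddc);
}

static const struct drm_bridge_funcs my_bridge_funcs = {
	.edid_read = my_bridge_edid_read,
	/* .attach, .detect, ... */
};

/* At probe time: mb->bridge.funcs = &my_bridge_funcs;
 *                mb->bridge.ops  |= DRM_BRIDGE_OP_EDID; */
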
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index 6ea339d5de..81572d32db 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -71,7 +71,6 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
}
#define DRM_FIXED_POINT 32
-#define DRM_FIXED_POINT_HALF 16
#define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
#define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
#define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
@@ -90,12 +89,12 @@ static inline int drm_fixp2int(s64 a)
static inline int drm_fixp2int_round(s64 a)
{
- return drm_fixp2int(a + (1 << (DRM_FIXED_POINT_HALF - 1)));
+ return drm_fixp2int(a + DRM_FIXED_ONE / 2);
}
static inline int drm_fixp2int_ceil(s64 a)
{
- if (a > 0)
+ if (a >= 0)
return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
else
return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
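
The drm_fixp2int_round() fix above matters because the values are 32.32 fixed point: rounding needs half of DRM_FIXED_ONE (1 << 31) added before truncation, not the 1 << 15 the removed code used. A standalone check of the two behaviours, using local copies of the macros:

/* Demonstrates the old vs. fixed rounding for 2.5 in 32.32 fixed point. */
#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT 32
#define FIXED_ONE   (1ULL << FIXED_POINT)

static int fixp2int(int64_t a)            /* truncate via arithmetic shift */
{
	return (int)(a >> FIXED_POINT);
}

static int fixp2int_round_old(int64_t a)  /* buggy: adds only 1 << 15 */
{
	return fixp2int(a + (1 << 15));
}

static int fixp2int_round_new(int64_t a)  /* fixed: adds one half */
{
	return fixp2int(a + FIXED_ONE / 2);
}

int main(void)
{
	int64_t two_and_a_half = 2 * FIXED_ONE + FIXED_ONE / 2;

	printf("old: %d, new: %d\n",
	       fixp2int_round_old(two_and_a_half),   /* 2 (wrong) */
	       fixp2int_round_new(two_and_a_half));  /* 3 */
	return 0;
}
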
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index ba483c87f0..3ae1989222 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -3,6 +3,8 @@
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <drm/drm_drv.h>
+
#include <linux/device.h>
#include <kunit/test.h>
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index e3c3ac6159..159213786e 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -898,7 +898,8 @@ struct drm_connector_helper_funcs {
*
* RETURNS:
*
- * The number of modes added by calling drm_mode_probed_add().
+ * The number of modes added by calling drm_mode_probed_add(). Return 0
+ * on failures (no modes) instead of negative error codes.
*/
int (*get_modes)(struct drm_connector *connector);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index a4eff85b1f..2b9d856ff3 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -79,6 +79,12 @@ struct ttm_tt {
* page_flags = TTM_TT_FLAG_EXTERNAL |
* TTM_TT_FLAG_EXTERNAL_MAPPABLE;
*
+ * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as
+ * not encrypted. The framework will try to match what the dma layer
+ * is doing, but note that it is a little fragile because ttm page
+ * fault handling abuses the DMA API a bit and dma_map_attrs can't be
+ * used to ensure pgprot always matches.
+ *
* TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is
* set by TTM after ttm_tt_populate() has successfully returned, and is
* then unset when TTM calls ttm_tt_unpopulate().
@@ -87,8 +93,9 @@ struct ttm_tt {
#define TTM_TT_FLAG_ZERO_ALLOC BIT(1)
#define TTM_TT_FLAG_EXTERNAL BIT(2)
#define TTM_TT_FLAG_EXTERNAL_MAPPABLE BIT(3)
+#define TTM_TT_FLAG_DECRYPTED BIT(4)
-#define TTM_TT_FLAG_PRIV_POPULATED BIT(4)
+#define TTM_TT_FLAG_PRIV_POPULATED BIT(5)
uint32_t page_flags;
/** @num_pages: Number of pages in the page array. */
uint32_t num_pages;
diff --git a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
index 754c54a6eb..7850cdc62e 100644
--- a/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a779g0-cpg-mssr.h
@@ -86,5 +86,6 @@
#define R8A779G0_CLK_CPEX 74
#define R8A779G0_CLK_CBFUSA 75
#define R8A779G0_CLK_R 76
+#define R8A779G0_CLK_CP 77
#endif /* __DT_BINDINGS_CLOCK_R8A779G0_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/dma/fsl-edma.h b/include/dt-bindings/dma/fsl-edma.h
new file mode 100644
index 0000000000..fd11478cfe
--- /dev/null
+++ b/include/dt-bindings/dma/fsl-edma.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+
+#ifndef _FSL_EDMA_DT_BINDING_H_
+#define _FSL_EDMA_DT_BINDING_H_
+
+/* Receive Channel */
+#define FSL_EDMA_RX 0x1
+
+/* iMX8 audio remote DMA */
+#define FSL_EDMA_REMOTE 0x2
+
+/* FIFO is a contiguous memory region */
+#define FSL_EDMA_MULTI_FIFO 0x4
+
+/* Channel must stick to an even-numbered channel */
+#define FSL_EDMA_EVEN_CH 0x8
+
+/* Channel must stick to an odd-numbered channel */
+#define FSL_EDMA_ODD_CH 0x10
+
+#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index fc80944190..e990c18028 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -75,6 +75,8 @@ extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_gds(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 1c5ca92a05..90f8bd1736 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1021,6 +1021,18 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
efficiencies);
}
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+{
+ unsigned int freq;
+
+ if (idx < 0)
+ return false;
+
+ freq = policy->freq_table[idx].frequency;
+
+ return freq == clamp_val(freq, policy->min, policy->max);
+}
+
static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
@@ -1054,7 +1066,8 @@ retry:
return 0;
}
- if (idx < 0 && efficiencies) {
+ /* Limit frequency index to honor policy->min/max */
+ if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
efficiencies = false;
goto retry;
}
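
cpufreq_is_in_limits() above treats a table index as usable only if its frequency already lies within policy->min..policy->max, expressed as a clamp-and-compare. A userspace sketch with a made-up frequency table:

/* Clamp-and-compare check for whether a table entry honors the limits. */
#include <stdbool.h>
#include <stdio.h>

#define CLAMP_VAL(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

static const unsigned int freq_table[] = { 800000, 1200000, 1800000, 2400000 };

static bool freq_is_in_limits(int idx, unsigned int min, unsigned int max)
{
	unsigned int freq;

	if (idx < 0)
		return false;

	freq = freq_table[idx];
	return freq == CLAMP_VAL(freq, min, max);
}

int main(void)
{
	/* policy limited to 1.0-2.0 GHz: index 3 (2.4 GHz) must be rejected */
	printf("%d %d\n", freq_is_in_limits(1, 1000000, 2000000),
			  freq_is_in_limits(3, 1000000, 2000000));
	return 0;
}
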
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index 7595142f3f..7b2968612b 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -80,7 +80,8 @@ void dm_io_client_destroy(struct dm_io_client *client);
* error occurred doing io to the corresponding region.
*/
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
- struct dm_io_region *region, unsigned int long *sync_error_bits);
+ struct dm_io_region *region, unsigned int long *sync_error_bits,
+ unsigned short ioprio);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_IO_H */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index b9d83652c0..5620894315 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -35,7 +35,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
struct file *eventfd_fget(int fd);
struct eventfd_ctx *eventfd_ctx_fdget(int fd);
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
-__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
+__u64 eventfd_signal(struct eventfd_ctx *ctx);
__u64 eventfd_signal_mask(struct eventfd_ctx *ctx, __u64 n, __poll_t mask);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
@@ -58,7 +58,7 @@ static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd)
return ERR_PTR(-ENOSYS);
}
-static inline int eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
+static inline int eventfd_signal(struct eventfd_ctx *ctx)
{
return -ENOSYS;
}
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 039fe0ce8d..1812b7ec84 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -27,6 +27,7 @@
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
+#define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1)
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a4953fafc8..02e043bd93 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -547,24 +547,27 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
u64, __ur_3, u64, __ur_4, u64, __ur_5)
-#define BPF_CALL_x(x, name, ...) \
+#define BPF_CALL_x(x, attr, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
- u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
+ attr u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
-#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__)
-#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__)
-#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__)
-#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__)
-#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__)
-#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__)
+#define __NOATTR
+#define BPF_CALL_0(name, ...) BPF_CALL_x(0, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_1(name, ...) BPF_CALL_x(1, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_2(name, ...) BPF_CALL_x(2, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_3(name, ...) BPF_CALL_x(3, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_4(name, ...) BPF_CALL_x(4, __NOATTR, name, __VA_ARGS__)
+#define BPF_CALL_5(name, ...) BPF_CALL_x(5, __NOATTR, name, __VA_ARGS__)
+
+#define NOTRACE_BPF_CALL_1(name, ...) BPF_CALL_x(1, notrace, name, __VA_ARGS__)
#define bpf_ctx_range(TYPE, MEMBER) \
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
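
The BPF_CALL_x() rework above threads an attribute token (empty __NOATTR by default, notrace for NOTRACE_BPF_CALL_1) through the function-defining macro. A toy standalone version of the same macro pattern, with made-up helper names:

/* Threading an optional attribute through a function-defining macro. */
#include <stdio.h>

#define __NOATTR
#define DEFINE_HELPER_x(attr, name)                  \
	attr long name(long a);                      \
	attr long name(long a) { return a * 2; }

#define DEFINE_HELPER(name)          DEFINE_HELPER_x(__NOATTR, name)
#define DEFINE_STATIC_HELPER(name)   DEFINE_HELPER_x(static, name)

DEFINE_HELPER(doubles)               /* plain external linkage */
DEFINE_STATIC_HELPER(doubles_static) /* same body, extra attribute */

int main(void)
{
	printf("%ld %ld\n", doubles(21), doubles_static(21));
	return 0;
}
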
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7f659c2679..3226a1d47b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2084,7 +2084,7 @@ struct super_operations {
#ifdef CONFIG_QUOTA
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
- struct dquot **(*get_dquots)(struct inode *);
+ struct dquot __rcu **(*get_dquots)(struct inode *);
#endif
long (*nr_cached_objects)(struct super_block *,
struct shrink_control *);
@@ -3205,6 +3205,15 @@ extern int generic_check_addressable(unsigned, u64);
extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
+static inline bool sb_has_encoding(const struct super_block *sb)
+{
+#if IS_ENABLED(CONFIG_UNICODE)
+ return !!sb->s_encoding;
+#else
+ return false;
+#endif
+}
+
int may_setattr(struct mnt_idmap *idmap, struct inode *inode,
unsigned int ia_valid);
int setattr_prepare(struct mnt_idmap *, struct dentry *, struct iattr *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index de292a0071..e2a916cf29 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -353,6 +353,15 @@ static inline bool gfp_has_io_fs(gfp_t gfp)
return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
}
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
+{
+ return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
+}
+
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2b00faf980..6ef0557b4b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -164,8 +164,28 @@ struct hv_ring_buffer {
u8 buffer[];
} __packed;
+
+/*
+ * If the requested ring buffer size is at least 8 times the size of the
+ * header, steal space from the ring buffer for the header. Otherwise, add
+ * space for the header so that it doesn't take too much of the ring buffer
+ * space.
+ *
+ * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
+ * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
+ * buffer size (such as 128 Kbytes) and so end up making a nearly twice as
+ * large allocation that will be almost half wasted. As a contrasting example,
+ * on ARM64 with 64 Kbyte page size, we don't want to take 64 Kbytes for the
+ * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
+ * In this latter case, we must add 64 Kbytes for the header and not worry
+ * about what's wasted.
+ */
+#define VMBUS_HEADER_ADJ(payload_sz) \
+ ((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
+ 0 : sizeof(struct hv_ring_buffer))
+
/* Calculate the proper size of a ringbuffer, it must be page-aligned */
-#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
(payload_sz))
struct hv_ring_buffer_info {
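
The VMBUS_HEADER_ADJ() comment above is easiest to see with numbers: on x86 a 128 KiB request dwarfs the 4 KiB header, so the header is carved out of the payload, while on ARM64 with 64 KiB pages it is added on top. A quick standalone calculation, with the header and page sizes passed as parameters instead of taken from sizeof(struct hv_ring_buffer):

/* Worked example of the ring-size policy described above. */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static unsigned long ring_size(unsigned long payload, unsigned long hdr,
			       unsigned long page)
{
	unsigned long adj = (payload >= 8 * hdr) ? 0 : hdr;

	return ALIGN_UP(adj + payload, page);
}

int main(void)
{
	/* x86: 128 KiB payload >= 8 * 4 KiB header -> header stolen from
	 * the payload, total stays 128 KiB. */
	printf("x86:   %lu KiB\n", ring_size(128 << 10, 4 << 10, 4 << 10) >> 10);

	/* arm64, 64 KiB pages: 128 KiB < 8 * 64 KiB -> header added on
	 * top, total becomes 192 KiB. */
	printf("arm64: %lu KiB\n", ring_size(128 << 10, 64 << 10, 64 << 10) >> 10);
	return 0;
}
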
diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h
index 33f21bd85d..f3196f82fd 100644
--- a/include/linux/intel_rapl.h
+++ b/include/linux/intel_rapl.h
@@ -178,6 +178,12 @@ struct rapl_package {
struct rapl_if_priv *priv;
};
+struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+struct rapl_package *rapl_add_package_cpuslocked(int id, struct rapl_if_priv *priv,
+ bool id_is_cpu);
+void rapl_remove_package_cpuslocked(struct rapl_package *rp);
+
struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu);
struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu);
void rapl_remove_package(struct rapl_package *rp);
diff --git a/include/linux/intel_tcc.h b/include/linux/intel_tcc.h
index f422612c28..8ff8eabb4a 100644
--- a/include/linux/intel_tcc.h
+++ b/include/linux/intel_tcc.h
@@ -13,6 +13,6 @@
int intel_tcc_get_tjmax(int cpu);
int intel_tcc_get_offset(int cpu);
int intel_tcc_set_offset(int cpu, int offset);
-int intel_tcc_get_temp(int cpu, bool pkg);
+int intel_tcc_get_temp(int cpu, int *temp, bool pkg);
#endif /* __INTEL_TCC_H__ */
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index aefb73eeee..b3a32687f5 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -93,6 +93,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
unsigned int issue_flags);
struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd);
+bool io_is_uring_fops(struct file *file);
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -111,10 +112,6 @@ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
}
-static inline struct sock *io_uring_get_socket(struct file *file)
-{
- return NULL;
-}
static inline void io_uring_task_cancel(void)
{
}
@@ -141,6 +138,10 @@ static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd
{
return NULL;
}
+static inline bool io_is_uring_fops(struct file *file)
+{
+ return false;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 239a4f6880..335eca49dc 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -358,9 +358,6 @@ struct io_ring_ctx {
struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce;
- #if defined(CONFIG_UNIX)
- struct socket *ring_sock;
- #endif
/* hashed buffered write serialization */
struct io_wq_hash *hash_map;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1dbb14dacc..1d2afcd475 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -107,6 +107,7 @@ enum {
ATA_DFLAG_NCQ_PRIO_ENABLED = (1 << 20), /* Priority cmds sent to dev */
ATA_DFLAG_CDL_ENABLED = (1 << 21), /* cmd duration limits is enabled */
+ ATA_DFLAG_RESUMING = (1 << 22), /* Device is resuming */
ATA_DFLAG_DETACH = (1 << 24),
ATA_DFLAG_DETACHED = (1 << 25),
ATA_DFLAG_DA = (1 << 26), /* device supports Device Attention */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 77cd2e1372..bfc8320fb4 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10215,7 +10215,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_63_to_46[0x12];
u8 mrtc[0x1];
- u8 regs_44_to_32[0xd];
+ u8 regs_44_to_41[0x4];
+ u8 mfrl[0x1];
+ u8 regs_39_to_32[0x8];
u8 regs_31_to_10[0x16];
u8 mtmp[0x1];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index bd53cf4be7..f0e55bf3ec 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -269,7 +269,10 @@ struct mlx5_wqe_eth_seg {
union {
struct {
__be16 sz;
- u8 start[2];
+ union {
+ u8 start[2];
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
} inline_hdr;
struct {
__be16 type;
diff --git a/include/linux/mman.h b/include/linux/mman.h
index dc7048824b..bcb201ab7a 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -162,6 +162,14 @@ calc_vm_flag_bits(unsigned long flags)
unsigned long vm_commit_limit(void);
+#ifndef arch_memory_deny_write_exec_supported
+static inline bool arch_memory_deny_write_exec_supported(void)
+{
+ return true;
+}
+#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
+#endif
+
/*
* Denies creating a writable executable mapping or gaining executable permissions.
*
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 001b2ce838..89b1e0ed98 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -115,6 +115,14 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod);
+#ifdef CONFIG_MODULES
+void flush_module_init_free_work(void);
+#else
+static inline void flush_module_init_free_work(void)
+{
+}
+#endif
+
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index badb4c1ac0..5c19ead604 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -169,7 +169,7 @@
struct spinand_op;
struct spinand_device;
-#define SPINAND_MAX_ID_LEN 4
+#define SPINAND_MAX_ID_LEN 5
/*
* For erase, write and read operation, we got the following timings :
* tBERS (erase) 1ms to 4ms
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 279262057a..832b7e354b 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -612,6 +612,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_commit_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_commit_data *data);
+void nfs_commit_begin(struct nfs_mds_commit_info *cinfo);
bool nfs_commit_end(struct nfs_mds_commit_info *cinfo);
static inline bool nfs_have_writebacks(const struct inode *inode)
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 3921fbed0b..51421fdbb0 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,10 +17,12 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
+ OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
+ OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -28,6 +30,7 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
+ OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -64,6 +67,7 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
+ OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 675937a5bd..ca0e61c838 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2511,6 +2511,11 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
return NULL;
}
+static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
+{
+ return dev->error_state == pci_channel_io_perm_failure;
+}
+
void pci_request_acs(void);
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
bool pci_acs_path_enabled(struct pci_dev *start,
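A hedged illustration of how the new pci_dev_is_disconnected() helper might be used by a caller; only pci_dev_is_disconnected() and pci_read_config_dword() are existing kernel APIs, the surrounding function is hypothetical.

/* Sketch only: skip config-space access once the device has been marked
 * permanently failed (e.g. surprise removal). example_read_vendor() and its
 * error handling are illustrative. */
static int example_read_vendor(struct pci_dev *pdev, u32 *val)
{
	if (pci_dev_is_disconnected(pdev))
		return -ENODEV;	/* don't touch hardware that is gone */
	return pci_read_config_dword(pdev, PCI_VENDOR_ID, val);
}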
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index 70998e6dd6..6ca51e0080 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -26,6 +26,7 @@ void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy);
int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
unsigned int port);
+int tegra_xusb_padctl_get_port_number(struct phy *phy);
int tegra_xusb_padctl_enable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy,
enum usb_device_speed speed);
int tegra_xusb_padctl_disable_phy_sleepwalk(struct tegra_xusb_padctl *padctl, struct phy *phy);
diff --git a/include/linux/poll.h b/include/linux/poll.h
index a9e0e1c2d1..d1ea4f3714 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -14,11 +14,7 @@
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
-#ifdef __clang__
-#define MAX_STACK_ALLOC 768
-#else
#define MAX_STACK_ALLOC 832
-#endif
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 31d523c4e0..e1bb04e578 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -250,6 +250,37 @@ do { \
cond_resched(); \
} while (0)
+/**
+ * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states
+ * @old_ts: jiffies at start of processing.
+ *
+ * This helper is for long-running softirq handlers, such as NAPI threads in
+ * networking. The caller should initialize the variable passed in as @old_ts
+ * at the beginning of the softirq handler. When invoked frequently, this macro
+ * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will
+ * provide both RCU and RCU-Tasks quiescent states. Note that this macro
+ * modifies its old_ts argument.
+ *
+ * Because regions of code that have disabled softirq act as RCU read-side
+ * critical sections, this macro should be invoked with softirq (and
+ * preemption) enabled.
+ *
+ * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels have
+ * more opportunities to invoke schedule() and thus provide the necessary
+ * quiescent states. By contrast, calling cond_resched() alone is not enough
+ * because cond_resched() does not provide RCU-Tasks quiescent states.
+ */
+#define rcu_softirq_qs_periodic(old_ts) \
+do { \
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \
+ time_after(jiffies, (old_ts) + HZ / 10)) { \
+ preempt_disable(); \
+ rcu_softirq_qs(); \
+ preempt_enable(); \
+ (old_ts) = jiffies; \
+ } \
+} while (0)
+
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
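As a hedged sketch of the calling convention described in the kerneldoc above (and mirrored by the cpumap kthread change later in this patch), a long-running loop might use the macro as follows; everything other than rcu_softirq_qs_periodic(), jiffies, kthread_should_stop() and cond_resched() is illustrative.

/* Illustrative long-running loop; example_process_batch() is a placeholder.
 * last_qs is initialized once and then handed to the macro, which updates it
 * whenever it reports a quiescent state (at most every 100ms). */
static int example_poll_thread(void *data)
{
	unsigned long last_qs = jiffies;

	while (!kthread_should_stop()) {
		if (example_process_batch(data)) {
			/* Still busy: periodically report RCU/RCU-Tasks QS. */
			rcu_softirq_qs_periodic(last_qs);
			continue;
		}
		cond_resched();
	}
	return 0;
}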
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 782e14f622..ded528d23f 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -98,6 +98,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
+typedef bool (*ring_buffer_cond_fn)(void *data);
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table, int full);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index f43aca7f3b..678409c47b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -786,7 +786,8 @@ enum UART_TX_FLAGS {
if (pending < WAKEUP_CHARS) { \
uart_write_wakeup(__port); \
\
- if (!((flags) & UART_TX_NOSTOP) && pending == 0) \
+ if (!((flags) & UART_TX_NOSTOP) && pending == 0 && \
+ __port->ops->tx_empty(__port)) \
__port->ops->stop_tx(__port); \
} \
\
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 2caa6b8610..66828dfc6e 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -37,7 +37,7 @@ struct shmem_inode_info {
unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
#ifdef CONFIG_TMPFS_QUOTA
- struct dquot *i_dquot[MAXQUOTAS];
+ struct dquot __rcu *i_dquot[MAXQUOTAS];
#endif
struct inode vfs_inode;
};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 763e926440..bd06942757 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3439,6 +3439,16 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
bool napi_pp_put_page(struct page *page, bool napi_safe);
static inline void
+skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe)
+{
+#ifdef CONFIG_PAGE_POOL
+ if (skb->pp_recycle && napi_pp_put_page(page, napi_safe))
+ return;
+#endif
+ put_page(page);
+}
+
+static inline void
napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
{
struct page *page = skb_frag_page(frag);
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index a65b2513f8..5ac5f182ce 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -349,6 +349,7 @@ struct virqfd {
wait_queue_entry_t wait;
poll_table pt;
struct work_struct shutdown;
+ struct work_struct flush_inject;
struct virqfd **pvirqfd;
};
@@ -356,5 +357,6 @@ int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *),
void (*thread)(void *, void *), void *data,
struct virqfd **pvirqfd, int fd);
void vfio_virqfd_disable(struct virqfd **pvirqfd);
+void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);
#endif /* VFIO_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 24b1e5070f..ad97453e7c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -405,6 +405,13 @@ enum {
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
+
+ /*
+ * Per-node default cap on min_active. Unless explicitly set, min_active
+ * is set to min(max_active, WQ_DFL_MIN_ACTIVE). For more details, see
+ * workqueue_struct->min_active definition.
+ */
+ WQ_DFL_MIN_ACTIVE = 8,
};
/*
@@ -447,11 +454,33 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items per CPU, 0 for default
+ * @max_active: max in-flight work items, 0 for default
* remaining args: args for @fmt
*
- * Allocate a workqueue with the specified parameters. For detailed
- * information on WQ_* flags, please refer to
+ * For a per-cpu workqueue, @max_active limits the number of in-flight work
+ * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be
+ * executing at most one work item for the workqueue.
+ *
+ * For unbound workqueues, @max_active limits the number of in-flight work items
+ * for the whole system. e.g. @max_active of 16 indicates that there can be
+ * at most 16 work items executing for the workqueue in the whole system.
+ *
+ * As sharing the same active counter for an unbound workqueue across multiple
+ * NUMA nodes can be expensive, @max_active is distributed to each NUMA node
+ * according to the proportion of the number of online CPUs and enforced
+ * independently.
+ *
+ * Depending on online CPU distribution, a node may end up with a per-node
+ * max_active that is significantly lower than @max_active, which can lead to
+ * deadlocks if the per-node concurrency limit is lower than the maximum number
+ * of interdependent work items for the workqueue.
+ *
+ * To guarantee forward progress regardless of online CPU distribution, the
+ * concurrency limit on every node is guaranteed to be equal to or greater than
+ * min_active which is set to min(@max_active, %WQ_DFL_MIN_ACTIVE). This means
+ * that the sum of per-node max_active's may be larger than @max_active.
+ *
+ * For detailed information on %WQ_* flags, please refer to
* Documentation/core-api/workqueue.rst.
*
* RETURNS:
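A brief, hedged illustration of the unbound semantics documented above: with WQ_UNBOUND, @max_active caps in-flight work items system-wide, and the per-node floor is min(@max_active, WQ_DFL_MIN_ACTIVE). The workqueue name and init function below are illustrative only.

/* Sketch only: an unbound workqueue limited to 16 in-flight work items across
 * the whole system; each node's limit is derived from 16 but never drops below
 * min(16, WQ_DFL_MIN_ACTIVE) = 8 under the scheme described above. */
static struct workqueue_struct *example_wq;

static int example_init(void)
{
	example_wq = alloc_workqueue("example_unbound", WQ_UNBOUND, 16);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}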
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 2b6cd343ee..4d95893c89 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -225,6 +225,7 @@ enum media_pad_signal_type {
* @graph_obj: Embedded structure containing the media object common data
* @entity: Entity this pad belongs to
* @index: Pad index in the entity pads array, numbered from 0 to n
+ * @num_links: Number of links connected to this pad
* @sig_type: Type of the signal inside a media pad
* @flags: Pad flags, as defined in
* :ref:`include/uapi/linux/media.h <media_header>`
@@ -236,6 +237,7 @@ struct media_pad {
struct media_gobj graph_obj; /* must be first field in struct */
struct media_entity *entity;
u16 index;
+ u16 num_links;
enum media_pad_signal_type sig_type;
unsigned long flags;
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index bdee5d649c..0d23102457 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -393,7 +393,6 @@ enum {
HCI_LIMITED_PRIVACY,
HCI_RPA_EXPIRED,
HCI_RPA_RESOLVING,
- HCI_HS_ENABLED,
HCI_LE_ENABLED,
HCI_ADVERTISING,
HCI_ADVERTISING_CONNECTABLE,
@@ -437,7 +436,6 @@ enum {
#define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
-#define HCI_POWER_OFF_TIMEOUT msecs_to_jiffies(5000) /* 5 seconds */
#define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */
#define HCI_LE_AUTOCONN_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 65dd286693..24446038d8 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -553,6 +553,7 @@ struct hci_dev {
__u32 req_status;
__u32 req_result;
struct sk_buff *req_skb;
+ struct sk_buff *req_rsp;
void *smp_data;
void *smp_bredr_data;
diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h
index 6efbc21521..e2582c2425 100644
--- a/include/net/bluetooth/hci_sync.h
+++ b/include/net/bluetooth/hci_sync.h
@@ -42,7 +42,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
void hci_cmd_sync_init(struct hci_dev *hdev);
void hci_cmd_sync_clear(struct hci_dev *hdev);
void hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err);
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err);
int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index cf393e72d6..92d7197f9a 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -59,8 +59,6 @@
#define L2CAP_WAIT_ACK_POLL_PERIOD msecs_to_jiffies(200)
#define L2CAP_WAIT_ACK_TIMEOUT msecs_to_jiffies(10000)
-#define L2CAP_A2MP_DEFAULT_MTU 670
-
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@@ -109,12 +107,6 @@ struct l2cap_conninfo {
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
-#define L2CAP_CREATE_CHAN_REQ 0x0c
-#define L2CAP_CREATE_CHAN_RSP 0x0d
-#define L2CAP_MOVE_CHAN_REQ 0x0e
-#define L2CAP_MOVE_CHAN_RSP 0x0f
-#define L2CAP_MOVE_CHAN_CFM 0x10
-#define L2CAP_MOVE_CHAN_CFM_RSP 0x11
#define L2CAP_CONN_PARAM_UPDATE_REQ 0x12
#define L2CAP_CONN_PARAM_UPDATE_RSP 0x13
#define L2CAP_LE_CONN_REQ 0x14
@@ -144,7 +136,6 @@ struct l2cap_conninfo {
/* L2CAP fixed channels */
#define L2CAP_FC_SIG_BREDR 0x02
#define L2CAP_FC_CONNLESS 0x04
-#define L2CAP_FC_A2MP 0x08
#define L2CAP_FC_ATT 0x10
#define L2CAP_FC_SIG_LE 0x20
#define L2CAP_FC_SMP_LE 0x40
@@ -267,7 +258,6 @@ struct l2cap_conn_rsp {
/* channel identifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
-#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_ATT 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@@ -282,7 +272,6 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
-#define L2CAP_CR_BAD_AMP 0x0005
#define L2CAP_CR_INVALID_SCID 0x0006
#define L2CAP_CR_SCID_IN_USE 0x0007
@@ -404,29 +393,6 @@ struct l2cap_info_rsp {
__u8 data[];
} __packed;
-struct l2cap_create_chan_req {
- __le16 psm;
- __le16 scid;
- __u8 amp_id;
-} __packed;
-
-struct l2cap_create_chan_rsp {
- __le16 dcid;
- __le16 scid;
- __le16 result;
- __le16 status;
-} __packed;
-
-struct l2cap_move_chan_req {
- __le16 icid;
- __u8 dest_amp_id;
-} __packed;
-
-struct l2cap_move_chan_rsp {
- __le16 icid;
- __le16 result;
-} __packed;
-
#define L2CAP_MR_SUCCESS 0x0000
#define L2CAP_MR_PEND 0x0001
#define L2CAP_MR_BAD_ID 0x0002
@@ -539,8 +505,6 @@ struct l2cap_seq_list {
struct l2cap_chan {
struct l2cap_conn *conn;
- struct hci_conn *hs_hcon;
- struct hci_chan *hs_hchan;
struct kref kref;
atomic_t nesting;
@@ -591,12 +555,6 @@ struct l2cap_chan {
unsigned long conn_state;
unsigned long flags;
- __u8 remote_amp_id;
- __u8 local_amp_id;
- __u8 move_id;
- __u8 move_state;
- __u8 move_role;
-
__u16 next_tx_seq;
__u16 expected_ack_seq;
__u16 expected_tx_seq;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8f2c487618..e90596e21c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4914,6 +4914,7 @@ struct cfg80211_ops {
* NL80211_REGDOM_SET_BY_DRIVER.
* @WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON: reg_call_notifier() is called if driver
* set this flag to update channels on beacon hints.
+ * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device
*/
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
@@ -4925,6 +4926,7 @@ enum wiphy_flags {
WIPHY_FLAG_4ADDR_STATION = BIT(6),
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
+ WIPHY_FLAG_DISABLE_WEXT = BIT(9),
WIPHY_FLAG_MESH_AUTH = BIT(10),
WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),
/* use hole at 12 */
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index f79ce133e5..519d23941b 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -378,6 +378,7 @@ struct ieee802154_llsec_key {
struct ieee802154_llsec_key_entry {
struct list_head list;
+ struct rcu_head rcu;
struct ieee802154_llsec_key_id id;
struct ieee802154_llsec_key *key;
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 4ce1988b2b..f40915d2ec 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -12,6 +12,7 @@ struct request;
struct scsi_driver {
struct device_driver gendrv;
+ int (*resume)(struct device *);
void (*rescan)(struct device *);
blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 3b907fc2ef..510f594b06 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -767,6 +767,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
+extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h
index c47cc71a99..fdd462b295 100644
--- a/include/soc/qcom/qcom-spmi-pmic.h
+++ b/include/soc/qcom/qcom-spmi-pmic.h
@@ -48,7 +48,7 @@
#define PMK8350_SUBTYPE 0x2f
#define PMR735B_SUBTYPE 0x34
#define PM6350_SUBTYPE 0x36
-#define PM2250_SUBTYPE 0x37
+#define PM4125_SUBTYPE 0x37
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 475294c853..be58d87050 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -131,6 +131,9 @@ struct tasdevice_priv {
const struct firmware *fmw, int offset);
int (*tasdevice_load_block)(struct tasdevice_priv *tas_priv,
struct tasdev_blk *block);
+
+ int (*save_calibration)(struct tasdevice_priv *tas_priv);
+ void (*apply_calibration)(struct tasdevice_priv *tas_priv);
};
void tas2781_reset(struct tasdevice_priv *tas_dev);
@@ -140,6 +143,8 @@ int tascodec_init(struct tasdevice_priv *tas_priv, void *codec,
struct tasdevice_priv *tasdevice_kzalloc(struct i2c_client *i2c);
int tasdevice_init(struct tasdevice_priv *tas_priv);
void tasdevice_remove(struct tasdevice_priv *tas_priv);
+int tasdevice_save_calibration(struct tasdevice_priv *tas_priv);
+void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv);
int tasdevice_dev_read(struct tasdevice_priv *tas_priv,
unsigned short chn, unsigned int reg, unsigned int *value);
int tasdevice_dev_write(struct tasdevice_priv *tas_priv,
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index a3995925cb..1f4258308b 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -81,14 +81,14 @@ TRACE_EVENT(qdisc_reset,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
@@ -106,14 +106,14 @@ TRACE_EVENT(qdisc_destroy,
TP_ARGS(q),
TP_STRUCT__entry(
- __string( dev, qdisc_dev(q) )
- __string( kind, q->ops->id )
- __field( u32, parent )
- __field( u32, handle )
+ __string( dev, qdisc_dev(q)->name )
+ __string( kind, q->ops->id )
+ __field( u32, parent )
+ __field( u32, handle )
),
TP_fast_assign(
- __assign_str(dev, qdisc_dev(q));
+ __assign_str(dev, qdisc_dev(q)->name);
__assign_str(kind, q->ops->id);
__entry->parent = q->parent;
__entry->handle = q->handle;
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 8881aea60f..2445f365bc 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -52,7 +52,7 @@
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 41
+#define VIRTIO_TRANSPORT_F_END 42
#ifndef VIRTIO_CONFIG_NO_LEGACY
/* Do we get callbacks when the ring is completely used, even if we've
@@ -114,4 +114,10 @@
* This feature indicates that the driver can reset a queue individually.
*/
#define VIRTIO_F_RING_RESET 40
+
+/*
+ * This feature indicates that the device supports administration virtqueues.
+ */
+#define VIRTIO_F_ADMIN_VQ 41
+
#endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */
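A hedged driver-side sketch of how the new transport feature bit might be tested; virtio_has_feature() is the usual in-kernel helper (not part of this UAPI header), and example_setup_admin_vq() is purely illustrative.

/* Illustrative probe fragment: only set up administration virtqueues when the
 * device negotiated VIRTIO_F_ADMIN_VQ. example_setup_admin_vq() is a placeholder. */
static int example_probe(struct virtio_device *vdev)
{
	if (virtio_has_feature(vdev, VIRTIO_F_ADMIN_VQ))
		return example_setup_admin_vq(vdev);
	return 0;
}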
diff --git a/init/Kconfig b/init/Kconfig
index bfde8189c2..e4ff62f583 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -876,14 +876,14 @@ config CC_IMPLICIT_FALLTHROUGH
default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
-# Currently, disable gcc-11+ array-bounds globally.
+# Currently, disable gcc-10+ array-bounds globally.
# It's still broken in gcc-13, so no upper bound yet.
-config GCC11_NO_ARRAY_BOUNDS
+config GCC10_NO_ARRAY_BOUNDS
def_bool y
config CC_NO_ARRAY_BOUNDS
bool
- default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+ default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
#
# For architectures that know their GCC __int128 support is sound
diff --git a/init/initramfs.c b/init/initramfs.c
index 8d0fd946cd..efc477b905 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -673,7 +673,7 @@ static void __init populate_initrd_image(char *err)
printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
err);
- file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
+ file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);
if (IS_ERR(file))
return;
diff --git a/init/main.c b/init/main.c
index e24b0780fd..9e6ab6d593 100644
--- a/init/main.c
+++ b/init/main.c
@@ -88,6 +88,7 @@
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
#include <linux/random.h>
+#include <linux/moduleloader.h>
#include <linux/list.h>
#include <linux/integrity.h>
#include <linux/proc_ns.h>
@@ -1402,11 +1403,11 @@ static void mark_readonly(void)
if (rodata_enabled) {
/*
* load_module() results in W+X mappings, which are cleaned
- * up with call_rcu(). Let's make sure that queued work is
+ * up with init_free_wq. Let's make sure that queued work is
* flushed so that we don't hit false positives looking for
* insecure pages which are W+X.
*/
- rcu_barrier();
+ flush_module_init_free_work();
mark_rodata_ro();
rodata_test();
} else
diff --git a/io_uring/filetable.c b/io_uring/filetable.c
index e7d749991d..6e86e6188d 100644
--- a/io_uring/filetable.c
+++ b/io_uring/filetable.c
@@ -87,13 +87,10 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
io_file_bitmap_clear(&ctx->file_table, slot_index);
}
- ret = io_scm_file_account(ctx, file);
- if (!ret) {
- *io_get_tag_slot(ctx->file_data, slot_index) = 0;
- io_fixed_file_set(file_slot, file);
- io_file_bitmap_set(&ctx->file_table, slot_index);
- }
- return ret;
+ *io_get_tag_slot(ctx->file_data, slot_index) = 0;
+ io_fixed_file_set(file_slot, file);
+ io_file_bitmap_set(&ctx->file_table, slot_index);
+ return 0;
}
int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
diff --git a/io_uring/futex.c b/io_uring/futex.c
index 3c3575303c..792a03df58 100644
--- a/io_uring/futex.c
+++ b/io_uring/futex.c
@@ -159,6 +159,7 @@ bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) {
if (!io_match_task_safe(req, task, cancel_all))
continue;
+ hlist_del_init(&req->hash_node);
__io_futex_cancel(ctx, req);
found = true;
}
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 59f5791c90..13a9d9fcd2 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -60,7 +60,6 @@
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
-#include <net/scm.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -177,19 +176,6 @@ static struct ctl_table kernel_io_uring_disabled_table[] = {
};
#endif
-struct sock *io_uring_get_socket(struct file *file)
-{
-#if defined(CONFIG_UNIX)
- if (io_is_uring_fops(file)) {
- struct io_ring_ctx *ctx = file->private_data;
-
- return ctx->ring_sock->sk;
- }
-#endif
- return NULL;
-}
-EXPORT_SYMBOL(io_uring_get_socket);
-
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
if (!wq_list_empty(&ctx->submit_state.compl_reqs) ||
@@ -1188,12 +1174,11 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
static unsigned int handle_tw_list(struct llist_node *node,
struct io_ring_ctx **ctx,
- struct io_tw_state *ts,
- struct llist_node *last)
+ struct io_tw_state *ts)
{
unsigned int count = 0;
- while (node && node != last) {
+ do {
struct llist_node *next = node->next;
struct io_kiocb *req = container_of(node, struct io_kiocb,
io_task_work.node);
@@ -1217,7 +1202,7 @@ static unsigned int handle_tw_list(struct llist_node *node,
*ctx = NULL;
cond_resched();
}
- }
+ } while (node);
return count;
}
@@ -1236,22 +1221,6 @@ static inline struct llist_node *io_llist_xchg(struct llist_head *head,
return xchg(&head->first, new);
}
-/**
- * io_llist_cmpxchg - possibly swap all entries in a lock-less list
- * @head: the head of lock-less list to delete all entries
- * @old: expected old value of the first entry of the list
- * @new: new entry as the head of the list
- *
- * perform a cmpxchg on the first entry of the list.
- */
-
-static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
- struct llist_node *old,
- struct llist_node *new)
-{
- return cmpxchg(&head->first, old, new);
-}
-
static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
struct llist_node *node = llist_del_all(&tctx->task_list);
@@ -1286,9 +1255,7 @@ void tctx_task_work(struct callback_head *cb)
struct io_ring_ctx *ctx = NULL;
struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
task_work);
- struct llist_node fake = {};
struct llist_node *node;
- unsigned int loops = 0;
unsigned int count = 0;
if (unlikely(current->flags & PF_EXITING)) {
@@ -1296,21 +1263,9 @@ void tctx_task_work(struct callback_head *cb)
return;
}
- do {
- loops++;
- node = io_llist_xchg(&tctx->task_list, &fake);
- count += handle_tw_list(node, &ctx, &ts, &fake);
-
- /* skip expensive cmpxchg if there are items in the list */
- if (READ_ONCE(tctx->task_list.first) != &fake)
- continue;
- if (ts.locked && !wq_list_empty(&ctx->submit_state.compl_reqs)) {
- io_submit_flush_completions(ctx);
- if (READ_ONCE(tctx->task_list.first) != &fake)
- continue;
- }
- node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
- } while (node != &fake);
+ node = llist_del_all(&tctx->task_list);
+ if (node)
+ count = handle_tw_list(node, &ctx, &ts);
ctx_flush_and_put(ctx, &ts);
@@ -1318,7 +1273,7 @@ void tctx_task_work(struct callback_head *cb)
if (unlikely(atomic_read(&tctx->in_cancel)))
io_uring_drop_tctx_refs(current);
- trace_io_uring_task_work_run(tctx, count, loops);
+ trace_io_uring_task_work_run(tctx, count, 1);
}
static inline void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
@@ -1415,7 +1370,20 @@ static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
}
}
-static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts)
+static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
+ int min_events)
+{
+ if (llist_empty(&ctx->work_llist))
+ return false;
+ if (events < min_events)
+ return true;
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
+ atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ return false;
+}
+
+static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
+ int min_events)
{
struct llist_node *node;
unsigned int loops = 0;
@@ -1444,18 +1412,20 @@ again:
}
loops++;
- if (!llist_empty(&ctx->work_llist))
+ if (io_run_local_work_continue(ctx, ret, min_events))
goto again;
if (ts->locked) {
io_submit_flush_completions(ctx);
- if (!llist_empty(&ctx->work_llist))
+ if (io_run_local_work_continue(ctx, ret, min_events))
goto again;
}
+
trace_io_uring_local_work_run(ctx, ret, loops);
return ret;
}
-static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
+ int min_events)
{
struct io_tw_state ts = { .locked = true, };
int ret;
@@ -1463,20 +1433,20 @@ static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
if (llist_empty(&ctx->work_llist))
return 0;
- ret = __io_run_local_work(ctx, &ts);
+ ret = __io_run_local_work(ctx, &ts, min_events);
/* shouldn't happen! */
if (WARN_ON_ONCE(!ts.locked))
mutex_lock(&ctx->uring_lock);
return ret;
}
-static int io_run_local_work(struct io_ring_ctx *ctx)
+static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
struct io_tw_state ts = {};
int ret;
ts.locked = mutex_trylock(&ctx->uring_lock);
- ret = __io_run_local_work(ctx, &ts);
+ ret = __io_run_local_work(ctx, &ts, min_events);
if (ts.locked)
mutex_unlock(&ctx->uring_lock);
@@ -1672,7 +1642,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
io_task_work_pending(ctx)) {
u32 tail = ctx->cached_cq_tail;
- (void) io_run_local_work_locked(ctx);
+ (void) io_run_local_work_locked(ctx, min);
if (task_work_pending(current) ||
wq_list_empty(&ctx->iopoll_list)) {
@@ -2515,7 +2485,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
{
if (!llist_empty(&ctx->work_llist)) {
__set_current_state(TASK_RUNNING);
- if (io_run_local_work(ctx) > 0)
+ if (io_run_local_work(ctx, INT_MAX) > 0)
return 0;
}
if (io_run_task_work() > 0)
@@ -2538,7 +2508,7 @@ static bool current_pending_io(void)
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq)
{
- int io_wait, ret;
+ int ret;
if (unlikely(READ_ONCE(ctx->check_cq)))
return 1;
@@ -2556,7 +2526,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
* can take into account that the task is waiting for IO - turns out
* to be important for low QD IO.
*/
- io_wait = current->in_iowait;
if (current_pending_io())
current->in_iowait = 1;
ret = 0;
@@ -2564,7 +2533,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
schedule();
else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
ret = -ETIME;
- current->in_iowait = io_wait;
+ current->in_iowait = 0;
return ret;
}
@@ -2583,7 +2552,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
if (!io_allowed_run_tw(ctx))
return -EEXIST;
if (!llist_empty(&ctx->work_llist))
- io_run_local_work(ctx);
+ io_run_local_work(ctx, min_events);
io_run_task_work();
io_cqring_overflow_flush(ctx);
/* if user messes with these they will just get an early return */
@@ -2621,11 +2590,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
trace_io_uring_cqring_wait(ctx, min_events);
do {
+ int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
unsigned long check_cq;
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
- int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
-
atomic_set(&ctx->cq_wait_nr, nr_wait);
set_current_state(TASK_INTERRUPTIBLE);
} else {
@@ -2644,7 +2612,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
*/
io_run_task_work();
if (!llist_empty(&ctx->work_llist))
- io_run_local_work(ctx);
+ io_run_local_work(ctx, nr_wait);
/*
* Non-local task_work will be run on exit to userspace, but
@@ -2715,7 +2683,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
struct page **page_array;
unsigned int nr_pages;
void *page_addr;
- int ret, i;
+ int ret, i, pinned;
*npages = 0;
@@ -2729,12 +2697,12 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
if (!page_array)
return ERR_PTR(-ENOMEM);
- ret = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
- page_array);
- if (ret != nr_pages) {
-err:
- io_pages_free(&page_array, ret > 0 ? ret : 0);
- return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);
+
+ pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
+ page_array);
+ if (pinned != nr_pages) {
+ ret = (pinned < 0) ? pinned : -EFAULT;
+ goto free_pages;
}
page_addr = page_address(page_array[0]);
@@ -2748,7 +2716,7 @@ err:
* didn't support this feature.
*/
if (PageHighMem(page_array[i]))
- goto err;
+ goto free_pages;
/*
* No support for discontig pages for now, should either be a
@@ -2757,13 +2725,17 @@ err:
* just fail them with EINVAL.
*/
if (page_address(page_array[i]) != page_addr)
- goto err;
+ goto free_pages;
page_addr += PAGE_SIZE;
}
*pages = page_array;
*npages = nr_pages;
return page_to_virt(page_array[0]);
+
+free_pages:
+ io_pages_free(&page_array, pinned > 0 ? pinned : 0);
+ return ERR_PTR(ret);
}
static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
@@ -2785,14 +2757,15 @@ static void io_rings_free(struct io_ring_ctx *ctx)
if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
- ctx->rings = NULL;
- ctx->sq_sqes = NULL;
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
ctx->n_ring_pages = 0;
io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
ctx->n_sqe_pages = 0;
}
+
+ ctx->rings = NULL;
+ ctx->sq_sqes = NULL;
}
void *io_mem_alloc(size_t size)
@@ -2952,13 +2925,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_rsrc_node_destroy(ctx, ctx->rsrc_node);
WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
-
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- ctx->ring_sock->file = NULL; /* so that iput() is called */
- sock_release(ctx->ring_sock);
- }
-#endif
WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free);
@@ -3374,7 +3340,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
io_allowed_defer_tw_run(ctx))
- ret |= io_run_local_work(ctx) > 0;
+ ret |= io_run_local_work(ctx, INT_MAX) > 0;
ret |= io_cancel_defer_files(ctx, task, cancel_all);
mutex_lock(&ctx->uring_lock);
ret |= io_poll_remove_all(ctx, task, cancel_all);
@@ -3736,7 +3702,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
* it should handle ownership problems if any.
*/
if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
- (void)io_run_local_work_locked(ctx);
+ (void)io_run_local_work_locked(ctx, min_complete);
}
mutex_unlock(&ctx->uring_lock);
}
@@ -3880,32 +3846,12 @@ static int io_uring_install_fd(struct file *file)
/*
* Allocate an anonymous fd, this is what constitutes the application
* visible backing of an io_uring instance. The application mmaps this
- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
- * we have to tie this fd to a socket for file garbage collection purposes.
+ * fd to gain access to the SQ/CQ ring details.
*/
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
- struct file *file;
-#if defined(CONFIG_UNIX)
- int ret;
-
- ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
- &ctx->ring_sock);
- if (ret)
- return ERR_PTR(ret);
-#endif
-
- file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
+ return anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
O_RDWR | O_CLOEXEC, NULL);
-#if defined(CONFIG_UNIX)
- if (IS_ERR(file)) {
- sock_release(ctx->ring_sock);
- ctx->ring_sock = NULL;
- } else {
- ctx->ring_sock->file = file;
- }
-#endif
- return file;
}
static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index c9992cd7f1..0d66a7058d 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -61,7 +61,6 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned issue_flags);
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
-bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
diff --git a/io_uring/net.c b/io_uring/net.c
index 1616220291..5a4001139e 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -87,7 +87,7 @@ static inline bool io_check_multishot(struct io_kiocb *req,
* generic paths but multipoll may decide to post extra cqes.
*/
return !(issue_flags & IO_URING_F_IOWQ) ||
- !(issue_flags & IO_URING_F_MULTISHOT) ||
+ !(req->flags & REQ_F_APOLL_MULTISHOT) ||
!req->ctx->task_complete;
}
@@ -204,16 +204,115 @@ static int io_setup_async_msg(struct io_kiocb *req,
return -EAGAIN;
}
+#ifdef CONFIG_COMPAT
+static int io_compat_msg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ struct compat_msghdr *msg, int ddir)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct compat_iovec __user *uiov;
+ int ret;
+
+ if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
+ return -EFAULT;
+
+ uiov = compat_ptr(msg->msg_iov);
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ compat_ssize_t clen;
+
+ iomsg->free_iov = NULL;
+ if (msg->msg_iovlen == 0) {
+ sr->len = 0;
+ } else if (msg->msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ if (!access_ok(uiov, sizeof(*uiov)))
+ return -EFAULT;
+ if (__get_user(clen, &uiov->iov_len))
+ return -EFAULT;
+ if (clen < 0)
+ return -EINVAL;
+ sr->len = clen;
+ }
+
+ return 0;
+ }
+
+ iomsg->free_iov = iomsg->fast_iov;
+ ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
+ UIO_FASTIOV, &iomsg->free_iov,
+ &iomsg->msg.msg_iter, true);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+#endif
+
+static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
+ struct user_msghdr *msg, int ddir)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ int ret;
+
+ if (copy_from_user(msg, sr->umsg, sizeof(*sr->umsg)))
+ return -EFAULT;
+
+ if (req->flags & REQ_F_BUFFER_SELECT) {
+ if (msg->msg_iovlen == 0) {
+ sr->len = iomsg->fast_iov[0].iov_len = 0;
+ iomsg->fast_iov[0].iov_base = NULL;
+ iomsg->free_iov = NULL;
+ } else if (msg->msg_iovlen > 1) {
+ return -EINVAL;
+ } else {
+ if (copy_from_user(iomsg->fast_iov, msg->msg_iov,
+ sizeof(*msg->msg_iov)))
+ return -EFAULT;
+ sr->len = iomsg->fast_iov[0].iov_len;
+ iomsg->free_iov = NULL;
+ }
+
+ return 0;
+ }
+
+ iomsg->free_iov = iomsg->fast_iov;
+ ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, UIO_FASTIOV,
+ &iomsg->free_iov, &iomsg->msg.msg_iter, false);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return 0;
+}
+
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
struct io_async_msghdr *iomsg)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ struct user_msghdr msg;
int ret;
iomsg->msg.msg_name = &iomsg->addr;
- iomsg->free_iov = iomsg->fast_iov;
- ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
- &iomsg->free_iov);
+ iomsg->msg.msg_iter.nr_segs = 0;
+
+#ifdef CONFIG_COMPAT
+ if (unlikely(req->ctx->compat)) {
+ struct compat_msghdr cmsg;
+
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
+ if (unlikely(ret))
+ return ret;
+
+ return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
+ }
+#endif
+
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
+ if (unlikely(ret))
+ return ret;
+
+ ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
+
/* save msg_control as sys_sendmsg() overwrites it */
sr->msg_control = iomsg->msg.msg_control_user;
return ret;
@@ -435,142 +534,77 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
-static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
+static int io_recvmsg_mshot_prep(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg,
+ int namelen, size_t controllen)
{
- int hdr;
-
- if (iomsg->namelen < 0)
- return true;
- if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
- iomsg->namelen, &hdr))
- return true;
- if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
- return true;
+ if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
+ (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
+ int hdr;
+
+ if (unlikely(namelen < 0))
+ return -EOVERFLOW;
+ if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
+ namelen, &hdr))
+ return -EOVERFLOW;
+ if (check_add_overflow(hdr, controllen, &hdr))
+ return -EOVERFLOW;
+
+ iomsg->namelen = namelen;
+ iomsg->controllen = controllen;
+ return 0;
+ }
- return false;
+ return 0;
}
-static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
+static int io_recvmsg_copy_hdr(struct io_kiocb *req,
+ struct io_async_msghdr *iomsg)
{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
struct user_msghdr msg;
int ret;
- if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
- return -EFAULT;
-
- ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
- return ret;
-
- if (req->flags & REQ_F_BUFFER_SELECT) {
- if (msg.msg_iovlen == 0) {
- sr->len = iomsg->fast_iov[0].iov_len = 0;
- iomsg->fast_iov[0].iov_base = NULL;
- iomsg->free_iov = NULL;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
- return -EFAULT;
- sr->len = iomsg->fast_iov[0].iov_len;
- iomsg->free_iov = NULL;
- }
-
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
- &iomsg->free_iov, &iomsg->msg.msg_iter,
- false);
- if (ret > 0)
- ret = 0;
- }
-
- return ret;
-}
+ iomsg->msg.msg_name = &iomsg->addr;
+ iomsg->msg.msg_iter.nr_segs = 0;
#ifdef CONFIG_COMPAT
-static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
- struct compat_msghdr msg;
- struct compat_iovec __user *uiov;
- int ret;
+ if (unlikely(req->ctx->compat)) {
+ struct compat_msghdr cmsg;
- if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
- return -EFAULT;
-
- ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
- if (ret)
- return ret;
-
- uiov = compat_ptr(msg.msg_iov);
- if (req->flags & REQ_F_BUFFER_SELECT) {
- compat_ssize_t clen;
-
- iomsg->free_iov = NULL;
- if (msg.msg_iovlen == 0) {
- sr->len = 0;
- } else if (msg.msg_iovlen > 1) {
- return -EINVAL;
- } else {
- if (!access_ok(uiov, sizeof(*uiov)))
- return -EFAULT;
- if (__get_user(clen, &uiov->iov_len))
- return -EFAULT;
- if (clen < 0)
- return -EINVAL;
- sr->len = clen;
- }
+ ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
- if (req->flags & REQ_F_APOLL_MULTISHOT) {
- iomsg->namelen = msg.msg_namelen;
- iomsg->controllen = msg.msg_controllen;
- if (io_recvmsg_multishot_overflow(iomsg))
- return -EOVERFLOW;
- }
- } else {
- iomsg->free_iov = iomsg->fast_iov;
- ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
- UIO_FASTIOV, &iomsg->free_iov,
- &iomsg->msg.msg_iter, true);
- if (ret < 0)
+ ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
+ if (unlikely(ret))
return ret;
- }
- return 0;
-}
+ return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
+ cmsg.msg_controllen);
+ }
#endif
-static int io_recvmsg_copy_hdr(struct io_kiocb *req,
- struct io_async_msghdr *iomsg)
-{
- iomsg->msg.msg_name = &iomsg->addr;
- iomsg->msg.msg_iter.nr_segs = 0;
+ ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
+ if (unlikely(ret))
+ return ret;
-#ifdef CONFIG_COMPAT
- if (req->ctx->compat)
- return __io_compat_recvmsg_copy_hdr(req, iomsg);
-#endif
+ ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
+ if (unlikely(ret))
+ return ret;
- return __io_recvmsg_copy_hdr(req, iomsg);
+ return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
+ msg.msg_controllen);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
+ struct io_async_msghdr *iomsg;
int ret;
if (!io_msg_alloc_async_prep(req))
return -ENOMEM;
- ret = io_recvmsg_copy_hdr(req, req->async_data);
+ iomsg = req->async_data;
+ ret = io_recvmsg_copy_hdr(req, iomsg);
if (!ret)
req->flags |= REQ_F_NEED_CLEANUP;
return ret;
@@ -881,7 +915,8 @@ retry_multishot:
kfree(kmsg->free_iov);
io_netmsg_recycle(req, issue_flags);
req->flags &= ~REQ_F_NEED_CLEANUP;
- }
+ } else if (ret == -EAGAIN)
+ return io_setup_async_msg(req, kmsg, issue_flags);
return ret;
}
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 7513afc7b7..c6f4789623 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -539,14 +539,6 @@ static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
poll->wait.private = (void *) wqe_private;
if (poll->events & EPOLLEXCLUSIVE) {
- /*
- * Exclusive waits may only wake a limited amount of entries
- * rather than all of them, this may interfere with lazy
- * wake if someone does wait(events > 1). Ensure we don't do
- * lazy wake for those, as we need to process each one as they
- * come in.
- */
- req->flags |= REQ_F_POLL_NO_LAZY;
add_wait_queue_exclusive(head, &poll->wait);
} else {
add_wait_queue(head, &poll->wait);
@@ -618,6 +610,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
if (issue_flags & IO_URING_F_UNLOCKED)
req->flags &= ~REQ_F_HASH_LOCKED;
+
+ /*
+ * Exclusive waits may only wake a limited amount of entries
+ * rather than all of them, this may interfere with lazy
+ * wake if someone does wait(events > 1). Ensure we don't do
+ * lazy wake for those, as we need to process each one as they
+ * come in.
+ */
+ if (poll->events & EPOLLEXCLUSIVE)
+ req->flags |= REQ_F_POLL_NO_LAZY;
+
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
if (unlikely(ipt->error || !ipt->nr_entries)) {
@@ -995,7 +998,6 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
struct io_hash_bucket *bucket;
struct io_kiocb *preq;
int ret2, ret = 0;
- struct io_tw_state ts = { .locked = true };
io_ring_submit_lock(ctx, issue_flags);
preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
@@ -1044,7 +1046,8 @@ found:
req_set_fail(preq);
io_req_set_res(preq, -ECANCELED, 0);
- io_req_task_complete(preq, &ts);
+ preq->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(preq);
out:
io_ring_submit_unlock(ctx, issue_flags);
if (ret < 0) {
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index f521c5965a..4818b79231 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -24,7 +24,6 @@ struct io_rsrc_update {
};
static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
struct io_mapped_ubuf **pimu,
struct page **last_hpage);
@@ -157,7 +156,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node)
switch (node->type) {
case IORING_RSRC_FILE:
- io_rsrc_file_put(node->ctx, prsrc);
+ fput(prsrc->file);
break;
case IORING_RSRC_BUFFER:
io_rsrc_buf_put(node->ctx, prsrc);
@@ -402,23 +401,13 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
break;
}
/*
- * Don't allow io_uring instances to be registered. If
- * UNIX isn't enabled, then this causes a reference
- * cycle and this instance can never get freed. If UNIX
- * is enabled we'll handle it just fine, but there's
- * still no point in allowing a ring fd as it doesn't
- * support regular read/write anyway.
+ * Don't allow io_uring instances to be registered.
*/
if (io_is_uring_fops(file)) {
fput(file);
err = -EBADF;
break;
}
- err = io_scm_file_account(ctx, file);
- if (err) {
- fput(file);
- break;
- }
*io_get_tag_slot(data, i) = tag;
io_fixed_file_set(file_slot, file);
io_file_bitmap_set(&ctx->file_table, i);
@@ -675,22 +664,12 @@ void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
for (i = 0; i < ctx->nr_user_files; i++) {
struct file *file = io_file_from_index(&ctx->file_table, i);
- /* skip scm accounted files, they'll be freed by ->ring_sock */
- if (!file || io_file_need_scm(file))
+ if (!file)
continue;
io_file_bitmap_clear(&ctx->file_table, i);
fput(file);
}
-#if defined(CONFIG_UNIX)
- if (ctx->ring_sock) {
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
- kfree_skb(skb);
- }
-#endif
io_free_file_tables(&ctx->file_table);
io_file_table_set_alloc_range(ctx, 0, 0);
io_rsrc_data_free(ctx->file_data);
@@ -718,137 +697,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx)
return ret;
}
-/*
- * Ensure the UNIX gc is aware of our file set, so we are certain that
- * the io_uring can be safely unregistered on process exit, even if we have
- * loops in the file referencing. We account only files that can hold other
- * files because otherwise they can't form a loop and so are not interesting
- * for GC.
- */
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
-{
-#if defined(CONFIG_UNIX)
- struct sock *sk = ctx->ring_sock->sk;
- struct sk_buff_head *head = &sk->sk_receive_queue;
- struct scm_fp_list *fpl;
- struct sk_buff *skb;
-
- if (likely(!io_file_need_scm(file)))
- return 0;
-
- /*
- * See if we can merge this file into an existing skb SCM_RIGHTS
- * file set. If there's no room, fall back to allocating a new skb
- * and filling it in.
- */
- spin_lock_irq(&head->lock);
- skb = skb_peek(head);
- if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
- __skb_unlink(skb, head);
- else
- skb = NULL;
- spin_unlock_irq(&head->lock);
-
- if (!skb) {
- fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
- if (!fpl)
- return -ENOMEM;
-
- skb = alloc_skb(0, GFP_KERNEL);
- if (!skb) {
- kfree(fpl);
- return -ENOMEM;
- }
-
- fpl->user = get_uid(current_user());
- fpl->max = SCM_MAX_FD;
- fpl->count = 0;
-
- UNIXCB(skb).fp = fpl;
- skb->sk = sk;
- skb->destructor = io_uring_destruct_scm;
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
- }
-
- fpl = UNIXCB(skb).fp;
- fpl->fp[fpl->count++] = get_file(file);
- unix_inflight(fpl->user, file);
- skb_queue_head(head, skb);
- fput(file);
-#endif
- return 0;
-}
-
-static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
-{
-#if defined(CONFIG_UNIX)
- struct sock *sock = ctx->ring_sock->sk;
- struct sk_buff_head list, *head = &sock->sk_receive_queue;
- struct sk_buff *skb;
- int i;
-
- __skb_queue_head_init(&list);
-
- /*
- * Find the skb that holds this file in its SCM_RIGHTS. When found,
- * remove this entry and rearrange the file array.
- */
- skb = skb_dequeue(head);
- while (skb) {
- struct scm_fp_list *fp;
-
- fp = UNIXCB(skb).fp;
- for (i = 0; i < fp->count; i++) {
- int left;
-
- if (fp->fp[i] != file)
- continue;
-
- unix_notinflight(fp->user, fp->fp[i]);
- left = fp->count - 1 - i;
- if (left) {
- memmove(&fp->fp[i], &fp->fp[i + 1],
- left * sizeof(struct file *));
- }
- fp->count--;
- if (!fp->count) {
- kfree_skb(skb);
- skb = NULL;
- } else {
- __skb_queue_tail(&list, skb);
- }
- fput(file);
- file = NULL;
- break;
- }
-
- if (!file)
- break;
-
- __skb_queue_tail(&list, skb);
-
- skb = skb_dequeue(head);
- }
-
- if (skb_peek(&list)) {
- spin_lock_irq(&head->lock);
- while ((skb = __skb_dequeue(&list)) != NULL)
- __skb_queue_tail(head, skb);
- spin_unlock_irq(&head->lock);
- }
-#endif
-}
-
-static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
-{
- struct file *file = prsrc->file;
-
- if (likely(!io_file_need_scm(file)))
- fput(file);
- else
- io_rsrc_file_scm_put(ctx, file);
-}
-
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args, u64 __user *tags)
{
@@ -897,21 +745,12 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
goto fail;
/*
- * Don't allow io_uring instances to be registered. If UNIX
- * isn't enabled, then this causes a reference cycle and this
- * instance can never get freed. If UNIX is enabled we'll
- * handle it just fine, but there's still no point in allowing
- * a ring fd as it doesn't support regular read/write anyway.
+ * Don't allow io_uring instances to be registered.
*/
if (io_is_uring_fops(file)) {
fput(file);
goto fail;
}
- ret = io_scm_file_account(ctx, file);
- if (ret) {
- fput(file);
- goto fail;
- }
file_slot = io_fixed_file_slot(&ctx->file_table, i);
io_fixed_file_set(file_slot, file);
io_file_bitmap_set(&ctx->file_table, i);
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 08ac0d8e07..7238b9cfe3 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -75,21 +75,6 @@ int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args, u64 __user *tags);
-int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
-
-static inline bool io_file_need_scm(struct file *filp)
-{
- return false;
-}
-
-static inline int io_scm_file_account(struct io_ring_ctx *ctx,
- struct file *file)
-{
- if (likely(!io_file_need_scm(file)))
- return 0;
- return __io_scm_file_account(ctx, file);
-}
-
int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 9394bf83e8..70c5beb05d 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -926,6 +926,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
*/
if (!file_can_poll(req->file))
return -EBADFD;
+ if (issue_flags & IO_URING_F_IOWQ)
+ return -EAGAIN;
ret = __io_read(req, issue_flags);
@@ -940,6 +942,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
*/
if (io_kbuf_recycle(req, issue_flags))
rw->len = 0;
+ if (issue_flags & IO_URING_F_MULTISHOT)
+ return IOU_ISSUE_SKIP_COMPLETE;
return -EAGAIN;
}
diff --git a/io_uring/waitid.c b/io_uring/waitid.c
index 6f85197860..77d340666c 100644
--- a/io_uring/waitid.c
+++ b/io_uring/waitid.c
@@ -125,12 +125,6 @@ static void io_waitid_complete(struct io_kiocb *req, int ret)
lockdep_assert_held(&req->ctx->uring_lock);
- /*
- * Did cancel find it meanwhile?
- */
- if (hlist_unhashed(&req->hash_node))
- return;
-
hlist_del_init(&req->hash_node);
ret = io_waitid_finish(req, ret);
@@ -202,6 +196,7 @@ bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
if (!io_match_task_safe(req, task, cancel_all))
continue;
+ hlist_del_init(&req->hash_node);
__io_waitid_cancel(ctx, req);
found = true;
}
diff --git a/kernel/bounds.c b/kernel/bounds.c
index b529182e8b..c5a9fcd2d6 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -19,7 +19,7 @@ int main(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
#ifdef CONFIG_SMP
- DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
+ DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
#ifdef CONFIG_LRU_GEN
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index fe254ae035..27fd417771 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -863,7 +863,12 @@ static LIST_HEAD(pack_list);
* CONFIG_MMU=n. Use PAGE_SIZE in these cases.
*/
#ifdef PMD_SIZE
-#define BPF_PROG_PACK_SIZE (PMD_SIZE * num_possible_nodes())
+/* PMD_SIZE can be very large on some architectures, and reserving that much
+ * memory in a single allocation is wasteful. Hardcode BPF_PROG_PACK_SIZE to
+ * 2MiB * num_possible_nodes(); on most architectures PMD_SIZE is greater than
+ * or equal to 2MiB anyway.
+ */
+#define BPF_PROG_PACK_SIZE (SZ_2M * num_possible_nodes())
#else
#define BPF_PROG_PACK_SIZE PAGE_SIZE
#endif
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8a0bb80fe4..8f1d390bcb 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -178,7 +178,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
void **frames, int n,
struct xdp_cpumap_stats *stats)
{
- struct xdp_rxq_info rxq;
+ struct xdp_rxq_info rxq = {};
struct xdp_buff xdp;
int i, nframes = 0;
@@ -262,6 +262,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
static int cpu_map_kthread_run(void *data)
{
struct bpf_cpu_map_entry *rcpu = data;
+ unsigned long last_qs = jiffies;
complete(&rcpu->kthread_running);
set_current_state(TASK_INTERRUPTIBLE);
@@ -287,10 +288,12 @@ static int cpu_map_kthread_run(void *data)
if (__ptr_ring_empty(rcpu->queue)) {
schedule();
sched = 1;
+ last_qs = jiffies;
} else {
__set_current_state(TASK_RUNNING);
}
} else {
+ rcu_softirq_qs_periodic(last_qs);
sched = cond_resched();
}
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index a936c704d4..4e2cdbb562 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -130,13 +130,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
bpf_map_init_from_attr(&dtab->map, attr);
if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
- dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
-
- if (!dtab->n_buckets) /* Overflow check */
+ /* hash table size must be power of 2; roundup_pow_of_two() can
+ * overflow into UB on 32-bit arches, so check that first
+ */
+ if (dtab->map.max_entries > 1UL << 31)
return -EINVAL;
- }
- if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
+
dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
dtab->map.numa_node);
if (!dtab->dev_index_head)
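A hedged sketch of the guard pattern introduced above (and repeated in the hashtab and stackmap hunks below): __roundup_pow_of_two() is roughly 1UL << fls_long(n - 1), so on 32-bit builds an input above 2^31 would shift past the word width, which is undefined behaviour; hence the explicit bound check before rounding. The helper below is illustrative only.

/* Illustrative only: bound-check before rounding, as the hunks in this patch
 * do for devmap/hashtab/stackmap. Assumes unsigned long may be 32 bits wide. */
static int example_bucket_count(u32 max_entries, u32 *n_buckets)
{
	if (max_entries > 1UL << 31)
		return -E2BIG;
	*n_buckets = roundup_pow_of_two(max_entries);
	return 0;
}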
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 5b9146fa82..85cd17ca38 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -498,7 +498,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
num_possible_cpus());
}
- /* hash table size must be power of 2 */
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ err = -E2BIG;
+ if (htab->map.max_entries > 1UL << 31)
+ goto free_htab;
+
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
htab->elem_size = sizeof(struct htab_elem) +
@@ -508,10 +514,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else
htab->elem_size += round_up(htab->map.value_size, 8);
- err = -E2BIG;
- /* prevent zero size kmalloc and check for u32 overflow */
- if (htab->n_buckets == 0 ||
- htab->n_buckets > U32_MAX / sizeof(struct bucket))
+ /* check for u32 overflow */
+ if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
goto free_htab;
err = bpf_map_init_elem_count(&htab->map);
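
The devmap, hashtab and stackmap hunks all add the same guard: reject max_entries above 2^31 before calling roundup_pow_of_two(), because rounding such a value on a 32-bit unsigned long requires a shift by the full word width, which is undefined behaviour. A standalone sketch of the failure mode, where roundup_pow_of_two32() is a userspace stand-in for the kernel helper on a 32-bit long:

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for roundup_pow_of_two() on a 32-bit unsigned long: rounds n up
 * to the next power of two by shifting 1u left. For n > 2^31 the required
 * shift is 32, which is undefined behaviour -- hence the new pre-check.
 */
static uint32_t roundup_pow_of_two32(uint32_t n)
{
        uint32_t shift = 32 - __builtin_clz(n - 1);     /* undefined for n <= 1 */

        return 1u << shift;     /* undefined when shift == 32, i.e. n > 2^31 */
}

int main(void)
{
        uint32_t max_entries = 0x80000001u;     /* anything above 2^31 */

        /* The guard the patches add: bail out before the rounding happens. */
        if (max_entries > (1u << 31)) {
                puts("rejected with -E2BIG/-EINVAL before hitting UB");
                return 0;
        }
        printf("n_buckets = %u\n", roundup_pow_of_two32(max_entries));
        return 0;
}
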
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index ce4729ef1a..b912d055a8 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -334,7 +334,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
__this_cpu_write(irqsave_flags, flags);
}
-notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
{
__bpf_spin_lock_irqsave(lock);
return 0;
@@ -357,7 +357,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
local_irq_restore(flags);
}
-notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
+NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
{
__bpf_spin_unlock_irqrestore(lock);
return 0;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index dff7ba5397..c99f8e5234 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -91,11 +91,14 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
} else if (value_size / 8 > sysctl_perf_event_max_stack)
return ERR_PTR(-EINVAL);
- /* hash table size must be power of 2 */
- n_buckets = roundup_pow_of_two(attr->max_entries);
- if (!n_buckets)
+ /* hash table size must be power of 2; roundup_pow_of_two() can overflow
+ * into UB on 32-bit arches, so check that first
+ */
+ if (attr->max_entries > 1UL << 31)
return ERR_PTR(-E2BIG);
+ n_buckets = roundup_pow_of_two(attr->max_entries);
+
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
if (!smap)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e215413c79..890d4c4bf9 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5445,7 +5445,9 @@ BTF_ID(struct, prog_test_ref_kfunc)
#ifdef CONFIG_CGROUPS
BTF_ID(struct, cgroup)
#endif
+#ifdef CONFIG_BPF_JIT
BTF_ID(struct, bpf_cpumask)
+#endif
BTF_ID(struct, task_struct)
BTF_SET_END(rcu_protected_types)
@@ -16686,6 +16688,9 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat
{
int i;
+ if (old->callback_depth > cur->callback_depth)
+ return false;
+
for (i = 0; i < MAX_BPF_REG; i++)
if (!regsafe(env, &old->regs[i], &cur->regs[i],
&env->idmap_scratch, exact))
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 615daaf87f..ffe0e00294 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -2466,7 +2466,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
update_partition_sd_lb(cs, old_prs);
out_free:
free_cpumasks(NULL, &tmp);
- return 0;
+ return retval;
}
/**
@@ -2502,9 +2502,6 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
return 0;
- if (alloc_cpumasks(NULL, &tmp))
- return -ENOMEM;
-
if (*buf)
compute_effective_exclusive_cpumask(trialcs, NULL);
@@ -2519,6 +2516,9 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (retval)
return retval;
+ if (alloc_cpumasks(NULL, &tmp))
+ return -ENOMEM;
+
if (old_prs) {
if (cpumask_empty(trialcs->effective_xcpus)) {
invalidate = true;
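
Both cpuset hunks tighten error handling: update_cpumask() now propagates retval instead of always returning 0, and update_exclusive_cpumask() allocates its temporary cpumasks only after every validation step has passed, so the early error returns no longer leak them. A hypothetical sketch of that "validate first, allocate last" ordering; struct ctx and update_thing() are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int *val; };

/* Cheap validation first, allocation only once the update is known to go
 * ahead, so the error paths need no unwinding. */
static int update_thing(struct ctx *c, int new_value)
{
        int *tmp;

        if (new_value < 0)                  /* validation, may bail early */
                return -EINVAL;

        tmp = malloc(sizeof(*tmp));         /* allocate after validation */
        if (!tmp)
                return -ENOMEM;

        *tmp = new_value;
        free(c->val);
        c->val = tmp;
        return 0;
}

int main(void)
{
        struct ctx c = { 0 };

        printf("%d %d\n", update_thing(&c, -1), update_thing(&c, 5));
        free(c.val);
        return 0;
}
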
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 755d8d4ef5..9e337493d7 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -377,6 +377,9 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
crashk_low_res.start = low_base;
crashk_low_res.end = low_base + low_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+ insert_resource(&iomem_resource, &crashk_low_res);
+#endif
#endif
return 0;
}
@@ -458,8 +461,12 @@ retry:
crashk_res.start = crash_base;
crashk_res.end = crash_base + crash_size - 1;
+#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
+ insert_resource(&iomem_resource, &crashk_res);
+#endif
}
+#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{
if (crashk_res.start < crashk_res.end)
@@ -472,6 +479,7 @@ static __init int insert_crashkernel_resources(void)
}
early_initcall(insert_crashkernel_resources);
#endif
+#endif
int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
void **addr, unsigned long *sz)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 33d942615b..9edfb3b770 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -981,8 +981,7 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
dma_addr_t tbl_dma_addr =
phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
unsigned long max_slots = get_max_slots(boundary_mask);
- unsigned int iotlb_align_mask =
- dma_get_min_align_mask(dev) | alloc_align_mask;
+ unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
unsigned int nslots = nr_slots(alloc_size), stride;
unsigned int offset = swiotlb_align_offset(dev, orig_addr);
unsigned int index, slots_checked, count = 0, i;
@@ -994,18 +993,25 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
BUG_ON(area_index >= pool->nareas);
/*
- * For allocations of PAGE_SIZE or larger only look for page aligned
- * allocations.
+ * Ensure that the allocation is at least slot-aligned and update
+ * 'iotlb_align_mask' to ignore bits that will be preserved when
+ * offsetting into the allocation.
*/
- if (alloc_size >= PAGE_SIZE)
- iotlb_align_mask |= ~PAGE_MASK;
- iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
+ alloc_align_mask |= (IO_TLB_SIZE - 1);
+ iotlb_align_mask &= ~alloc_align_mask;
/*
* For mappings with an alignment requirement don't bother looping to
* unaligned slots once we found an aligned one.
*/
- stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
+ stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
+
+ /*
+ * For allocations of PAGE_SIZE or larger only look for page aligned
+ * allocations.
+ */
+ if (alloc_size >= PAGE_SIZE)
+ stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
spin_lock_irqsave(&area->lock, flags);
if (unlikely(nslots > pool->area_nslabs - area->used))
@@ -1015,11 +1021,14 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
index = area->index;
for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
+ phys_addr_t tlb_addr;
+
slot_index = slot_base + index;
+ tlb_addr = slot_addr(tbl_dma_addr, slot_index);
- if (orig_addr &&
- (slot_addr(tbl_dma_addr, slot_index) &
- iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
+ if ((tlb_addr & alloc_align_mask) ||
+ (orig_addr && (tlb_addr & iotlb_align_mask) !=
+ (orig_addr & iotlb_align_mask))) {
index = wrap_area_index(pool, index + 1);
slots_checked++;
continue;
@@ -1608,12 +1617,14 @@ struct page *swiotlb_alloc(struct device *dev, size_t size)
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
struct io_tlb_pool *pool;
phys_addr_t tlb_addr;
+ unsigned int align;
int index;
if (!mem)
return NULL;
- index = swiotlb_find_slots(dev, 0, size, 0, &pool);
+ align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
+ index = swiotlb_find_slots(dev, 0, size, align, &pool);
if (index == -1)
return NULL;
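
The swiotlb change above splits the alignment handling: bits below the slot size (plus any caller-supplied alloc_align_mask) constrain where the allocation may start, while the remaining min-align-mask bits only have to match the original address so the offset into the slot is preserved. A small sketch of the mask arithmetic with made-up values (a 2 KiB IO_TLB_SIZE and a 4 KiB device min-align mask, chosen only for illustration):

#include <stdio.h>

#define IO_TLB_SIZE     2048u           /* illustrative slot size */

int main(void)
{
        unsigned int iotlb_align_mask = 0xfff;  /* 4 KiB device min align */
        unsigned int alloc_align_mask = 0;      /* no extra caller request */

        /* Ensure at least slot alignment for the allocation start ... */
        alloc_align_mask |= (IO_TLB_SIZE - 1);
        /* ... and keep only the bits that must match the original address. */
        iotlb_align_mask &= ~alloc_align_mask;

        printf("alloc_align_mask = %#x\n", alloc_align_mask);  /* 0x7ff */
        printf("iotlb_align_mask = %#x\n", iotlb_align_mask);  /* 0x800 */
        return 0;
}
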
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index d7ee4bc3f2..5ff4f1cd36 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -77,8 +77,14 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
/* Either of the above might have changed the syscall number */
syscall = syscall_get_nr(current, regs);
- if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT))
+ if (unlikely(work & SYSCALL_WORK_SYSCALL_TRACEPOINT)) {
trace_sys_enter(regs, syscall);
+ /*
+ * Probes or BPF hooks in the tracepoint may have changed the
+ * system call number as well.
+ */
+ syscall = syscall_get_nr(current, regs);
+ }
syscall_enter_audit(regs, syscall);
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index 0ea1b2970a..28db5b7589 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -236,6 +236,10 @@ choice
possible to load a signed module containing the algorithm to check
the signature on that module.
+config MODULE_SIG_SHA1
+ bool "Sign modules with SHA-1"
+ select CRYPTO_SHA1
+
config MODULE_SIG_SHA256
bool "Sign modules with SHA-256"
select CRYPTO_SHA256
@@ -265,6 +269,7 @@ endchoice
config MODULE_SIG_HASH
string
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
+ default "sha1" if MODULE_SIG_SHA1
default "sha256" if MODULE_SIG_SHA256
default "sha384" if MODULE_SIG_SHA384
default "sha512" if MODULE_SIG_SHA512
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 98fedfdb8d..34d9e718c2 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -2486,6 +2486,11 @@ static void do_free_init(struct work_struct *w)
}
}
+void flush_module_init_free_work(void)
+{
+ flush_work(&init_free_wq);
+}
+
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
/* Default value for module->async_probe_requested */
@@ -2590,8 +2595,8 @@ static noinline int do_init_module(struct module *mod)
* Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any
* code such as mark_rodata_ro() which depends on those mappings to
- * be cleaned up needs to sync with the queued work - ie
- * rcu_barrier()
+ * be cleaned up needs to sync with the queued work by invoking
+ * flush_module_init_free_work().
*/
if (llist_add(&freeinit->node, &init_free_list))
schedule_work(&init_free_wq);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index fa3bf161d1..a718067dee 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -192,6 +192,7 @@ static int __init mem_sleep_default_setup(char *str)
if (mem_sleep_labels[state] &&
!strcmp(str, mem_sleep_labels[state])) {
mem_sleep_default = state;
+ mem_sleep_current = state;
break;
}
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 6c2afee5ef..ac2d9750e5 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -130,6 +130,7 @@ struct printk_message {
};
bool other_cpu_in_panic(void);
+bool this_cpu_in_panic(void);
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
bool is_extended, bool may_supress);
diff --git a/kernel/printk/nbcon.c b/kernel/printk/nbcon.c
index b96077152f..c8093bcc01 100644
--- a/kernel/printk/nbcon.c
+++ b/kernel/printk/nbcon.c
@@ -140,39 +140,6 @@ static inline bool nbcon_state_try_cmpxchg(struct console *con, struct nbcon_sta
return atomic_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_state), &cur->atom, new->atom);
}
-#ifdef CONFIG_64BIT
-
-#define __seq_to_nbcon_seq(seq) (seq)
-#define __nbcon_seq_to_seq(seq) (seq)
-
-#else /* CONFIG_64BIT */
-
-#define __seq_to_nbcon_seq(seq) ((u32)seq)
-
-static inline u64 __nbcon_seq_to_seq(u32 nbcon_seq)
-{
- u64 seq;
- u64 rb_next_seq;
-
- /*
- * The provided sequence is only the lower 32 bits of the ringbuffer
- * sequence. It needs to be expanded to 64bit. Get the next sequence
- * number from the ringbuffer and fold it.
- *
- * Having a 32bit representation in the console is sufficient.
- * If a console ever gets more than 2^31 records behind
- * the ringbuffer then this is the least of the problems.
- *
- * Also the access to the ring buffer is always safe.
- */
- rb_next_seq = prb_next_seq(prb);
- seq = rb_next_seq - ((u32)rb_next_seq - nbcon_seq);
-
- return seq;
-}
-
-#endif /* CONFIG_64BIT */
-
/**
* nbcon_seq_read - Read the current console sequence
* @con: Console to read the sequence of
@@ -183,7 +150,7 @@ u64 nbcon_seq_read(struct console *con)
{
unsigned long nbcon_seq = atomic_long_read(&ACCESS_PRIVATE(con, nbcon_seq));
- return __nbcon_seq_to_seq(nbcon_seq);
+ return __ulseq_to_u64seq(prb, nbcon_seq);
}
/**
@@ -204,7 +171,7 @@ void nbcon_seq_force(struct console *con, u64 seq)
*/
u64 valid_seq = max_t(u64, seq, prb_first_valid_seq(prb));
- atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __seq_to_nbcon_seq(valid_seq));
+ atomic_long_set(&ACCESS_PRIVATE(con, nbcon_seq), __u64seq_to_ulseq(valid_seq));
/* Clear con->seq since nbcon consoles use con->nbcon_seq instead. */
con->seq = 0;
@@ -223,11 +190,11 @@ void nbcon_seq_force(struct console *con, u64 seq)
*/
static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
{
- unsigned long nbcon_seq = __seq_to_nbcon_seq(ctxt->seq);
+ unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
struct console *con = ctxt->console;
if (atomic_long_try_cmpxchg(&ACCESS_PRIVATE(con, nbcon_seq), &nbcon_seq,
- __seq_to_nbcon_seq(new_seq))) {
+ __u64seq_to_ulseq(new_seq))) {
ctxt->seq = new_seq;
} else {
ctxt->seq = nbcon_seq_read(con);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f2444b581e..7a835b277e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -347,6 +347,29 @@ static bool panic_in_progress(void)
return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}
+/* Return true if a panic is in progress on the current CPU. */
+bool this_cpu_in_panic(void)
+{
+ /*
+ * We can use raw_smp_processor_id() here because it is impossible for
+ * the task to be migrated to the panic_cpu, or away from it. If
+ * panic_cpu has already been set, and we're not currently executing on
+ * that CPU, then we never will be.
+ */
+ return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
+}
+
+/*
+ * Return true if a panic is in progress on a remote CPU.
+ *
+ * On true, the local CPU should immediately release any printing resources
+ * that may be needed by the panic CPU.
+ */
+bool other_cpu_in_panic(void)
+{
+ return (panic_in_progress() && !this_cpu_in_panic());
+}
+
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
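
The relationship between the two helpers added in the hunk above is easy to model outside the kernel: a single atomic holds the ID of the panicking CPU (or an invalid marker), and "another CPU is in panic" is simply "a panic is in progress and it is not us". A userspace sketch with a fixed stand-in for raw_smp_processor_id():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PANIC_CPU_INVALID       -1

static atomic_int panic_cpu = PANIC_CPU_INVALID;
static int this_cpu = 1;        /* stand-in for raw_smp_processor_id() */

static bool panic_in_progress(void)
{
        return atomic_load(&panic_cpu) != PANIC_CPU_INVALID;
}

static bool this_cpu_in_panic(void)
{
        return atomic_load(&panic_cpu) == this_cpu;
}

static bool other_cpu_in_panic(void)
{
        return panic_in_progress() && !this_cpu_in_panic();
}

int main(void)
{
        printf("%d %d\n", this_cpu_in_panic(), other_cpu_in_panic()); /* 0 0 */
        atomic_store(&panic_cpu, 3);    /* some other CPU paniced */
        printf("%d %d\n", this_cpu_in_panic(), other_cpu_in_panic()); /* 0 1 */
        atomic_store(&panic_cpu, 1);    /* (hypothetically) we paniced */
        printf("%d %d\n", this_cpu_in_panic(), other_cpu_in_panic()); /* 1 0 */
        return 0;
}
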
@@ -1846,10 +1869,23 @@ static bool console_waiter;
*/
static void console_lock_spinning_enable(void)
{
+ /*
+ * Do not use spinning in panic(). The panic CPU wants to keep the lock.
+ * Non-panic CPUs abandon the flush anyway.
+ *
+ * Just keep the lockdep annotation. The panic-CPU should avoid
+ * taking console_owner_lock because it might cause a deadlock.
+ * This looks like the easiest way to prevent false lockdep
+ * reports without handling the races in a lockless way.
+ */
+ if (panic_in_progress())
+ goto lockdep;
+
raw_spin_lock(&console_owner_lock);
console_owner = current;
raw_spin_unlock(&console_owner_lock);
+lockdep:
/* The waiter may spin on us after setting console_owner */
spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}
@@ -1874,6 +1910,22 @@ static int console_lock_spinning_disable_and_check(int cookie)
{
int waiter;
+ /*
+ * Ignore spinning waiters during panic() because they might get stopped
+ * or blocked at any time.
+ *
+ * It is safe because nobody is allowed to start spinning during panic
+ * in the first place. If there has been a waiter, then non-panic CPUs
+ * might stay spinning. They would get stopped anyway. The panic context
+ * will never start spinning and an interrupted spin on panic CPU will
+ * never continue.
+ */
+ if (panic_in_progress()) {
+ /* Keep lockdep happy. */
+ spin_release(&console_owner_dep_map, _THIS_IP_);
+ return 0;
+ }
+
raw_spin_lock(&console_owner_lock);
waiter = READ_ONCE(console_waiter);
console_owner = NULL;
@@ -1974,6 +2026,12 @@ static int console_trylock_spinning(void)
*/
mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
+ /*
+ * Update @console_may_schedule for trylock because the previous
+ * owner may have been schedulable.
+ */
+ console_may_schedule = 0;
+
return 1;
}
@@ -2601,26 +2659,6 @@ static int console_cpu_notify(unsigned int cpu)
return 0;
}
-/*
- * Return true if a panic is in progress on a remote CPU.
- *
- * On true, the local CPU should immediately release any printing resources
- * that may be needed by the panic CPU.
- */
-bool other_cpu_in_panic(void)
-{
- if (!panic_in_progress())
- return false;
-
- /*
- * We can use raw_smp_processor_id() here because it is impossible for
- * the task to be migrated to the panic_cpu, or away from it. If
- * panic_cpu has already been set, and we're not currently executing on
- * that CPU, then we never will be.
- */
- return atomic_read(&panic_cpu) != raw_smp_processor_id();
-}
-
/**
* console_lock - block the console subsystem from printing
*
@@ -3263,6 +3301,21 @@ static int __init keep_bootcon_setup(char *str)
early_param("keep_bootcon", keep_bootcon_setup);
+static int console_call_setup(struct console *newcon, char *options)
+{
+ int err;
+
+ if (!newcon->setup)
+ return 0;
+
+ /* Synchronize with possible boot console. */
+ console_lock();
+ err = newcon->setup(newcon, options);
+ console_unlock();
+
+ return err;
+}
+
/*
* This is called by register_console() to try to match
* the newly registered console with any of the ones selected
@@ -3298,8 +3351,8 @@ static int try_enable_preferred_console(struct console *newcon,
if (_braille_register_console(newcon, c))
return 0;
- if (newcon->setup &&
- (err = newcon->setup(newcon, c->options)) != 0)
+ err = console_call_setup(newcon, c->options);
+ if (err)
return err;
}
newcon->flags |= CON_ENABLED;
@@ -3325,7 +3378,7 @@ static void try_enable_default_console(struct console *newcon)
if (newcon->index < 0)
newcon->index = 0;
- if (newcon->setup && newcon->setup(newcon, NULL) != 0)
+ if (console_call_setup(newcon, NULL) != 0)
return;
newcon->flags |= CON_ENABLED;
@@ -3761,7 +3814,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
might_sleep();
- seq = prb_next_seq(prb);
+ seq = prb_next_reserve_seq(prb);
/* Flush the consoles so that records up to @seq are printed. */
console_lock();
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index fde338606c..f5a8bb606f 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -6,6 +6,7 @@
#include <linux/errno.h>
#include <linux/bug.h>
#include "printk_ringbuffer.h"
+#include "internal.h"
/**
* DOC: printk_ringbuffer overview
@@ -303,6 +304,9 @@
*
* desc_push_tail:B / desc_reserve:D
* set descriptor reusable (state), then push descriptor tail (id)
+ *
+ * desc_update_last_finalized:A / desc_last_finalized_seq:A
+ * store finalized record, then set new highest finalized sequence number
*/
#define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits)
@@ -1442,19 +1446,117 @@ fail_reopen:
}
/*
+ * @last_finalized_seq value guarantees that all records up to and including
+ * this sequence number are finalized and can be read. The only exception are
+ * too old records which have already been overwritten.
+ *
+ * It is also guaranteed that @last_finalized_seq only increases.
+ *
+ * Be aware that finalized records following non-finalized records are not
+ * reported because they are not yet available to the reader. For example,
+ * a new record stored via printk() will not be available to a printer if
+ * it follows a record that has not been finalized yet. However, once that
+ * non-finalized record becomes finalized, @last_finalized_seq will be
+ * appropriately updated and the full set of finalized records will be
+ * available to the printer. And since each printk() caller will either
+ * directly print or trigger deferred printing of all available unprinted
+ * records, all printk() messages will get printed.
+ */
+static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ unsigned long ulseq;
+
+ /*
+ * Guarantee the sequence number is loaded before loading the
+ * associated record in order to guarantee that the record can be
+ * seen by this CPU. This pairs with desc_update_last_finalized:A.
+ */
+ ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq
+ ); /* LMM(desc_last_finalized_seq:A) */
+
+ return __ulseq_to_u64seq(rb, ulseq);
+}
+
+static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
+ struct printk_record *r, unsigned int *line_count);
+
+/*
+ * Check if there are records directly following @last_finalized_seq that are
+ * finalized. If so, update @last_finalized_seq to the latest of these
+ * records. It is not allowed to skip over records that are not yet finalized.
+ */
+static void desc_update_last_finalized(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ u64 old_seq = desc_last_finalized_seq(rb);
+ unsigned long oldval;
+ unsigned long newval;
+ u64 finalized_seq;
+ u64 try_seq;
+
+try_again:
+ finalized_seq = old_seq;
+ try_seq = finalized_seq + 1;
+
+ /* Try to find later finalized records. */
+ while (_prb_read_valid(rb, &try_seq, NULL, NULL)) {
+ finalized_seq = try_seq;
+ try_seq++;
+ }
+
+ /* No update needed if no later finalized record was found. */
+ if (finalized_seq == old_seq)
+ return;
+
+ oldval = __u64seq_to_ulseq(old_seq);
+ newval = __u64seq_to_ulseq(finalized_seq);
+
+ /*
+ * Set the sequence number of a later finalized record that has been
+ * seen.
+ *
+ * Guarantee the record data is visible to other CPUs before storing
+ * its sequence number. This pairs with desc_last_finalized_seq:A.
+ *
+ * Memory barrier involvement:
+ *
+ * If desc_last_finalized_seq:A reads from
+ * desc_update_last_finalized:A, then desc_read:A reads from
+ * _prb_commit:B.
+ *
+ * Relies on:
+ *
+ * RELEASE from _prb_commit:B to desc_update_last_finalized:A
+ * matching
+ * ACQUIRE from desc_last_finalized_seq:A to desc_read:A
+ *
+ * Note: _prb_commit:B and desc_update_last_finalized:A can be
+ * different CPUs. However, the desc_update_last_finalized:A
+ * CPU (which performs the release) must have previously seen
+ * _prb_commit:B.
+ */
+ if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq,
+ &oldval, newval)) { /* LMM(desc_update_last_finalized:A) */
+ old_seq = __ulseq_to_u64seq(rb, oldval);
+ goto try_again;
+ }
+}
+
+/*
* Attempt to finalize a specified descriptor. If this fails, the descriptor
* is either already final or it will finalize itself when the writer commits.
*/
-static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
+static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id)
{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
unsigned long prev_state_val = DESC_SV(id, desc_committed);
struct prb_desc *d = to_desc(desc_ring, id);
- atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
- DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
-
- /* Best effort to remember the last finalized @id. */
- atomic_long_set(&desc_ring->last_finalized_id, id);
+ if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val,
+ DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */
+ desc_update_last_finalized(rb);
+ }
}
/**
@@ -1550,7 +1652,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
* readers. (For seq==0 there is no previous descriptor.)
*/
if (info->seq > 0)
- desc_make_final(desc_ring, DESC_ID(id - 1));
+ desc_make_final(rb, DESC_ID(id - 1));
r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
/* If text data allocation fails, a data-less record is committed. */
@@ -1643,7 +1745,7 @@ void prb_commit(struct prb_reserved_entry *e)
*/
head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
if (head_id != e->id)
- desc_make_final(desc_ring, e->id);
+ desc_make_final(e->rb, e->id);
}
/**
@@ -1663,12 +1765,9 @@ void prb_commit(struct prb_reserved_entry *e)
*/
void prb_final_commit(struct prb_reserved_entry *e)
{
- struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
-
_prb_commit(e, desc_finalized);
- /* Best effort to remember the last finalized @id. */
- atomic_long_set(&desc_ring->last_finalized_id, e->id);
+ desc_update_last_finalized(e->rb);
}
/*
@@ -1832,7 +1931,7 @@ static int prb_read(struct printk_ringbuffer *rb, u64 seq,
}
/* Get the sequence number of the tail descriptor. */
-static u64 prb_first_seq(struct printk_ringbuffer *rb)
+u64 prb_first_seq(struct printk_ringbuffer *rb)
{
struct prb_desc_ring *desc_ring = &rb->desc_ring;
enum desc_state d_state;
@@ -1875,12 +1974,123 @@ static u64 prb_first_seq(struct printk_ringbuffer *rb)
return seq;
}
+/**
+ * prb_next_reserve_seq() - Get the sequence number after the most recently
+ * reserved record.
+ *
+ * @rb: The ringbuffer to get the sequence number from.
+ *
+ * This is the public function available to readers to see what sequence
+ * number will be assigned to the next reserved record.
+ *
+ * Note that depending on the situation, this value can be equal to or
+ * higher than the sequence number returned by prb_next_seq().
+ *
+ * Context: Any context.
+ * Return: The sequence number that will be assigned to the next record
+ * reserved.
+ */
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb)
+{
+ struct prb_desc_ring *desc_ring = &rb->desc_ring;
+ unsigned long last_finalized_id;
+ atomic_long_t *state_var;
+ u64 last_finalized_seq;
+ unsigned long head_id;
+ struct prb_desc desc;
+ unsigned long diff;
+ struct prb_desc *d;
+ int err;
+
+ /*
+ * It may not be possible to read a sequence number for @head_id.
+ * So the ID of @last_finalized_seq is used to calculate what the
+ * sequence number of @head_id will be.
+ */
+
+try_again:
+ last_finalized_seq = desc_last_finalized_seq(rb);
+
+ /*
+ * @head_id is loaded after @last_finalized_seq to ensure that
+ * it points to the record with @last_finalized_seq or newer.
+ *
+ * Memory barrier involvement:
+ *
+ * If desc_last_finalized_seq:A reads from
+ * desc_update_last_finalized:A, then
+ * prb_next_reserve_seq:A reads from desc_reserve:D.
+ *
+ * Relies on:
+ *
+ * RELEASE from desc_reserve:D to desc_update_last_finalized:A
+ * matching
+ * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A
+ *
+ * Note: desc_reserve:D and desc_update_last_finalized:A can be
+ * different CPUs. However, the desc_update_last_finalized:A CPU
+ * (which performs the release) must have previously seen
+ * desc_read:C, which implies desc_reserve:D can be seen.
+ */
+ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */
+
+ d = to_desc(desc_ring, last_finalized_seq);
+ state_var = &d->state_var;
+
+ /* Extract the ID, used to specify the descriptor to read. */
+ last_finalized_id = DESC_ID(atomic_long_read(state_var));
+
+ /* Ensure @last_finalized_id is correct. */
+ err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc);
+
+ if (err == -EINVAL) {
+ if (last_finalized_seq == 0) {
+ /*
+ * No record has been finalized or even reserved yet.
+ *
+ * The @head_id is initialized such that the first
+ * increment will yield the first record (seq=0).
+ * Handle it separately to avoid a negative @diff
+ * below.
+ */
+ if (head_id == DESC0_ID(desc_ring->count_bits))
+ return 0;
+
+ /*
+ * One or more descriptors are already reserved. Use
+ * the descriptor ID of the first one (@seq=0) for
+ * the @diff below.
+ */
+ last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1;
+ } else {
+ /* Record must have been overwritten. Try again. */
+ goto try_again;
+ }
+ }
+
+ /* Diff of known descriptor IDs to compute related sequence numbers. */
+ diff = head_id - last_finalized_id;
+
+ /*
+ * @head_id points to the most recently reserved record, but this
+ * function returns the sequence number that will be assigned to the
+ * next (not yet reserved) record. Thus +1 is needed.
+ */
+ return (last_finalized_seq + diff + 1);
+}
+
/*
- * Non-blocking read of a record. Updates @seq to the last finalized record
- * (which may have no data available).
+ * Non-blocking read of a record.
+ *
+ * On success @seq is updated to the record that was read and (if provided)
+ * @r and @line_count will contain the read/calculated data.
+ *
+ * On failure @seq is updated to a record that is not yet available to the
+ * reader, but it will be the next record available to the reader.
*
- * See the description of prb_read_valid() and prb_read_valid_info()
- * for details.
+ * Note: When the current CPU is in panic, this function will skip over any
+ * non-existent/non-finalized records in order to allow the panic CPU
+ * to print any and all records that have been finalized.
*/
static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
struct printk_record *r, unsigned int *line_count)
@@ -1899,12 +2109,32 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
*seq = tail_seq;
} else if (err == -ENOENT) {
- /* Record exists, but no data available. Skip. */
+ /* Record exists, but the data was lost. Skip. */
(*seq)++;
} else {
- /* Non-existent/non-finalized record. Must stop. */
- return false;
+ /*
+ * Non-existent/non-finalized record. Must stop.
+ *
+ * For panic situations it cannot be expected that
+ * non-finalized records will become finalized. But
+ * there may be other finalized records beyond that
+ * need to be printed for a panic situation. If this
+ * is the panic CPU, skip this
+ * non-existent/non-finalized record unless it is
+ * at or beyond the head, in which case it is not
+ * possible to continue.
+ *
+ * Note that new messages printed on the panic CPU are
+ * already finalized when we get here. The only exception
+ * might be the last message without a trailing newline.
+ * But it would have the sequence number returned
+ * by "prb_next_reserve_seq() - 1".
+ */
+ if (this_cpu_in_panic() && ((*seq + 1) < prb_next_reserve_seq(rb)))
+ (*seq)++;
+ else
+ return false;
}
}
@@ -1932,7 +2162,7 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
* On success, the reader must check r->info.seq to see which record was
* actually read. This allows the reader to detect dropped records.
*
- * Failure means @seq refers to a not yet written record.
+ * Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
struct printk_record *r)
@@ -1962,7 +2192,7 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
* On success, the reader must check info->seq to see which record meta data
* was actually read. This allows the reader to detect dropped records.
*
- * Failure means @seq refers to a not yet written record.
+ * Failure means @seq refers to a record not yet available to the reader.
*/
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count)
@@ -2008,7 +2238,9 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
* newest sequence number available to readers will be.
*
* This provides readers a sequence number to jump to if all currently
- * available records should be skipped.
+ * available records should be skipped. It is guaranteed that all records
+ * previous to the returned value have been finalized and are (or were)
+ * available to the reader.
*
* Context: Any context.
* Return: The sequence number of the next newest (not yet available) record
@@ -2016,34 +2248,19 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
*/
u64 prb_next_seq(struct printk_ringbuffer *rb)
{
- struct prb_desc_ring *desc_ring = &rb->desc_ring;
- enum desc_state d_state;
- unsigned long id;
u64 seq;
- /* Check if the cached @id still points to a valid @seq. */
- id = atomic_long_read(&desc_ring->last_finalized_id);
- d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
+ seq = desc_last_finalized_seq(rb);
- if (d_state == desc_finalized || d_state == desc_reusable) {
- /*
- * Begin searching after the last finalized record.
- *
- * On 0, the search must begin at 0 because of hack#2
- * of the bootstrapping phase it is not known if a
- * record at index 0 exists.
- */
- if (seq != 0)
- seq++;
- } else {
- /*
- * The information about the last finalized sequence number
- * has gone. It should happen only when there is a flood of
- * new messages and the ringbuffer is rapidly recycled.
- * Give up and start from the beginning.
- */
- seq = 0;
- }
+ /*
+ * Begin searching after the last finalized record.
+ *
+ * On 0, the search must begin at 0 because, due to hack#2
+ * of the bootstrapping phase, it is not known whether a
+ * record at index 0 exists.
+ */
+ if (seq != 0)
+ seq++;
/*
* The information about the last finalized @seq might be inaccurate.
@@ -2085,7 +2302,7 @@ void prb_init(struct printk_ringbuffer *rb,
rb->desc_ring.infos = infos;
atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
- atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
+ atomic_long_set(&rb->desc_ring.last_finalized_seq, 0);
rb->text_data_ring.size_bits = textbits;
rb->text_data_ring.data = text_buf;
diff --git a/kernel/printk/printk_ringbuffer.h b/kernel/printk/printk_ringbuffer.h
index 18cd25e489..cb887489d0 100644
--- a/kernel/printk/printk_ringbuffer.h
+++ b/kernel/printk/printk_ringbuffer.h
@@ -75,7 +75,7 @@ struct prb_desc_ring {
struct printk_info *infos;
atomic_long_t head_id;
atomic_long_t tail_id;
- atomic_long_t last_finalized_id;
+ atomic_long_t last_finalized_seq;
};
/*
@@ -259,7 +259,7 @@ static struct printk_ringbuffer name = { \
.infos = &_##name##_infos[0], \
.head_id = ATOMIC_INIT(DESC0_ID(descbits)), \
.tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \
- .last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)), \
+ .last_finalized_seq = ATOMIC_INIT(0), \
}, \
.text_data_ring = { \
.size_bits = (avgtextbits) + (descbits), \
@@ -378,7 +378,41 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
struct printk_info *info, unsigned int *line_count);
+u64 prb_first_seq(struct printk_ringbuffer *rb);
u64 prb_first_valid_seq(struct printk_ringbuffer *rb);
u64 prb_next_seq(struct printk_ringbuffer *rb);
+u64 prb_next_reserve_seq(struct printk_ringbuffer *rb);
+
+#ifdef CONFIG_64BIT
+
+#define __u64seq_to_ulseq(u64seq) (u64seq)
+#define __ulseq_to_u64seq(rb, ulseq) (ulseq)
+
+#else /* CONFIG_64BIT */
+
+#define __u64seq_to_ulseq(u64seq) ((u32)u64seq)
+
+static inline u64 __ulseq_to_u64seq(struct printk_ringbuffer *rb, u32 ulseq)
+{
+ u64 rb_first_seq = prb_first_seq(rb);
+ u64 seq;
+
+ /*
+ * The provided sequence is only the lower 32 bits of the ringbuffer
+ * sequence. It needs to be expanded to 64bit. Get the first sequence
+ * number from the ringbuffer and fold it.
+ *
+ * Having a 32bit representation in the console is sufficient.
+ * If a console ever gets more than 2^31 records behind
+ * the ringbuffer then this is the least of the problems.
+ *
+ * Also the access to the ring buffer is always safe.
+ */
+ seq = rb_first_seq - (s32)((u32)rb_first_seq - ulseq);
+
+ return seq;
+}
+
+#endif /* CONFIG_64BIT */
#endif /* _KERNEL_PRINTK_RINGBUFFER_H */
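
The new __ulseq_to_u64seq() relies on the reader being less than 2^31 records away from a known 64-bit reference (prb_first_seq() here, prb_next_seq() in the nbcon variant that was removed). The fold can be tried in isolation; rb_first_seq and the sequence values below are arbitrary test numbers:

#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the folding above: given the full 64-bit sequence of
 * the oldest record and a 32-bit truncated sequence, recover the 64-bit
 * value, assuming the reader is less than 2^31 records away. */
static uint64_t ulseq_to_u64seq(uint64_t rb_first_seq, uint32_t ulseq)
{
        return rb_first_seq - (int32_t)((uint32_t)rb_first_seq - ulseq);
}

int main(void)
{
        uint64_t first = 0x100000123ULL;        /* oldest record's 64-bit seq */

        /* A console that stored only the low 32 bits of 0x100000456. */
        printf("%#llx\n",
               (unsigned long long)ulseq_to_u64seq(first, 0x00000456u));

        /* A console slightly *behind* the first record still folds back
         * correctly because the difference is taken as signed 32-bit. */
        printf("%#llx\n",
               (unsigned long long)ulseq_to_u64seq(first, 0x00000100u));
        return 0;
}
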
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 157f3ca2a9..f544f24df1 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -4741,13 +4741,16 @@ static void __init rcu_start_exp_gp_kworkers(void)
rcu_exp_gp_kworker = kthread_create_worker(0, gp_kworker_name);
if (IS_ERR_OR_NULL(rcu_exp_gp_kworker)) {
pr_err("Failed to create %s!\n", gp_kworker_name);
+ rcu_exp_gp_kworker = NULL;
return;
}
rcu_exp_par_gp_kworker = kthread_create_worker(0, par_gp_kworker_name);
if (IS_ERR_OR_NULL(rcu_exp_par_gp_kworker)) {
pr_err("Failed to create %s!\n", par_gp_kworker_name);
+ rcu_exp_par_gp_kworker = NULL;
kthread_destroy_worker(rcu_exp_gp_kworker);
+ rcu_exp_gp_kworker = NULL;
return;
}
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 2ac440bc7e..8107f81845 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -428,7 +428,12 @@ static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
__sync_rcu_exp_select_node_cpus(rewp);
}
-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+ return !!READ_ONCE(rcu_exp_gp_kworker);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
{
return !!READ_ONCE(rcu_exp_par_gp_kworker);
}
@@ -478,7 +483,12 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
__sync_rcu_exp_select_node_cpus(rewp);
}
-static inline bool rcu_gp_par_worker_started(void)
+static inline bool rcu_exp_worker_started(void)
+{
+ return !!READ_ONCE(rcu_gp_wq);
+}
+
+static inline bool rcu_exp_par_worker_started(void)
{
return !!READ_ONCE(rcu_par_gp_wq);
}
@@ -541,7 +551,7 @@ static void sync_rcu_exp_select_cpus(void)
rnp->exp_need_flush = false;
if (!READ_ONCE(rnp->expmask))
continue; /* Avoid early boot non-existent wq. */
- if (!rcu_gp_par_worker_started() ||
+ if (!rcu_exp_par_worker_started() ||
rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
rcu_is_last_leaf_node(rnp)) {
/* No worker started yet or last leaf, do direct call. */
@@ -956,7 +966,7 @@ static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
*/
void synchronize_rcu_expedited(void)
{
- bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
+ bool use_worker;
unsigned long flags;
struct rcu_exp_work rew;
struct rcu_node *rnp;
@@ -967,6 +977,9 @@ void synchronize_rcu_expedited(void)
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu_expedited() in RCU read-side critical section");
+ use_worker = (rcu_scheduler_active != RCU_SCHEDULER_INIT) &&
+ rcu_exp_worker_started();
+
/* Is the state is such that the call is a grace period? */
if (rcu_blocking_is_gp()) {
// Note well that this code runs with !PREEMPT && !SMP.
@@ -996,7 +1009,7 @@ void synchronize_rcu_expedited(void)
return; /* Someone else did our work for us. */
/* Ensure that load happens before action based on it. */
- if (unlikely(boottime)) {
+ if (unlikely(!use_worker)) {
/* Direct call during scheduler init and early_initcalls(). */
rcu_exp_sel_wait_wake(s);
} else {
@@ -1014,7 +1027,7 @@ void synchronize_rcu_expedited(void)
/* Let the next expedited grace period start. */
mutex_unlock(&rcu_state.exp_mutex);
- if (likely(!boottime))
+ if (likely(use_worker))
synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ac9f4b1d9..c43b71792a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7296,7 +7296,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
if (!available_idle_cpu(cpu)) {
idle = false;
if (*idle_cpu == -1) {
- if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, p->cpus_ptr)) {
+ if (sched_idle_cpu(cpu) && cpumask_test_cpu(cpu, cpus)) {
*idle_cpu = cpu;
break;
}
@@ -7304,7 +7304,7 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
}
break;
}
- if (*idle_cpu == -1 && cpumask_test_cpu(cpu, p->cpus_ptr))
+ if (*idle_cpu == -1 && cpumask_test_cpu(cpu, cpus))
*idle_cpu = cpu;
}
@@ -7318,13 +7318,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
/*
* Scan the local SMT mask for idle CPUs.
*/
-static int select_idle_smt(struct task_struct *p, int target)
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
int cpu;
for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
if (cpu == target)
continue;
+ /*
+ * Check if the CPU is in the LLC scheduling domain of @target.
+ * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
+ */
+ if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
+ continue;
if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
return cpu;
}
@@ -7348,7 +7354,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
return __select_idle_cpu(core, p);
}
-static inline int select_idle_smt(struct task_struct *p, int target)
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
{
return -1;
}
@@ -7598,7 +7604,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
has_idle_core = test_idle_cores(target);
if (!has_idle_core && cpus_share_cache(prev, target)) {
- i = select_idle_smt(p, prev);
+ i = select_idle_smt(p, sd, prev);
if ((unsigned int)i < nr_cpumask_bits)
return i;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index f8e543f1e3..8bb106a56b 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2408,8 +2408,11 @@ static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))
return -EINVAL;
- /* PARISC cannot allow mdwe as it needs writable stacks */
- if (IS_ENABLED(CONFIG_PARISC))
+ /*
+ * EOPNOTSUPP might be more appropriate here in principle, but
+ * existing userspace depends on EINVAL specifically.
+ */
+ if (!arch_memory_deny_write_exec_supported())
return -EINVAL;
current_bits = get_current_mdwe();
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index 9de66bbbb3..4782edcbe7 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -129,15 +129,17 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
goto out;
}
pccontext->clk = clk;
- fp->private_data = pccontext;
- if (clk->ops.open)
+ if (clk->ops.open) {
err = clk->ops.open(pccontext, fp->f_mode);
- else
- err = 0;
-
- if (!err) {
- get_device(clk->dev);
+ if (err) {
+ kfree(pccontext);
+ goto out;
+ }
}
+
+ fp->private_data = pccontext;
+ get_device(clk->dev);
+ err = 0;
out:
up_read(&clk->rwsem);
return err;
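
The posix-clock fix reorders the open path so that fp->private_data is only set, and the device reference only taken, after the optional ->open() callback has succeeded; on failure the freshly allocated context is freed instead of being left behind a published pointer. A hypothetical userspace sketch of that ordering; struct clk_ops, struct file_stub and clock_open() are invented for illustration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct clk_ops { int (*open)(void *ctx, int mode); };
struct file_stub { void *private_data; };

static int clock_open(struct file_stub *fp, const struct clk_ops *ops, int mode)
{
        void *ctx = calloc(1, 64);

        if (!ctx)
                return -ENOMEM;

        if (ops->open) {
                int err = ops->open(ctx, mode);

                if (err) {
                        free(ctx);      /* nothing published yet */
                        return err;
                }
        }

        fp->private_data = ctx;         /* publish only on full success */
        return 0;
}

static int failing_open(void *ctx, int mode)
{
        (void)ctx; (void)mode;
        return -EBUSY;
}

int main(void)
{
        struct clk_ops ops = { .open = failing_open };
        struct file_stub fp = { 0 };
        int ret = clock_open(&fp, &ops, 0);

        printf("ret=%d private_data=%p\n", ret, fp.private_data);
        return 0;
}
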
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index ca058c8af6..3e5d422dd1 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -73,7 +73,7 @@ static void time64_to_tm_test_date_range(struct kunit *test)
days = div_s64(secs, 86400);
- #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %ld", \
+ #define FAIL_MSG "%05ld/%02d/%02d (%2d) : %lld", \
year, month, mdday, yday, days
KUNIT_ASSERT_EQ_MSG(test, year - 1900, result.tm_year, FAIL_MSG);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 266d02809d..8aab7ed414 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1180,13 +1180,15 @@ static int adjust_historical_crosststamp(struct system_time_snapshot *history,
}
/*
- * cycle_between - true if test occurs chronologically between before and after
+ * timestamp_in_interval - true if ts is chronologically in [start, end]
+ *
+ * True if ts occurs chronologically at or after start, and before or at end.
*/
-static bool cycle_between(u64 before, u64 test, u64 after)
+static bool timestamp_in_interval(u64 start, u64 end, u64 ts)
{
- if (test > before && test < after)
+ if (ts >= start && ts <= end)
return true;
- if (test < before && before > after)
+ if (start > end && (ts >= start || ts <= end))
return true;
return false;
}
@@ -1246,7 +1248,7 @@ int get_device_system_crosststamp(int (*get_time_fn)
*/
now = tk_clock_read(&tk->tkr_mono);
interval_start = tk->tkr_mono.cycle_last;
- if (!cycle_between(interval_start, cycles, now)) {
+ if (!timestamp_in_interval(interval_start, now, cycles)) {
clock_was_set_seq = tk->clock_was_set_seq;
cs_was_changed_seq = tk->cs_was_changed_seq;
cycles = interval_start;
@@ -1259,10 +1261,8 @@ int get_device_system_crosststamp(int (*get_time_fn)
tk_core.timekeeper.offs_real);
base_raw = tk->tkr_raw.base;
- nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
- system_counterval.cycles);
- nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
- system_counterval.cycles);
+ nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, cycles);
+ nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, cycles);
} while (read_seqcount_retry(&tk_core.seq, seq));
xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real);
@@ -1277,13 +1277,13 @@ int get_device_system_crosststamp(int (*get_time_fn)
bool discontinuity;
/*
- * Check that the counter value occurs after the provided
+ * Check that the counter value is not before the provided
* history reference and that the history doesn't cross a
* clocksource change
*/
if (!history_begin ||
- !cycle_between(history_begin->cycles,
- system_counterval.cycles, cycles) ||
+ !timestamp_in_interval(history_begin->cycles,
+ cycles, system_counterval.cycles) ||
history_begin->cs_was_changed_seq != cs_was_changed_seq)
return -EINVAL;
partial_history_cycles = cycles - system_counterval.cycles;
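
timestamp_in_interval() is self-contained enough to exercise directly: the interval is now closed on both ends (so a timestamp equal to cycle_last or to the freshly read counter is accepted), and the second test handles the case where the counter wrapped between start and end. A standalone copy with a few made-up values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same logic as the helper introduced above: true if ts lies in the closed
 * interval [start, end], where the counter may have wrapped so that end is
 * numerically smaller than start. */
static bool timestamp_in_interval(uint64_t start, uint64_t end, uint64_t ts)
{
        if (ts >= start && ts <= end)
                return true;
        if (start > end && (ts >= start || ts <= end))
                return true;
        return false;
}

int main(void)
{
        /* Endpoints are now included, unlike the old cycle_between(). */
        printf("%d\n", timestamp_in_interval(100, 200, 100));           /* 1 */
        printf("%d\n", timestamp_in_interval(100, 200, 200));           /* 1 */

        /* Wrapped counter: interval runs from near UINT64_MAX through 0. */
        printf("%d\n", timestamp_in_interval(UINT64_MAX - 5, 10, 3));   /* 1 */
        printf("%d\n", timestamp_in_interval(UINT64_MAX - 5, 10, 50));  /* 0 */
        return 0;
}
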
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6fa67c297e..140f8eed83 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -412,7 +412,6 @@ struct rb_irq_work {
struct irq_work work;
wait_queue_head_t waiters;
wait_queue_head_t full_waiters;
- long wait_index;
bool waiters_pending;
bool full_waiters_pending;
bool wakeup_full;
@@ -903,8 +902,19 @@ static void rb_wake_up_waiters(struct irq_work *work)
wake_up_all(&rbwork->waiters);
if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
+ /* Only cpu_buffer sets the above flags */
+ struct ring_buffer_per_cpu *cpu_buffer =
+ container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
+
+ /* Called from interrupt context */
+ raw_spin_lock(&cpu_buffer->reader_lock);
rbwork->wakeup_full = false;
rbwork->full_waiters_pending = false;
+
+ /* Waking up all waiters, they will reset the shortest full */
+ cpu_buffer->shortest_full = 0;
+ raw_spin_unlock(&cpu_buffer->reader_lock);
+
wake_up_all(&rbwork->full_waiters);
}
}
@@ -945,14 +955,95 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
rbwork = &cpu_buffer->irq_work;
}
- rbwork->wait_index++;
- /* make sure the waiters see the new index */
- smp_wmb();
-
/* This can be called in any context */
irq_work_queue(&rbwork->work);
}
+static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ bool ret = false;
+
+ /* Reads of all CPUs always wait for any data */
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ return !ring_buffer_empty(buffer);
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ if (!ring_buffer_empty_cpu(buffer, cpu)) {
+ unsigned long flags;
+ bool pagebusy;
+
+ if (!full)
+ return true;
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+ ret = !pagebusy && full_hit(buffer, cpu, full);
+
+ if (!ret && (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)) {
+ cpu_buffer->shortest_full = full;
+ }
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+ return ret;
+}
+
+static inline bool
+rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
+ int cpu, int full, ring_buffer_cond_fn cond, void *data)
+{
+ if (rb_watermark_hit(buffer, cpu, full))
+ return true;
+
+ if (cond(data))
+ return true;
+
+ /*
+ * The events can happen in critical sections where
+ * checking a work queue can cause deadlocks.
+ * After adding a task to the queue, this flag is set
+ * only to notify events to try to wake up the queue
+ * using irq_work.
+ *
+ * We don't clear it even if the buffer is no longer
+ * empty. The flag only causes the next event to run
+ * irq_work to do the work queue wake up. The worst
+ * that can happen if we race with !trace_empty() is that
+ * an event will cause an irq_work to try to wake up
+ * an empty queue.
+ *
+ * There's no reason to protect this flag either, as
+ * the work queue and irq_work logic will do the necessary
+ * synchronization for the wake ups. The only thing
+ * that is necessary is that the wake up happens after
+ * a task has been queued. It's OK for spurious wake ups.
+ */
+ if (full)
+ rbwork->full_waiters_pending = true;
+ else
+ rbwork->waiters_pending = true;
+
+ return false;
+}
+
+/*
+ * The default wait condition for ring_buffer_wait() is just to exit the
+ * wait loop the first time it is woken up.
+ */
+static bool rb_wait_once(void *data)
+{
+ long *once = data;
+
+ /* wait_event() actually calls this twice before scheduling */
+ if (*once > 1)
+ return true;
+
+ (*once)++;
+ return false;
+}
+
/**
* ring_buffer_wait - wait for input to the ring buffer
* @buffer: buffer to wait on
@@ -966,101 +1057,39 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
- DEFINE_WAIT(wait);
- struct rb_irq_work *work;
- long wait_index;
+ struct wait_queue_head *waitq;
+ ring_buffer_cond_fn cond;
+ struct rb_irq_work *rbwork;
+ void *data;
+ long once = 0;
int ret = 0;
+ cond = rb_wait_once;
+ data = &once;
+
/*
* Depending on what the caller is waiting for, either any
* data in any cpu buffer, or a specific buffer, put the
* caller on the appropriate wait queue.
*/
if (cpu == RING_BUFFER_ALL_CPUS) {
- work = &buffer->irq_work;
+ rbwork = &buffer->irq_work;
/* Full only makes sense on per cpu reads */
full = 0;
} else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -ENODEV;
cpu_buffer = buffer->buffers[cpu];
- work = &cpu_buffer->irq_work;
- }
-
- wait_index = READ_ONCE(work->wait_index);
-
- while (true) {
- if (full)
- prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
- else
- prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
-
- /*
- * The events can happen in critical sections where
- * checking a work queue can cause deadlocks.
- * After adding a task to the queue, this flag is set
- * only to notify events to try to wake up the queue
- * using irq_work.
- *
- * We don't clear it even if the buffer is no longer
- * empty. The flag only causes the next event to run
- * irq_work to do the work queue wake up. The worse
- * that can happen if we race with !trace_empty() is that
- * an event will cause an irq_work to try to wake up
- * an empty queue.
- *
- * There's no reason to protect this flag either, as
- * the work queue and irq_work logic will do the necessary
- * synchronization for the wake ups. The only thing
- * that is necessary is that the wake up happens after
- * a task has been queued. It's OK for spurious wake ups.
- */
- if (full)
- work->full_waiters_pending = true;
- else
- work->waiters_pending = true;
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
-
- if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
- break;
-
- if (cpu != RING_BUFFER_ALL_CPUS &&
- !ring_buffer_empty_cpu(buffer, cpu)) {
- unsigned long flags;
- bool pagebusy;
- bool done;
-
- if (!full)
- break;
-
- raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
- pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
- done = !pagebusy && full_hit(buffer, cpu, full);
-
- if (!cpu_buffer->shortest_full ||
- cpu_buffer->shortest_full > full)
- cpu_buffer->shortest_full = full;
- raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
- if (done)
- break;
- }
-
- schedule();
-
- /* Make sure to see the new wait index */
- smp_rmb();
- if (wait_index != work->wait_index)
- break;
+ rbwork = &cpu_buffer->irq_work;
}
if (full)
- finish_wait(&work->full_waiters, &wait);
+ waitq = &rbwork->full_waiters;
else
- finish_wait(&work->waiters, &wait);
+ waitq = &rbwork->waiters;
+
+ ret = wait_event_interruptible((*waitq),
+ rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
return ret;
}
@@ -1084,30 +1113,51 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table, int full)
{
struct ring_buffer_per_cpu *cpu_buffer;
- struct rb_irq_work *work;
+ struct rb_irq_work *rbwork;
if (cpu == RING_BUFFER_ALL_CPUS) {
- work = &buffer->irq_work;
+ rbwork = &buffer->irq_work;
full = 0;
} else {
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return EPOLLERR;
cpu_buffer = buffer->buffers[cpu];
- work = &cpu_buffer->irq_work;
+ rbwork = &cpu_buffer->irq_work;
}
if (full) {
- poll_wait(filp, &work->full_waiters, poll_table);
- work->full_waiters_pending = true;
+ unsigned long flags;
+
+ poll_wait(filp, &rbwork->full_waiters, poll_table);
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
if (!cpu_buffer->shortest_full ||
cpu_buffer->shortest_full > full)
cpu_buffer->shortest_full = full;
- } else {
- poll_wait(filp, &work->waiters, poll_table);
- work->waiters_pending = true;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ if (full_hit(buffer, cpu, full))
+ return EPOLLIN | EPOLLRDNORM;
+ /*
+ * Only allow full_waiters_pending update to be seen after
+ * the shortest_full is set. If the writer sees the
+ * full_waiters_pending flag set, it will compare the
+ * amount in the ring buffer to shortest_full. If the amount
+ * in the ring buffer is greater than the shortest_full
+ * percent, it will call the irq_work handler to wake up
+ * this list. The irq_handler will reset shortest_full
+ * back to zero. That's done under the reader_lock, but
+ * the below smp_mb() makes sure that the update to
+ * full_waiters_pending doesn't leak up into the above.
+ */
+ smp_mb();
+ rbwork->full_waiters_pending = true;
+ return 0;
}
+ poll_wait(filp, &rbwork->waiters, poll_table);
+ rbwork->waiters_pending = true;
+
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
@@ -1123,9 +1173,6 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
*/
smp_mb();
- if (full)
- return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
-
if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
return EPOLLIN | EPOLLRDNORM;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3fdc57450e..e03960f9f4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8352,6 +8352,20 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
return size;
}
+static int tracing_buffers_flush(struct file *file, fl_owner_t id)
+{
+ struct ftrace_buffer_info *info = file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ iter->wait_index++;
+ /* Make sure the waiters see the new wait_index */
+ smp_wmb();
+
+ ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
+
+ return 0;
+}
+
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
struct ftrace_buffer_info *info = file->private_data;
@@ -8363,12 +8377,6 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
__trace_array_put(iter->tr);
- iter->wait_index++;
- /* Make sure the waiters see the new wait_index */
- smp_wmb();
-
- ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
-
if (info->spare)
ring_buffer_free_read_page(iter->array_buffer->buffer,
info->spare_cpu, info->spare);
@@ -8582,6 +8590,7 @@ static const struct file_operations tracing_buffers_fops = {
.read = tracing_buffers_read,
.poll = tracing_buffers_poll,
.release = tracing_buffers_release,
+ .flush = tracing_buffers_flush,
.splice_read = tracing_buffers_splice_read,
.unlocked_ioctl = tracing_buffers_ioctl,
.llseek = no_llseek,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4f87b1851c..8f761417a9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,7 +108,7 @@ enum {
RESCUER_NICE_LEVEL = MIN_NICE,
HIGHPRI_NICE_LEVEL = MIN_NICE,
- WQ_NAME_LEN = 24,
+ WQ_NAME_LEN = 32,
};
/*
@@ -122,6 +122,9 @@ enum {
*
* L: pool->lock protected. Access with pool->lock held.
*
+ * LN: pool->lock and wq_node_nr_active->lock protected for writes. Either for
+ * reads.
+ *
* K: Only modified by worker while holding pool->lock. Can be safely read by
* self, while holding pool->lock or from IRQ context if %current is the
* kworker.
@@ -143,6 +146,9 @@ enum {
*
* WR: wq->mutex protected for writes. RCU protected for reads.
*
+ * WO: wq->mutex protected for writes. Updated with WRITE_ONCE() and can be read
+ * with READ_ONCE() without locking.
+ *
* MD: wq_mayday_lock protected.
*
* WD: Used internally by the watchdog.
@@ -240,18 +246,18 @@ struct pool_workqueue {
* pwq->inactive_works instead of pool->worklist and marked with
* WORK_STRUCT_INACTIVE.
*
- * All work items marked with WORK_STRUCT_INACTIVE do not participate
- * in pwq->nr_active and all work items in pwq->inactive_works are
- * marked with WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE
- * work items are in pwq->inactive_works. Some of them are ready to
- * run in pool->worklist or worker->scheduled. Those work itmes are
- * only struct wq_barrier which is used for flush_work() and should
- * not participate in pwq->nr_active. For non-barrier work item, it
- * is marked with WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
+ * All work items marked with WORK_STRUCT_INACTIVE do not participate in
+ * nr_active and all work items in pwq->inactive_works are marked with
+ * WORK_STRUCT_INACTIVE. But not all WORK_STRUCT_INACTIVE work items are
+ * in pwq->inactive_works. Some of them are ready to run in
+ * pool->worklist or worker->scheduled. Those work items are only struct
+ * wq_barrier which is used for flush_work() and should not participate
+ * in nr_active. For non-barrier work item, it is marked with
+ * WORK_STRUCT_INACTIVE iff it is in pwq->inactive_works.
*/
int nr_active; /* L: nr of active works */
- int max_active; /* L: max active works */
struct list_head inactive_works; /* L: inactive works */
+ struct list_head pending_node; /* LN: node on wq_node_nr_active->pending_pwqs */
struct list_head pwqs_node; /* WR: node on wq->pwqs */
struct list_head mayday_node; /* MD: node on wq->maydays */
@@ -279,6 +285,26 @@ struct wq_flusher {
struct wq_device;
/*
+ * Unlike in a per-cpu workqueue where max_active limits its concurrency level
+ * on each CPU, in an unbound workqueue, max_active applies to the whole system.
+ * As sharing a single nr_active across multiple sockets can be very expensive,
+ * the counting and enforcement is per NUMA node.
+ *
+ * The following struct is used to enforce per-node max_active. When a pwq wants
+ * to start executing a work item, it should increment ->nr using
+ * tryinc_node_nr_active(). If acquisition fails due to ->nr already being over
+ * ->max, the pwq is queued on ->pending_pwqs. As in-flight work items finish
+ * and decrement ->nr, node_activate_pending_pwq() activates the pending pwqs in
+ * round-robin order.
+ */
+struct wq_node_nr_active {
+ int max; /* per-node max_active */
+ atomic_t nr; /* per-node nr_active */
+ raw_spinlock_t lock; /* nests inside pool locks */
+ struct list_head pending_pwqs; /* LN: pwqs with inactive works */
+};
+
+/*
* The externally visible workqueue. It relays the issued work items to
* the appropriate worker_pool through its pool_workqueues.
*/
@@ -298,10 +324,15 @@ struct workqueue_struct {
struct worker *rescuer; /* MD: rescue worker */
int nr_drainers; /* WQ: drain in progress */
- int saved_max_active; /* WQ: saved pwq max_active */
+
+ /* See alloc_workqueue() function comment for info on min/max_active */
+ int max_active; /* WO: max active works */
+ int min_active; /* WO: min active works */
+ int saved_max_active; /* WQ: saved max_active */
+ int saved_min_active; /* WQ: saved min_active */
struct workqueue_attrs *unbound_attrs; /* PW: only for unbound wqs */
- struct pool_workqueue *dfl_pwq; /* PW: only for unbound wqs */
+ struct pool_workqueue __rcu *dfl_pwq; /* PW: only for unbound wqs */
#ifdef CONFIG_SYSFS
struct wq_device *wq_dev; /* I: for sysfs interface */
@@ -323,6 +354,7 @@ struct workqueue_struct {
/* hot fields used during command issue, aligned to cacheline */
unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
+ struct wq_node_nr_active *node_nr_active[]; /* I: per-node nr_active */
};
static struct kmem_cache *pwq_cache;
@@ -626,6 +658,36 @@ static int worker_pool_assign_id(struct worker_pool *pool)
return ret;
}
+static struct pool_workqueue __rcu **
+unbound_pwq_slot(struct workqueue_struct *wq, int cpu)
+{
+ if (cpu >= 0)
+ return per_cpu_ptr(wq->cpu_pwq, cpu);
+ else
+ return &wq->dfl_pwq;
+}
+
+/* @cpu < 0 for dfl_pwq */
+static struct pool_workqueue *unbound_pwq(struct workqueue_struct *wq, int cpu)
+{
+ return rcu_dereference_check(*unbound_pwq_slot(wq, cpu),
+ lockdep_is_held(&wq_pool_mutex) ||
+ lockdep_is_held(&wq->mutex));
+}
+
+/**
+ * unbound_effective_cpumask - effective cpumask of an unbound workqueue
+ * @wq: workqueue of interest
+ *
+ * @wq->unbound_attrs->cpumask contains the cpumask requested by the user which
+ * is masked with wq_unbound_cpumask to determine the effective cpumask. The
+ * default pwq is always mapped to the pool with the current effective cpumask.
+ */
+static struct cpumask *unbound_effective_cpumask(struct workqueue_struct *wq)
+{
+ return unbound_pwq(wq, -1)->pool->attrs->__pod_cpumask;
+}
+
static unsigned int work_color_to_flags(int color)
{
return color << WORK_STRUCT_COLOR_SHIFT;
@@ -1396,6 +1458,71 @@ work_func_t wq_worker_last_func(struct task_struct *task)
}
/**
+ * wq_node_nr_active - Determine wq_node_nr_active to use
+ * @wq: workqueue of interest
+ * @node: NUMA node, can be %NUMA_NO_NODE
+ *
+ * Determine wq_node_nr_active to use for @wq on @node. Returns:
+ *
+ * - %NULL for per-cpu workqueues as they don't need to use shared nr_active.
+ *
+ * - node_nr_active[nr_node_ids] if @node is %NUMA_NO_NODE.
+ *
+ * - Otherwise, node_nr_active[@node].
+ */
+static struct wq_node_nr_active *wq_node_nr_active(struct workqueue_struct *wq,
+ int node)
+{
+ if (!(wq->flags & WQ_UNBOUND))
+ return NULL;
+
+ if (node == NUMA_NO_NODE)
+ node = nr_node_ids;
+
+ return wq->node_nr_active[node];
+}
+
+/**
+ * wq_update_node_max_active - Update per-node max_actives to use
+ * @wq: workqueue to update
+ * @off_cpu: CPU that's going down, -1 if a CPU is not going down
+ *
+ * Update @wq->node_nr_active[]->max. @wq must be unbound. max_active is
+ * distributed among nodes according to the proportions of numbers of online
+ * cpus. The result is always between @wq->min_active and max_active.
+ */
+static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
+{
+ struct cpumask *effective = unbound_effective_cpumask(wq);
+ int min_active = READ_ONCE(wq->min_active);
+ int max_active = READ_ONCE(wq->max_active);
+ int total_cpus, node;
+
+ lockdep_assert_held(&wq->mutex);
+
+ if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
+ off_cpu = -1;
+
+ total_cpus = cpumask_weight_and(effective, cpu_online_mask);
+ if (off_cpu >= 0)
+ total_cpus--;
+
+ for_each_node(node) {
+ int node_cpus;
+
+ node_cpus = cpumask_weight_and(effective, cpumask_of_node(node));
+ if (off_cpu >= 0 && cpu_to_node(off_cpu) == node)
+ node_cpus--;
+
+ wq_node_nr_active(wq, node)->max =
+ clamp(DIV_ROUND_UP(max_active * node_cpus, total_cpus),
+ min_active, max_active);
+ }
+
+ wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
+}
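
The proportional split done by wq_update_node_max_active() is easiest to see with concrete numbers. Assuming, purely for illustration, max_active = 16, min_active = 8 and two nodes holding 12 and 4 of the 16 eligible CPUs, the per-node limits come out as 12 and 8 (the second node is pulled up to min_active), while the NUMA_NO_NODE fallback slot is pinned to min_active as in the code above.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define CLAMP(v, lo, hi)    ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    int main(void)
    {
        int max_active = 16, min_active = 8;   /* invented example values */
        int node_cpus[] = { 12, 4 };           /* online CPUs per node */
        int total_cpus = 16;

        for (int node = 0; node < 2; node++) {
            int share = DIV_ROUND_UP(max_active * node_cpus[node], total_cpus);
            printf("node %d: max = %d\n", node,
                   CLAMP(share, min_active, max_active));   /* 12, then 8 */
        }
        printf("NUMA_NO_NODE fallback: max = %d\n", min_active);
        return 0;
    }
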
+
+/**
* get_pwq - get an extra reference on the specified pool_workqueue
* @pwq: pool_workqueue to get
*
@@ -1447,24 +1574,293 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq)
}
}
-static void pwq_activate_inactive_work(struct work_struct *work)
+static bool pwq_is_empty(struct pool_workqueue *pwq)
{
- struct pool_workqueue *pwq = get_work_pwq(work);
+ return !pwq->nr_active && list_empty(&pwq->inactive_works);
+}
+static void __pwq_activate_work(struct pool_workqueue *pwq,
+ struct work_struct *work)
+{
+ unsigned long *wdb = work_data_bits(work);
+
+ WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
trace_workqueue_activate_work(work);
if (list_empty(&pwq->pool->worklist))
pwq->pool->watchdog_ts = jiffies;
move_linked_works(work, &pwq->pool->worklist, NULL);
- __clear_bit(WORK_STRUCT_INACTIVE_BIT, work_data_bits(work));
+ __clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
+}
+
+/**
+ * pwq_activate_work - Activate a work item if inactive
+ * @pwq: pool_workqueue @work belongs to
+ * @work: work item to activate
+ *
+ * Returns %true if activated. %false if already active.
+ */
+static bool pwq_activate_work(struct pool_workqueue *pwq,
+ struct work_struct *work)
+{
+ struct worker_pool *pool = pwq->pool;
+ struct wq_node_nr_active *nna;
+
+ lockdep_assert_held(&pool->lock);
+
+ if (!(*work_data_bits(work) & WORK_STRUCT_INACTIVE))
+ return false;
+
+ nna = wq_node_nr_active(pwq->wq, pool->node);
+ if (nna)
+ atomic_inc(&nna->nr);
+
pwq->nr_active++;
+ __pwq_activate_work(pwq, work);
+ return true;
+}
+
+static bool tryinc_node_nr_active(struct wq_node_nr_active *nna)
+{
+ int max = READ_ONCE(nna->max);
+
+ while (true) {
+ int old, tmp;
+
+ old = atomic_read(&nna->nr);
+ if (old >= max)
+ return false;
+ tmp = atomic_cmpxchg_relaxed(&nna->nr, old, old + 1);
+ if (tmp == old)
+ return true;
+ }
+}
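
tryinc_node_nr_active() is the usual "increment unless the limit is reached" compare-and-swap loop. A minimal userspace sketch of the same pattern with C11 atomics (illustrative only; the names are made up):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct limited_counter {
        atomic_int nr;
        int max;
    };

    /* Increment ->nr unless it has already reached ->max. */
    static bool try_inc(struct limited_counter *c)
    {
        int old = atomic_load(&c->nr);

        while (old < c->max) {
            /* On failure, 'old' is reloaded with the current value. */
            if (atomic_compare_exchange_weak(&c->nr, &old, old + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        struct limited_counter c = { .nr = 0, .max = 2 };

        printf("%d\n", try_inc(&c));   /* 1 */
        printf("%d\n", try_inc(&c));   /* 1 */
        printf("%d\n", try_inc(&c));   /* 0: limit reached */
        return 0;
    }

atomic_compare_exchange_weak() reloads the expected value on failure, so the loop re-checks the limit before retrying, just as the kernel loop re-reads nna->nr each iteration.
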
+
+/**
+ * pwq_tryinc_nr_active - Try to increment nr_active for a pwq
+ * @pwq: pool_workqueue of interest
+ * @fill: max_active may have increased, try to increase concurrency level
+ *
+ * Try to increment nr_active for @pwq. Returns %true if an nr_active count is
+ * successfully obtained. %false otherwise.
+ */
+static bool pwq_tryinc_nr_active(struct pool_workqueue *pwq, bool fill)
+{
+ struct workqueue_struct *wq = pwq->wq;
+ struct worker_pool *pool = pwq->pool;
+ struct wq_node_nr_active *nna = wq_node_nr_active(wq, pool->node);
+ bool obtained = false;
+
+ lockdep_assert_held(&pool->lock);
+
+ if (!nna) {
+ /* per-cpu workqueue, pwq->nr_active is sufficient */
+ obtained = pwq->nr_active < READ_ONCE(wq->max_active);
+ goto out;
+ }
+
+ /*
+ * Unbound workqueue uses per-node shared nr_active $nna. If @pwq is
+ * already waiting on $nna, pwq_dec_nr_active() will maintain the
+ * concurrency level. Don't jump the line.
+ *
+ * We need to ignore the pending test after max_active has increased as
+ * pwq_dec_nr_active() can only maintain the concurrency level but not
+ * increase it. This is indicated by @fill.
+ */
+ if (!list_empty(&pwq->pending_node) && likely(!fill))
+ goto out;
+
+ obtained = tryinc_node_nr_active(nna);
+ if (obtained)
+ goto out;
+
+ /*
+ * Lockless acquisition failed. Lock, add ourself to $nna->pending_pwqs
+ * and try again. The smp_mb() is paired with the implied memory barrier
+ * of atomic_dec_return() in pwq_dec_nr_active() to ensure that either
+ * we see the decremented $nna->nr or they see non-empty
+ * $nna->pending_pwqs.
+ */
+ raw_spin_lock(&nna->lock);
+
+ if (list_empty(&pwq->pending_node))
+ list_add_tail(&pwq->pending_node, &nna->pending_pwqs);
+ else if (likely(!fill))
+ goto out_unlock;
+
+ smp_mb();
+
+ obtained = tryinc_node_nr_active(nna);
+
+ /*
+ * If @fill, @pwq might have already been pending. Being spuriously
+ * pending in cold paths doesn't affect anything. Let's leave it be.
+ */
+ if (obtained && likely(!fill))
+ list_del_init(&pwq->pending_node);
+
+out_unlock:
+ raw_spin_unlock(&nna->lock);
+out:
+ if (obtained)
+ pwq->nr_active++;
+ return obtained;
+}
+
+/**
+ * pwq_activate_first_inactive - Activate the first inactive work item on a pwq
+ * @pwq: pool_workqueue of interest
+ * @fill: max_active may have increased, try to increase concurrency level
+ *
+ * Activate the first inactive work item of @pwq if available and allowed by
+ * max_active limit.
+ *
+ * Returns %true if an inactive work item has been activated. %false if no
+ * inactive work item is found or max_active limit is reached.
+ */
+static bool pwq_activate_first_inactive(struct pool_workqueue *pwq, bool fill)
+{
+ struct work_struct *work =
+ list_first_entry_or_null(&pwq->inactive_works,
+ struct work_struct, entry);
+
+ if (work && pwq_tryinc_nr_active(pwq, fill)) {
+ __pwq_activate_work(pwq, work);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/**
+ * node_activate_pending_pwq - Activate a pending pwq on a wq_node_nr_active
+ * @nna: wq_node_nr_active to activate a pending pwq for
+ * @caller_pool: worker_pool the caller is locking
+ *
+ * Activate a pwq in @nna->pending_pwqs. Called with @caller_pool locked.
+ * @caller_pool may be unlocked and relocked to lock other worker_pools.
+ */
+static void node_activate_pending_pwq(struct wq_node_nr_active *nna,
+ struct worker_pool *caller_pool)
+{
+ struct worker_pool *locked_pool = caller_pool;
+ struct pool_workqueue *pwq;
+ struct work_struct *work;
+
+ lockdep_assert_held(&caller_pool->lock);
+
+ raw_spin_lock(&nna->lock);
+retry:
+ pwq = list_first_entry_or_null(&nna->pending_pwqs,
+ struct pool_workqueue, pending_node);
+ if (!pwq)
+ goto out_unlock;
+
+ /*
+ * If @pwq is for a different pool than @locked_pool, we need to lock
+ * @pwq->pool->lock. Let's trylock first. If unsuccessful, do the unlock
+ * / lock dance. For that, we also need to release @nna->lock as it's
+ * nested inside pool locks.
+ */
+ if (pwq->pool != locked_pool) {
+ raw_spin_unlock(&locked_pool->lock);
+ locked_pool = pwq->pool;
+ if (!raw_spin_trylock(&locked_pool->lock)) {
+ raw_spin_unlock(&nna->lock);
+ raw_spin_lock(&locked_pool->lock);
+ raw_spin_lock(&nna->lock);
+ goto retry;
+ }
+ }
+
+ /*
+ * $pwq may not have any inactive work items due to e.g. cancellations.
+ * Drop it from pending_pwqs and see if there's another one.
+ */
+ work = list_first_entry_or_null(&pwq->inactive_works,
+ struct work_struct, entry);
+ if (!work) {
+ list_del_init(&pwq->pending_node);
+ goto retry;
+ }
+
+ /*
+ * Acquire an nr_active count and activate the inactive work item. If
+ * $pwq still has inactive work items, rotate it to the end of the
+ * pending_pwqs so that we round-robin through them. This means that
+ * inactive work items are not activated in queueing order which is fine
+ * given that there has never been any ordering across different pwqs.
+ */
+ if (likely(tryinc_node_nr_active(nna))) {
+ pwq->nr_active++;
+ __pwq_activate_work(pwq, work);
+
+ if (list_empty(&pwq->inactive_works))
+ list_del_init(&pwq->pending_node);
+ else
+ list_move_tail(&pwq->pending_node, &nna->pending_pwqs);
+
+ /* if activating a foreign pool, make sure it's running */
+ if (pwq->pool != caller_pool)
+ kick_pool(pwq->pool);
+ }
+
+out_unlock:
+ raw_spin_unlock(&nna->lock);
+ if (locked_pool != caller_pool) {
+ raw_spin_unlock(&locked_pool->lock);
+ raw_spin_lock(&caller_pool->lock);
+ }
}
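
Because nna->lock nests inside pool locks, node_activate_pending_pwq() cannot simply take a foreign pool lock while still holding nna->lock: it tries a trylock first and, if that fails, drops the inner lock, takes the pool lock in the proper order, re-takes the inner lock and re-validates. The shape of that dance, sketched with pthreads (invented names, illustration only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pool_a = PTHREAD_MUTEX_INITIALIZER; /* outer-class lock */
    static pthread_mutex_t pool_b = PTHREAD_MUTEX_INITIALIZER; /* another outer lock */
    static pthread_mutex_t inner  = PTHREAD_MUTEX_INITIALIZER; /* nests inside pools */

    /* Called with @locked (a pool lock) and 'inner' held; switches to @wanted. */
    static pthread_mutex_t *switch_pool(pthread_mutex_t *locked,
                                        pthread_mutex_t *wanted)
    {
        if (locked == wanted)
            return locked;

        pthread_mutex_unlock(locked);
        if (pthread_mutex_trylock(wanted) == 0)
            return wanted;

        /*
         * Trylock failed: drop the inner lock too, acquire the pool lock in
         * the correct order, re-take the inner lock, and let the caller
         * re-validate whatever it read while locked.
         */
        pthread_mutex_unlock(&inner);
        pthread_mutex_lock(wanted);
        pthread_mutex_lock(&inner);
        return wanted;
    }

    int main(void)
    {
        pthread_mutex_t *locked = &pool_a;

        pthread_mutex_lock(locked);
        pthread_mutex_lock(&inner);

        locked = switch_pool(locked, &pool_b);  /* now holds pool_b + inner */
        printf("switched pools without violating lock order\n");

        pthread_mutex_unlock(&inner);
        pthread_mutex_unlock(locked);
        return 0;
    }
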
-static void pwq_activate_first_inactive(struct pool_workqueue *pwq)
+/**
+ * pwq_dec_nr_active - Retire an active count
+ * @pwq: pool_workqueue of interest
+ *
+ * Decrement @pwq's nr_active and try to activate the first inactive work item.
+ * For unbound workqueues, this function may temporarily drop @pwq->pool->lock.
+ */
+static void pwq_dec_nr_active(struct pool_workqueue *pwq)
{
- struct work_struct *work = list_first_entry(&pwq->inactive_works,
- struct work_struct, entry);
+ struct worker_pool *pool = pwq->pool;
+ struct wq_node_nr_active *nna = wq_node_nr_active(pwq->wq, pool->node);
- pwq_activate_inactive_work(work);
+ lockdep_assert_held(&pool->lock);
+
+ /*
+ * @pwq->nr_active should be decremented for both percpu and unbound
+ * workqueues.
+ */
+ pwq->nr_active--;
+
+ /*
+ * For a percpu workqueue, it's simple. Just need to kick the first
+ * inactive work item on @pwq itself.
+ */
+ if (!nna) {
+ pwq_activate_first_inactive(pwq, false);
+ return;
+ }
+
+ /*
+ * If @pwq is for an unbound workqueue, it's more complicated because
+ * multiple pwqs and pools may be sharing the nr_active count. When a
+ * pwq needs to wait for an nr_active count, it puts itself on
+ * $nna->pending_pwqs. The following atomic_dec_return()'s implied
+ * memory barrier is paired with smp_mb() in pwq_tryinc_nr_active() to
+ * guarantee that either we see non-empty pending_pwqs or they see
+ * decremented $nna->nr.
+ *
+ * $nna->max may change as CPUs come online/offline and @pwq->wq's
+ * max_active gets updated. However, it is guaranteed to be equal to or
+ * larger than @pwq->wq->min_active which is above zero unless freezing.
+ * This maintains the forward progress guarantee.
+ */
+ if (atomic_dec_return(&nna->nr) >= READ_ONCE(nna->max))
+ return;
+
+ if (!list_empty(&nna->pending_pwqs))
+ node_activate_pending_pwq(nna, pool);
}
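
The release side only looks at the pending queue once the counter drops below the limit; the barrier pairing described in the comment guarantees that a racing enqueuer and a releaser cannot both miss each other. The accounting (not the memory-ordering proof) can be modelled in a few lines of userspace C, with the pending_pwqs list collapsed into a plain counter and all names invented:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node_nr_active {
        atomic_int nr;      /* currently active */
        int max;            /* per-node limit */
        int pending;        /* stand-in for the pending_pwqs list */
    };

    static bool try_start(struct node_nr_active *nna)
    {
        int old = atomic_load(&nna->nr);

        while (old < nna->max)
            if (atomic_compare_exchange_weak(&nna->nr, &old, old + 1))
                return true;
        nna->pending++;      /* would queue the pwq on pending_pwqs */
        return false;
    }

    static void finish(struct node_nr_active *nna)
    {
        /* Release our slot; only look at the pending queue when below max. */
        if (atomic_fetch_sub(&nna->nr, 1) - 1 >= nna->max)
            return;
        if (nna->pending) {
            nna->pending--;
            atomic_fetch_add(&nna->nr, 1);   /* activate one deferred item */
            printf("activated a pending item\n");
        }
    }

    int main(void)
    {
        struct node_nr_active nna = { .nr = 0, .max = 1, .pending = 0 };

        try_start(&nna);     /* runs immediately */
        try_start(&nna);     /* over the limit: goes pending */
        finish(&nna);        /* retiring the first item activates the second */
        finish(&nna);
        printf("nr=%d pending=%d\n", atomic_load(&nna.nr), nna.pending);
        return 0;
    }

In the kernel the pending list is protected by nna->lock and the hand-off goes through node_activate_pending_pwq(); the toy above only shows why the check happens after the decrement and only below the limit.
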
/**
@@ -1482,14 +1878,8 @@ static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, unsigned long work_
{
int color = get_work_color(work_data);
- if (!(work_data & WORK_STRUCT_INACTIVE)) {
- pwq->nr_active--;
- if (!list_empty(&pwq->inactive_works)) {
- /* one down, submit an inactive one */
- if (pwq->nr_active < pwq->max_active)
- pwq_activate_first_inactive(pwq);
- }
- }
+ if (!(work_data & WORK_STRUCT_INACTIVE))
+ pwq_dec_nr_active(pwq);
pwq->nr_in_flight[color]--;
@@ -1602,8 +1992,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
* management later on and cause stall. Make sure the work
* item is activated before grabbing.
*/
- if (*work_data_bits(work) & WORK_STRUCT_INACTIVE)
- pwq_activate_inactive_work(work);
+ pwq_activate_work(pwq, work);
list_del_init(&work->entry);
pwq_dec_nr_in_flight(pwq, *work_data_bits(work));
@@ -1787,12 +2176,16 @@ retry:
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
- if (likely(pwq->nr_active < pwq->max_active)) {
+ /*
+ * Limit the number of concurrently active work items to max_active.
+ * @work must also queue behind existing inactive work items to maintain
+ * ordering when max_active changes. See wq_adjust_max_active().
+ */
+ if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
if (list_empty(&pool->worklist))
pool->watchdog_ts = jiffies;
trace_workqueue_activate_work(work);
- pwq->nr_active++;
insert_work(pwq, work, &pool->worklist, work_flags);
kick_pool(pool);
} else {
@@ -3021,7 +3414,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
barr->task = current;
- /* The barrier work item does not participate in pwq->nr_active. */
+ /* The barrier work item does not participate in nr_active. */
work_flags |= WORK_STRUCT_INACTIVE;
/*
@@ -3310,7 +3703,7 @@ reflush:
bool drained;
raw_spin_lock_irq(&pwq->pool->lock);
- drained = !pwq->nr_active && list_empty(&pwq->inactive_works);
+ drained = pwq_is_empty(pwq);
raw_spin_unlock_irq(&pwq->pool->lock);
if (drained)
@@ -3921,11 +4314,65 @@ static void wq_free_lockdep(struct workqueue_struct *wq)
}
#endif
+static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
+{
+ int node;
+
+ for_each_node(node) {
+ kfree(nna_ar[node]);
+ nna_ar[node] = NULL;
+ }
+
+ kfree(nna_ar[nr_node_ids]);
+ nna_ar[nr_node_ids] = NULL;
+}
+
+static void init_node_nr_active(struct wq_node_nr_active *nna)
+{
+ atomic_set(&nna->nr, 0);
+ raw_spin_lock_init(&nna->lock);
+ INIT_LIST_HEAD(&nna->pending_pwqs);
+}
+
+/*
+ * Each node's nr_active counter will be accessed mostly from its own node and
+ * should be allocated in the node.
+ */
+static int alloc_node_nr_active(struct wq_node_nr_active **nna_ar)
+{
+ struct wq_node_nr_active *nna;
+ int node;
+
+ for_each_node(node) {
+ nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, node);
+ if (!nna)
+ goto err_free;
+ init_node_nr_active(nna);
+ nna_ar[node] = nna;
+ }
+
+ /* [nr_node_ids] is used as the fallback */
+ nna = kzalloc_node(sizeof(*nna), GFP_KERNEL, NUMA_NO_NODE);
+ if (!nna)
+ goto err_free;
+ init_node_nr_active(nna);
+ nna_ar[nr_node_ids] = nna;
+
+ return 0;
+
+err_free:
+ free_node_nr_active(nna_ar);
+ return -ENOMEM;
+}
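
alloc_node_nr_active() sizes the array as nr_node_ids + 1 and keeps the extra slot as the NUMA_NO_NODE fallback, which is exactly what wq_node_nr_active() indexes when it is handed NUMA_NO_NODE. A userspace sketch of the same layout (invented names, illustration only):

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_NODES  2
    #define NO_NODE  (-1)

    struct counter { int nr; };

    /* nodes 0..NR_NODES-1 plus one fallback slot at [NR_NODES] */
    static struct counter *slots[NR_NODES + 1];

    static int alloc_slots(void)
    {
        for (int i = 0; i <= NR_NODES; i++) {
            slots[i] = calloc(1, sizeof(*slots[i]));
            if (!slots[i])
                return -1;
        }
        return 0;
    }

    static struct counter *lookup(int node)
    {
        return slots[node == NO_NODE ? NR_NODES : node];
    }

    int main(void)
    {
        if (alloc_slots())
            return 1;
        lookup(0)->nr++;            /* node-local counter */
        lookup(NO_NODE)->nr++;      /* fallback counter */
        printf("node0=%d fallback=%d\n", slots[0]->nr, slots[NR_NODES]->nr);
        for (int i = 0; i <= NR_NODES; i++)
            free(slots[i]);
        return 0;
    }
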
+
static void rcu_free_wq(struct rcu_head *rcu)
{
struct workqueue_struct *wq =
container_of(rcu, struct workqueue_struct, rcu);
+ if (wq->flags & WQ_UNBOUND)
+ free_node_nr_active(wq->node_nr_active);
+
wq_free_lockdep(wq);
free_percpu(wq->cpu_pwq);
free_workqueue_attrs(wq->unbound_attrs);
@@ -4124,6 +4571,15 @@ static void pwq_release_workfn(struct kthread_work *work)
mutex_unlock(&wq_pool_mutex);
}
+ if (!list_empty(&pwq->pending_node)) {
+ struct wq_node_nr_active *nna =
+ wq_node_nr_active(pwq->wq, pwq->pool->node);
+
+ raw_spin_lock_irq(&nna->lock);
+ list_del_init(&pwq->pending_node);
+ raw_spin_unlock_irq(&nna->lock);
+ }
+
call_rcu(&pwq->rcu, rcu_free_pwq);
/*
@@ -4136,50 +4592,6 @@ static void pwq_release_workfn(struct kthread_work *work)
}
}
-/**
- * pwq_adjust_max_active - update a pwq's max_active to the current setting
- * @pwq: target pool_workqueue
- *
- * If @pwq isn't freezing, set @pwq->max_active to the associated
- * workqueue's saved_max_active and activate inactive work items
- * accordingly. If @pwq is freezing, clear @pwq->max_active to zero.
- */
-static void pwq_adjust_max_active(struct pool_workqueue *pwq)
-{
- struct workqueue_struct *wq = pwq->wq;
- bool freezable = wq->flags & WQ_FREEZABLE;
- unsigned long flags;
-
- /* for @wq->saved_max_active */
- lockdep_assert_held(&wq->mutex);
-
- /* fast exit for non-freezable wqs */
- if (!freezable && pwq->max_active == wq->saved_max_active)
- return;
-
- /* this function can be called during early boot w/ irq disabled */
- raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-
- /*
- * During [un]freezing, the caller is responsible for ensuring that
- * this function is called at least once after @workqueue_freezing
- * is updated and visible.
- */
- if (!freezable || !workqueue_freezing) {
- pwq->max_active = wq->saved_max_active;
-
- while (!list_empty(&pwq->inactive_works) &&
- pwq->nr_active < pwq->max_active)
- pwq_activate_first_inactive(pwq);
-
- kick_pool(pwq->pool);
- } else {
- pwq->max_active = 0;
- }
-
- raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
-}
-
/* initialize newly allocated @pwq which is associated with @wq and @pool */
static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
struct worker_pool *pool)
@@ -4193,6 +4605,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
pwq->flush_color = -1;
pwq->refcnt = 1;
INIT_LIST_HEAD(&pwq->inactive_works);
+ INIT_LIST_HEAD(&pwq->pending_node);
INIT_LIST_HEAD(&pwq->pwqs_node);
INIT_LIST_HEAD(&pwq->mayday_node);
kthread_init_work(&pwq->release_work, pwq_release_workfn);
@@ -4212,9 +4625,6 @@ static void link_pwq(struct pool_workqueue *pwq)
/* set the matching work_color */
pwq->work_color = wq->work_color;
- /* sync max_active to the current setting */
- pwq_adjust_max_active(pwq);
-
/* link in @pwq */
list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
}
@@ -4283,10 +4693,11 @@ static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
"possible intersect\n");
}
-/* install @pwq into @wq's cpu_pwq and return the old pwq */
+/* install @pwq into @wq and return the old pwq, @cpu < 0 for dfl_pwq */
static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
int cpu, struct pool_workqueue *pwq)
{
+ struct pool_workqueue __rcu **slot = unbound_pwq_slot(wq, cpu);
struct pool_workqueue *old_pwq;
lockdep_assert_held(&wq_pool_mutex);
@@ -4295,8 +4706,8 @@ static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
/* link_pwq() can handle duplicate calls */
link_pwq(pwq);
- old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
- rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
+ old_pwq = rcu_access_pointer(*slot);
+ rcu_assign_pointer(*slot, pwq);
return old_pwq;
}
@@ -4396,14 +4807,14 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
- /* save the previous pwq and install the new one */
+ /* save the previous pwqs and install the new ones */
for_each_possible_cpu(cpu)
ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
ctx->pwq_tbl[cpu]);
+ ctx->dfl_pwq = install_unbound_pwq(ctx->wq, -1, ctx->dfl_pwq);
- /* @dfl_pwq might not have been used, ensure it's linked */
- link_pwq(ctx->dfl_pwq);
- swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
+ /* update node_nr_active->max */
+ wq_update_node_max_active(ctx->wq, -1);
mutex_unlock(&ctx->wq->mutex);
}
@@ -4526,9 +4937,7 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
/* nothing to do if the target cpumask matches the current pwq */
wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
- pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
- lockdep_is_held(&wq_pool_mutex));
- if (wqattrs_equal(target_attrs, pwq->pool->attrs))
+ if (wqattrs_equal(target_attrs, unbound_pwq(wq, cpu)->pool->attrs))
return;
/* create a new pwq */
@@ -4546,10 +4955,11 @@ static void wq_update_pod(struct workqueue_struct *wq, int cpu,
use_dfl_pwq:
mutex_lock(&wq->mutex);
- raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
- get_pwq(wq->dfl_pwq);
- raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
- old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
+ pwq = unbound_pwq(wq, -1);
+ raw_spin_lock_irq(&pwq->pool->lock);
+ get_pwq(pwq);
+ raw_spin_unlock_irq(&pwq->pool->lock);
+ old_pwq = install_unbound_pwq(wq, cpu, pwq);
out_unlock:
mutex_unlock(&wq->mutex);
put_pwq_unlocked(old_pwq);
@@ -4587,10 +4997,13 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
cpus_read_lock();
if (wq->flags & __WQ_ORDERED) {
+ struct pool_workqueue *dfl_pwq;
+
ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
/* there should only be single pwq for ordering guarantee */
- WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
- wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
+ dfl_pwq = rcu_access_pointer(wq->dfl_pwq);
+ WARN(!ret && (wq->pwqs.next != &dfl_pwq->pwqs_node ||
+ wq->pwqs.prev != &dfl_pwq->pwqs_node),
"ordering guarantee broken for workqueue %s\n", wq->name);
} else {
ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
@@ -4665,6 +5078,69 @@ static int init_rescuer(struct workqueue_struct *wq)
return 0;
}
+/**
+ * wq_adjust_max_active - update a wq's max_active to the current setting
+ * @wq: target workqueue
+ *
+ * If @wq isn't freezing, set @wq->max_active to the saved_max_active and
+ * activate inactive work items accordingly. If @wq is freezing, clear
+ * @wq->max_active to zero.
+ */
+static void wq_adjust_max_active(struct workqueue_struct *wq)
+{
+ bool activated;
+ int new_max, new_min;
+
+ lockdep_assert_held(&wq->mutex);
+
+ if ((wq->flags & WQ_FREEZABLE) && workqueue_freezing) {
+ new_max = 0;
+ new_min = 0;
+ } else {
+ new_max = wq->saved_max_active;
+ new_min = wq->saved_min_active;
+ }
+
+ if (wq->max_active == new_max && wq->min_active == new_min)
+ return;
+
+ /*
+ * Update @wq->max/min_active and then kick inactive work items if more
+ * active work items are allowed. This doesn't break work item ordering
+ * because new work items are always queued behind existing inactive
+ * work items if there are any.
+ */
+ WRITE_ONCE(wq->max_active, new_max);
+ WRITE_ONCE(wq->min_active, new_min);
+
+ if (wq->flags & WQ_UNBOUND)
+ wq_update_node_max_active(wq, -1);
+
+ if (new_max == 0)
+ return;
+
+ /*
+ * Round-robin through pwq's activating the first inactive work item
+ * until max_active is filled.
+ */
+ do {
+ struct pool_workqueue *pwq;
+
+ activated = false;
+ for_each_pwq(pwq, wq) {
+ unsigned long flags;
+
+ /* can be called during early boot w/ irq disabled */
+ raw_spin_lock_irqsave(&pwq->pool->lock, flags);
+ if (pwq_activate_first_inactive(pwq, true)) {
+ activated = true;
+ kick_pool(pwq->pool);
+ }
+ raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
+ }
+ } while (activated);
+}
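
The activation loop above makes repeated passes over the pwqs, activating at most one inactive item per pwq per pass, and stops after a pass that activates nothing; this spreads the newly allowed activations across pwqs instead of draining one pwq first. A toy model of that loop shape (the explicit budget below stands in for the max_active check and is an invented simplification):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
        int inactive[3] = { 3, 1, 2 };   /* queued-but-inactive items per pwq */
        int budget = 5;                  /* newly allowed activations */
        bool activated;

        do {
            activated = false;
            for (int q = 0; q < 3 && budget > 0; q++) {
                if (inactive[q] > 0) {
                    inactive[q]--;
                    budget--;
                    activated = true;
                    printf("activated one item from queue %d\n", q);
                }
            }
        } while (activated && budget > 0);

        return 0;
    }
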
+
__printf(1, 4)
struct workqueue_struct *alloc_workqueue(const char *fmt,
unsigned int flags,
@@ -4672,7 +5148,8 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
{
va_list args;
struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
+ size_t wq_size;
+ int name_len;
/*
* Unbound && max_active == 1 used to imply ordered, which is no longer
@@ -4688,7 +5165,12 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
flags |= WQ_UNBOUND;
/* allocate wq and format name */
- wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+ if (flags & WQ_UNBOUND)
+ wq_size = struct_size(wq, node_nr_active, nr_node_ids + 1);
+ else
+ wq_size = sizeof(*wq);
+
+ wq = kzalloc(wq_size, GFP_KERNEL);
if (!wq)
return NULL;
@@ -4699,15 +5181,22 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
}
va_start(args, max_active);
- vsnprintf(wq->name, sizeof(wq->name), fmt, args);
+ name_len = vsnprintf(wq->name, sizeof(wq->name), fmt, args);
va_end(args);
+ if (name_len >= WQ_NAME_LEN)
+ pr_warn_once("workqueue: name exceeds WQ_NAME_LEN. Truncating to: %s\n",
+ wq->name);
+
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, wq->name);
/* init wq */
wq->flags = flags;
- wq->saved_max_active = max_active;
+ wq->max_active = max_active;
+ wq->min_active = min(max_active, WQ_DFL_MIN_ACTIVE);
+ wq->saved_max_active = wq->max_active;
+ wq->saved_min_active = wq->min_active;
mutex_init(&wq->mutex);
atomic_set(&wq->nr_pwqs_to_flush, 0);
INIT_LIST_HEAD(&wq->pwqs);
@@ -4718,8 +5207,13 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
wq_init_lockdep(wq);
INIT_LIST_HEAD(&wq->list);
+ if (flags & WQ_UNBOUND) {
+ if (alloc_node_nr_active(wq->node_nr_active) < 0)
+ goto err_unreg_lockdep;
+ }
+
if (alloc_and_link_pwqs(wq) < 0)
- goto err_unreg_lockdep;
+ goto err_free_node_nr_active;
if (wq_online && init_rescuer(wq) < 0)
goto err_destroy;
@@ -4735,8 +5229,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
mutex_lock(&wq_pool_mutex);
mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
+ wq_adjust_max_active(wq);
mutex_unlock(&wq->mutex);
list_add_tail_rcu(&wq->list, &workqueues);
@@ -4745,6 +5238,9 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
return wq;
+err_free_node_nr_active:
+ if (wq->flags & WQ_UNBOUND)
+ free_node_nr_active(wq->node_nr_active);
err_unreg_lockdep:
wq_unregister_lockdep(wq);
wq_free_lockdep(wq);
@@ -4766,9 +5262,9 @@ static bool pwq_busy(struct pool_workqueue *pwq)
if (pwq->nr_in_flight[i])
return true;
- if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
+ if ((pwq != rcu_access_pointer(pwq->wq->dfl_pwq)) && (pwq->refcnt > 1))
return true;
- if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+ if (!pwq_is_empty(pwq))
return true;
return false;
@@ -4850,13 +5346,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
rcu_read_lock();
for_each_possible_cpu(cpu) {
- pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
- RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
- put_pwq_unlocked(pwq);
+ put_pwq_unlocked(unbound_pwq(wq, cpu));
+ RCU_INIT_POINTER(*unbound_pwq_slot(wq, cpu), NULL);
}
- put_pwq_unlocked(wq->dfl_pwq);
- wq->dfl_pwq = NULL;
+ put_pwq_unlocked(unbound_pwq(wq, -1));
+ RCU_INIT_POINTER(*unbound_pwq_slot(wq, -1), NULL);
rcu_read_unlock();
}
@@ -4867,15 +5362,14 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
* @wq: target workqueue
* @max_active: new max_active value.
*
- * Set max_active of @wq to @max_active.
+ * Set max_active of @wq to @max_active. See the alloc_workqueue() function
+ * comment.
*
* CONTEXT:
* Don't call from IRQ context.
*/
void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
{
- struct pool_workqueue *pwq;
-
/* disallow meddling with max_active for ordered workqueues */
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;
@@ -4886,9 +5380,10 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;
+ if (wq->flags & WQ_UNBOUND)
+ wq->saved_min_active = min(wq->saved_min_active, max_active);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
+ wq_adjust_max_active(wq);
mutex_unlock(&wq->mutex);
}
@@ -5135,8 +5630,8 @@ static void show_pwq(struct pool_workqueue *pwq)
pr_info(" pwq %d:", pool->id);
pr_cont_pool_info(pool);
- pr_cont(" active=%d/%d refcnt=%d%s\n",
- pwq->nr_active, pwq->max_active, pwq->refcnt,
+ pr_cont(" active=%d refcnt=%d%s\n",
+ pwq->nr_active, pwq->refcnt,
!list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
@@ -5210,7 +5705,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
unsigned long flags;
for_each_pwq(pwq, wq) {
- if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+ if (!pwq_is_empty(pwq)) {
idle = false;
break;
}
@@ -5222,7 +5717,7 @@ void show_one_workqueue(struct workqueue_struct *wq)
for_each_pwq(pwq, wq) {
raw_spin_lock_irqsave(&pwq->pool->lock, flags);
- if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+ if (!pwq_is_empty(pwq)) {
/*
* Defer printing to avoid deadlocks in console
* drivers that queue work while holding locks
@@ -5569,6 +6064,10 @@ int workqueue_online_cpu(unsigned int cpu)
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
wq_update_pod(wq, tcpu, cpu, true);
+
+ mutex_lock(&wq->mutex);
+ wq_update_node_max_active(wq, -1);
+ mutex_unlock(&wq->mutex);
}
}
@@ -5597,6 +6096,10 @@ int workqueue_offline_cpu(unsigned int cpu)
for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
wq_update_pod(wq, tcpu, cpu, false);
+
+ mutex_lock(&wq->mutex);
+ wq_update_node_max_active(wq, cpu);
+ mutex_unlock(&wq->mutex);
}
}
mutex_unlock(&wq_pool_mutex);
@@ -5684,7 +6187,6 @@ EXPORT_SYMBOL_GPL(work_on_cpu_safe_key);
void freeze_workqueues_begin(void)
{
struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
@@ -5693,8 +6195,7 @@ void freeze_workqueues_begin(void)
list_for_each_entry(wq, &workqueues, list) {
mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
+ wq_adjust_max_active(wq);
mutex_unlock(&wq->mutex);
}
@@ -5759,7 +6260,6 @@ out_unlock:
void thaw_workqueues(void)
{
struct workqueue_struct *wq;
- struct pool_workqueue *pwq;
mutex_lock(&wq_pool_mutex);
@@ -5771,8 +6271,7 @@ void thaw_workqueues(void)
/* restore max_active and repopulate worklist */
list_for_each_entry(wq, &workqueues, list) {
mutex_lock(&wq->mutex);
- for_each_pwq(pwq, wq)
- pwq_adjust_max_active(pwq);
+ wq_adjust_max_active(wq);
mutex_unlock(&wq->mutex);
}
@@ -6610,7 +7109,7 @@ void __init workqueue_init_early(void)
WQ_FREEZABLE, 0);
system_power_efficient_wq = alloc_workqueue("events_power_efficient",
WQ_POWER_EFFICIENT, 0);
- system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
+ system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_pwr_efficient",
WQ_FREEZABLE | WQ_POWER_EFFICIENT,
0);
BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
@@ -6797,8 +7296,12 @@ void __init workqueue_init_topology(void)
* combinations to apply per-pod sharing.
*/
list_for_each_entry(wq, &workqueues, list) {
- for_each_online_cpu(cpu) {
+ for_each_online_cpu(cpu)
wq_update_pod(wq, cpu, cpu, true);
+ if (wq->flags & WQ_UNBOUND) {
+ mutex_lock(&wq->mutex);
+ wq_update_node_max_active(wq, -1);
+ mutex_unlock(&wq->mutex);
}
}
diff --git a/lib/cmdline_kunit.c b/lib/cmdline_kunit.c
index d4572dbc91..705b82736b 100644
--- a/lib/cmdline_kunit.c
+++ b/lib/cmdline_kunit.c
@@ -124,7 +124,7 @@ static void cmdline_do_one_range_test(struct kunit *test, const char *in,
n, e[0], r[0]);
p = memchr_inv(&r[1], 0, sizeof(r) - sizeof(r[0]));
- KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %u out of bound", n, p - r);
+ KUNIT_EXPECT_PTR_EQ_MSG(test, p, NULL, "in test %u at %td out of bound", n, p - r);
}
static void cmdline_test_range(struct kunit *test)
diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c
index 22d4ee86db..3f7f967e36 100644
--- a/lib/kunit/executor_test.c
+++ b/lib/kunit/executor_test.c
@@ -129,7 +129,7 @@ static void parse_filter_attr_test(struct kunit *test)
GFP_KERNEL);
for (j = 0; j < filter_count; j++) {
parsed_filters[j] = kunit_next_attr_filter(&filter, &err);
- KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter '%s'", filters[j]);
+ KUNIT_ASSERT_EQ_MSG(test, err, 0, "failed to parse filter from '%s'", filters);
}
KUNIT_EXPECT_STREQ(test, kunit_attr_filter_name(parsed_filters[0]), "speed");
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 440aee705c..30e00ef0bf 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -32,7 +32,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(instance.data) != 32); \
for (size_t i = 0; i < sizeof(instance.data); i++) { \
KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \
- "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \
+ "line %d: '%s' not initialized to 0x%02x @ %zu (saw 0x%02x)\n", \
__LINE__, #instance, v, i, instance.data[i]); \
} \
} while (0)
@@ -41,7 +41,7 @@ struct some_bytes {
BUILD_BUG_ON(sizeof(one) != sizeof(two)); \
for (size_t i = 0; i < sizeof(one); i++) { \
KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \
- "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \
+ "line %d: %s.data[%zu] (0x%02x) != %s.data[%zu] (0x%02x)\n", \
__LINE__, #one, i, one.data[i], #two, i, two.data[i]); \
} \
kunit_info(test, "ok: " TEST_OP "() " name "\n"); \
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index ce39ce9f35..2829ddb0e3 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -170,8 +170,8 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *p)
if (addr >= start && addr < start + IO_SPACE_LIMIT)
return;
- iounmap(p);
#endif
+ iounmap(p);
}
EXPORT_SYMBOL(pci_iounmap);
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
index 4c40580a99..f247089d63 100644
--- a/lib/test_blackhole_dev.c
+++ b/lib/test_blackhole_dev.c
@@ -29,7 +29,6 @@ static int __init test_blackholedev_init(void)
{
struct ipv6hdr *ip6h;
struct sk_buff *skb;
- struct ethhdr *ethh;
struct udphdr *uh;
int data_len;
int ret;
@@ -61,7 +60,7 @@ static int __init test_blackholedev_init(void)
ip6h->saddr = in6addr_loopback;
ip6h->daddr = in6addr_loopback;
/* Ether */
- ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
+ skb_push(skb, sizeof(struct ethhdr));
skb_set_mac_header(skb, 0);
skb->protocol = htons(ETH_P_IPV6);
diff --git a/mm/compaction.c b/mm/compaction.c
index 01ba298739..f31d18741a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2701,16 +2701,11 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
unsigned int alloc_flags, const struct alloc_context *ac,
enum compact_priority prio, struct page **capture)
{
- int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
struct zoneref *z;
struct zone *zone;
enum compact_result rc = COMPACT_SKIPPED;
- /*
- * Check if the GFP flags allow compaction - GFP_NOIO is really
- * tricky context because the migration might require IO
- */
- if (!may_perform_io)
+ if (!gfp_compaction_allowed(gfp_mask))
return COMPACT_SKIPPED;
trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
diff --git a/mm/filemap.c b/mm/filemap.c
index 5be7887957..321349e2c9 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4150,7 +4150,23 @@ static void filemap_cachestat(struct address_space *mapping,
/* shmem file - in swap cache */
swp_entry_t swp = radix_to_swp_entry(folio);
+ /* swapin error results in poisoned entry */
+ if (non_swap_entry(swp))
+ goto resched;
+
+ /*
+ * Getting a swap entry from the shmem
+ * inode means we beat
+ * shmem_unuse(). rcu_read_lock()
+ * ensures swapoff waits for us before
+ * freeing the swapper space. However,
+ * we can race with swapping and
+ * invalidation, so there might not be
+ * a shadow in the swapcache (yet).
+ */
shadow = get_shadow_from_swap_cache(swp);
+ if (!shadow)
+ goto resched;
}
#endif
if (workingset_test_recent(shadow, true, &workingset))
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index 34515a106c..23906e886b 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -451,7 +451,8 @@ static void kmalloc_oob_16(struct kunit *test)
/* This test is specifically crafted for the generic mode. */
KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
- ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
+ /* RELOC_HIDE to prevent gcc from warning about short alloc */
+ ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 792fb3a5ce..27c9f451d4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4379,7 +4379,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* i = current_threshold + 1 */
i++;
@@ -4391,7 +4391,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
* only one element of the array here.
*/
for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
- eventfd_signal(t->entries[i].eventfd, 1);
+ eventfd_signal(t->entries[i].eventfd);
/* Update current_threshold */
t->current_threshold = i - 1;
@@ -4431,7 +4431,7 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
spin_lock(&memcg_oom_lock);
list_for_each_entry(ev, &memcg->oom_notify, list)
- eventfd_signal(ev->eventfd, 1);
+ eventfd_signal(ev->eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
@@ -4650,7 +4650,7 @@ static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
/* already in OOM ? */
if (memcg->under_oom)
- eventfd_signal(eventfd, 1);
+ eventfd_signal(eventfd);
spin_unlock(&memcg_oom_lock);
return 0;
@@ -4942,7 +4942,7 @@ static void memcg_event_remove(struct work_struct *work)
event->unregister_event(memcg, event->eventfd);
/* Notify userspace the event is going away. */
- eventfd_signal(event->eventfd, 1);
+ eventfd_signal(event->eventfd);
eventfd_ctx_put(event->eventfd);
kfree(event);
diff --git a/mm/memtest.c b/mm/memtest.c
index 32f3e9dda8..c2c609c391 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -51,10 +51,10 @@ static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size
last_bad = 0;
for (p = start; p < end; p++)
- *p = pattern;
+ WRITE_ONCE(*p, pattern);
for (p = start; p < end; p++, start_phys_aligned += incr) {
- if (*p == pattern)
+ if (READ_ONCE(*p) == pattern)
continue;
if (start_phys_aligned == last_bad + incr) {
last_bad += incr;
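
The memtest hunk switches the pattern store and the read-back to WRITE_ONCE()/READ_ONCE() so the compiler cannot cache, merge or elide the accesses and fold the comparison away. In userspace the same effect is commonly approximated with volatile accesses; the macros below mirror the spirit of the kernel helpers, not their exact definitions, and use the GCC/Clang __typeof__ extension:

    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        static uint64_t buf[4];
        const uint64_t pattern = 0xaaaaaaaaaaaaaaaaULL;
        size_t bad = 0;

        for (size_t i = 0; i < 4; i++)
            WRITE_ONCE(buf[i], pattern);        /* forced store */
        for (size_t i = 0; i < 4; i++)
            if (READ_ONCE(buf[i]) != pattern)   /* forced re-read from memory */
                bad++;

        printf("%zu bad words\n", bad);
        return 0;
    }
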
diff --git a/mm/mmap.c b/mm/mmap.c
index b9a43872ac..23072589fe 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -954,13 +954,21 @@ static struct vm_area_struct
} else if (merge_prev) { /* case 2 */
if (curr) {
vma_start_write(curr);
- err = dup_anon_vma(prev, curr, &anon_dup);
if (end == curr->vm_end) { /* case 7 */
+ /*
+ * can_vma_merge_after() assumed we would not be
+ * removing prev vma, so it skipped the check
+ * for vm_ops->close, but we are removing curr
+ */
+ if (curr->vm_ops && curr->vm_ops->close)
+ err = -EINVAL;
remove = curr;
} else { /* case 5 */
adjust = curr;
adj_start = (end - curr->vm_start);
}
+ if (!err)
+ err = dup_anon_vma(prev, curr, &anon_dup);
}
} else { /* merge_next */
vma_start_write(next);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d2a74138f..b1d9dcdabd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4042,6 +4042,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct alloc_context *ac)
{
bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+ bool can_compact = gfp_compaction_allowed(gfp_mask);
const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
struct page *page = NULL;
unsigned int alloc_flags;
@@ -4112,7 +4113,7 @@ restart:
* Don't try this for allocations that are allowed to ignore
* watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
*/
- if (can_direct_reclaim &&
+ if (can_direct_reclaim && can_compact &&
(costly_order ||
(order > 0 && ac->migratetype != MIGRATE_MOVABLE))
&& !gfp_pfmemalloc_allowed(gfp_mask)) {
@@ -4210,9 +4211,10 @@ retry:
/*
* Do not retry costly high order allocations unless they are
- * __GFP_RETRY_MAYFAIL
+ * __GFP_RETRY_MAYFAIL and we can compact
*/
- if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
+ if (costly_order && (!can_compact ||
+ !(gfp_mask & __GFP_RETRY_MAYFAIL)))
goto nopage;
if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
@@ -4225,7 +4227,7 @@ retry:
* implementation of the compaction depends on the sufficient amount
* of free memory (see __compaction_suitable)
*/
- if (did_some_progress > 0 &&
+ if (did_some_progress > 0 && can_compact &&
should_compact_retry(ac, order, alloc_flags,
compact_result, &compact_priority,
&compaction_retries))
diff --git a/mm/readahead.c b/mm/readahead.c
index 6925e6959f..1d1a84deb5 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
if (!folio)
return -ENOMEM;
- mark = round_up(mark, 1UL << order);
+ mark = round_down(mark, 1UL << order);
if (index == mark)
folio_set_readahead(folio);
err = filemap_add_folio(ractl->mapping, folio, index, gfp);
@@ -577,7 +577,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
* It's the expected callback index, assume sequential access.
* Ramp up sizes, and push forward the readahead window.
*/
- expected = round_up(ra->start + ra->size - ra->async_size,
+ expected = round_down(ra->start + ra->size - ra->async_size,
1UL << order);
if (index == expected || index == (ra->start + ra->size)) {
ra->start += ra->size;
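
Both readahead hunks align an index down to the folio that contains it rather than up to the next folio: with order-2 (4-page) folios, an index of 5 lives in the folio whose head index is 4, so comparing against round_up(5, 4) = 8 can never match while round_down(5, 4) = 4 does. A couple of lines to check the arithmetic (the kernel macros use power-of-two bit tricks; the plain-division versions below are only an illustration):

    #include <stdio.h>

    #define round_up(x, y)    ((((x) + (y) - 1) / (y)) * (y))
    #define round_down(x, y)  (((x) / (y)) * (y))

    int main(void)
    {
        unsigned long mark = 5, folio_pages = 1UL << 2;   /* order-2 folio */

        printf("round_up:   %lu\n", round_up(mark, folio_pages));   /* 8 */
        printf("round_down: %lu\n", round_down(mark, folio_pages)); /* 4 */
        return 0;
    }
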
diff --git a/mm/shmem.c b/mm/shmem.c
index 0d1ce70bce..fb2daf89e2 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -310,7 +310,7 @@ static void shmem_disable_quotas(struct super_block *sb)
dquot_quota_off(sb, type);
}
-static struct dquot **shmem_get_dquots(struct inode *inode)
+static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
return SHMEM_I(inode)->i_dquot;
}
diff --git a/mm/shmem_quota.c b/mm/shmem_quota.c
index 062d1c1097..ce514e700d 100644
--- a/mm/shmem_quota.c
+++ b/mm/shmem_quota.c
@@ -116,7 +116,7 @@ static int shmem_free_file_info(struct super_block *sb, int type)
static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
{
struct mem_dqinfo *info = sb_dqinfo(sb, qid->type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, *qid);
struct quota_info *dqopt = sb_dqopt(sb);
struct quota_id *entry = NULL;
@@ -126,6 +126,7 @@ static int shmem_get_next_id(struct super_block *sb, struct kqid *qid)
return -ESRCH;
down_read(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
@@ -165,7 +166,7 @@ out_unlock:
static int shmem_acquire_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node **n = &((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node **n;
struct shmem_sb_info *sbinfo = dquot->dq_sb->s_fs_info;
struct rb_node *parent = NULL, *new_node = NULL;
struct quota_id *new_entry, *entry;
@@ -176,6 +177,8 @@ static int shmem_acquire_dquot(struct dquot *dquot)
mutex_lock(&dquot->dq_lock);
down_write(&dqopt->dqio_sem);
+ n = &((struct rb_root *)info->dqi_priv)->rb_node;
+
while (*n) {
parent = *n;
entry = rb_entry(parent, struct quota_id, node);
@@ -264,7 +267,7 @@ static bool shmem_is_empty_dquot(struct dquot *dquot)
static int shmem_release_dquot(struct dquot *dquot)
{
struct mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_id.type);
- struct rb_node *node = ((struct rb_root *)info->dqi_priv)->rb_node;
+ struct rb_node *node;
qid_t id = from_kqid(&init_user_ns, dquot->dq_id);
struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
struct quota_id *entry = NULL;
@@ -275,6 +278,7 @@ static int shmem_release_dquot(struct dquot *dquot)
goto out_dqlock;
down_write(&dqopt->dqio_sem);
+ node = ((struct rb_root *)info->dqi_priv)->rb_node;
while (node) {
entry = rb_entry(node, struct quota_id, node);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 022581ec40..91397a2539 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1226,6 +1226,11 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
* with get_swap_device() and put_swap_device(), unless the swap
* functions call get/put_swap_device() by themselves.
*
+ * Note that when only holding the PTL, swapoff might succeed immediately
+ * after freeing a swap entry. Therefore, immediately after
+ * __swap_entry_free(), the swap info might become stale and should not
+ * be touched without a prior get_swap_device().
+ *
* Check whether swap entry is valid in the swap device. If so,
* return pointer to swap_info_struct, and keep the swap entry valid
* via preventing the swap device from being swapoff, until
@@ -1603,13 +1608,19 @@ int free_swap_and_cache(swp_entry_t entry)
if (non_swap_entry(entry))
return 1;
- p = _swap_info_get(entry);
+ p = get_swap_device(entry);
if (p) {
+ if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) {
+ put_swap_device(p);
+ return 0;
+ }
+
count = __swap_entry_free(p, entry);
if (count == SWAP_HAS_CACHE &&
!swap_page_trans_huge_swapped(p, entry))
__try_to_reclaim_swap(p, swp_offset(entry),
TTRS_UNMAPPED | TTRS_FULL);
+ put_swap_device(p);
}
return p != NULL;
}
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 22c6689d93..bd5183dfd8 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -169,7 +169,7 @@ static bool vmpressure_event(struct vmpressure *vmpr,
continue;
if (level < ev->level)
continue;
- eventfd_signal(ev->efd, 1);
+ eventfd_signal(ev->efd);
ret = true;
}
mutex_unlock(&vmpr->events_lock);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bba207f41b..ebaf79d683 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -5733,7 +5733,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
/* Use reclaim/compaction for costly allocs or under memory pressure */
static bool in_reclaim_compaction(struct scan_control *sc)
{
- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+ if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
sc->priority < DEF_PRIORITY - 2))
return true;
@@ -5978,6 +5978,9 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
unsigned long watermark;
+ if (!gfp_compaction_allowed(sc->gfp_mask))
+ return false;
+
/* Allocation can already succeed, nothing to do */
if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
sc->reclaim_idx, 0))
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index da7cac0a1b..6b2b65a667 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -62,14 +62,6 @@ source "net/bluetooth/cmtp/Kconfig"
source "net/bluetooth/hidp/Kconfig"
-config BT_HS
- bool "Bluetooth High Speed (HS) features"
- depends on BT_BREDR
- help
- Bluetooth High Speed includes support for off-loading
- Bluetooth connections via 802.11 (wifi) physical layer
- available with Bluetooth version 3.0 or later.
-
config BT_LE
bool "Bluetooth Low Energy (LE) features"
depends on BT
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 141ac1fda0..628d448d78 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -21,7 +21,6 @@ bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_LE) += iso.o
-bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o
bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
deleted file mode 100644
index e7adb8a98c..0000000000
--- a/net/bluetooth/a2mp.c
+++ /dev/null
@@ -1,1054 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/l2cap.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-#define A2MP_FEAT_EXT 0x8000
-
-/* Global AMP Manager list */
-static LIST_HEAD(amp_mgr_list);
-static DEFINE_MUTEX(amp_mgr_list_lock);
-
-/* A2MP build & send command helper functions */
-static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
-{
- struct a2mp_cmd *cmd;
- int plen;
-
- plen = sizeof(*cmd) + len;
- cmd = kzalloc(plen, GFP_KERNEL);
- if (!cmd)
- return NULL;
-
- cmd->code = code;
- cmd->ident = ident;
- cmd->len = cpu_to_le16(len);
-
- memcpy(cmd->data, data, len);
-
- return cmd;
-}
-
-static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data)
-{
- struct l2cap_chan *chan = mgr->a2mp_chan;
- struct a2mp_cmd *cmd;
- u16 total_len = len + sizeof(*cmd);
- struct kvec iv;
- struct msghdr msg;
-
- cmd = __a2mp_build(code, ident, len, data);
- if (!cmd)
- return;
-
- iv.iov_base = cmd;
- iv.iov_len = total_len;
-
- memset(&msg, 0, sizeof(msg));
-
- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len);
-
- l2cap_chan_send(chan, &msg, total_len);
-
- kfree(cmd);
-}
-
-static u8 __next_ident(struct amp_mgr *mgr)
-{
- if (++mgr->ident == 0)
- mgr->ident = 1;
-
- return mgr->ident;
-}
-
-static struct amp_mgr *amp_mgr_lookup_by_state(u8 state)
-{
- struct amp_mgr *mgr;
-
- mutex_lock(&amp_mgr_list_lock);
- list_for_each_entry(mgr, &amp_mgr_list, list) {
- if (test_and_clear_bit(state, &mgr->state)) {
- amp_mgr_get(mgr);
- mutex_unlock(&amp_mgr_list_lock);
- return mgr;
- }
- }
- mutex_unlock(&amp_mgr_list_lock);
-
- return NULL;
-}
-
-/* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
-{
- struct hci_dev *hdev;
- int i = 1;
-
- cl[0].id = AMP_ID_BREDR;
- cl[0].type = AMP_TYPE_BREDR;
- cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP) {
- cl[i].id = hdev->id;
- cl[i].type = hdev->amp_type;
- if (test_bit(HCI_UP, &hdev->flags))
- cl[i].status = hdev->amp_status;
- else
- cl[i].status = AMP_STATUS_POWERED_DOWN;
- i++;
- }
- }
-}
-
-/* Processing A2MP messages */
-static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cmd_rej *rej = (void *) skb->data;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rej))
- return -EINVAL;
-
- BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason));
-
- skb_pull(skb, sizeof(*rej));
-
- return 0;
-}
-
-static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_req *req = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_discov_rsp *rsp;
- u16 ext_feat;
- u8 num_ctrl;
- struct hci_dev *hdev;
-
- if (len < sizeof(*req))
- return -EINVAL;
-
- skb_pull(skb, sizeof(*req));
-
- ext_feat = le16_to_cpu(req->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- read_lock(&hci_dev_list_lock);
-
- /* at minimum the BR/EDR needs to be listed */
- num_ctrl = 1;
-
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->dev_type == HCI_AMP)
- num_ctrl++;
- }
-
- len = struct_size(rsp, cl, num_ctrl);
- rsp = kmalloc(len, GFP_ATOMIC);
- if (!rsp) {
- read_unlock(&hci_dev_list_lock);
- return -ENOMEM;
- }
-
- rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- rsp->ext_feat = 0;
-
- __a2mp_add_cl(mgr, rsp->cl);
-
- read_unlock(&hci_dev_list_lock);
-
- a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
-
- kfree(rsp);
- return 0;
-}
-
-static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_discov_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct a2mp_cl *cl;
- u16 ext_feat;
- bool found = false;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- len -= sizeof(*rsp);
- skb_pull(skb, sizeof(*rsp));
-
- ext_feat = le16_to_cpu(rsp->ext_feat);
-
- BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat);
-
- /* check that packet is not broken for now */
- while (ext_feat & A2MP_FEAT_EXT) {
- if (len < sizeof(ext_feat))
- return -EINVAL;
-
- ext_feat = get_unaligned_le16(skb->data);
- BT_DBG("efm 0x%4.4x", ext_feat);
- len -= sizeof(ext_feat);
- skb_pull(skb, sizeof(ext_feat));
- }
-
- cl = (void *) skb->data;
- while (len >= sizeof(*cl)) {
- BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type,
- cl->status);
-
- if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
- struct a2mp_info_req req;
-
- found = true;
-
- memset(&req, 0, sizeof(req));
-
- req.id = cl->id;
- a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
- sizeof(req), &req);
- }
-
- len -= sizeof(*cl);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* Fall back to L2CAP init sequence */
- if (!found) {
- struct l2cap_conn *conn = mgr->l2cap_conn;
- struct l2cap_chan *chan;
-
- mutex_lock(&conn->chan_lock);
-
- list_for_each_entry(chan, &conn->chan_l, list) {
-
- BT_DBG("chan %p state %s", chan,
- state_to_string(chan->state));
-
- if (chan->scid == L2CAP_CID_A2MP)
- continue;
-
- l2cap_chan_lock(chan);
-
- if (chan->state == BT_CONNECT)
- l2cap_send_conn_req(chan);
-
- l2cap_chan_unlock(chan);
- }
-
- mutex_unlock(&conn->chan_lock);
- }
-
- return 0;
-}
-
-static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_cl *cl = (void *) skb->data;
-
- while (skb->len >= sizeof(*cl)) {
- BT_DBG("Controller id %u type %u status %u", cl->id, cl->type,
- cl->status);
- cl = skb_pull(skb, sizeof(*cl));
- }
-
- /* TODO send A2MP_CHANGE_RSP */
-
- return 0;
-}
-
-static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- a2mp_send_getinfo_rsp(hdev);
-}
-
-static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct hci_request hreq;
- int err = 0;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->dev_type != HCI_AMP) {
- struct a2mp_info_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = req->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- set_bit(READ_LOC_AMP_INFO, &mgr->state);
- hci_req_init(&hreq, hdev);
- hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
- err = hci_req_run(&hreq, read_local_amp_info_complete);
- if (err < 0)
- a2mp_send_getinfo_rsp(hdev);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data;
- struct a2mp_amp_assoc_req req;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*rsp))
- return -EINVAL;
-
- BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status);
-
- if (rsp->status)
- return -EINVAL;
-
- ctrl = amp_ctrl_add(mgr, rsp->id);
- if (!ctrl)
- return -ENOMEM;
-
- memset(&req, 0, sizeof(req));
-
- req.id = rsp->id;
- a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
- &req);
-
- skb_pull(skb, sizeof(*rsp));
- return 0;
-}
-
-static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_req *req = (void *) skb->data;
- struct hci_dev *hdev;
- struct amp_mgr *tmp;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("id %u", req->id);
-
-	/* Make sure that no other request is being processed */
- tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
-
- hdev = hci_dev_get(req->id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
- struct a2mp_amp_assoc_rsp rsp;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.id = req->id;
-
- if (tmp) {
- rsp.status = A2MP_STATUS_COLLISION_OCCURED;
- amp_mgr_put(tmp);
- } else {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
- &rsp);
-
- goto done;
- }
-
- amp_read_loc_assoc(hdev, mgr);
-
-done:
- if (hdev)
- hci_dev_put(hdev);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data;
- u16 len = le16_to_cpu(hdr->len);
- struct hci_dev *hdev;
- struct amp_ctrl *ctrl;
- struct hci_conn *hcon;
- size_t assoc_len;
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- assoc_len = len - sizeof(*rsp);
-
- BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status,
- assoc_len);
-
- if (rsp->status)
- return -EINVAL;
-
- /* Save remote ASSOC data */
- ctrl = amp_ctrl_lookup(mgr, rsp->id);
- if (ctrl) {
- u8 *assoc;
-
- assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- /* Create Phys Link */
- hdev = hci_dev_get(rsp->id);
- if (!hdev)
- return -EINVAL;
-
- hcon = phylink_add(hdev, mgr, rsp->id, true);
- if (!hcon)
- goto done;
-
- BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id);
-
- mgr->bredr_chan->remote_amp_id = rsp->id;
-
- amp_create_phylink(hdev, mgr, hcon);
-
-done:
- hci_dev_put(hdev);
- skb_pull(skb, len);
- return 0;
-}
-
-static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
- struct amp_ctrl *ctrl;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- ctrl = amp_ctrl_lookup(mgr, rsp.remote_id);
- if (!ctrl) {
- ctrl = amp_ctrl_add(mgr, rsp.remote_id);
- if (ctrl) {
- amp_ctrl_get(ctrl);
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- goto send_rsp;
- }
- }
-
- if (ctrl) {
- size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req);
- u8 *assoc;
-
- assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
- if (!assoc) {
- amp_ctrl_put(ctrl);
- hci_dev_put(hdev);
- return -ENOMEM;
- }
-
- ctrl->assoc = assoc;
- ctrl->assoc_len = assoc_len;
- ctrl->assoc_rem_len = assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- }
-
- hcon = phylink_add(hdev, mgr, req->local_id, false);
- if (hcon) {
- amp_accept_phylink(hdev, mgr, hcon);
- rsp.status = A2MP_STATUS_SUCCESS;
- } else {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- }
-
-send_rsp:
- if (hdev)
- hci_dev_put(hdev);
-
-	/* Reply with an error now; reply with success only after the HCI
-	 * Write Remote AMP Assoc command completes with a success status.
-	 */
- if (rsp.status != A2MP_STATUS_SUCCESS) {
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident,
- sizeof(rsp), &rsp);
- } else {
- set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state);
- mgr->ident = hdr->ident;
- }
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- struct a2mp_physlink_req *req = (void *) skb->data;
- struct a2mp_physlink_rsp rsp;
- struct hci_dev *hdev;
- struct hci_conn *hcon;
-
- if (le16_to_cpu(hdr->len) < sizeof(*req))
- return -EINVAL;
-
- BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.local_id = req->remote_id;
- rsp.remote_id = req->local_id;
- rsp.status = A2MP_STATUS_SUCCESS;
-
- hdev = hci_dev_get(req->remote_id);
- if (!hdev) {
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
- goto send_rsp;
- }
-
- hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &mgr->l2cap_conn->hcon->dst);
- if (!hcon) {
- bt_dev_err(hdev, "no phys link exist");
- rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
- goto clean;
- }
-
- /* TODO Disconnect Phys Link here */
-
-clean:
- hci_dev_put(hdev);
-
-send_rsp:
- a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
-
- skb_pull(skb, sizeof(*req));
- return 0;
-}
-
-static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
- struct a2mp_cmd *hdr)
-{
- BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code);
-
- skb_pull(skb, le16_to_cpu(hdr->len));
- return 0;
-}
-
-/* Handle A2MP signalling */
-static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
-{
- struct a2mp_cmd *hdr;
- struct amp_mgr *mgr = chan->data;
- int err = 0;
-
- amp_mgr_get(mgr);
-
- while (skb->len >= sizeof(*hdr)) {
- u16 len;
-
- hdr = (void *) skb->data;
- len = le16_to_cpu(hdr->len);
-
- BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len);
-
- skb_pull(skb, sizeof(*hdr));
-
- if (len > skb->len || !hdr->ident) {
- err = -EINVAL;
- break;
- }
-
- mgr->ident = hdr->ident;
-
- switch (hdr->code) {
- case A2MP_COMMAND_REJ:
- a2mp_command_rej(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_REQ:
- err = a2mp_discover_req(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_NOTIFY:
- err = a2mp_change_notify(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_REQ:
- err = a2mp_getinfo_req(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_REQ:
- err = a2mp_getampassoc_req(mgr, skb, hdr);
- break;
-
- case A2MP_CREATEPHYSLINK_REQ:
- err = a2mp_createphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCONNPHYSLINK_REQ:
- err = a2mp_discphyslink_req(mgr, skb, hdr);
- break;
-
- case A2MP_DISCOVER_RSP:
- err = a2mp_discover_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETINFO_RSP:
- err = a2mp_getinfo_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_GETAMPASSOC_RSP:
- err = a2mp_getampassoc_rsp(mgr, skb, hdr);
- break;
-
- case A2MP_CHANGE_RSP:
- case A2MP_CREATEPHYSLINK_RSP:
- case A2MP_DISCONNPHYSLINK_RSP:
- err = a2mp_cmd_rsp(mgr, skb, hdr);
- break;
-
- default:
- BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
- err = -EINVAL;
- break;
- }
- }
-
- if (err) {
- struct a2mp_cmd_rej rej;
-
- memset(&rej, 0, sizeof(rej));
-
- rej.reason = cpu_to_le16(0);
- hdr = (void *) skb->data;
-
- BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
-
- a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
- &rej);
- }
-
-	/* Always free the skb and return a success error code to prevent
-	   an L2CAP Disconnect from being sent over the A2MP channel */
- kfree_skb(skb);
-
- amp_mgr_put(mgr);
-
- return 0;
-}
-
-static void a2mp_chan_close_cb(struct l2cap_chan *chan)
-{
- l2cap_chan_put(chan);
-}
-
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
- int err)
-{
- struct amp_mgr *mgr = chan->data;
-
- if (!mgr)
- return;
-
- BT_DBG("chan %p state %s", chan, state_to_string(state));
-
- chan->state = state;
-
- switch (state) {
- case BT_CLOSED:
- if (mgr)
- amp_mgr_put(mgr);
- break;
- }
-}
-
-static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
- unsigned long hdr_len,
- unsigned long len, int nb)
-{
- struct sk_buff *skb;
-
- skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- return skb;
-}
-
-static const struct l2cap_ops a2mp_chan_ops = {
- .name = "L2CAP A2MP channel",
- .recv = a2mp_chan_recv_cb,
- .close = a2mp_chan_close_cb,
- .state_change = a2mp_chan_state_change_cb,
- .alloc_skb = a2mp_chan_alloc_skb_cb,
-
- /* Not implemented for A2MP */
- .new_connection = l2cap_chan_no_new_connection,
- .teardown = l2cap_chan_no_teardown,
- .ready = l2cap_chan_no_ready,
- .defer = l2cap_chan_no_defer,
- .resume = l2cap_chan_no_resume,
- .set_shutdown = l2cap_chan_no_set_shutdown,
- .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
-};
-
-static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
-{
- struct l2cap_chan *chan;
- int err;
-
- chan = l2cap_chan_create();
- if (!chan)
- return NULL;
-
- BT_DBG("chan %p", chan);
-
- chan->chan_type = L2CAP_CHAN_FIXED;
- chan->scid = L2CAP_CID_A2MP;
- chan->dcid = L2CAP_CID_A2MP;
- chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
- chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-
- chan->ops = &a2mp_chan_ops;
-
- l2cap_chan_set_defaults(chan);
- chan->remote_max_tx = chan->max_tx;
- chan->remote_tx_win = chan->tx_win;
-
- chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
- chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
-
- skb_queue_head_init(&chan->tx_q);
-
- chan->mode = L2CAP_MODE_ERTM;
-
- err = l2cap_ertm_init(chan);
- if (err < 0) {
- l2cap_chan_del(chan, 0);
- return NULL;
- }
-
- chan->conf_state = 0;
-
- if (locked)
- __l2cap_chan_add(conn, chan);
- else
- l2cap_chan_add(conn, chan);
-
- chan->remote_mps = chan->omtu;
- chan->mps = chan->omtu;
-
- chan->state = BT_CONNECTED;
-
- return chan;
-}
-
-/* AMP Manager functions */
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- kref_get(&mgr->kref);
-
- return mgr;
-}
-
-static void amp_mgr_destroy(struct kref *kref)
-{
- struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&amp_mgr_list_lock);
- list_del(&mgr->list);
- mutex_unlock(&amp_mgr_list_lock);
-
- amp_ctrl_list_flush(mgr);
- kfree(mgr);
-}
-
-int amp_mgr_put(struct amp_mgr *mgr)
-{
- BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref));
-
- return kref_put(&mgr->kref, &amp_mgr_destroy);
-}
-
-static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked)
-{
- struct amp_mgr *mgr;
- struct l2cap_chan *chan;
-
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return NULL;
-
- BT_DBG("conn %p mgr %p", conn, mgr);
-
- mgr->l2cap_conn = conn;
-
- chan = a2mp_chan_open(conn, locked);
- if (!chan) {
- kfree(mgr);
- return NULL;
- }
-
- mgr->a2mp_chan = chan;
- chan->data = mgr;
-
- conn->hcon->amp_mgr = mgr;
-
- kref_init(&mgr->kref);
-
- /* Remote AMP ctrl list initialization */
- INIT_LIST_HEAD(&mgr->amp_ctrls);
- mutex_init(&mgr->amp_ctrls_lock);
-
- mutex_lock(&amp_mgr_list_lock);
- list_add(&mgr->list, &amp_mgr_list);
- mutex_unlock(&amp_mgr_list_lock);
-
- return mgr;
-}
-
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- struct amp_mgr *mgr;
-
- if (conn->hcon->type != ACL_LINK)
- return NULL;
-
- mgr = amp_mgr_create(conn, false);
- if (!mgr) {
- BT_ERR("Could not create AMP manager");
- return NULL;
- }
-
- BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
-
- return mgr->a2mp_chan;
-}
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
-{
- struct amp_mgr *mgr;
- struct a2mp_info_rsp rsp;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- memset(&rsp, 0, sizeof(rsp));
-
- rsp.id = hdev->id;
- rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
-
- if (hdev->amp_type != AMP_TYPE_BREDR) {
- rsp.status = 0;
- rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
- rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
- rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
- rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
- rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
- }
-
- a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_amp_assoc_rsp *rsp;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
- if (!mgr)
- return;
-
- BT_DBG("%s mgr %p", hdev->name, mgr);
-
- len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len;
- rsp = kzalloc(len, GFP_KERNEL);
- if (!rsp) {
- amp_mgr_put(mgr);
- return;
- }
-
- rsp->id = hdev->id;
-
- if (status) {
- rsp->status = A2MP_STATUS_INVALID_CTRL_ID;
- } else {
- rsp->status = A2MP_STATUS_SUCCESS;
- memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len);
- }
-
- a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp);
- amp_mgr_put(mgr);
- kfree(rsp);
-}
-
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct a2mp_physlink_req *req;
- struct l2cap_chan *bredr_chan;
- size_t len;
-
- mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL);
- if (!mgr)
- return;
-
- len = sizeof(*req) + loc_assoc->len;
-
- BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len);
-
- req = kzalloc(len, GFP_KERNEL);
- if (!req) {
- amp_mgr_put(mgr);
- return;
- }
-
- bredr_chan = mgr->bredr_chan;
- if (!bredr_chan)
- goto clean;
-
- req->local_id = hdev->id;
- req->remote_id = bredr_chan->remote_amp_id;
- memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len);
-
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req);
-
-clean:
- amp_mgr_put(mgr);
- kfree(req);
-}
-
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
-{
- struct amp_mgr *mgr;
- struct a2mp_physlink_rsp rsp;
- struct hci_conn *hs_hcon;
-
- mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC);
- if (!mgr)
- return;
-
- memset(&rsp, 0, sizeof(rsp));
-
- hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
- if (!hs_hcon) {
- rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
- } else {
- rsp.remote_id = hs_hcon->remote_id;
- rsp.status = A2MP_STATUS_SUCCESS;
- }
-
- BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon,
- status);
-
- rsp.local_id = hdev->id;
- a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp);
- amp_mgr_put(mgr);
-}
-
-void a2mp_discover_amp(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct a2mp_discov_req req;
-
- BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr);
-
- if (!mgr) {
- mgr = amp_mgr_create(conn, true);
- if (!mgr)
- return;
- }
-
- mgr->bredr_chan = chan;
-
- memset(&req, 0, sizeof(req));
-
- req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
- req.ext_feat = 0;
- a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
-}
diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h
deleted file mode 100644
index 2fd253a61a..0000000000
--- a/net/bluetooth/a2mp.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __A2MP_H
-#define __A2MP_H
-
-#include <net/bluetooth/l2cap.h>
-
-enum amp_mgr_state {
- READ_LOC_AMP_INFO,
- READ_LOC_AMP_ASSOC,
- READ_LOC_AMP_ASSOC_FINAL,
- WRITE_REMOTE_AMP_ASSOC,
-};
-
-struct amp_mgr {
- struct list_head list;
- struct l2cap_conn *l2cap_conn;
- struct l2cap_chan *a2mp_chan;
- struct l2cap_chan *bredr_chan;
- struct kref kref;
- __u8 ident;
- __u8 handle;
- unsigned long state;
- unsigned long flags;
-
- struct list_head amp_ctrls;
- struct mutex amp_ctrls_lock;
-};
-
-struct a2mp_cmd {
- __u8 code;
- __u8 ident;
- __le16 len;
- __u8 data[];
-} __packed;
-
-/* A2MP command codes */
-#define A2MP_COMMAND_REJ 0x01
-struct a2mp_cmd_rej {
- __le16 reason;
- __u8 data[];
-} __packed;
-
-#define A2MP_DISCOVER_REQ 0x02
-struct a2mp_discov_req {
- __le16 mtu;
- __le16 ext_feat;
-} __packed;
-
-struct a2mp_cl {
- __u8 id;
- __u8 type;
- __u8 status;
-} __packed;
-
-#define A2MP_DISCOVER_RSP 0x03
-struct a2mp_discov_rsp {
- __le16 mtu;
- __le16 ext_feat;
- struct a2mp_cl cl[];
-} __packed;
-
-#define A2MP_CHANGE_NOTIFY 0x04
-#define A2MP_CHANGE_RSP 0x05
-
-#define A2MP_GETINFO_REQ 0x06
-struct a2mp_info_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETINFO_RSP 0x07
-struct a2mp_info_rsp {
- __u8 id;
- __u8 status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le16 pal_cap;
- __le16 assoc_size;
-} __packed;
-
-#define A2MP_GETAMPASSOC_REQ 0x08
-struct a2mp_amp_assoc_req {
- __u8 id;
-} __packed;
-
-#define A2MP_GETAMPASSOC_RSP 0x09
-struct a2mp_amp_assoc_rsp {
- __u8 id;
- __u8 status;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_REQ 0x0A
-#define A2MP_DISCONNPHYSLINK_REQ 0x0C
-struct a2mp_physlink_req {
- __u8 local_id;
- __u8 remote_id;
- __u8 amp_assoc[];
-} __packed;
-
-#define A2MP_CREATEPHYSLINK_RSP 0x0B
-#define A2MP_DISCONNPHYSLINK_RSP 0x0D
-struct a2mp_physlink_rsp {
- __u8 local_id;
- __u8 remote_id;
- __u8 status;
-} __packed;
-
-/* A2MP response status */
-#define A2MP_STATUS_SUCCESS 0x00
-#define A2MP_STATUS_INVALID_CTRL_ID 0x01
-#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
-#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
-#define A2MP_STATUS_COLLISION_OCCURED 0x03
-#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
-#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
-#define A2MP_STATUS_SECURITY_VIOLATION 0x06
-
-struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-int amp_mgr_put(struct amp_mgr *mgr);
-struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb);
-void a2mp_discover_amp(struct l2cap_chan *chan);
-#else
-static inline int amp_mgr_put(struct amp_mgr *mgr)
-{
- return 0;
-}
-
-static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- return NULL;
-}
-
-static inline void a2mp_discover_amp(struct l2cap_chan *chan)
-{
-}
-#endif
-
-void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
-void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
-void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status);
-
-#endif /* __A2MP_H */
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index b93464ac35..67604ccec2 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -309,14 +309,11 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- lock_sock(sk);
-
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
err = 0;
- release_sock(sk);
return err;
}
@@ -346,8 +343,6 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
skb_free_datagram(sk, skb);
- release_sock(sk);
-
if (flags & MSG_TRUNC)
copied = skblen;
@@ -570,10 +565,11 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
if (sk->sk_state == BT_LISTEN)
return -EINVAL;
- lock_sock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
amount = skb ? skb->len : 0;
- release_sock(sk);
+ spin_unlock(&sk->sk_receive_queue.lock);
+
err = put_user(amount, (int __user *)arg);
break;
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
deleted file mode 100644
index 5d698f1986..0000000000
--- a/net/bluetooth/amp.c
+++ /dev/null
@@ -1,590 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci.h>
-#include <net/bluetooth/hci_core.h>
-#include <crypto/hash.h>
-
-#include "hci_request.h"
-#include "a2mp.h"
-#include "amp.h"
-
-/* Remote AMP Controllers interface */
-void amp_ctrl_get(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- kref_get(&ctrl->kref);
-}
-
-static void amp_ctrl_destroy(struct kref *kref)
-{
- struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref);
-
- BT_DBG("ctrl %p", ctrl);
-
- kfree(ctrl->assoc);
- kfree(ctrl);
-}
-
-int amp_ctrl_put(struct amp_ctrl *ctrl)
-{
- BT_DBG("ctrl %p orig refcnt %d", ctrl,
- kref_read(&ctrl->kref));
-
- return kref_put(&ctrl->kref, &amp_ctrl_destroy);
-}
-
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return NULL;
-
- kref_init(&ctrl->kref);
- ctrl->id = id;
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_add(&ctrl->list, &mgr->amp_ctrls);
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- BT_DBG("mgr %p ctrl %p", mgr, ctrl);
-
- return ctrl;
-}
-
-void amp_ctrl_list_flush(struct amp_mgr *mgr)
-{
- struct amp_ctrl *ctrl, *n;
-
- BT_DBG("mgr %p", mgr);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) {
- list_del(&ctrl->list);
- amp_ctrl_put(ctrl);
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-}
-
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id)
-{
- struct amp_ctrl *ctrl;
-
- BT_DBG("mgr %p id %u", mgr, id);
-
- mutex_lock(&mgr->amp_ctrls_lock);
- list_for_each_entry(ctrl, &mgr->amp_ctrls, list) {
- if (ctrl->id == id) {
- amp_ctrl_get(ctrl);
- mutex_unlock(&mgr->amp_ctrls_lock);
- return ctrl;
- }
- }
- mutex_unlock(&mgr->amp_ctrls_lock);
-
- return NULL;
-}
-
-/* Physical Link interface */
-static u8 __next_handle(struct amp_mgr *mgr)
-{
- if (++mgr->handle == 0)
- mgr->handle = 1;
-
- return mgr->handle;
-}
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out)
-{
- bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
- struct hci_conn *hcon;
- u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE;
-
- hcon = hci_conn_add(hdev, AMP_LINK, dst, role, __next_handle(mgr));
- if (!hcon)
- return NULL;
-
- BT_DBG("hcon %p dst %pMR", hcon, dst);
-
- hcon->state = BT_CONNECT;
- hcon->attempt++;
- hcon->remote_id = remote_id;
- hcon->amp_mgr = amp_mgr_get(mgr);
-
- return hcon;
-}
-
-/* AMP crypto key generation interface */
-static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
-{
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- int ret;
-
- if (!ksize)
- return -EINVAL;
-
- tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
- if (IS_ERR(tfm)) {
- BT_DBG("crypto_alloc_ahash failed: err %ld", PTR_ERR(tfm));
- return PTR_ERR(tfm);
- }
-
- ret = crypto_shash_setkey(tfm, key, ksize);
- if (ret) {
- BT_DBG("crypto_ahash_setkey failed: err %d", ret);
- goto failed;
- }
-
- shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!shash) {
- ret = -ENOMEM;
- goto failed;
- }
-
- shash->tfm = tfm;
-
- ret = crypto_shash_digest(shash, plaintext, psize, output);
-
- kfree(shash);
-
-failed:
- crypto_free_shash(tfm);
- return ret;
-}
-
-int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
-{
- struct hci_dev *hdev = conn->hdev;
- struct link_key *key;
- u8 keybuf[HCI_AMP_LINK_KEY_SIZE];
- u8 gamp_key[HCI_AMP_LINK_KEY_SIZE];
- int err;
-
- if (!hci_conn_check_link_mode(conn))
- return -EACCES;
-
- BT_DBG("conn %p key_type %d", conn, conn->key_type);
-
- /* Legacy key */
- if (conn->key_type < 3) {
- bt_dev_err(hdev, "legacy key type %u", conn->key_type);
- return -EACCES;
- }
-
- *type = conn->key_type;
- *len = HCI_AMP_LINK_KEY_SIZE;
-
- key = hci_find_link_key(hdev, &conn->dst);
- if (!key) {
- BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst);
- return -EACCES;
- }
-
-	/* BR/EDR Link Key concatenated with itself */
- memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE);
- memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE);
-
- /* Derive Generic AMP Link Key (gamp) */
- err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key);
- if (err) {
- bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err);
- return err;
- }
-
- if (conn->key_type == HCI_LK_DEBUG_COMBINATION) {
- BT_DBG("Use Generic AMP Key (gamp)");
- memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE);
- return err;
- }
-
- /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */
- return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
-}
-
-static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
- struct amp_assoc *assoc = &hdev->loc_assoc;
- size_t rem_len, frag_len;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
- if (rp->status)
- goto send_rsp;
-
- frag_len = skb->len - sizeof(*rp);
- rem_len = __le16_to_cpu(rp->rem_len);
-
- if (rem_len > frag_len) {
- BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
- memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
- assoc->offset += frag_len;
-
- /* Read other fragments */
- amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
- return;
- }
-
- memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
- assoc->len = assoc->offset + rem_len;
- assoc->offset = 0;
-
-send_rsp:
- /* Send A2MP Rsp when all fragments are received */
- a2mp_send_getampassoc_rsp(hdev, rp->status);
- a2mp_send_create_phy_link_req(hdev, rp->status);
-}
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_assoc *loc_assoc = &hdev->loc_assoc;
- struct hci_request req;
- int err;
-
- BT_DBG("%s handle %u", hdev->name, phy_handle);
-
- cp.phy_handle = phy_handle;
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
- cp.len_so_far = cpu_to_le16(loc_assoc->offset);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct hci_request req;
- int err;
-
- memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
- memset(&cp, 0, sizeof(cp));
-
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_read_local_amp_assoc cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct hci_request req;
- int err;
-
- if (!mgr)
- return;
-
- cp.phy_handle = hcon->handle;
- cp.len_so_far = cpu_to_le16(0);
- cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
-
- set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
-
- /* Read Local AMP Assoc final link information data */
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
- err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
- if (err < 0)
- a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
-}
-
-static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
-{
- struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
-
- BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
- hdev->name, rp->status, rp->phy_handle);
-
- if (rp->status)
- return;
-
- amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
-/* Write AMP Assoc data fragments; returns true once the last fragment is written */
-static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
- struct hci_conn *hcon)
-{
- struct hci_cp_write_remote_amp_assoc *cp;
- struct amp_mgr *mgr = hcon->amp_mgr;
- struct amp_ctrl *ctrl;
- struct hci_request req;
- u16 frag_len, len;
-
- ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
- if (!ctrl)
- return false;
-
- if (!ctrl->assoc_rem_len) {
- BT_DBG("all fragments are written");
- ctrl->assoc_rem_len = ctrl->assoc_len;
- ctrl->assoc_len_so_far = 0;
-
- amp_ctrl_put(ctrl);
- return true;
- }
-
- frag_len = min_t(u16, 248, ctrl->assoc_rem_len);
- len = frag_len + sizeof(*cp);
-
- cp = kzalloc(len, GFP_KERNEL);
- if (!cp) {
- amp_ctrl_put(ctrl);
- return false;
- }
-
- BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u",
- hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len);
-
- cp->phy_handle = hcon->handle;
- cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far);
- cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len);
- memcpy(cp->frag, ctrl->assoc, frag_len);
-
- ctrl->assoc_len_so_far += frag_len;
- ctrl->assoc_rem_len -= frag_len;
-
- amp_ctrl_put(ctrl);
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
- hci_req_run_skb(&req, write_remote_amp_assoc_complete);
-
- kfree(cp);
-
- return false;
-}
-
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- /* Send A2MP create phylink rsp when all fragments are written */
- if (amp_write_rem_assoc_frag(hdev, hcon))
- a2mp_send_create_phy_link_rsp(hdev, 0);
-}
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
-{
- struct hci_conn *hcon;
-
- BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle);
-
- hcon = hci_conn_hash_lookup_handle(hdev, handle);
- if (!hcon)
- return;
-
- BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon);
-
- amp_write_rem_assoc_frag(hdev, hcon);
-}
-
-static void create_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_create_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
- if (!cp)
- return;
-
- hci_dev_lock(hdev);
-
- if (status) {
- struct hci_conn *hcon;
-
- hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
- if (hcon)
- hci_conn_del(hcon);
- } else {
- amp_write_remote_assoc(hdev, cp->phy_handle);
- }
-
- hci_dev_unlock(hdev);
-}
-
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_create_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, create_phylink_complete);
-}
-
-static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
-{
- struct hci_cp_accept_phy_link *cp;
-
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
- if (status)
- return;
-
- cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
- if (!cp)
- return;
-
- amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon)
-{
- struct hci_cp_accept_phy_link cp;
- struct hci_request req;
-
- cp.phy_handle = hcon->handle;
-
- BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon,
- hcon->handle);
-
- if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len,
- &cp.key_type)) {
- BT_DBG("Cannot create link key");
- return;
- }
-
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
- hci_req_run(&req, accept_phylink_complete);
-}
-
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
-{
- struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);
- struct amp_mgr *mgr = hs_hcon->amp_mgr;
- struct l2cap_chan *bredr_chan;
-
- BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr);
-
- if (!bredr_hdev || !mgr || !mgr->bredr_chan)
- return;
-
- bredr_chan = mgr->bredr_chan;
-
- l2cap_chan_lock(bredr_chan);
-
- set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags);
- bredr_chan->remote_amp_id = hs_hcon->remote_id;
- bredr_chan->local_amp_id = hs_hcon->hdev->id;
- bredr_chan->hs_hcon = hs_hcon;
- bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu;
-
- __l2cap_physical_cfm(bredr_chan, 0);
-
- l2cap_chan_unlock(bredr_chan);
-
- hci_dev_put(bredr_hdev);
-}
-
-void amp_create_logical_link(struct l2cap_chan *chan)
-{
- struct hci_conn *hs_hcon = chan->hs_hcon;
- struct hci_cp_create_accept_logical_link cp;
- struct hci_dev *hdev;
-
- BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
- &chan->conn->hcon->dst);
-
- if (!hs_hcon)
- return;
-
- hdev = hci_dev_hold(chan->hs_hcon->hdev);
- if (!hdev)
- return;
-
- cp.phy_handle = hs_hcon->handle;
-
- cp.tx_flow_spec.id = chan->local_id;
- cp.tx_flow_spec.stype = chan->local_stype;
- cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu);
- cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat);
- cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to);
-
- cp.rx_flow_spec.id = chan->remote_id;
- cp.rx_flow_spec.stype = chan->remote_stype;
- cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu);
- cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime);
- cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat);
- cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to);
-
- if (hs_hcon->out)
- hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp),
- &cp);
- else
- hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp),
- &cp);
-
- hci_dev_put(hdev);
-}
-
-void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
- struct hci_conn *hcon = hchan->conn;
- struct hci_cp_disconn_logical_link cp;
-
- if (hcon->state != BT_CONNECTED) {
- BT_DBG("hchan %p not connected", hchan);
- return;
- }
-
- cp.log_handle = cpu_to_le16(hchan->handle);
- hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp);
-}
-
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason)
-{
- BT_DBG("hchan %p", hchan);
-
- hci_chan_del(hchan);
-}
diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h
deleted file mode 100644
index 97c87abd12..0000000000
--- a/net/bluetooth/amp.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- Copyright (c) 2011,2012 Intel Corp.
-
-*/
-
-#ifndef __AMP_H
-#define __AMP_H
-
-struct amp_ctrl {
- struct list_head list;
- struct kref kref;
- __u8 id;
- __u16 assoc_len_so_far;
- __u16 assoc_rem_len;
- __u16 assoc_len;
- __u8 *assoc;
-};
-
-int amp_ctrl_put(struct amp_ctrl *ctrl);
-void amp_ctrl_get(struct amp_ctrl *ctrl);
-struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id);
-struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id);
-void amp_ctrl_list_flush(struct amp_mgr *mgr);
-
-struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
- u8 remote_id, bool out);
-
-int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type);
-
-void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle);
-void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr);
-void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
- struct hci_conn *hcon);
-void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
- struct hci_conn *hcon);
-
-#if IS_ENABLED(CONFIG_BT_HS)
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-#else
-static inline void amp_create_logical_link(struct l2cap_chan *chan)
-{
-}
-
-static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
-{
-}
-#endif
-
-void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
-void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
-void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
-void amp_create_logical_link(struct l2cap_chan *chan);
-void amp_disconnect_logical_link(struct hci_chan *hchan);
-void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason);
-
-#endif /* __AMP_H */
diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c
index 9214189279..1bc51e2b05 100644
--- a/net/bluetooth/eir.c
+++ b/net/bluetooth/eir.c
@@ -13,48 +13,33 @@
#define PNP_INFO_SVCLASS_ID 0x1200
-static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len)
-{
- u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];
-
- /* If data is already NULL terminated just pass it directly */
- if (data[data_len - 1] == '\0')
- return eir_append_data(eir, eir_len, type, data, data_len);
-
- memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH);
- name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';
-
- return eir_append_data(eir, eir_len, type, name, sizeof(name));
-}
-
u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
size_t short_len;
size_t complete_len;
- /* no space left for name (+ NULL + type + len) */
- if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
+ /* no space left for name (+ type + len) */
+ if ((max_adv_len(hdev) - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 2)
return ad_len;
/* use complete name if present and fits */
complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
- return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE,
- hdev->dev_name, complete_len + 1);
+ return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
+ hdev->dev_name, complete_len);
/* use short name if present */
short_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
if (short_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->short_name,
- short_len == HCI_MAX_SHORT_NAME_LENGTH ?
- short_len : short_len + 1);
+ short_len);
/* use shortened full name if present, we already know that name
* is longer then HCI_MAX_SHORT_NAME_LENGTH
*/
if (complete_len)
- return eir_append_name(ptr, ad_len, EIR_NAME_SHORT,
+ return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
hdev->dev_name,
HCI_MAX_SHORT_NAME_LENGTH);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index d01db89fcb..50c55d7335 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -36,7 +36,6 @@
#include "hci_request.h"
#include "smp.h"
-#include "a2mp.h"
#include "eir.h"
struct sco_param {
@@ -1169,9 +1168,6 @@ void hci_conn_del(struct hci_conn *conn)
}
}
- if (conn->amp_mgr)
- amp_mgr_put(conn->amp_mgr);
-
skb_queue_purge(&conn->data_q);
/* Remove the connection from the list and cleanup its remaining
@@ -2981,7 +2977,7 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
case HCI_EV_LE_CONN_COMPLETE:
case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
case HCI_EVT_LE_CIS_ESTABLISHED:
- hci_cmd_sync_cancel(hdev, -ECANCELED);
+ hci_cmd_sync_cancel(hdev, ECANCELED);
break;
}
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2821a42cef..0592369579 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
else
flags = hdev->flags;
- strcpy(di.name, hdev->name);
+ strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
di.flags = flags;
@@ -1491,11 +1491,12 @@ static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
cmd_timer.work);
- if (hdev->sent_cmd) {
- struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
- u16 opcode = __le16_to_cpu(sent->opcode);
+ if (hdev->req_skb) {
+ u16 opcode = hci_skb_opcode(hdev->req_skb);
bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+ hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
} else {
bt_dev_err(hdev, "command tx timeout");
}
@@ -2795,6 +2796,7 @@ void hci_release_dev(struct hci_dev *hdev)
ida_destroy(&hdev->unset_handle_ida);
ida_simple_remove(&hci_index_ida, hdev->id);
kfree_skb(hdev->sent_cmd);
+ kfree_skb(hdev->req_skb);
kfree_skb(hdev->recv_event);
kfree(hdev);
}
@@ -2826,6 +2828,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
return ret;
}
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ hci_cmd_sync_cancel_sync(hdev, err);
+}
+
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
@@ -2843,7 +2862,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
return 0;
/* Cancel potentially blocking sync operation before suspend */
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cancel_cmd_sync(hdev, EHOSTDOWN);
hci_req_sync_lock(hdev);
ret = hci_suspend_sync(hdev);
@@ -3107,21 +3126,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
{
struct hci_command_hdr *hdr;
- if (!hdev->sent_cmd)
+ if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
return NULL;
- hdr = (void *) hdev->sent_cmd->data;
+ hdr = (void *)skb->data;
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+ return skb->data + HCI_COMMAND_HDR_SIZE;
+}
+
+/* Get data from the previously sent command */
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+{
+ void *data;
- return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+ /* Check if opcode matches last sent command */
+ data = hci_cmd_data(hdev->sent_cmd, opcode);
+ if (!data)
+ /* Check if opcode matches last request */
+ data = hci_cmd_data(hdev->req_skb, opcode);
+
+ return data;
}
/* Get data from last received event */
@@ -4022,17 +4053,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
if (!status && !hci_req_is_complete(hdev))
return;
+ skb = hdev->req_skb;
+
/* If this was the last command in a request the complete
- * callback would be found in hdev->sent_cmd instead of the
+ * callback would be found in hdev->req_skb instead of the
* command queue (hdev->cmd_q).
*/
- if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
- *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
return;
}
- if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
- *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ if (skb && bt_cb(skb)->hci.req_complete) {
+ *req_complete = bt_cb(skb)->hci.req_complete;
return;
}
@@ -4128,6 +4161,36 @@ static void hci_rx_work(struct work_struct *work)
}
}
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "skb %p", skb);
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (!hdev->sent_cmd) {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ return;
+ }
+
+ err = hci_send_frame(hdev, skb);
+ if (err < 0) {
+ hci_cmd_sync_cancel_sync(hdev, -err);
+ return;
+ }
+
+ if (hci_req_status_pend(hdev) &&
+ !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ }
+
+ atomic_dec(&hdev->cmd_cnt);
+}
+
static void hci_cmd_work(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4142,30 +4205,15 @@ static void hci_cmd_work(struct work_struct *work)
if (!skb)
return;
- kfree_skb(hdev->sent_cmd);
-
- hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
- if (hdev->sent_cmd) {
- int res;
- if (hci_req_status_pend(hdev))
- hci_dev_set_flag(hdev, HCI_CMD_PENDING);
- atomic_dec(&hdev->cmd_cnt);
+ hci_send_cmd_sync(hdev, skb);
- res = hci_send_frame(hdev, skb);
- if (res < 0)
- __hci_cmd_sync_cancel(hdev, -res);
-
- rcu_read_lock();
- if (test_bit(HCI_RESET, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
- cancel_delayed_work(&hdev->cmd_timer);
- else
- queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
- HCI_CMD_TIMEOUT);
- rcu_read_unlock();
- } else {
- skb_queue_head(&hdev->cmd_q, skb);
- queue_work(hdev->workqueue, &hdev->cmd_work);
- }
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
}
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 2a5f5a7d24..6275b14b56 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -36,8 +36,6 @@
#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"
-#include "a2mp.h"
-#include "amp.h"
#include "smp.h"
#include "msft.h"
#include "eir.h"
@@ -2526,9 +2524,7 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
* Only those in BT_CONFIG or BT_CONNECTED states can be
* considered connected.
*/
- if (conn &&
- (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
- !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED))
mgmt_device_connected(hdev, conn, name, name_len);
if (discov->state == DISCOVERY_STOPPED)
@@ -3556,8 +3552,6 @@ static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
- hci_conn_check_pending(hdev);
-
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
@@ -3762,8 +3756,9 @@ static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -3936,6 +3931,11 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
* last.
*/
hci_connect_cfm(conn, rp->status);
+
+ /* Notify device connected in case it is a BIG Sync */
+ if (!rp->status && test_bit(HCI_CONN_BIG_SYNC, &conn->flags))
+ mgmt_device_connected(hdev, conn, NULL, 0);
+
break;
}
@@ -4381,7 +4381,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
* (since for this kind of commands there will not be a command
* complete event).
*/
- if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
+ if (ev->status || (hdev->req_skb && !hci_skb_event(hdev->req_skb))) {
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
req_complete_skb);
if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
@@ -5010,8 +5010,9 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
bacpy(&cp.bdaddr, &conn->dst);
cp.pscan_rep_mode = 0x02;
hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
- } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ } else {
mgmt_device_connected(hdev, conn, NULL, 0);
+ }
if (!hci_outgoing_auth_needed(hdev, conn)) {
conn->state = BT_CONNECTED;
@@ -5984,8 +5985,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
goto unlock;
}
- if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
- mgmt_device_connected(hdev, conn, NULL, 0);
+ mgmt_device_connected(hdev, conn, NULL, 0);
conn->sec_level = BT_SECURITY_LOW;
conn->state = BT_CONFIG;
@@ -7214,6 +7214,9 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
/* Notify iso layer */
hci_connect_cfm(pa_sync, 0x00);
+ /* Notify MGMT layer */
+ mgmt_device_connected(hdev, pa_sync, NULL, 0);
+
unlock:
hci_dev_unlock(hdev);
}
@@ -7324,10 +7327,10 @@ static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
/* Only match event if command OGF is for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
- hci_skb_event(hdev->sent_cmd) == ev->subevent) {
- *opcode = hci_skb_opcode(hdev->sent_cmd);
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) == 0x08 &&
+ hci_skb_event(hdev->req_skb) == ev->subevent) {
+ *opcode = hci_skb_opcode(hdev->req_skb);
hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
req_complete_skb);
}
@@ -7714,10 +7717,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
}
/* Only match event if command OGF is not for LE */
- if (hdev->sent_cmd &&
- hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
- hci_skb_event(hdev->sent_cmd) == event) {
- hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+ if (hdev->req_skb &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->req_skb)) != 0x08 &&
+ hci_skb_event(hdev->req_skb) == event) {
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->req_skb),
status, &req_complete, &req_complete_skb);
req_evt = event;
}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 6e023b0104..00e0213800 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -895,7 +895,7 @@ void hci_request_setup(struct hci_dev *hdev)
void hci_request_cancel_all(struct hci_dev *hdev)
{
- __hci_cmd_sync_cancel(hdev, ENODEV);
+ hci_cmd_sync_cancel_sync(hdev, ENODEV);
cancel_interleave_scan(hdev);
}
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index b90ee68bba..5ce71c483b 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -32,6 +32,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
+ /* Free the request command so it is not used as response */
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+
if (skb) {
struct sock *sk = hci_skb_sk(skb);
@@ -39,7 +43,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (sk)
sock_put(sk);
- hdev->req_skb = skb_get(skb);
+ hdev->req_rsp = skb_get(skb);
}
wake_up_interruptible(&hdev->req_wait_q);
@@ -187,8 +191,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
hdev->req_status = 0;
hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
+ skb = hdev->req_rsp;
+ hdev->req_rsp = NULL;
bt_dev_dbg(hdev, "end: err %d", err);
@@ -652,7 +656,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
mutex_unlock(&hdev->cmd_sync_work_lock);
}
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
@@ -660,26 +664,31 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
hdev->req_result = err;
hdev->req_status = HCI_REQ_CANCELED;
- cancel_delayed_work_sync(&hdev->cmd_timer);
- cancel_delayed_work_sync(&hdev->ncmd_timer);
- atomic_set(&hdev->cmd_cnt, 1);
-
- wake_up_interruptible(&hdev->req_wait_q);
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
}
}
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+/* Cancel ongoing command request synchronously:
+ *
+ * - Set result and mark status to HCI_REQ_CANCELED
+ * - Wake up the command sync thread
+ */
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
{
bt_dev_dbg(hdev, "err 0x%2.2x", err);
if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = err;
+ /* req_result is __u32 so error must be positive to be properly
+ * propagated.
+ */
+ hdev->req_result = err < 0 ? -err : err;
hdev->req_status = HCI_REQ_CANCELED;
- queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ wake_up_interruptible(&hdev->req_wait_q);
}
}
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
/* Submit HCI command to be run in cmd_sync_work:
*
@@ -4902,6 +4911,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
@@ -5063,6 +5077,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->sent_cmd = NULL;
}
+ /* Drop last request */
+ if (hdev->req_skb) {
+ kfree_skb(hdev->req_skb);
+ hdev->req_skb = NULL;
+ }
+
clear_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 656f49b299..ab5a9d42fa 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -39,8 +39,6 @@
#include <net/bluetooth/l2cap.h>
#include "smp.h"
-#include "a2mp.h"
-#include "amp.h"
#define LE_FLOWCTL_MAX_CREDITS 65535
@@ -167,24 +165,6 @@ static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
return NULL;
}
-static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
- u8 ident)
-{
- struct l2cap_chan *c;
-
- mutex_lock(&conn->chan_lock);
- c = __l2cap_get_chan_by_ident(conn, ident);
- if (c) {
- /* Only lock if chan reference is not 0 */
- c = l2cap_chan_hold_unless_zero(c);
- if (c)
- l2cap_chan_lock(c);
- }
- mutex_unlock(&conn->chan_lock);
-
- return c;
-}
-
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
u8 src_type)
{
@@ -651,7 +631,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->ops->teardown(chan, err);
if (conn) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
/* Delete from channel list */
list_del(&chan->list);
@@ -666,16 +645,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (chan->chan_type != L2CAP_CHAN_FIXED ||
test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_drop(conn->hcon);
-
- if (mgr && mgr->bredr_chan == chan)
- mgr->bredr_chan = NULL;
- }
-
- if (chan->hs_hchan) {
- struct hci_chan *hs_hchan = chan->hs_hchan;
-
- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
- amp_disconnect_logical_link(hs_hchan);
}
if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
@@ -977,12 +946,6 @@ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
hci_send_acl(conn->hchan, skb, flags);
}
-static bool __chan_is_moving(struct l2cap_chan *chan)
-{
- return chan->move_state != L2CAP_MOVE_STABLE &&
- chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
-}
-
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct hci_conn *hcon = chan->conn->hcon;
@@ -991,15 +954,6 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
skb->priority);
- if (chan->hs_hcon && !__chan_is_moving(chan)) {
- if (chan->hs_hchan)
- hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
- else
- kfree_skb(skb);
-
- return;
- }
-
/* Use NO_FLUSH for LE links (where this is the only option) or
* if the BR/EDR link supports it and flushing has not been
* explicitly requested (through FLAG_FLUSHABLE).
@@ -1180,9 +1134,6 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
if (!control->sframe)
return;
- if (__chan_is_moving(chan))
- return;
-
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
!control->poll)
control->final = 1;
@@ -1237,40 +1188,6 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
-static bool __amp_capable(struct l2cap_chan *chan)
-{
- struct l2cap_conn *conn = chan->conn;
- struct hci_dev *hdev;
- bool amp_available = false;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
- return false;
-
- read_lock(&hci_dev_list_lock);
- list_for_each_entry(hdev, &hci_dev_list, list) {
- if (hdev->amp_type != AMP_TYPE_BREDR &&
- test_bit(HCI_UP, &hdev->flags)) {
- amp_available = true;
- break;
- }
- }
- read_unlock(&hci_dev_list_lock);
-
- if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
- return amp_available;
-
- return false;
-}
-
-static bool l2cap_check_efs(struct l2cap_chan *chan)
-{
- /* Check EFS parameters */
- return true;
-}
-
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1286,76 +1203,6 @@ void l2cap_send_conn_req(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
-static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
-{
- struct l2cap_create_chan_req req;
- req.scid = cpu_to_le16(chan->scid);
- req.psm = chan->psm;
- req.amp_id = amp_id;
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
- sizeof(req), &req);
-}
-
-static void l2cap_move_setup(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
-
- BT_DBG("chan %p", chan);
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- __clear_retrans_timer(chan);
- __clear_monitor_timer(chan);
- __clear_ack_timer(chan);
-
- chan->retry_count = 0;
- skb_queue_walk(&chan->tx_q, skb) {
- if (bt_cb(skb)->l2cap.retries)
- bt_cb(skb)->l2cap.retries = 1;
- else
- break;
- }
-
- chan->expected_tx_seq = chan->buffer_seq;
-
- clear_bit(CONN_REJ_ACT, &chan->conn_state);
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- l2cap_seq_list_clear(&chan->retrans_list);
- l2cap_seq_list_clear(&chan->srej_list);
- skb_queue_purge(&chan->srej_q);
-
- chan->tx_state = L2CAP_TX_STATE_XMIT;
- chan->rx_state = L2CAP_RX_STATE_MOVE;
-
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-}
-
-static void l2cap_move_done(struct l2cap_chan *chan)
-{
- u8 move_role = chan->move_role;
- BT_DBG("chan %p", chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
- if (chan->mode != L2CAP_MODE_ERTM)
- return;
-
- switch (move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
- chan->rx_state = L2CAP_RX_STATE_WAIT_F;
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- chan->rx_state = L2CAP_RX_STATE_WAIT_P;
- break;
- }
-}
-
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
/* The channel may have already been flagged as connected in
@@ -1505,10 +1352,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
static void l2cap_start_connection(struct l2cap_chan *chan)
{
- if (__amp_capable(chan)) {
- BT_DBG("chan %p AMP capable: discover AMPs", chan);
- a2mp_discover_amp(chan);
- } else if (chan->conn->hcon->type == LE_LINK) {
+ if (chan->conn->hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else {
l2cap_send_conn_req(chan);
@@ -1611,11 +1455,6 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
__clear_ack_timer(chan);
}
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_state_change(chan, BT_DISCONN);
- return;
- }
-
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
@@ -1754,11 +1593,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -2067,9 +1901,6 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
BT_DBG("chan %p, skbs %p", chan, skbs);
- if (__chan_is_moving(chan))
- return;
-
skb_queue_splice_tail_init(skbs, &chan->tx_q);
while (!skb_queue_empty(&chan->tx_q)) {
@@ -2112,9 +1943,6 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return 0;
- if (__chan_is_moving(chan))
- return 0;
-
while (chan->tx_send_head &&
chan->unacked_frames < chan->remote_tx_win &&
chan->tx_state == L2CAP_TX_STATE_XMIT) {
@@ -2180,9 +2008,6 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- if (__chan_is_moving(chan))
- return;
-
while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
seq = l2cap_seq_list_pop(&chan->retrans_list);
@@ -2522,8 +2347,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = chan->conn->mtu;
/* Constrain PDU size for BR/EDR connections */
- if (!chan->hs_hcon)
- pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
+ pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
if (chan->fcs)
@@ -3287,11 +3111,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->tx_q);
- chan->local_amp_id = AMP_ID_BREDR;
- chan->move_id = AMP_ID_BREDR;
- chan->move_state = L2CAP_MOVE_STABLE;
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
-
if (chan->mode != L2CAP_MODE_ERTM)
return 0;
@@ -3326,52 +3145,19 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
}
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
{
- return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
- (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
+ return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
}
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
struct l2cap_conf_rfc *rfc)
{
- if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
- u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
-
- /* Class 1 devices have must have ERTM timeouts
- * exceeding the Link Supervision Timeout. The
- * default Link Supervision Timeout for AMP
- * controllers is 10 seconds.
- *
- * Class 1 devices use 0xffffffff for their
- * best-effort flush timeout, so the clamping logic
- * will result in a timeout that meets the above
- * requirement. ERTM timeouts are 16-bit values, so
- * the maximum timeout is 65.535 seconds.
- */
-
- /* Convert timeout to milliseconds and round */
- ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
-
- /* This is the recommended formula for class 2 devices
- * that start ERTM timers when packets are sent to the
- * controller.
- */
- ertm_to = 3 * ertm_to + 500;
-
- if (ertm_to > 0xffff)
- ertm_to = 0xffff;
-
- rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
- rfc->monitor_timeout = rfc->retrans_timeout;
- } else {
- rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- }
+ rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+ rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
}
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
@@ -3623,13 +3409,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
case L2CAP_CONF_EWS:
if (olen != 2)
break;
- if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -ECONNREFUSED;
- set_bit(FLAG_EXT_CTRL, &chan->flags);
- set_bit(CONF_EWS_RECV, &chan->conf_state);
- chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
- chan->remote_tx_win = val;
- break;
+ return -ECONNREFUSED;
default:
if (hint)
@@ -4027,11 +3807,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- if (chan->hs_hcon)
- rsp_code = L2CAP_CREATE_CHAN_RSP;
- else
- rsp_code = L2CAP_CONN_RSP;
+ rsp_code = L2CAP_CONN_RSP;
BT_DBG("chan %p rsp_code %u", chan, rsp_code);
@@ -4190,7 +3966,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
chan->dst_type = bdaddr_dst_type(conn->hcon);
chan->psm = psm;
chan->dcid = scid;
- chan->local_amp_id = amp_id;
__l2cap_chan_add(conn, chan);
@@ -4516,10 +4291,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
/* check compatibility */
/* Send rsp for BR/EDR channel */
- if (!chan->hs_hcon)
- l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
- else
- chan->ident = cmd->ident;
+ l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
}
unlock:
@@ -4571,15 +4343,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
goto done;
}
- if (!chan->hs_hcon) {
- l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
- 0);
- } else {
- if (l2cap_check_efs(chan)) {
- amp_create_logical_link(chan);
- chan->ident = cmd->ident;
- }
- }
+ l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
}
goto done;
@@ -4750,9 +4514,6 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
- if (conn->local_fixed_chan & L2CAP_FC_A2MP)
- feat_mask |= L2CAP_FEAT_EXT_FLOW
- | L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
@@ -4841,751 +4602,6 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn,
return 0;
}
-static int l2cap_create_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_create_chan_req *req = data;
- struct l2cap_create_chan_rsp rsp;
- struct l2cap_chan *chan;
- struct hci_dev *hdev;
- u16 psm, scid;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- psm = le16_to_cpu(req->psm);
- scid = le16_to_cpu(req->scid);
-
- BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
-
- /* For controller id 0 make BR/EDR connection */
- if (req->amp_id == AMP_ID_BREDR) {
- l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- return 0;
- }
-
- /* Validate AMP controller id */
- hdev = hci_dev_get(req->amp_id);
- if (!hdev)
- goto error;
-
- if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
- hci_dev_put(hdev);
- goto error;
- }
-
- chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
- req->amp_id);
- if (chan) {
- struct amp_mgr *mgr = conn->hcon->amp_mgr;
- struct hci_conn *hs_hcon;
-
- hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
- &conn->hcon->dst);
- if (!hs_hcon) {
- hci_dev_put(hdev);
- cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
- chan->dcid);
- return 0;
- }
-
- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
-
- mgr->bredr_chan = chan;
- chan->hs_hcon = hs_hcon;
- chan->fcs = L2CAP_FCS_NONE;
- conn->mtu = hdev->block_mtu;
- }
-
- hci_dev_put(hdev);
-
- return 0;
-
-error:
- rsp.dcid = 0;
- rsp.scid = cpu_to_le16(scid);
- rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-
- l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- return 0;
-}
-
-static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
-{
- struct l2cap_move_chan_req req;
- u8 ident;
-
- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
-
- ident = l2cap_get_ident(chan->conn);
- chan->ident = ident;
-
- req.icid = cpu_to_le16(chan->scid);
- req.dest_amp_id = dest_amp_id;
-
- l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
- &req);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_rsp rsp;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- rsp.icid = cpu_to_le16(chan->dcid);
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
-}
-
-static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
-
- chan->ident = l2cap_get_ident(chan->conn);
-
- cfm.icid = cpu_to_le16(chan->scid);
- cfm.result = cpu_to_le16(result);
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-
- __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
-}
-
-static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
-{
- struct l2cap_move_chan_cfm cfm;
-
- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
-
- cfm.icid = cpu_to_le16(icid);
- cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
-
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
- sizeof(cfm), &cfm);
-}
-
-static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
-{
- struct l2cap_move_chan_cfm_rsp rsp;
-
- BT_DBG("icid 0x%4.4x", icid);
-
- rsp.icid = cpu_to_le16(icid);
- l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
-}
-
-static void __release_logical_link(struct l2cap_chan *chan)
-{
- chan->hs_hchan = NULL;
- chan->hs_hcon = NULL;
-
- /* Placeholder - release the logical link */
-}
-
-static void l2cap_logical_fail(struct l2cap_chan *chan)
-{
- /* Logical link setup failed */
- if (chan->state != BT_CONNECTED) {
- /* Create channel failure, disconnect */
- l2cap_send_disconn_req(chan, ECONNRESET);
- return;
- }
-
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_move_done(chan);
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
- break;
- case L2CAP_MOVE_ROLE_INITIATOR:
- if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
- chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
- /* Remote has only sent pending or
- * success responses, clean up
- */
- l2cap_move_done(chan);
- }
-
- /* Other amp move states imply that the move
- * has already aborted
- */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-}
-
-static void l2cap_logical_finish_create(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- struct l2cap_conf_rsp rsp;
-
- chan->hs_hchan = hchan;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
-
- if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
- int err;
-
- set_default_fcs(chan);
-
- err = l2cap_ertm_init(chan);
- if (err < 0)
- l2cap_send_disconn_req(chan, -err);
- else
- l2cap_chan_ready(chan);
- }
-}
-
-static void l2cap_logical_finish_move(struct l2cap_chan *chan,
- struct hci_chan *hchan)
-{
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- BT_DBG("move_state %d", chan->move_state);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent after a success
- * response is received
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- break;
- case L2CAP_MOVE_WAIT_LOGICAL_CFM:
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
- }
- break;
- default:
- /* Move was not in expected state, free the channel */
- __release_logical_link(chan);
-
- chan->move_state = L2CAP_MOVE_STABLE;
- }
-}
-
-/* Call with chan locked */
-void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
- u8 status)
-{
- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
-
- if (status) {
- l2cap_logical_fail(chan);
- __release_logical_link(chan);
- return;
- }
-
- if (chan->state != BT_CONNECTED) {
- /* Ignore logical link if channel is on BR/EDR */
- if (chan->local_amp_id != AMP_ID_BREDR)
- l2cap_logical_finish_create(chan, hchan);
- } else {
- l2cap_logical_finish_move(chan, hchan);
- }
-}
-
-void l2cap_move_start(struct l2cap_chan *chan)
-{
- BT_DBG("chan %p", chan);
-
- if (chan->local_amp_id == AMP_ID_BREDR) {
- if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
- return;
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - start physical link setup */
- } else {
- chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- chan->move_id = 0;
- l2cap_move_setup(chan);
- l2cap_send_move_chan_req(chan, 0);
- }
-}
-
-static void l2cap_do_create(struct l2cap_chan *chan, int result,
- u8 local_amp_id, u8 remote_amp_id)
-{
- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
- local_amp_id, remote_amp_id);
-
- chan->fcs = L2CAP_FCS_NONE;
-
- /* Outgoing channel on AMP */
- if (chan->state == BT_CONNECT) {
- if (result == L2CAP_CR_SUCCESS) {
- chan->local_amp_id = local_amp_id;
- l2cap_send_create_chan_req(chan, remote_amp_id);
- } else {
- /* Revert to BR/EDR connect */
- l2cap_send_conn_req(chan);
- }
-
- return;
- }
-
- /* Incoming channel on AMP */
- if (__l2cap_no_conn_pending(chan)) {
- struct l2cap_conn_rsp rsp;
- char buf[128];
- rsp.scid = cpu_to_le16(chan->dcid);
- rsp.dcid = cpu_to_le16(chan->scid);
-
- if (result == L2CAP_CR_SUCCESS) {
- /* Send successful response */
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- } else {
- /* Send negative response */
- rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
- }
-
- l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
- sizeof(rsp), &rsp);
-
- if (result == L2CAP_CR_SUCCESS) {
- l2cap_state_change(chan, BT_CONFIG);
- set_bit(CONF_REQ_SENT, &chan->conf_state);
- l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- chan->num_conf_req++;
- }
- }
-}
-
-static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
- u8 remote_amp_id)
-{
- l2cap_move_setup(chan);
- chan->move_id = local_amp_id;
- chan->move_state = L2CAP_MOVE_WAIT_RSP;
-
- l2cap_send_move_chan_req(chan, remote_amp_id);
-}
-
-static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
-{
- struct hci_chan *hchan = NULL;
-
- /* Placeholder - get hci_chan for logical link */
-
- if (hchan) {
- if (hchan->state == BT_CONNECTED) {
- /* Logical link is ready to go */
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- } else {
- /* Wait for logical link to be ready */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- }
- } else {
- /* Logical link not available */
- l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
- }
-}
-
-static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
-{
- if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
- u8 rsp_result;
- if (result == -EINVAL)
- rsp_result = L2CAP_MR_BAD_ID;
- else
- rsp_result = L2CAP_MR_NOT_ALLOWED;
-
- l2cap_send_move_chan_rsp(chan, rsp_result);
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_NONE;
- chan->move_state = L2CAP_MOVE_STABLE;
-
- /* Restart data transmission */
- l2cap_ertm_send(chan);
-}
-
-/* Invoke with locked chan */
-void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
-{
- u8 local_amp_id = chan->local_amp_id;
- u8 remote_amp_id = chan->remote_amp_id;
-
- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
- chan, result, local_amp_id, remote_amp_id);
-
- if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
- return;
-
- if (chan->state != BT_CONNECTED) {
- l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
- } else if (result != L2CAP_MR_SUCCESS) {
- l2cap_do_move_cancel(chan, result);
- } else {
- switch (chan->move_role) {
- case L2CAP_MOVE_ROLE_INITIATOR:
- l2cap_do_move_initiate(chan, local_amp_id,
- remote_amp_id);
- break;
- case L2CAP_MOVE_ROLE_RESPONDER:
- l2cap_do_move_respond(chan, result);
- break;
- default:
- l2cap_do_move_cancel(chan, result);
- break;
- }
- }
-}
-
-static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_req *req = data;
- struct l2cap_move_chan_rsp rsp;
- struct l2cap_chan *chan;
- u16 icid = 0;
- u16 result = L2CAP_MR_NOT_ALLOWED;
-
- if (cmd_len != sizeof(*req))
- return -EPROTO;
-
- icid = le16_to_cpu(req->icid);
-
- BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
-
- if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
- return -EINVAL;
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- rsp.icid = cpu_to_le16(icid);
- rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
- l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
- sizeof(rsp), &rsp);
- return 0;
- }
-
- chan->ident = cmd->ident;
-
- if (chan->scid < L2CAP_CID_DYN_START ||
- chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
- (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING)) {
- result = L2CAP_MR_NOT_ALLOWED;
- goto send_move_response;
- }
-
- if (chan->local_amp_id == req->dest_amp_id) {
- result = L2CAP_MR_SAME_ID;
- goto send_move_response;
- }
-
- if (req->dest_amp_id != AMP_ID_BREDR) {
- struct hci_dev *hdev;
- hdev = hci_dev_get(req->dest_amp_id);
- if (!hdev || hdev->dev_type != HCI_AMP ||
- !test_bit(HCI_UP, &hdev->flags)) {
- if (hdev)
- hci_dev_put(hdev);
-
- result = L2CAP_MR_BAD_ID;
- goto send_move_response;
- }
- hci_dev_put(hdev);
- }
-
- /* Detect a move collision. Only send a collision response
- * if this side has "lost", otherwise proceed with the move.
- * The winner has the larger bd_addr.
- */
- if ((__chan_is_moving(chan) ||
- chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
- bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
- result = L2CAP_MR_COLLISION;
- goto send_move_response;
- }
-
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- l2cap_move_setup(chan);
- chan->move_id = req->dest_amp_id;
-
- if (req->dest_amp_id == AMP_ID_BREDR) {
- /* Moving to BR/EDR */
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- result = L2CAP_MR_PEND;
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
- result = L2CAP_MR_SUCCESS;
- }
- } else {
- chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
- /* Placeholder - uncomment when amp functions are available */
- /*amp_accept_physical(chan, req->dest_amp_id);*/
- result = L2CAP_MR_PEND;
- }
-
-send_move_response:
- l2cap_send_move_chan_rsp(chan, result);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
-{
- struct l2cap_chan *chan;
- struct hci_chan *hchan = NULL;
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan) {
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
- if (result == L2CAP_MR_PEND)
- __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
-
- switch (chan->move_state) {
- case L2CAP_MOVE_WAIT_LOGICAL_COMP:
- /* Move confirm will be sent when logical link
- * is complete.
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- break;
- case L2CAP_MOVE_WAIT_RSP_SUCCESS:
- if (result == L2CAP_MR_PEND) {
- break;
- } else if (test_bit(CONN_LOCAL_BUSY,
- &chan->conn_state)) {
- chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
- } else {
- /* Logical link is up or moving to BR/EDR,
- * proceed with move
- */
- chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- }
- break;
- case L2CAP_MOVE_WAIT_RSP:
- /* Moving to AMP */
- if (result == L2CAP_MR_SUCCESS) {
- /* Remote is ready, send confirm immediately
- * after logical link is ready
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
- } else {
- /* Both logical link and move success
- * are required to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
- }
-
- /* Placeholder - get hci_chan for logical link */
- if (!hchan) {
- /* Logical link not available */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- break;
- }
-
- /* If the logical link is not yet connected, do not
- * send confirmation.
- */
- if (hchan->state != BT_CONNECTED)
- break;
-
- /* Logical link is already ready to go */
-
- chan->hs_hcon = hchan->conn;
- chan->hs_hcon->l2cap_data = chan->conn;
-
- if (result == L2CAP_MR_SUCCESS) {
- /* Can confirm now */
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
- } else {
- /* Now only need move success
- * to confirm
- */
- chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
- }
-
- l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
- break;
- default:
- /* Any other amp move state means the move failed. */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
- u16 result)
-{
- struct l2cap_chan *chan;
-
- chan = l2cap_get_chan_by_ident(conn, ident);
- if (!chan) {
- /* Could not locate channel, icid is best guess */
- l2cap_send_move_chan_cfm_icid(conn, icid);
- return;
- }
-
- __clear_chan_timer(chan);
-
- if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
- if (result == L2CAP_MR_COLLISION) {
- chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
- } else {
- /* Cleanup - cancel move */
- chan->move_id = chan->local_amp_id;
- l2cap_move_done(chan);
- }
- }
-
- l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-}
-
-static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_rsp *rsp = data;
- u16 icid, result;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
- result = le16_to_cpu(rsp->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
- l2cap_move_continue(conn, icid, result);
- else
- l2cap_move_fail(conn, cmd->ident, icid, result);
-
- return 0;
-}
-
-static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm *cfm = data;
- struct l2cap_chan *chan;
- u16 icid, result;
-
- if (cmd_len != sizeof(*cfm))
- return -EPROTO;
-
- icid = le16_to_cpu(cfm->icid);
- result = le16_to_cpu(cfm->result);
-
- BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
-
- chan = l2cap_get_chan_by_dcid(conn, icid);
- if (!chan) {
- /* Spec requires a response even if the icid was not found */
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
- return 0;
- }
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
- if (result == L2CAP_MC_CONFIRMED) {
- chan->local_amp_id = chan->move_id;
- if (chan->local_amp_id == AMP_ID_BREDR)
- __release_logical_link(chan);
- } else {
- chan->move_id = chan->local_amp_id;
- }
-
- l2cap_move_done(chan);
- }
-
- l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
-static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd,
- u16 cmd_len, void *data)
-{
- struct l2cap_move_chan_cfm_rsp *rsp = data;
- struct l2cap_chan *chan;
- u16 icid;
-
- if (cmd_len != sizeof(*rsp))
- return -EPROTO;
-
- icid = le16_to_cpu(rsp->icid);
-
- BT_DBG("icid 0x%4.4x", icid);
-
- chan = l2cap_get_chan_by_scid(conn, icid);
- if (!chan)
- return 0;
-
- __clear_chan_timer(chan);
-
- if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
- chan->local_amp_id = chan->move_id;
-
- if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
- __release_logical_link(chan);
-
- l2cap_move_done(chan);
- }
-
- l2cap_chan_unlock(chan);
- l2cap_chan_put(chan);
-
- return 0;
-}
-
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd,
u16 cmd_len, u8 *data)
@@ -5745,7 +4761,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
break;
case L2CAP_CONN_RSP:
- case L2CAP_CREATE_CHAN_RSP:
l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
break;
@@ -5780,26 +4795,6 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
l2cap_information_rsp(conn, cmd, cmd_len, data);
break;
- case L2CAP_CREATE_CHAN_REQ:
- err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_REQ:
- err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_RSP:
- l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM:
- err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
- break;
-
- case L2CAP_MOVE_CHAN_CFM_RSP:
- l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
- break;
-
default:
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
err = -EINVAL;
@@ -7051,8 +6046,8 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
if (control->final) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
- !__chan_is_moving(chan)) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
control->final = 0;
l2cap_retransmit_all(chan, control);
}
@@ -7245,11 +6240,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan)
BT_DBG("chan %p", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
return l2cap_resegment(chan);
}
@@ -7316,11 +6307,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
*/
chan->next_tx_seq = control->reqseq;
chan->unacked_frames = 0;
-
- if (chan->hs_hcon)
- chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
- else
- chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
+ chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
err = l2cap_resegment(chan);
@@ -7672,21 +6659,10 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- if (cid == L2CAP_CID_A2MP) {
- chan = a2mp_channel_create(conn, skb);
- if (!chan) {
- kfree_skb(skb);
- return;
- }
-
- l2cap_chan_hold(chan);
- l2cap_chan_lock(chan);
- } else {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return;
- }
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -7887,10 +6863,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
- if (hcon->type == ACL_LINK &&
- hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
- conn->local_fixed_chan |= L2CAP_FC_A2MP;
-
if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
(bredr_sc_enabled(hcon->hdev) ||
hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
@@ -8355,11 +7327,6 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
- if (chan->scid == L2CAP_CID_A2MP) {
- l2cap_chan_unlock(chan);
- continue;
- }
-
if (!status && encrypt)
chan->sec_level = hcon->sec_level;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e50d3d1020..ee7a41d699 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1027,23 +1027,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
- err = -EINVAL;
- break;
- }
-
- if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
- err = -EOPNOTSUPP;
- break;
- }
-
- chan->chan_policy = (u8) opt;
-
- if (sk->sk_state == BT_CONNECTED &&
- chan->move_role == L2CAP_MOVE_ROLE_NONE)
- l2cap_move_start(chan);
-
+ err = -EOPNOTSUPP;
break;
case BT_SNDMTU:
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 9dd815b660..92fd3786bb 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -835,8 +835,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- if (IS_ENABLED(CONFIG_BT_HS))
- settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -901,9 +899,6 @@ static u32 get_current_settings(struct hci_dev *hdev)
if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
settings |= MGMT_SETTING_SSP;
- if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
- settings |= MGMT_SETTING_HS;
-
if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
settings |= MGMT_SETTING_ADVERTISING;
@@ -1045,6 +1040,8 @@ static void rpa_expired(struct work_struct *work)
hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
+static int set_discoverable_sync(struct hci_dev *hdev, void *data);
+
static void discov_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -1063,7 +1060,7 @@ static void discov_off(struct work_struct *work)
hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
hdev->discov_timeout = 0;
- hci_update_discoverable(hdev);
+ hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);
mgmt_new_settings(hdev);
@@ -1407,7 +1404,7 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
/* Cancel potentially blocking sync operation before power off */
if (cp->val == 0x00) {
- __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+ hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
mgmt_set_powered_complete);
} else {
@@ -1928,7 +1925,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
if (enable && hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
new_settings(hdev, NULL);
}
@@ -1941,12 +1937,6 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
} else {
changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
-
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
@@ -2010,11 +2000,6 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
} else {
changed = hci_dev_test_and_clear_flag(hdev,
HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -2060,63 +2045,10 @@ failed:
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
- struct mgmt_mode *cp = data;
- bool changed;
- u8 status;
- int err;
-
bt_dev_dbg(hdev, "sock %p", sk);
- if (!IS_ENABLED(CONFIG_BT_HS))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_NOT_SUPPORTED);
-
- status = mgmt_bredr_support(hdev);
- if (status)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
-
- if (!lmp_ssp_capable(hdev))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
MGMT_STATUS_NOT_SUPPORTED);
-
- if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
-
- if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_INVALID_PARAMS);
-
- hci_dev_lock(hdev);
-
- if (pending_find(MGMT_OP_SET_SSP, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- if (cp->val) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
- } else {
- if (hdev_is_powered(hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
- MGMT_STATUS_REJECTED);
- goto unlock;
- }
-
- changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
- if (err < 0)
- goto unlock;
-
- if (changed)
- err = new_settings(hdev, sk);
-
-unlock:
- hci_dev_unlock(hdev);
- return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
@@ -3186,6 +3118,7 @@ failed:
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
switch (link_type) {
+ case ISO_LINK:
case LE_LINK:
switch (addr_type) {
case ADDR_LE_DEV_PUBLIC:
@@ -6764,7 +6697,6 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
}
hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
@@ -8468,7 +8400,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
static u8 calculate_name_len(struct hci_dev *hdev)
{
- u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
+ u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */
return eir_append_local_name(hdev, buf, 0);
}
@@ -9679,6 +9611,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u16 eir_len = 0;
u32 flags = 0;
+ if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
+ return;
+
/* allocate buff for LE or BR/EDR adv */
if (conn->le_adv_data_len > 0)
skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
@@ -9764,14 +9699,6 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct mgmt_ev_device_disconnected ev;
struct sock *sk = NULL;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
if (!mgmt_connected)
return;
@@ -9828,14 +9755,6 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
{
struct mgmt_ev_connect_failed ev;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-
bacpy(&ev.addr.bdaddr, bdaddr);
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.status = mgmt_status(status);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 630e302327..9612c5d1b1 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -875,6 +875,7 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
remove = true;
goto done;
}
+
cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
cp->rssi_high = address_filter->rssi_high;
cp->rssi_low = address_filter->rssi_low;
@@ -887,6 +888,8 @@ static int msft_add_address_filter_sync(struct hci_dev *hdev, void *data)
skb = __hci_cmd_sync(hdev, hdev->msft_opcode, size, cp,
HCI_CMD_TIMEOUT);
+ kfree(cp);
+
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Failed to enable address %pMR filter",
&address_filter->bdaddr);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 053ef8f25f..1d34d84970 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1941,7 +1941,7 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
skb_orphan(skb);
- if (!skb_linearize(skb)) {
+ if (!skb_linearize(skb) && sk->sk_state != BT_CLOSED) {
s = rfcomm_recv_frame(s, skb);
if (!s)
break;
diff --git a/net/core/dev.c b/net/core/dev.c
index add22ca0df..e3c06ccf21 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2262,7 +2262,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
again:
list_for_each_entry_rcu(ptype, ptype_list, list) {
- if (ptype->ignore_outgoing)
+ if (READ_ONCE(ptype->ignore_outgoing))
continue;
/* Never send packets back to the socket
@@ -6668,6 +6668,8 @@ static int napi_threaded_poll(void *data)
void *have;
while (!napi_thread_wait(napi)) {
+ unsigned long last_qs = jiffies;
+
for (;;) {
bool repoll = false;
@@ -6692,6 +6694,7 @@ static int napi_threaded_poll(void *data)
if (!repoll)
break;
+ rcu_softirq_qs_periodic(last_qs);
cond_resched();
}
}
diff --git a/net/core/gso_test.c b/net/core/gso_test.c
index 4c2e77bd12..358c44680d 100644
--- a/net/core/gso_test.c
+++ b/net/core/gso_test.c
@@ -225,7 +225,7 @@ static void gso_test_func(struct kunit *test)
segs = skb_segment(skb, features);
if (IS_ERR(segs)) {
- KUNIT_FAIL(test, "segs error %lld", PTR_ERR(segs));
+ KUNIT_FAIL(test, "segs error %pe", segs);
goto free_gso_skb;
} else if (!segs) {
KUNIT_FAIL(test, "no segments");
diff --git a/net/core/scm.c b/net/core/scm.c
index 7dc47c17d8..737917c7ac 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -105,7 +105,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
if (fd < 0 || !(file = fget_raw(fd)))
return -EBADF;
/* don't allow io_uring files */
- if (io_uring_get_socket(file)) {
+ if (io_is_uring_fops(file)) {
fput(file);
return -EINVAL;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 94cc40a6f7..78cb3304fb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6677,6 +6677,14 @@ static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
xfrm_state_hold(sp->xvec[i]);
}
#endif
+#ifdef CONFIG_MCTP_FLOWS
+ if (old_active & (1 << SKB_EXT_MCTP)) {
+ struct mctp_flow *flow = skb_ext_get_ptr(old, SKB_EXT_MCTP);
+
+ if (flow->key)
+ refcount_inc(&flow->key->refs);
+ }
+#endif
__skb_ext_put(old);
return new;
}
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b1e29e18d1..c53b731f2d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -193,7 +193,7 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
if (sock_diag_handlers[hndl->family])
err = -EBUSY;
else
- sock_diag_handlers[hndl->family] = hndl;
+ WRITE_ONCE(sock_diag_handlers[hndl->family], hndl);
mutex_unlock(&sock_diag_table_mutex);
return err;
@@ -209,7 +209,7 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
mutex_lock(&sock_diag_table_mutex);
BUG_ON(sock_diag_handlers[family] != hnld);
- sock_diag_handlers[family] = NULL;
+ WRITE_ONCE(sock_diag_handlers[family], NULL);
mutex_unlock(&sock_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
@@ -227,7 +227,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX);
- if (sock_diag_handlers[req->sdiag_family] == NULL)
+ if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL)
sock_load_diag_module(req->sdiag_family, 0);
mutex_lock(&sock_diag_table_mutex);
@@ -286,12 +286,12 @@ static int sock_diag_bind(struct net *net, int group)
switch (group) {
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET])
+ if (!READ_ONCE(sock_diag_handlers[AF_INET]))
sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
- if (!sock_diag_handlers[AF_INET6])
+ if (!READ_ONCE(sock_diag_handlers[AF_INET6]))
sock_load_diag_module(AF_INET6, 0);
break;
}
diff --git a/net/devlink/core.c b/net/devlink/core.c
index bc3d265fe2..7f0b093208 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -503,14 +503,14 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net)
* all devlink instances from this namespace into init_net.
*/
devlinks_xa_for_each_registered_get(net, index, devlink) {
- devl_lock(devlink);
+ devl_dev_lock(devlink, true);
err = 0;
if (devl_is_registered(devlink))
err = devlink_reload(devlink, &init_net,
DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
DEVLINK_RELOAD_LIMIT_UNSPEC,
&actions_performed, NULL);
- devl_unlock(devlink);
+ devl_dev_unlock(devlink, true);
devlink_put(devlink);
if (err && err != -EOPNOTSUPP)
pr_warn("Failed to reload devlink instance into init_net\n");
diff --git a/net/devlink/devl_internal.h b/net/devlink/devl_internal.h
index 183dbe3807..5ea2e2012e 100644
--- a/net/devlink/devl_internal.h
+++ b/net/devlink/devl_internal.h
@@ -3,6 +3,7 @@
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
+#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
@@ -96,6 +97,20 @@ static inline bool devl_is_registered(struct devlink *devlink)
return xa_get_mark(&devlinks, devlink->index, DEVLINK_REGISTERED);
}
+static inline void devl_dev_lock(struct devlink *devlink, bool dev_lock)
+{
+ if (dev_lock)
+ device_lock(devlink->dev);
+ devl_lock(devlink);
+}
+
+static inline void devl_dev_unlock(struct devlink *devlink, bool dev_lock)
+{
+ devl_unlock(devlink);
+ if (dev_lock)
+ device_unlock(devlink->dev);
+}
+
typedef void devlink_rel_notify_cb_t(struct devlink *devlink, u32 obj_index);
typedef void devlink_rel_cleanup_cb_t(struct devlink *devlink, u32 obj_index,
u32 rel_index);
@@ -111,9 +126,6 @@ int devlink_rel_devlink_handle_put(struct sk_buff *msg, struct devlink *devlink,
bool *msg_updated);
/* Netlink */
-#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
-#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
-
enum devlink_multicast_groups {
DEVLINK_MCGRP_CONFIG,
};
@@ -140,7 +152,8 @@ typedef int devlink_nl_dump_one_func_t(struct sk_buff *msg,
int flags);
struct devlink *
-devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs);
+devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs,
+ bool dev_lock);
int devlink_nl_dumpit(struct sk_buff *msg, struct netlink_callback *cb,
devlink_nl_dump_one_func_t *dump_one);
diff --git a/net/devlink/health.c b/net/devlink/health.c
index 695df61f8a..71ae121dc7 100644
--- a/net/devlink/health.c
+++ b/net/devlink/health.c
@@ -1151,7 +1151,8 @@ devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
struct nlattr **attrs = info->attrs;
struct devlink *devlink;
- devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
+ devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs,
+ false);
if (IS_ERR(devlink))
return NULL;
diff --git a/net/devlink/netlink.c b/net/devlink/netlink.c
index d0b90ebc8b..0f41fded6a 100644
--- a/net/devlink/netlink.c
+++ b/net/devlink/netlink.c
@@ -9,6 +9,10 @@
#include "devl_internal.h"
+#define DEVLINK_NL_FLAG_NEED_PORT BIT(0)
+#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1)
+#define DEVLINK_NL_FLAG_NEED_DEV_LOCK BIT(2)
+
static const struct genl_multicast_group devlink_nl_mcgrps[] = {
[DEVLINK_MCGRP_CONFIG] = { .name = DEVLINK_GENL_MCGRP_CONFIG_NAME },
};
@@ -61,7 +65,8 @@ int devlink_nl_msg_reply_and_new(struct sk_buff **msg, struct genl_info *info)
}
struct devlink *
-devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
+devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs,
+ bool dev_lock)
{
struct devlink *devlink;
unsigned long index;
@@ -75,12 +80,13 @@ devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
devname = nla_data(attrs[DEVLINK_ATTR_DEV_NAME]);
devlinks_xa_for_each_registered_get(net, index, devlink) {
- devl_lock(devlink);
- if (devl_is_registered(devlink) &&
- strcmp(devlink->dev->bus->name, busname) == 0 &&
- strcmp(dev_name(devlink->dev), devname) == 0)
- return devlink;
- devl_unlock(devlink);
+ if (strcmp(devlink->dev->bus->name, busname) == 0 &&
+ strcmp(dev_name(devlink->dev), devname) == 0) {
+ devl_dev_lock(devlink, dev_lock);
+ if (devl_is_registered(devlink))
+ return devlink;
+ devl_dev_unlock(devlink, dev_lock);
+ }
devlink_put(devlink);
}
@@ -90,11 +96,13 @@ devlink_get_from_attrs_lock(struct net *net, struct nlattr **attrs)
static int __devlink_nl_pre_doit(struct sk_buff *skb, struct genl_info *info,
u8 flags)
{
+ bool dev_lock = flags & DEVLINK_NL_FLAG_NEED_DEV_LOCK;
struct devlink_port *devlink_port;
struct devlink *devlink;
int err;
- devlink = devlink_get_from_attrs_lock(genl_info_net(info), info->attrs);
+ devlink = devlink_get_from_attrs_lock(genl_info_net(info), info->attrs,
+ dev_lock);
if (IS_ERR(devlink))
return PTR_ERR(devlink);
@@ -114,7 +122,7 @@ static int __devlink_nl_pre_doit(struct sk_buff *skb, struct genl_info *info,
return 0;
unlock:
- devl_unlock(devlink);
+ devl_dev_unlock(devlink, dev_lock);
devlink_put(devlink);
return err;
}
@@ -138,16 +146,23 @@ int devlink_nl_pre_doit_port_optional(const struct genl_split_ops *ops,
return __devlink_nl_pre_doit(skb, info, DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT);
}
-void devlink_nl_post_doit(const struct genl_split_ops *ops,
- struct sk_buff *skb, struct genl_info *info)
+static void __devlink_nl_post_doit(struct sk_buff *skb, struct genl_info *info,
+ u8 flags)
{
+ bool dev_lock = flags & DEVLINK_NL_FLAG_NEED_DEV_LOCK;
struct devlink *devlink;
devlink = info->user_ptr[0];
- devl_unlock(devlink);
+ devl_dev_unlock(devlink, dev_lock);
devlink_put(devlink);
}
+void devlink_nl_post_doit(const struct genl_split_ops *ops,
+ struct sk_buff *skb, struct genl_info *info)
+{
+ __devlink_nl_post_doit(skb, info, 0);
+}
+
static int devlink_nl_inst_single_dumpit(struct sk_buff *msg,
struct netlink_callback *cb, int flags,
devlink_nl_dump_one_func_t *dump_one,
@@ -156,7 +171,7 @@ static int devlink_nl_inst_single_dumpit(struct sk_buff *msg,
struct devlink *devlink;
int err;
- devlink = devlink_get_from_attrs_lock(sock_net(msg->sk), attrs);
+ devlink = devlink_get_from_attrs_lock(sock_net(msg->sk), attrs, false);
if (IS_ERR(devlink))
return PTR_ERR(devlink);
err = dump_one(msg, devlink, cb, flags | NLM_F_DUMP_FILTERED);
diff --git a/net/devlink/netlink_gen.c b/net/devlink/netlink_gen.c
index 788dfdc498..371f27f653 100644
--- a/net/devlink/netlink_gen.c
+++ b/net/devlink/netlink_gen.c
@@ -198,7 +198,7 @@ static const struct nla_policy devlink_eswitch_set_nl_policy[DEVLINK_ATTR_ESWITC
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING, },
[DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_MAX(NLA_U16, 1),
- [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U16, 3),
+ [DEVLINK_ATTR_ESWITCH_INLINE_MODE] = NLA_POLICY_MAX(NLA_U8, 3),
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = NLA_POLICY_MAX(NLA_U8, 1),
};
diff --git a/net/devlink/port.c b/net/devlink/port.c
index d39ee6053c..2b3c2b1a3e 100644
--- a/net/devlink/port.c
+++ b/net/devlink/port.c
@@ -887,7 +887,7 @@ int devlink_nl_port_new_doit(struct sk_buff *skb, struct genl_info *info)
err = -ENOMEM;
goto err_out_port_del;
}
- err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW,
+ err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW,
info->snd_portid, info->snd_seq, 0, NULL);
if (WARN_ON_ONCE(err))
goto err_out_msg_free;
diff --git a/net/devlink/region.c b/net/devlink/region.c
index 0aab7b82d6..e3bab458db 100644
--- a/net/devlink/region.c
+++ b/net/devlink/region.c
@@ -883,7 +883,8 @@ int devlink_nl_region_read_dumpit(struct sk_buff *skb,
start_offset = state->start_offset;
- devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
+ devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs,
+ false);
if (IS_ERR(devlink))
return PTR_ERR(devlink);
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 6d14d935ee..26329db092 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -228,6 +228,10 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db,
*/
if (ethhdr->h_proto == htons(ETH_P_PRP) ||
ethhdr->h_proto == htons(ETH_P_HSR)) {
+ /* Check if skb contains hsr_ethhdr */
+ if (skb->mac_len < sizeof(struct hsr_ethhdr))
+ return NULL;
+
/* Use the existing sequence_nr from the tag as starting point
* for filtering duplicate frames.
*/
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index b099c31501..257b50124c 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = {
static int __init hsr_init(void)
{
- int res;
+ int err;
BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
- register_netdevice_notifier(&hsr_nb);
- res = hsr_netlink_init();
+ err = register_netdevice_notifier(&hsr_nb);
+ if (err)
+ return err;
+
+ err = hsr_netlink_init();
+ if (err) {
+ unregister_netdevice_notifier(&hsr_nb);
+ return err;
+ }
- return res;
+ return 0;
}
static void __exit hsr_exit(void)
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 4ccfc104f1..fe501d2186 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -95,7 +95,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
int extralen = 0;
@@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- put_page(sg_page(sg));
+ skb_page_unref(skb, sg_page(sg), false);
}
#ifdef CONFIG_INET_ESPINTCP
@@ -260,7 +260,7 @@ static void esp_output_done(void *data, int err)
}
tmp = ESP_SKB_CB(skb)->tmp;
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
kfree(tmp);
if (xo && (xo->flags & XFRM_DEV_RESUME)) {
@@ -639,7 +639,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
}
if (sg != dsg)
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
err = esp_output_tail_tcp(x, skb);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7d0e7aaa71..5f7fdbd01c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -57,7 +57,7 @@ static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
return ERR_PTR(-ENOENT);
}
- if (!inet_diag_table[proto])
+ if (!READ_ONCE(inet_diag_table[proto]))
sock_load_diag_module(AF_INET, proto);
mutex_lock(&inet_diag_table_mutex);
@@ -1419,7 +1419,7 @@ int inet_diag_register(const struct inet_diag_handler *h)
mutex_lock(&inet_diag_table_mutex);
err = -EEXIST;
if (!inet_diag_table[type]) {
- inet_diag_table[type] = h;
+ WRITE_ONCE(inet_diag_table[type], h);
err = 0;
}
mutex_unlock(&inet_diag_table_mutex);
@@ -1436,7 +1436,7 @@ void inet_diag_unregister(const struct inet_diag_handler *h)
return;
mutex_lock(&inet_diag_table_mutex);
- inet_diag_table[type] = NULL;
+ WRITE_ONCE(inet_diag_table[type], NULL);
mutex_unlock(&inet_diag_table_mutex);
}
EXPORT_SYMBOL_GPL(inet_diag_unregister);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 9456bf9e27..7967ff7e02 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -1137,7 +1137,7 @@ error:
sock_prot_inuse_add(net, sk->sk_prot, -1);
spin_lock(lock);
- sk_nulls_del_node_init_rcu(sk);
+ __sk_nulls_del_node_init_rcu(sk);
spin_unlock(lock);
sk->sk_hash = 0;
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index dd37a5bf68..757ae3a4e2 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -278,12 +278,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
}
EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
{
- struct inet_timewait_sock *tw;
- struct sock *sk;
struct hlist_nulls_node *node;
unsigned int slot;
+ struct sock *sk;
for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
@@ -292,38 +292,35 @@ restart_rcu:
rcu_read_lock();
restart:
sk_nulls_for_each_rcu(sk, node, &head->chain) {
- if (sk->sk_state != TCP_TIME_WAIT) {
- /* A kernel listener socket might not hold refcnt for net,
- * so reqsk_timer_handler() could be fired after net is
- * freed. Userspace listener and reqsk never exist here.
- */
- if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
- hashinfo->pernet)) {
- struct request_sock *req = inet_reqsk(sk);
-
- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
- }
+ int state = inet_sk_state_load(sk);
+ if ((1 << state) & ~(TCPF_TIME_WAIT |
+ TCPF_NEW_SYN_RECV))
continue;
- }
- tw = inet_twsk(sk);
- if ((tw->tw_family != family) ||
- refcount_read(&twsk_net(tw)->ns.count))
+ if (sk->sk_family != family ||
+ refcount_read(&sock_net(sk)->ns.count))
continue;
- if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+ if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
continue;
- if (unlikely((tw->tw_family != family) ||
- refcount_read(&twsk_net(tw)->ns.count))) {
- inet_twsk_put(tw);
+ if (unlikely(sk->sk_family != family ||
+ refcount_read(&sock_net(sk)->ns.count))) {
+ sock_gen_put(sk);
goto restart;
}
rcu_read_unlock();
local_bh_disable();
- inet_twsk_deschedule_put(tw);
+ if (state == TCP_TIME_WAIT) {
+ inet_twsk_deschedule_put(inet_twsk(sk));
+ } else {
+ struct request_sock *req = inet_reqsk(sk);
+
+ inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+ req);
+ }
local_bh_enable();
goto restart_rcu;
}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 2d29fce7c5..b1b6dcf216 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -378,7 +378,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
bool log_ecn_error)
{
const struct iphdr *iph = ip_hdr(skb);
- int err;
+ int nh, err;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
@@ -404,8 +404,21 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tunnel->i_seqno = ntohl(tpi->seq) + 1;
}
+ /* Save offset of outer header relative to skb->head,
+ * because we are going to reset the network header to the inner header
+ * and might change skb->head.
+ */
+ nh = skb_network_header(skb) - skb->head;
+
skb_set_network_header(skb, (tunnel->dev->type == ARPHRD_ETHER) ? ETH_HLEN : 0);
+ if (!pskb_inet_may_pull(skb)) {
+ DEV_STATS_INC(tunnel->dev, rx_length_errors);
+ DEV_STATS_INC(tunnel->dev, rx_errors);
+ goto drop;
+ }
+ iph = (struct iphdr *)(skb->head + nh);
+
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
if (log_ecn_error)
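
The hunk above records the outer header as an offset from skb->head because pskb_inet_may_pull() may reallocate the head and leave the old iph pointer dangling; the pointer is then recomputed from the saved offset. The same rule applies to any buffer that can move, as in this small userspace sketch (buffer layout and names invented for the example).

#include <stdio.h>
#include <stdlib.h>

struct hdr { unsigned char version; };

int main(void)
{
    unsigned char *head, *bigger;
    struct hdr *h;
    size_t off;

    head = calloc(1, 32);
    if (!head)
        return 1;
    head[8] = 4;                        /* pretend the outer header sits at offset 8 */

    off = 8;                            /* save the offset, not the pointer */

    bigger = realloc(head, 4096);       /* like a pull that reallocates, this may move the data */
    if (!bigger) {
        free(head);
        return 1;
    }
    head = bigger;

    h = (struct hdr *)(head + off);     /* recompute the pointer after the move */
    printf("outer header version %u\n", h->version);

    free(head);
    return 0;
}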
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index e49242706b..66eade3fb6 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1603,9 +1603,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
return -EFAULT;
- olr = min_t(unsigned int, olr, sizeof(int));
if (olr < 0)
return -EINVAL;
+
+ olr = min_t(unsigned int, olr, sizeof(int));
+
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &val, olr))
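
This reordering, repeated below for the TCP, UDP, KCM, PPPoL2TP and X.25 getsockopt handlers, closes the same hole each time: clamping with an unsigned min_t() before the sign check turns a negative user-supplied length into sizeof(int), so the -EINVAL path can never fire. A short demo of the arithmetic; min_t() is re-created here in simplified form (the kernel macro avoids double evaluation).

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
    int len = -1;                       /* length as supplied by user space */
    int clamped;

    /* old order: (unsigned int)-1 is 4294967295, so min_t() quietly picks 4 */
    clamped = min_t(unsigned int, len, sizeof(int));
    printf("clamp-then-check turns len=-1 into %d\n", clamped);

    /* fixed order: reject negative lengths before the unsigned comparison */
    if (len < 0)
        printf("check-then-clamp rejects len=-1 with -EINVAL\n");
    else
        clamped = min_t(unsigned int, len, sizeof(int));

    return 0;
}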
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index aea89326c6..288f1846b3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -350,6 +350,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
goto error;
skb_reserve(skb, hlen);
+ skb->protocol = htons(ETH_P_IP);
skb->priority = READ_ONCE(sk->sk_priority);
skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b30ef770a6..0d03d48702 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -4011,11 +4011,11 @@ int do_tcp_getsockopt(struct sock *sk, int level,
if (copy_from_sockptr(&len, optlen, sizeof(int)))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case TCP_MAXSEG:
val = tp->mss_cache;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9e85f2a0bd..0ecc7311dc 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -398,10 +398,6 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
/* Even if tw_refcount == 1, we must clean up kernel reqsk */
inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
} else if (!purged_once) {
- /* The last refcount is decremented in tcp_sk_exit_batch() */
- if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
- continue;
-
inet_twsk_purge(&tcp_hashinfo, family);
purged_once = true;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e474b20190..17231c0f88 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2792,11 +2792,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case UDP_CORK:
val = udp_test_bit(CORK, sk);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 2cc1a45742..a3fa3eda38 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -112,7 +112,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
-static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb)
{
struct crypto_aead *aead = x->data;
int extralen = 0;
@@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
*/
if (req->src != req->dst)
for (sg = sg_next(req->src); sg; sg = sg_next(sg))
- put_page(sg_page(sg));
+ skb_page_unref(skb, sg_page(sg), false);
}
#ifdef CONFIG_INET6_ESPINTCP
@@ -294,7 +294,7 @@ static void esp_output_done(void *data, int err)
}
tmp = ESP_SKB_CB(skb)->tmp;
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
kfree(tmp);
esp_output_encap_csum(skb);
@@ -677,7 +677,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
}
if (sg != dsg)
- esp_ssg_unref(x, tmp);
+ esp_ssg_unref(x, tmp, skb);
if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
err = esp_output_tail_tcp(x, skb);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 7c20038330..be52b18e08 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -449,6 +449,11 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
+ nla_total_size(16); /* src */
}
+static void fib6_rule_flush_cache(struct fib_rules_ops *ops)
+{
+ rt_genid_bump_ipv6(ops->fro_net);
+}
+
static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.family = AF_INET6,
.rule_size = sizeof(struct fib6_rule),
@@ -461,6 +466,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
+ .flush_cache = fib6_rule_flush_cache,
.nlgroup = RTNLGRP_IPV6_RULE,
.owner = THIS_MODULE,
.fro_net = &init_net,
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index bc6e0a0bad..76ee1615ff 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2719,7 +2719,6 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Should stop work after group drop. or we will
* start work again in mld_ifc_event()
*/
- synchronize_net();
mld_query_stop_work(idev);
mld_report_stop_work(idev);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ea1dec8448..ef815ba583 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5332,19 +5332,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = NULL;
list_for_each_entry(nh, &rt6_nh_list, next) {
err = __ip6_ins_rt(nh->fib6_info, info, extack);
- fib6_info_release(nh->fib6_info);
-
- if (!err) {
- /* save reference to last route successfully inserted */
- rt_last = nh->fib6_info;
-
- /* save reference to first route for notification */
- if (!rt_notif)
- rt_notif = nh->fib6_info;
- }
- /* nh->fib6_info is used or freed at this point, reset to NULL*/
- nh->fib6_info = NULL;
if (err) {
if (replace && nhn)
NL_SET_ERR_MSG_MOD(extack,
@@ -5352,6 +5340,12 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
err_nh = nh;
goto add_errout;
}
+ /* save reference to last route successfully inserted */
+ rt_last = nh->fib6_info;
+
+ /* save reference to first route for notification */
+ if (!rt_notif)
+ rt_notif = nh->fib6_info;
/* Because each route is added like a single route we remove
* these flags after the first nexthop: if there is a collision,
@@ -5412,8 +5406,7 @@ add_errout:
cleanup:
list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
- if (nh->fib6_info)
- fib6_info_release(nh->fib6_info);
+ fib6_info_release(nh->fib6_info);
list_del(&nh->next);
kfree(nh);
}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 0ed6e34d6e..ce33adb65a 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -156,7 +156,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
static LIST_HEAD(iucv_handler_list);
/*
- * iucv_path_table: an array of iucv_path structures.
+ * iucv_path_table: array of pointers to iucv_path structures.
*/
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;
@@ -544,7 +544,7 @@ static int iucv_enable(void)
cpus_read_lock();
rc = -ENOMEM;
- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
if (!iucv_path_table)
goto out;
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 1184d40167..eda933c097 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1152,10 +1152,11 @@ static int kcm_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
switch (optname) {
case KCM_RECV_DISABLE:
val = kcm->rx_disabled;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index f011af6601..6146e4e67b 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1356,11 +1356,11 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
return -EFAULT;
- len = min_t(unsigned int, len, sizeof(int));
-
if (len < 0)
return -EINVAL;
+ len = min_t(unsigned int, len, sizeof(int));
+
err = -ENOTCONN;
if (!sk->sk_user_data)
goto end;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index b382c2e0a3..ebaf930bb4 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1869,7 +1869,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
sband->band);
}
- ieee80211_sta_set_rx_nss(link_sta);
+ ieee80211_sta_init_nss(link_sta);
return ret;
}
@@ -2164,15 +2164,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
- sta->sdata->u.vlan.sta) {
- ieee80211_clear_fast_rx(sta);
+ sta->sdata->u.vlan.sta)
RCU_INIT_POINTER(sta->sdata->u.vlan.sta, NULL);
- }
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
ieee80211_vif_dec_num_mcast(sta->sdata);
sta->sdata = vlansdata;
+ ieee80211_check_fast_rx(sta);
ieee80211_check_fast_xmit(sta);
if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 84df104f27..e0a792a770 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -2109,7 +2109,7 @@ enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta);
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta);
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width);
enum nl80211_chan_width
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 241e615189..6cfc07aaa1 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -7502,10 +7502,10 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (err)
goto err_clear;
- if (req->link_id > 0)
+ if (req->link_id >= 0)
link = sdata_dereference(sdata->link[req->link_id], sdata);
else
- link = sdata_dereference(sdata->link[0], sdata);
+ link = &sdata->deflink;
if (WARN_ON(!link)) {
err = -ENOLINK;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d5ea5f5bcf..0efdaa8f2a 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -37,7 +37,7 @@ void rate_control_rate_init(struct sta_info *sta)
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
- ieee80211_sta_set_rx_nss(&sta->deflink);
+ ieee80211_sta_init_nss(&sta->deflink);
if (!ref)
return;
@@ -119,7 +119,8 @@ void rate_control_rate_update(struct ieee80211_local *local,
rcu_read_unlock();
}
- drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
+ if (sta->uploaded)
+ drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
int ieee80211_rate_control_register(const struct rate_control_ops *ops)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 7acf2223e4..f471304672 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -3,7 +3,7 @@
* Copyright 2002-2005, Devicescape Software, Inc.
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright(c) 2020-2023 Intel Corporation
+ * Copyright(c) 2020-2024 Intel Corporation
*/
#ifndef STA_INFO_H
@@ -482,6 +482,8 @@ struct ieee80211_fragment_cache {
* same for non-MLD STA. This is used as key for searching link STA
* @link_id: Link ID uniquely identifying the link STA. This is 0 for non-MLD
* and set to the corresponding vif LinkId for MLD STA
+ * @op_mode_nss: NSS limit as set by operating mode notification, or 0
+ * @capa_nss: NSS limit as determined by local and peer capabilities
* @link_hash_node: hash node for rhashtable
* @sta: Points to the STA info
* @gtk: group keys negotiated with this station, if any
@@ -518,6 +520,8 @@ struct link_sta_info {
u8 addr[ETH_ALEN];
u8 link_id;
+ u8 op_mode_nss, capa_nss;
+
struct rhlist_head link_hash_node;
struct sta_info *sta;
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index b3a5c3e96a..bc13b14199 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2023 Intel Corporation
+ * Copyright (C) 2018 - 2024 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -541,15 +541,11 @@ ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta)
return bw;
}
-void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
+void ieee80211_sta_init_nss(struct link_sta_info *link_sta)
{
u8 ht_rx_nss = 0, vht_rx_nss = 0, he_rx_nss = 0, eht_rx_nss = 0, rx_nss;
bool support_160;
- /* if we received a notification already don't overwrite it */
- if (link_sta->pub->rx_nss)
- return;
-
if (link_sta->pub->eht_cap.has_eht) {
int i;
const u8 *rx_nss_mcs = (void *)&link_sta->pub->eht_cap.eht_mcs_nss_supp;
@@ -627,7 +623,15 @@ void ieee80211_sta_set_rx_nss(struct link_sta_info *link_sta)
rx_nss = max(vht_rx_nss, ht_rx_nss);
rx_nss = max(he_rx_nss, rx_nss);
rx_nss = max(eht_rx_nss, rx_nss);
- link_sta->pub->rx_nss = max_t(u8, 1, rx_nss);
+ rx_nss = max_t(u8, 1, rx_nss);
+ link_sta->capa_nss = rx_nss;
+
+ /* that shouldn't be set yet, but we can handle it anyway */
+ if (link_sta->op_mode_nss)
+ link_sta->pub->rx_nss =
+ min_t(u8, rx_nss, link_sta->op_mode_nss);
+ else
+ link_sta->pub->rx_nss = rx_nss;
}
u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
@@ -637,7 +641,7 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
enum ieee80211_sta_rx_bandwidth new_bw;
struct sta_opmode_info sta_opmode = {};
u32 changed = 0;
- u8 nss, cur_nss;
+ u8 nss;
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
@@ -647,23 +651,17 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
nss += 1;
- if (link_sta->pub->rx_nss != nss) {
- cur_nss = link_sta->pub->rx_nss;
- /* Reset rx_nss and call ieee80211_sta_set_rx_nss() which
- * will set the same to max nss value calculated based on capability.
- */
- link_sta->pub->rx_nss = 0;
- ieee80211_sta_set_rx_nss(link_sta);
- /* Do not allow an nss change to rx_nss greater than max_nss
- * negotiated and capped to APs capability during association.
- */
- if (nss <= link_sta->pub->rx_nss) {
- link_sta->pub->rx_nss = nss;
- sta_opmode.rx_nss = nss;
- changed |= IEEE80211_RC_NSS_CHANGED;
- sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ if (link_sta->op_mode_nss != nss) {
+ if (nss <= link_sta->capa_nss) {
+ link_sta->op_mode_nss = nss;
+
+ if (nss != link_sta->pub->rx_nss) {
+ link_sta->pub->rx_nss = nss;
+ changed |= IEEE80211_RC_NSS_CHANGED;
+ sta_opmode.rx_nss = link_sta->pub->rx_nss;
+ sta_opmode.changed |= STA_OPMODE_N_SS_CHANGED;
+ }
} else {
- link_sta->pub->rx_nss = cur_nss;
pr_warn_ratelimited("Ignoring NSS change in VHT Operating Mode Notification from %pM with invalid nss %d",
link_sta->pub->addr, nss);
}
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 8d2eabc71b..f13b07ebfb 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -265,19 +265,27 @@ fail:
return -ENOMEM;
}
+static void mac802154_llsec_key_del_rcu(struct rcu_head *rcu)
+{
+ struct ieee802154_llsec_key_entry *pos;
+ struct mac802154_llsec_key *mkey;
+
+ pos = container_of(rcu, struct ieee802154_llsec_key_entry, rcu);
+ mkey = container_of(pos->key, struct mac802154_llsec_key, key);
+
+ llsec_key_put(mkey);
+ kfree_sensitive(pos);
+}
+
int mac802154_llsec_key_del(struct mac802154_llsec *sec,
const struct ieee802154_llsec_key_id *key)
{
struct ieee802154_llsec_key_entry *pos;
list_for_each_entry(pos, &sec->table.keys, list) {
- struct mac802154_llsec_key *mkey;
-
- mkey = container_of(pos->key, struct mac802154_llsec_key, key);
-
if (llsec_key_id_equal(&pos->id, key)) {
list_del_rcu(&pos->list);
- llsec_key_put(mkey);
+ call_rcu(&pos->rcu, mac802154_llsec_key_del_rcu);
return 0;
}
}
diff --git a/net/mctp/route.c b/net/mctp/route.c
index ceee44ea09..01c530dbc1 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -843,6 +843,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
/* copy message payload */
skb_copy_bits(skb, pos, skb_transport_header(skb2), size);
+ /* we need to copy the extensions, for MCTP flow data */
+ skb_ext_copy(skb2, skb);
+
/* do route */
rc = rt->output(rt, skb2);
if (rc)
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index e697a824b0..540d97715b 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -533,6 +533,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
/* Get fields bitmap */
if (nf_h323_error_boundary(bs, 0, f->sz))
return H323_ERROR_BOUND;
+ if (f->sz > 32)
+ return H323_ERROR_RANGE;
bmp = get_bitmap(bs, f->sz);
if (base)
*(unsigned int *)base = bmp;
@@ -589,6 +591,8 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f,
bmp2_len = get_bits(bs, 7) + 1;
if (nf_h323_error_boundary(bs, 0, bmp2_len))
return H323_ERROR_BOUND;
+ if (bmp2_len > 32)
+ return H323_ERROR_RANGE;
bmp2 = get_bitmap(bs, bmp2_len);
bmp |= bmp2 >> f->sz;
if (base)
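
Both added checks bound the bit count to 32 before calling get_bitmap(): the decoded bitmap lives in a 32-bit value, and the `bmp2 >> f->sz` combination a few lines up would otherwise shift by the field width, which is undefined for shifts of 32 or more on a 32-bit type. A hedged sketch of the guard, with a toy bit source standing in for the real bitstream reader.

#include <stdint.h>
#include <stdio.h>

#define H323_ERROR_RANGE (-2)

/* toy stand-in for get_bitmap(): copy the top 'n' bits of 'src' into *out */
static int get_bitmap_checked(uint32_t src, unsigned int n, uint32_t *out)
{
    if (n > 32)                         /* more bits than the 32-bit bitmap can hold */
        return H323_ERROR_RANGE;
    if (n == 0) {
        *out = 0;
        return 0;
    }
    *out = src & (0xFFFFFFFFu << (32 - n));     /* shift count stays in 0..31 */
    return 0;
}

int main(void)
{
    uint32_t bmp;

    if (get_bitmap_checked(0xABCD1234u, 40, &bmp) == H323_ERROR_RANGE)
        printf("field size 40 rejected before any shifting\n");
    if (get_bitmap_checked(0xABCD1234u, 8, &bmp) == 0)
        printf("8-bit bitmap: 0x%08x\n", (unsigned int)bmp);
    return 0;
}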
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 79e088e6f1..d07872814f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1211,7 +1211,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
if (flags & ~NFT_TABLE_F_MASK)
return -EOPNOTSUPP;
- if (flags == ctx->table->flags)
+ if (flags == (ctx->table->flags & NFT_TABLE_F_MASK))
return 0;
if ((nft_table_has_owner(ctx->table) &&
@@ -2619,19 +2619,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
}
- if (nla[NFTA_CHAIN_COUNTERS]) {
- if (!nft_is_base_chain(chain)) {
- err = -EOPNOTSUPP;
- goto err_hooks;
- }
-
- stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
- if (IS_ERR(stats)) {
- err = PTR_ERR(stats);
- goto err_hooks;
- }
- }
-
if (!(table->flags & NFT_TABLE_F_DORMANT) &&
nft_is_base_chain(chain) &&
!list_empty(&hook.list)) {
@@ -2646,6 +2633,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
}
unregister = true;
+
+ if (nla[NFTA_CHAIN_COUNTERS]) {
+ if (!nft_is_base_chain(chain)) {
+ err = -EOPNOTSUPP;
+ goto err_hooks;
+ }
+
+ stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]);
+ if (IS_ERR(stats)) {
+ err = PTR_ERR(stats);
+ goto err_hooks;
+ }
+ }
+
err = -ENOMEM;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
@@ -4999,6 +5000,12 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if ((flags & (NFT_SET_EVAL | NFT_SET_OBJECT)) ==
(NFT_SET_EVAL | NFT_SET_OBJECT))
return -EOPNOTSUPP;
+ if ((flags & (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT | NFT_SET_EVAL)) ==
+ (NFT_SET_ANONYMOUS | NFT_SET_TIMEOUT))
+ return -EOPNOTSUPP;
+ if ((flags & (NFT_SET_CONSTANT | NFT_SET_TIMEOUT)) ==
+ (NFT_SET_CONSTANT | NFT_SET_TIMEOUT))
+ return -EOPNOTSUPP;
}
desc.dtype = 0;
@@ -5422,6 +5429,7 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
list_del_rcu(&set->list);
+ set->dead = 1;
if (event)
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
GFP_KERNEL);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index bfd3e5a14d..255640013a 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1256,14 +1256,13 @@ static int nft_ct_expect_obj_init(const struct nft_ctx *ctx,
switch (priv->l3num) {
case NFPROTO_IPV4:
case NFPROTO_IPV6:
- if (priv->l3num != ctx->family)
- return -EINVAL;
+ if (priv->l3num == ctx->family || ctx->family == NFPROTO_INET)
+ break;
- fallthrough;
- case NFPROTO_INET:
- break;
+ return -EINVAL;
+ case NFPROTO_INET: /* tuple.src.l3num supports NFPROTO_IPV4/6 only */
default:
- return -EOPNOTSUPP;
+ return -EAFNOSUPPORT;
}
priv->l4proto = nla_get_u8(tb[NFTA_CT_EXPECT_L4PROTO]);
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 3089c4ca8f..abf659cb2d 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -2244,8 +2244,6 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
if (m) {
rcu_barrier();
- nft_set_pipapo_match_destroy(ctx, set, m);
-
for_each_possible_cpu(cpu)
pipapo_free_scratch(m, cpu);
free_percpu(m->scratch);
@@ -2257,8 +2255,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
if (priv->clone) {
m = priv->clone;
- if (priv->dirty)
- nft_set_pipapo_match_destroy(ctx, set, m);
+ nft_set_pipapo_match_destroy(ctx, set, m);
for_each_possible_cpu(cpu)
pipapo_free_scratch(priv->clone, cpu);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 0eed00184a..104a80b754 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -453,16 +453,16 @@ static int nr_create(struct net *net, struct socket *sock, int protocol,
nr_init_timers(sk);
nr->t1 =
- msecs_to_jiffies(sysctl_netrom_transport_timeout);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout));
nr->t2 =
- msecs_to_jiffies(sysctl_netrom_transport_acknowledge_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay));
nr->n2 =
- msecs_to_jiffies(sysctl_netrom_transport_maximum_tries);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries));
nr->t4 =
- msecs_to_jiffies(sysctl_netrom_transport_busy_delay);
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay));
nr->idle =
- msecs_to_jiffies(sysctl_netrom_transport_no_activity_timeout);
- nr->window = sysctl_netrom_transport_requested_window_size;
+ msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout));
+ nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size);
nr->bpqext = 1;
nr->state = NR_STATE_0;
@@ -954,7 +954,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
* G8PZT's Xrouter which is sending packets with command type 7
* as an extension of the protocol.
*/
- if (sysctl_netrom_reset_circuit &&
+ if (READ_ONCE(sysctl_netrom_reset_circuit) &&
(frametype != NR_RESET || flags != 0))
nr_transmit_reset(skb, 1);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 3aaac4a22b..2c34389c3c 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -81,7 +81,7 @@ static int nr_header(struct sk_buff *skb, struct net_device *dev,
buff[6] |= AX25_SSSID_SPARE;
buff += AX25_ADDR_LEN;
- *buff++ = sysctl_netrom_network_ttl_initialiser;
+ *buff++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
*buff++ = NR_PROTO_IP;
*buff++ = NR_PROTO_IP;
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 2f084b6f69..97944db6b5 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -97,7 +97,7 @@ static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -128,7 +128,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
@@ -262,7 +262,7 @@ static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype
break;
case NR_RESET:
- if (sysctl_netrom_reset_circuit)
+ if (READ_ONCE(sysctl_netrom_reset_circuit))
nr_disconnect(sk, ECONNRESET);
break;
diff --git a/net/netrom/nr_out.c b/net/netrom/nr_out.c
index 44929657f5..5e531394a7 100644
--- a/net/netrom/nr_out.c
+++ b/net/netrom/nr_out.c
@@ -204,7 +204,7 @@ void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index baea3cbd76..70480869ad 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -153,7 +153,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
nr_neigh->digipeat = NULL;
nr_neigh->ax25 = NULL;
nr_neigh->dev = dev;
- nr_neigh->quality = sysctl_netrom_default_path_quality;
+ nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
nr_neigh->locked = 0;
nr_neigh->count = 0;
nr_neigh->number = nr_neigh_no++;
@@ -728,7 +728,7 @@ void nr_link_failed(ax25_cb *ax25, int reason)
nr_neigh->ax25 = NULL;
ax25_cb_put(ax25);
- if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
+ if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
nr_neigh_put(nr_neigh);
return;
}
@@ -766,7 +766,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (ax25 != NULL) {
ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
ax25->ax25_dev->dev, 0,
- sysctl_netrom_obsolescence_count_initialiser);
+ READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
if (ret)
return ret;
}
@@ -780,7 +780,7 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
return ret;
}
- if (!sysctl_netrom_routing_control && ax25 != NULL)
+ if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
return 0;
/* Its Time-To-Live has expired */
diff --git a/net/netrom/nr_subr.c b/net/netrom/nr_subr.c
index e2d2af924c..c3bbd58808 100644
--- a/net/netrom/nr_subr.c
+++ b/net/netrom/nr_subr.c
@@ -182,7 +182,8 @@ void nr_write_internal(struct sock *sk, int frametype)
*dptr++ = nr->my_id;
*dptr++ = frametype;
*dptr++ = nr->window;
- if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ if (nr->bpqext)
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
break;
case NR_DISCREQ:
@@ -236,7 +237,7 @@ void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags)
dptr[6] |= AX25_SSSID_SPARE;
dptr += AX25_ADDR_LEN;
- *dptr++ = sysctl_netrom_network_ttl_initialiser;
+ *dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);
if (mine) {
*dptr++ = 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 7adf48549a..f017d7d33d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -4004,7 +4004,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
if (val < 0 || val > 1)
return -EINVAL;
- po->prot_hook.ignore_outgoing = !!val;
+ WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
return 0;
}
case PACKET_TX_HAS_OFF:
@@ -4135,7 +4135,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_IGNORE_OUTGOING:
- val = po->prot_hook.ignore_outgoing;
+ val = READ_ONCE(po->prot_hook.ignore_outgoing);
break;
case PACKET_ROLLOVER_STATS:
if (!po->rollover)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index fba82d3659..a4e3c5de99 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -301,6 +301,9 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
kfree(sg);
}
ret = PTR_ERR(trans_private);
+ /* Trigger connection so that its ready for the next retry */
+ if (ret == -ENODEV)
+ rds_conn_connect_if_down(cp->cp_conn);
goto out;
}
diff --git a/net/rds/send.c b/net/rds/send.c
index 5e57a1581d..09a2801106 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset);
static int acquire_in_xmit(struct rds_conn_path *cp)
{
- return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
+ return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0;
}
static void release_in_xmit(struct rds_conn_path *cp)
{
- clear_bit(RDS_IN_XMIT, &cp->cp_flags);
- smp_mb__after_atomic();
+ clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags);
/*
* We don't use wait_on_bit()/wake_up_bit() because our waking is in a
* hot path and finding waiters is very rare. We don't want to walk
@@ -1313,12 +1312,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Parse any control messages the user may have included. */
ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
- if (ret) {
- /* Trigger connection so that its ready for the next retry */
- if (ret == -EAGAIN)
- rds_conn_connect_if_down(conn);
+ if (ret)
goto out;
- }
if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
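
Two separate fixes land in this file; the first replaces a plain test_and_set_bit()/clear_bit()-plus-barrier pair with test_and_set_bit_lock()/clear_bit_unlock(), so the transmit flag now behaves like a small lock with acquire and release ordering. A rough userspace analogue using atomic_flag; it only mirrors the ordering idea, not the RDS code itself.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag in_xmit = ATOMIC_FLAG_INIT;
static int queued_bytes;                /* state only touched while the flag is held */

static bool acquire_in_xmit(void)
{
    /* true only for the caller that actually took the flag (acquire ordering) */
    return !atomic_flag_test_and_set_explicit(&in_xmit, memory_order_acquire);
}

static void release_in_xmit(void)
{
    /* release ordering: work done under the flag is visible before
     * another thread can observe the flag as clear */
    atomic_flag_clear_explicit(&in_xmit, memory_order_release);
}

int main(void)
{
    if (acquire_in_xmit()) {
        queued_bytes += 42;
        release_in_xmit();
    }
    printf("queued_bytes=%d, second acquire=%s\n", queued_bytes,
           acquire_in_xmit() ? "ok" : "busy");
    return 0;
}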
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 31a8252bd0..ad99409c63 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1008,7 +1008,8 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
};
static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
- [TCA_TAPRIO_TC_ENTRY_INDEX] = { .type = NLA_U32 },
+ [TCA_TAPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
+ TC_QOPT_MAX_QUEUE),
[TCA_TAPRIO_TC_ENTRY_MAX_SDU] = { .type = NLA_U32 },
[TCA_TAPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
TC_FP_EXPRESS,
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d435bffc61..97ff11973c 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags)
}
if (snprintf(portbuf, sizeof(portbuf),
- ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf))
+ ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf))
return NULL;
- if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf))
+ if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf))
return NULL;
return kstrdup(addrbuf, gfp_flags);
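
The switch from '>' to '>=' matches the documented return conventions: snprintf() returns the length the formatted string would have had, so any value equal to or larger than the buffer size means it was truncated, and strlcat() likewise returns the total length it tried to create. The boundary case the old test missed, shown with snprintf():

#include <stdio.h>

int main(void)
{
    char buf[6];
    unsigned int port = 956;            /* ".3.188" needs 6 chars plus the NUL */
    int n = snprintf(buf, sizeof(buf), ".%u.%u", port >> 8, port & 0xff);

    printf("snprintf returned %d, buffer holds %zu bytes, stored \"%s\"\n",
           n, sizeof(buf), buf);
    if (n >= (int)sizeof(buf))
        printf("'>=' flags the truncation; the old '>' test lets n == size slip through\n");
    return 0;
}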
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index e31cfdf7ea..f6fc80e1d6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -398,6 +398,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
u64 seq_send64;
int keylen;
u32 time32;
+ int ret;
p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
if (IS_ERR(p))
@@ -450,8 +451,16 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
}
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
- return gss_krb5_import_ctx_v2(ctx, gfp_mask);
+ ret = gss_krb5_import_ctx_v2(ctx, gfp_mask);
+ if (ret) {
+ p = ERR_PTR(ret);
+ goto out_free;
+ }
+ return 0;
+
+out_free:
+ kfree(ctx->mech_used.data);
out_err:
return PTR_ERR(p);
}
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index d79f12c255..cb32ab9a83 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -250,8 +250,8 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
if (!creds) {
- kfree(oa->data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto free_oa;
}
oa->data[0].option.data = CREDS_VALUE;
@@ -265,29 +265,40 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
/* option buffer */
p = xdr_inline_decode(xdr, 4);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
length = be32_to_cpup(p);
p = xdr_inline_decode(xdr, length);
- if (unlikely(p == NULL))
- return -ENOSPC;
+ if (unlikely(p == NULL)) {
+ err = -ENOSPC;
+ goto free_creds;
+ }
if (length == sizeof(CREDS_VALUE) &&
memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) {
/* We have creds here. parse them */
err = gssx_dec_linux_creds(xdr, creds);
if (err)
- return err;
+ goto free_creds;
oa->data[0].value.len = 1; /* presence */
} else {
/* consume uninteresting buffer */
err = gssx_dec_buffer(xdr, &dummy);
if (err)
- return err;
+ goto free_creds;
}
}
return 0;
+
+free_creds:
+ kfree(creds);
+free_oa:
+ kfree(oa->data);
+ oa->data = NULL;
+ return err;
}
static int gssx_dec_status(struct xdr_stream *xdr,
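
The hunk above converts a series of early returns into the usual goto-unwind pattern so that both allocations (the option array and the creds buffer) are released on every decode error instead of leaking. A compact userspace sketch of the idiom; unlike the real decoder, the demo frees both buffers on success as well, since it keeps nothing.

#include <stdio.h>
#include <stdlib.h>

static int decode_options(int simulate_failure)
{
    int err = -12;                      /* stand-in for -ENOMEM */
    char *data, *creds;

    data = malloc(64);
    if (!data)
        goto out;

    creds = calloc(1, 32);
    if (!creds)
        goto free_data;

    if (simulate_failure) {             /* a later parse step fails */
        err = -28;                      /* stand-in for -ENOSPC */
        goto free_creds;
    }

    puts("decoded; the real code would hand both buffers to the caller here");
    err = 0;

free_creds:
    free(creds);
free_data:
    free(data);
out:
    return err;
}

int main(void)
{
    printf("success path: %d, failure path: %d\n",
           decode_options(0), decode_options(1));
    return 0;
}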
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 2a81880dac..027c86e804 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -198,7 +198,7 @@ void wait_for_unix_gc(void)
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
- wait_event(unix_gc_wait, gc_in_progress == false);
+ wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress));
}
/* The external entry point: unix_gc() */
diff --git a/net/unix/scm.c b/net/unix/scm.c
index 6ff628f234..822ce0d0d7 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -35,10 +35,8 @@ struct sock *unix_get_socket(struct file *filp)
/* PF_UNIX ? */
if (s && ops && ops->family == PF_UNIX)
u_sock = s;
- } else {
- /* Could be an io_uring instance */
- u_sock = io_uring_get_socket(filp);
}
+
return u_sock;
}
EXPORT_SYMBOL(unix_get_socket);
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index a161c64d17..838ad6541a 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -4,6 +4,7 @@
* Authors : Jean Tourrilhes - HPL - <jt@hpl.hp.com>
* Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved.
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2024 Intel Corporation
*
* (As all part of the Linux kernel, this file is GPL)
*/
@@ -662,7 +663,8 @@ struct iw_statistics *get_wireless_stats(struct net_device *dev)
dev->ieee80211_ptr->wiphy->wext &&
dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev);
}
@@ -704,7 +706,8 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
#ifdef CONFIG_CFG80211_WEXT
if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) {
wireless_warn_cfg80211_wext();
- if (dev->ieee80211_ptr->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)
+ if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO |
+ WIPHY_FLAG_DISABLE_WEXT))
return NULL;
handlers = dev->ieee80211_ptr->wiphy->wext;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index aad8ffeaee..ae90696efe 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -460,12 +460,12 @@ static int x25_getsockopt(struct socket *sock, int level, int optname,
if (get_user(len, optlen))
goto out;
- len = min_t(unsigned int, len, sizeof(int));
-
rc = -EINVAL;
if (len < 0)
goto out;
+ len = min_t(unsigned int, len, sizeof(int));
+
rc = -EFAULT;
if (put_user(len, optlen))
goto out;
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 3784534c91..6346690d5c 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -407,7 +407,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct net_device *dev = x->xso.dev;
- if (!x->type_offload || x->encap)
+ if (!x->type_offload ||
+ (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap))
return false;
if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 662c83beb3..e5722c95b8 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -704,9 +704,13 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
struct net *net = dev_net(skb_dst(skb)->dev);
struct xfrm_state *x = skb_dst(skb)->xfrm;
+ int family;
int err;
- switch (x->outer_mode.family) {
+ family = (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) ? x->outer_mode.family
+ : skb_dst(skb)->ops->family;
+
+ switch (family) {
case AF_INET:
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c13dc3ef79..9c5f2efed3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2694,7 +2694,9 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
- family = xfrm[i]->props.family;
+ if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+ family = xfrm[i]->props.family;
+
oif = fl->flowi_oif ? : fl->flowi_l3mdev;
dst = xfrm_dst_lookup(xfrm[i], tos, oif,
&saddr, &daddr, family, mark);
@@ -3416,7 +3418,7 @@ decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reve
}
fl4->flowi4_proto = flkeys->basic.ip_proto;
- fl4->flowi4_tos = flkeys->ip.tos;
+ fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
}
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ad01997c3a..444e58bc3f 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2017,6 +2017,9 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
if (xp->xfrm_nr == 0)
return 0;
+ if (xp->xfrm_nr > XFRM_MAX_DEPTH)
+ return -ENOBUFS;
+
for (i = 0; i < xp->xfrm_nr; i++) {
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
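
The added xfrm_nr check guards the loop below it, which indexes both the policy's template array and an on-stack vec[XFRM_MAX_DEPTH]; refusing counts above the array size keeps a corrupted or unexpected policy from walking past the end. Generic sketch of the idiom (constant, struct and names are stand-ins):

#include <stdio.h>
#include <string.h>

#define MAX_DEPTH 6                     /* stand-in for XFRM_MAX_DEPTH */

struct tmpl { int id; };

static int copy_templates(struct tmpl *dst, const struct tmpl *src, unsigned int nr)
{
    if (nr > MAX_DEPTH)                 /* would run past the caller's buffer */
        return -105;                    /* stand-in for -ENOBUFS */
    for (unsigned int i = 0; i < nr; i++)
        dst[i] = src[i];
    return 0;
}

int main(void)
{
    struct tmpl src[8] = { { 1 }, { 2 }, { 3 } };
    struct tmpl dst[MAX_DEPTH];

    memset(dst, 0, sizeof(dst));
    printf("nr=3 -> %d, nr=8 -> %d\n",
           copy_templates(dst, src, 3), copy_templates(dst, src, 8));
    return 0;
}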
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 69ba0281f9..2284b37512 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -234,10 +234,10 @@ static void mtty_trigger_interrupt(struct mdev_state *mdev_state)
if (is_msi(mdev_state)) {
if (mdev_state->msi_evtfd)
- eventfd_signal(mdev_state->msi_evtfd, 1);
+ eventfd_signal(mdev_state->msi_evtfd);
} else if (is_intx(mdev_state)) {
if (mdev_state->intx_evtfd && !mdev_state->intx_mask) {
- eventfd_signal(mdev_state->intx_evtfd, 1);
+ eventfd_signal(mdev_state->intx_evtfd);
mdev_state->intx_mask = true;
}
}
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 2fe6f2828d..16c750bb95 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -143,6 +143,8 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict)
+KBUILD_CFLAGS += -Wno-enum-compare-conditional
+KBUILD_CFLAGS += -Wno-enum-enum-conversion
endif
endif
diff --git a/scripts/clang-tools/gen_compile_commands.py b/scripts/clang-tools/gen_compile_commands.py
index 5dea447924..e4fb686dfa 100755
--- a/scripts/clang-tools/gen_compile_commands.py
+++ b/scripts/clang-tools/gen_compile_commands.py
@@ -170,7 +170,7 @@ def process_line(root_directory, command_prefix, file_path):
# escape the pound sign '#', either as '\#' or '$(pound)' (depending on the
# kernel version). The compile_commands.json file is not interepreted
# by Make, so this code replaces the escaped version with '#'.
- prefix = command_prefix.replace('\#', '#').replace('$(pound)', '#')
+ prefix = command_prefix.replace(r'\#', '#').replace('$(pound)', '#')
# Return the canonical path, eliminating any symbolic links encountered in the path.
abs_path = os.path.realpath(os.path.join(root_directory, file_path))
diff --git a/scripts/kconfig/lexer.l b/scripts/kconfig/lexer.l
index cc386e4436..2c2b3e6f24 100644
--- a/scripts/kconfig/lexer.l
+++ b/scripts/kconfig/lexer.l
@@ -302,8 +302,11 @@ static char *expand_token(const char *in, size_t n)
new_string();
append_string(in, n);
- /* get the whole line because we do not know the end of token. */
- while ((c = input()) != EOF) {
+ /*
+ * get the whole line because we do not know the end of token.
+ * input() returns 0 (not EOF!) when it reachs the end of file.
+ */
+ while ((c = input()) != 0) {
if (c == '\n') {
unput(c);
break;
diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c
index 898358f57f..6788e73b66 100644
--- a/security/landlock/syscalls.c
+++ b/security/landlock/syscalls.c
@@ -33,6 +33,18 @@
#include "ruleset.h"
#include "setup.h"
+static bool is_initialized(void)
+{
+ if (likely(landlock_initialized))
+ return true;
+
+ pr_warn_once(
+ "Disabled but requested by user space. "
+ "You should enable Landlock at boot time: "
+ "https://docs.kernel.org/userspace-api/landlock.html#boot-time-configuration\n");
+ return false;
+}
+
/**
* copy_min_struct_from_user - Safe future-proof argument copying
*
@@ -173,7 +185,7 @@ SYSCALL_DEFINE3(landlock_create_ruleset,
/* Build-time checks. */
build_check_abi();
- if (!landlock_initialized)
+ if (!is_initialized())
return -EOPNOTSUPP;
if (flags) {
@@ -398,7 +410,7 @@ SYSCALL_DEFINE4(landlock_add_rule, const int, ruleset_fd,
struct landlock_ruleset *ruleset;
int err;
- if (!landlock_initialized)
+ if (!is_initialized())
return -EOPNOTSUPP;
/* No flag for now. */
@@ -458,7 +470,7 @@ SYSCALL_DEFINE2(landlock_restrict_self, const int, ruleset_fd, const __u32,
struct landlock_cred_security *new_llcred;
int err;
- if (!landlock_initialized)
+ if (!is_initialized())
return -EOPNOTSUPP;
/*
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 1f1ea85294..e1e297deb0 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1312,7 +1312,8 @@ static int smack_inode_setxattr(struct mnt_idmap *idmap,
check_star = 1;
} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
check_priv = 1;
- if (size != TRANS_TRUE_SIZE ||
+ if (!S_ISDIR(d_backing_inode(dentry)->i_mode) ||
+ size != TRANS_TRUE_SIZE ||
strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
rc = -EINVAL;
} else
@@ -2853,6 +2854,15 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
if (value == NULL || size > SMK_LONGLABEL || size == 0)
return -EINVAL;
+ if (strcmp(name, XATTR_SMACK_TRANSMUTE) == 0) {
+ if (!S_ISDIR(inode->i_mode) || size != TRANS_TRUE_SIZE ||
+ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
+ return -EINVAL;
+
+ nsp->smk_flags |= SMK_INODE_TRANSMUTE;
+ return 0;
+ }
+
skp = smk_import_entry(value, size);
if (IS_ERR(skp))
return PTR_ERR(skp);
diff --git a/sound/core/seq/seq_midi.c b/sound/core/seq/seq_midi.c
index 18320a248a..78dcb0ea15 100644
--- a/sound/core/seq/seq_midi.c
+++ b/sound/core/seq/seq_midi.c
@@ -113,6 +113,12 @@ static int dump_midi(struct snd_rawmidi_substream *substream, const char *buf, i
return 0;
}
+/* callback for snd_seq_dump_var_event(), bridging to dump_midi() */
+static int __dump_midi(void *ptr, void *buf, int count)
+{
+ return dump_midi(ptr, buf, count);
+}
+
static int event_process_midi(struct snd_seq_event *ev, int direct,
void *private_data, int atomic, int hop)
{
@@ -132,7 +138,7 @@ static int event_process_midi(struct snd_seq_event *ev, int direct,
pr_debug("ALSA: seq_midi: invalid sysex event flags = 0x%x\n", ev->flags);
return 0;
}
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)dump_midi, substream);
+ snd_seq_dump_var_event(ev, __dump_midi, substream);
snd_midi_event_reset_decode(msynth->parser);
} else {
if (msynth->parser == NULL)
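
__dump_midi() above exists only to give snd_seq_dump_var_event() a callback whose type matches exactly; the old code cast dump_midi() to snd_seq_dump_func_t, and calling a function through a pointer of an incompatible type is undefined behaviour and is rejected by kernels built with control-flow-integrity checking. A userspace sketch of the same bridge-wrapper approach; every name below is invented for the example.

#include <stdio.h>

struct substream { const char *name; };

/* the callback type the dispatcher expects */
typedef int (*dump_func_t)(void *ptr, void *buf, int count);

/* existing helper with a more specific prototype */
static int dump_midi(struct substream *sub, const unsigned char *buf, int count)
{
    printf("%s: %d bytes, first byte 0x%02x\n", sub->name, count, buf[0]);
    return 0;
}

/* thin bridge whose type matches dump_func_t exactly -- no cast needed */
static int dump_midi_bridge(void *ptr, void *buf, int count)
{
    return dump_midi(ptr, buf, count);
}

static int dispatch(dump_func_t fn, void *ptr, void *buf, int count)
{
    return fn(ptr, buf, count);
}

int main(void)
{
    struct substream sub = { "port0" };
    unsigned char note_on[3] = { 0x90, 0x40, 0x7f };

    /* pass the wrapper, not (dump_func_t)dump_midi */
    return dispatch(dump_midi_bridge, &sub, note_on, sizeof(note_on));
}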
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 1b9260108e..1678737f11 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -62,6 +62,13 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
/*
* decode input event and put to read buffer of each opened file
*/
+
+/* callback for snd_seq_dump_var_event(), bridging to snd_rawmidi_receive() */
+static int dump_to_rawmidi(void *ptr, void *buf, int count)
+{
+ return snd_rawmidi_receive(ptr, buf, count);
+}
+
static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
struct snd_seq_event *ev,
bool atomic)
@@ -80,7 +87,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
continue;
- snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+ snd_seq_dump_var_event(ev, dump_to_rawmidi, vmidi->substream);
snd_midi_event_reset_decode(vmidi->parser);
} else {
len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index d74cf11eef..b5b24fefb2 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -83,6 +83,7 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "104317F3", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431863", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "104318D3", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "10431A83", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431C9F", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "10431CAF", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "10431CCF", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
@@ -91,6 +92,7 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "10431D1F", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431DA2", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "10431E02", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
+ { "10431E12", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
{ "10431EE2", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
{ "10431F12", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 1000, 4500, 24 },
{ "10431F1F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, -1, 0, 0, 0, 0 },
@@ -205,6 +207,7 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
struct spi_device *spi;
bool dsd_found;
int ret;
+ int i;
for (cfg = cs35l41_config_table; cfg->ssid; cfg++) {
if (!strcasecmp(cfg->ssid, cs35l41->acpi_subsystem_id))
@@ -290,16 +293,6 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
cs35l41->index = id == 0x40 ? 0 : 1;
}
- if (cfg->num_amps == 3)
- /* 3 amps means a center channel, so no duplicate channels */
- cs35l41->channel_index = 0;
- else
- /*
- * if 4 amps, there are duplicate channels, so they need different indexes
- * if 2 amps, no duplicate channels, channel_index would be 0
- */
- cs35l41->channel_index = cs35l41->index / 2;
-
cs35l41->reset_gpio = fwnode_gpiod_get_index(acpi_fwnode_handle(cs35l41->dacpi), "reset",
cs35l41->index, GPIOD_OUT_LOW,
"cs35l41-reset");
@@ -307,6 +300,11 @@ static int generic_dsd_config(struct cs35l41_hda *cs35l41, struct device *physde
hw_cfg->spk_pos = cfg->channel[cs35l41->index];
+ cs35l41->channel_index = 0;
+ for (i = 0; i < cs35l41->index; i++)
+ if (cfg->channel[i] == hw_cfg->spk_pos)
+ cs35l41->channel_index++;
+
if (cfg->boost_type == INTERNAL) {
hw_cfg->bst_type = CS35L41_INT_BOOST;
hw_cfg->bst_ind = cfg->boost_ind_nanohenry;
@@ -419,6 +417,7 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "104317F3", generic_dsd_config },
{ "CSC3551", "10431863", generic_dsd_config },
{ "CSC3551", "104318D3", generic_dsd_config },
+ { "CSC3551", "10431A83", generic_dsd_config },
{ "CSC3551", "10431C9F", generic_dsd_config },
{ "CSC3551", "10431CAF", generic_dsd_config },
{ "CSC3551", "10431CCF", generic_dsd_config },
@@ -427,6 +426,7 @@ static const struct cs35l41_prop_model cs35l41_prop_model_table[] = {
{ "CSC3551", "10431D1F", generic_dsd_config },
{ "CSC3551", "10431DA2", generic_dsd_config },
{ "CSC3551", "10431E02", generic_dsd_config },
+ { "CSC3551", "10431E12", generic_dsd_config },
{ "CSC3551", "10431EE2", generic_dsd_config },
{ "CSC3551", "10431F12", generic_dsd_config },
{ "CSC3551", "10431F1F", generic_dsd_config },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index eb45e5c3db..27be1feb8c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3684,6 +3684,7 @@ static void alc285_hp_init(struct hda_codec *codec)
int i, val;
int coef38, coef0d, coef36;
+ alc_write_coefex_idx(codec, 0x58, 0x00, 0x1888); /* write default value */
alc_update_coef_idx(codec, 0x4a, 1<<15, 1<<15); /* Reset HP JD */
coef38 = alc_read_coef_idx(codec, 0x38); /* Amp control */
coef0d = alc_read_coef_idx(codec, 0x0d); /* Digital Misc control */
@@ -6695,6 +6696,60 @@ static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec,
}
}
+static void alc285_fixup_hp_envy_x360(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ static const struct coef_fw coefs[] = {
+ WRITE_COEF(0x08, 0x6a0c), WRITE_COEF(0x0d, 0xa023),
+ WRITE_COEF(0x10, 0x0320), WRITE_COEF(0x1a, 0x8c03),
+ WRITE_COEF(0x25, 0x1800), WRITE_COEF(0x26, 0x003a),
+ WRITE_COEF(0x28, 0x1dfe), WRITE_COEF(0x29, 0xb014),
+ WRITE_COEF(0x2b, 0x1dfe), WRITE_COEF(0x37, 0xfe15),
+ WRITE_COEF(0x38, 0x7909), WRITE_COEF(0x45, 0xd489),
+ WRITE_COEF(0x46, 0x00f4), WRITE_COEF(0x4a, 0x21e0),
+ WRITE_COEF(0x66, 0x03f0), WRITE_COEF(0x67, 0x1000),
+ WRITE_COEF(0x6e, 0x1005), { }
+ };
+
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x12, 0xb7a60130 }, /* Internal microphone*/
+ { 0x14, 0x90170150 }, /* B&O soundbar speakers */
+ { 0x17, 0x90170153 }, /* Side speakers */
+ { 0x19, 0x03a11040 }, /* Headset microphone */
+ { }
+ };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+
+ /* Fixes volume control problem for side speakers */
+ alc295_fixup_disable_dac3(codec, fix, action);
+
+ /* Fixes no sound from headset speaker */
+ snd_hda_codec_amp_stereo(codec, 0x21, HDA_OUTPUT, 0, -1, 0);
+
+ /* Auto-enable headset mic when plugged */
+ snd_hda_jack_set_gating_jack(codec, 0x19, 0x21);
+
+ /* Headset mic volume enhancement */
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREF50);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ alc_process_coef_fw(codec, coefs);
+ break;
+ case HDA_FIXUP_ACT_BUILD:
+ rename_ctl(codec, "Bass Speaker Playback Volume",
+ "B&O-Tuned Playback Volume");
+ rename_ctl(codec, "Front Playback Switch",
+ "B&O Soundbar Playback Switch");
+ rename_ctl(codec, "Bass Speaker Playback Switch",
+ "Side Speaker Playback Switch");
+ break;
+ }
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -7290,6 +7345,7 @@ enum {
ALC280_FIXUP_HP_9480M,
ALC245_FIXUP_HP_X360_AMP,
ALC285_FIXUP_HP_SPECTRE_X360_EB1,
+ ALC285_FIXUP_HP_ENVY_X360,
ALC288_FIXUP_DELL_HEADSET_MODE,
ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC288_FIXUP_DELL_XPS_13,
@@ -9265,6 +9321,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_hp_spectre_x360_eb1
},
+ [ALC285_FIXUP_HP_ENVY_X360] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_hp_envy_x360,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_HP_GPIO_AMP_INIT,
+ },
[ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_ideapad_s740_coef,
@@ -9847,6 +9909,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360),
SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360),
SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
@@ -9982,6 +10045,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED),
@@ -10288,6 +10355,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x38ab, "Thinkbook 16P", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b4, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b5, "Legion Slim 7 16IRH8", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x38b6, "Legion Slim 7 16APH8", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10538,6 +10607,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"},
{.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"},
+ {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"},
{.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"},
{.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"},
{.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"},
@@ -10980,6 +11050,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
* at most one tbl is allowed to define for the same vendor and same codec
*/
static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1025, "Acer", ALC2XX_FIXUP_HEADSET_MIC,
+ {0x19, 0x40000000}),
SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
{0x19, 0x40000000},
{0x1b, 0x40000000}),
@@ -11669,8 +11741,7 @@ static void alc897_hp_automute_hook(struct hda_codec *codec,
snd_hda_gen_hp_automute(codec, jack);
vref = spec->gen.hp_jack_present ? (PIN_HP | AC_PINCTL_VREF_100) : PIN_HP;
- snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
- vref);
+ snd_hda_set_pin_ctl(codec, 0x1b, vref);
}
static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
@@ -11679,6 +11750,10 @@ static void alc897_fixup_lenovo_headset_mic(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->gen.hp_automute_hook = alc897_hp_automute_hook;
+ spec->no_shutup_pins = 1;
+ }
+ if (action == HDA_FIXUP_ACT_PROBE) {
+ snd_hda_set_pin_ctl_cache(codec, 0x1a, PIN_IN | AC_PINCTL_VREF_100);
}
}
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 9f98965742..9f3dc13a3b 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -71,7 +71,7 @@ struct tas2781_hda {
struct snd_kcontrol *dsp_prog_ctl;
struct snd_kcontrol *dsp_conf_ctl;
struct snd_kcontrol *prof_ctl;
- struct snd_kcontrol *snd_ctls[3];
+ struct snd_kcontrol *snd_ctls[2];
};
static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)
@@ -142,11 +142,13 @@ static void tas2781_hda_playback_hook(struct device *dev, int action)
pm_runtime_get_sync(dev);
mutex_lock(&tas_hda->priv->codec_lock);
tasdevice_tuning_switch(tas_hda->priv, 0);
+ tas_hda->priv->playback_started = true;
mutex_unlock(&tas_hda->priv->codec_lock);
break;
case HDA_GEN_PCM_ACT_CLOSE:
mutex_lock(&tas_hda->priv->codec_lock);
tasdevice_tuning_switch(tas_hda->priv, 1);
+ tas_hda->priv->playback_started = false;
mutex_unlock(&tas_hda->priv->codec_lock);
pm_runtime_mark_last_busy(dev);
@@ -177,8 +179,12 @@ static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->rcabin.profile_cfg_id;
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -192,11 +198,15 @@ static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
val = clamp(nr_profile, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
if (tas_priv->rcabin.profile_cfg_id != val) {
tas_priv->rcabin.profile_cfg_id = val;
ret = 1;
}
+ mutex_unlock(&tas_priv->codec_lock);
+
return ret;
}
@@ -233,8 +243,12 @@ static int tasdevice_program_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->cur_prog;
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -249,11 +263,15 @@ static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
val = clamp(nr_program, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
if (tas_priv->cur_prog != val) {
tas_priv->cur_prog = val;
ret = 1;
}
+ mutex_unlock(&tas_priv->codec_lock);
+
return ret;
}
@@ -262,8 +280,12 @@ static int tasdevice_config_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = tas_priv->cur_conf;
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -278,33 +300,16 @@ static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
val = clamp(nr_config, 0, max);
+ mutex_lock(&tas_priv->codec_lock);
+
if (tas_priv->cur_conf != val) {
tas_priv->cur_conf = val;
ret = 1;
}
- return ret;
-}
+ mutex_unlock(&tas_priv->codec_lock);
-/*
- * tas2781_digital_getvol - get the volum control
- * @kcontrol: control pointer
- * @ucontrol: User data
- * Customer Kcontrol for tas2781 is primarily for regmap booking, paging
- * depends on internal regmap mechanism.
- * tas2781 contains book and page two-level register map, especially
- * book switching will set the register BXXP00R7F, after switching to the
- * correct book, then leverage the mechanism for paging to access the
- * register.
- */
-static int tas2781_digital_getvol(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
-
- return tasdevice_digital_getvol(tas_priv, ucontrol, mc);
+ return ret;
}
static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
@@ -313,19 +318,15 @@ static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ int ret;
- return tasdevice_amp_getvol(tas_priv, ucontrol, mc);
-}
+ mutex_lock(&tas_priv->codec_lock);
-static int tas2781_digital_putvol(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
+ ret = tasdevice_amp_getvol(tas_priv, ucontrol, mc);
+
+ mutex_unlock(&tas_priv->codec_lock);
- /* The check of the given value is in tasdevice_digital_putvol. */
- return tasdevice_digital_putvol(tas_priv, ucontrol, mc);
+ return ret;
}
static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
@@ -334,9 +335,16 @@ static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ int ret;
+
+ mutex_lock(&tas_priv->codec_lock);
/* The check of the given value is in tasdevice_amp_putvol. */
- return tasdevice_amp_putvol(tas_priv, ucontrol, mc);
+ ret = tasdevice_amp_putvol(tas_priv, ucontrol, mc);
+
+ mutex_unlock(&tas_priv->codec_lock);
+
+ return ret;
}
static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
@@ -344,10 +352,14 @@ static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,
{
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&tas_priv->codec_lock);
+
ucontrol->value.integer.value[0] = (int)tas_priv->force_fwload_status;
dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
tas_priv->force_fwload_status ? "ON" : "OFF");
+ mutex_unlock(&tas_priv->codec_lock);
+
return 0;
}
@@ -357,6 +369,8 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);
bool change, val = (bool)ucontrol->value.integer.value[0];
+ mutex_lock(&tas_priv->codec_lock);
+
if (tas_priv->force_fwload_status == val)
change = false;
else {
@@ -366,6 +380,8 @@ static int tas2781_force_fwload_put(struct snd_kcontrol *kcontrol,
dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,
tas_priv->force_fwload_status ? "ON" : "OFF");
+ mutex_unlock(&tas_priv->codec_lock);
+
return change;
}
@@ -373,9 +389,6 @@ static const struct snd_kcontrol_new tas2781_snd_controls[] = {
ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL,
1, 0, 20, 0, tas2781_amp_getvol,
tas2781_amp_putvol, amp_vol_tlv),
- ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL,
- 0, 0, 200, 1, tas2781_digital_getvol,
- tas2781_digital_putvol, dvc_tlv),
ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,
tas2781_force_fwload_get, tas2781_force_fwload_put),
};
@@ -479,7 +492,7 @@ static int tas2781_save_calibration(struct tasdevice_priv *tas_priv)
dev_dbg(tas_priv->dev, "%4ld-%2d-%2d, %2d:%2d:%2d\n",
tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
- tas2781_apply_calib(tas_priv);
+ tasdevice_apply_calibration(tas_priv);
} else
tas_priv->cali_data.total_sz = 0;
@@ -582,7 +595,10 @@ static void tasdev_fw_ready(const struct firmware *fmw, void *context)
/* If the calibration data is invalid, the DSP still works with the
 * default calibration data built into the algorithm.
 */
- tas2781_save_calibration(tas_priv);
+ tasdevice_save_calibration(tas_priv);
+
+ tasdevice_tuning_switch(tas_hda->priv, 0);
+ tas_hda->priv->playback_started = true;
out:
mutex_unlock(&tas_hda->priv->codec_lock);
@@ -683,10 +699,6 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
const char *device_name;
int ret;
- if (strstr(dev_name(&clt->dev), "TIAS2781"))
- device_name = "TIAS2781";
- else
- return -ENODEV;
tas_hda = devm_kzalloc(&clt->dev, sizeof(*tas_hda), GFP_KERNEL);
if (!tas_hda)
@@ -699,6 +711,13 @@ static int tas2781_hda_i2c_probe(struct i2c_client *clt)
if (!tas_hda->priv)
return -ENOMEM;
+ if (strstr(dev_name(&clt->dev), "TIAS2781")) {
+ device_name = "TIAS2781";
+ tas_hda->priv->save_calibration = tas2781_save_calibration;
+ tas_hda->priv->apply_calibration = tas2781_apply_calib;
+ } else
+ return -ENODEV;
+
tas_hda->priv->irq_info.irq = clt->irq;
ret = tas2781_read_acpi(tas_hda->priv, device_name);
if (ret)
@@ -740,23 +759,19 @@ static void tas2781_hda_i2c_remove(struct i2c_client *clt)
static int tas2781_runtime_suspend(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- int i;
dev_dbg(tas_hda->dev, "Runtime Suspend\n");
mutex_lock(&tas_hda->priv->codec_lock);
+ /* The driver powers up the amplifiers at module load time.
+ * Stop the playback if it's unused.
+ */
if (tas_hda->priv->playback_started) {
tasdevice_tuning_switch(tas_hda->priv, 1);
tas_hda->priv->playback_started = false;
}
- for (i = 0; i < tas_hda->priv->ndev; i++) {
- tas_hda->priv->tasdevice[i].cur_book = -1;
- tas_hda->priv->tasdevice[i].cur_prog = -1;
- tas_hda->priv->tasdevice[i].cur_conf = -1;
- }
-
mutex_unlock(&tas_hda->priv->codec_lock);
return 0;
@@ -765,8 +780,6 @@ static int tas2781_runtime_suspend(struct device *dev)
static int tas2781_runtime_resume(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- unsigned long calib_data_sz =
- tas_hda->priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
dev_dbg(tas_hda->dev, "Runtime Resume\n");
@@ -777,8 +790,7 @@ static int tas2781_runtime_resume(struct device *dev)
/* If the calibration data is invalid, the DSP still works with the
 * default calibration data built into the algorithm.
 */
- if (tas_hda->priv->cali_data.total_sz > calib_data_sz)
- tas2781_apply_calib(tas_hda->priv);
+ tasdevice_apply_calibration(tas_hda->priv);
mutex_unlock(&tas_hda->priv->codec_lock);
@@ -788,16 +800,16 @@ static int tas2781_runtime_resume(struct device *dev)
static int tas2781_system_suspend(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- int ret;
dev_dbg(tas_hda->priv->dev, "System Suspend\n");
- ret = pm_runtime_force_suspend(dev);
- if (ret)
- return ret;
+ mutex_lock(&tas_hda->priv->codec_lock);
/* Shutdown chip before system suspend */
- tasdevice_tuning_switch(tas_hda->priv, 1);
+ if (tas_hda->priv->playback_started)
+ tasdevice_tuning_switch(tas_hda->priv, 1);
+
+ mutex_unlock(&tas_hda->priv->codec_lock);
/*
* Reset GPIO may be shared, so cannot reset here.
@@ -809,15 +821,9 @@ static int tas2781_system_suspend(struct device *dev)
static int tas2781_system_resume(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
- unsigned long calib_data_sz =
- tas_hda->priv->ndev * TASDEVICE_SPEAKER_CALIBRATION_SIZE;
- int i, ret;
+ int i;
- dev_info(tas_hda->priv->dev, "System Resume\n");
-
- ret = pm_runtime_force_resume(dev);
- if (ret)
- return ret;
+ dev_dbg(tas_hda->priv->dev, "System Resume\n");
mutex_lock(&tas_hda->priv->codec_lock);
@@ -832,8 +838,11 @@ static int tas2781_system_resume(struct device *dev)
/* If the calibration data is invalid, the DSP still works with the
 * default calibration data built into the algorithm.
 */
- if (tas_hda->priv->cali_data.total_sz > calib_data_sz)
- tas2781_apply_calib(tas_hda->priv);
+ tasdevice_apply_calibration(tas_hda->priv);
+
+ if (tas_hda->priv->playback_started)
+ tasdevice_tuning_switch(tas_hda->priv, 0);
+
mutex_unlock(&tas_hda->priv->codec_lock);
return 0;
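The tas2781 kcontrol changes above wrap every read of a cached value and every compare-and-update in the codec lock, and return 1 only when the value actually changed. Below is a minimal userspace sketch of that pattern, not part of the patch; it uses POSIX pthread_mutex_t in place of the kernel's struct mutex, and all names are hypothetical.

#include <pthread.h>
#include <stdio.h>

struct fake_priv {
	pthread_mutex_t lock;
	int profile_id;
};

static int fake_get_profile(struct fake_priv *p)
{
	int val;

	pthread_mutex_lock(&p->lock);
	val = p->profile_id;		/* read under the lock */
	pthread_mutex_unlock(&p->lock);
	return val;
}

static int fake_put_profile(struct fake_priv *p, int new_val)
{
	int changed = 0;

	pthread_mutex_lock(&p->lock);
	if (p->profile_id != new_val) {	/* compare and update atomically */
		p->profile_id = new_val;
		changed = 1;
	}
	pthread_mutex_unlock(&p->lock);
	return changed;			/* 1 tells the caller the control changed */
}

int main(void)
{
	struct fake_priv p = { .lock = PTHREAD_MUTEX_INITIALIZER, .profile_id = 0 };

	printf("changed=%d\n", fake_put_profile(&p, 3));
	printf("value=%d\n", fake_get_profile(&p));
	return 0;
}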
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 320ac792c7..3182c63446 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -278,7 +278,8 @@ static void run_spu_dma(struct work_struct *work)
dreamcastcard->clicks++;
if (unlikely(dreamcastcard->clicks >= AICA_PERIOD_NUMBER))
dreamcastcard->clicks %= AICA_PERIOD_NUMBER;
- mod_timer(&dreamcastcard->timer, jiffies + 1);
+ if (snd_pcm_running(dreamcastcard->substream))
+ mod_timer(&dreamcastcard->timer, jiffies + 1);
}
}
@@ -290,6 +291,8 @@ static void aica_period_elapsed(struct timer_list *t)
/*timer function - so cannot sleep */
int play_period;
struct snd_pcm_runtime *runtime;
+ if (!snd_pcm_running(substream))
+ return;
runtime = substream->runtime;
dreamcastcard = substream->pcm->private_data;
/* Have we played out an additional period? */
@@ -350,12 +353,19 @@ static int snd_aicapcm_pcm_open(struct snd_pcm_substream
return 0;
}
+static int snd_aicapcm_pcm_sync_stop(struct snd_pcm_substream *substream)
+{
+ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
+
+ del_timer_sync(&dreamcastcard->timer);
+ cancel_work_sync(&dreamcastcard->spu_dma_work);
+ return 0;
+}
+
static int snd_aicapcm_pcm_close(struct snd_pcm_substream
*substream)
{
struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
- flush_work(&(dreamcastcard->spu_dma_work));
- del_timer(&dreamcastcard->timer);
dreamcastcard->substream = NULL;
kfree(dreamcastcard->channel);
spu_disable();
@@ -401,6 +411,7 @@ static const struct snd_pcm_ops snd_aicapcm_playback_ops = {
.prepare = snd_aicapcm_pcm_prepare,
.trigger = snd_aicapcm_pcm_trigger,
.pointer = snd_aicapcm_pcm_pointer,
+ .sync_stop = snd_aicapcm_pcm_sync_stop,
};
/* TO DO: set up to handle more than one pcm instance */
diff --git a/sound/soc/amd/acp/acp-sof-mach.c b/sound/soc/amd/acp/acp-sof-mach.c
index 5223033a12..354d0fc552 100644
--- a/sound/soc/amd/acp/acp-sof-mach.c
+++ b/sound/soc/amd/acp/acp-sof-mach.c
@@ -120,16 +120,14 @@ static int acp_sof_probe(struct platform_device *pdev)
if (dmi_id && dmi_id->driver_data)
acp_card_drvdata->tdm_mode = dmi_id->driver_data;
- acp_sofdsp_dai_links_create(card);
+ ret = acp_sofdsp_dai_links_create(card);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Failed to create DAI links\n");
ret = devm_snd_soc_register_card(&pdev->dev, card);
- if (ret) {
- dev_err(&pdev->dev,
- "devm_snd_soc_register_card(%s) failed: %d\n",
- card->name, ret);
- return ret;
- }
-
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to register card(%s)\n", card->name);
return 0;
}
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index 80ad60d485..1d1452c29e 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -203,6 +203,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J0"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J5"),
}
},
@@ -238,6 +245,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UU"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
@@ -398,6 +412,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8BD6"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MECHREVO"),
DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
}
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index d62c9f26c6..5009cf6412 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2175,7 +2175,10 @@ static int cs42l43_codec_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(priv->dev);
pm_runtime_set_active(priv->dev);
pm_runtime_get_noresume(priv->dev);
- devm_pm_runtime_enable(priv->dev);
+
+ ret = devm_pm_runtime_enable(priv->dev);
+ if (ret)
+ goto err_pm;
for (i = 0; i < ARRAY_SIZE(cs42l43_irqs); i++) {
ret = cs42l43_request_irq(priv, dom, cs42l43_irqs[i].name,
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index ea08b7cfc3..e0da151508 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3829,6 +3829,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
DMI_EXACT_MATCH(DMI_BOARD_VERSION, "Default string"),
+ /*
+ * Above strings are too generic, LattePanda BIOS versions for
+ * all 4 hw revisions are:
+ * DF-BI-7-S70CR100-*
+ * DF-BI-7-S70CR110-*
+ * DF-BI-7-S70CR200-*
+ * LP-BS-7-S70CR700-*
+ * Do a partial match for S70CR to avoid false positive matches.
+ */
+ DMI_MATCH(DMI_BIOS_VERSION, "S70CR"),
},
.driver_data = (void *)&lattepanda_board_platform_data,
},
diff --git a/sound/soc/codecs/tas2781-comlib.c b/sound/soc/codecs/tas2781-comlib.c
index add16302f7..5d0e5348b3 100644
--- a/sound/soc/codecs/tas2781-comlib.c
+++ b/sound/soc/codecs/tas2781-comlib.c
@@ -413,6 +413,21 @@ void tasdevice_remove(struct tasdevice_priv *tas_priv)
}
EXPORT_SYMBOL_GPL(tasdevice_remove);
+int tasdevice_save_calibration(struct tasdevice_priv *tas_priv)
+{
+ if (tas_priv->save_calibration)
+ return tas_priv->save_calibration(tas_priv);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tasdevice_save_calibration);
+
+void tasdevice_apply_calibration(struct tasdevice_priv *tas_priv)
+{
+ if (tas_priv->apply_calibration && tas_priv->cali_data.total_sz)
+ tas_priv->apply_calibration(tas_priv);
+}
+EXPORT_SYMBOL_GPL(tasdevice_apply_calibration);
+
static int tasdevice_clamp(int val, int max, unsigned int invert)
{
if (val > max)
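The new tasdevice_save_calibration()/tasdevice_apply_calibration() helpers above simply dispatch through optional per-variant callbacks, falling back to -EINVAL or a no-op when no hook is registered. Here is a small standalone C sketch of that optional-callback pattern, separate from the patch; the struct and function names are made up for illustration.

#include <errno.h>
#include <stdio.h>

struct calib_ops {
	/* either pointer may be NULL if the variant has no calibration hook */
	int  (*save_calibration)(void *priv);
	void (*apply_calibration)(void *priv);
};

static int do_save(struct calib_ops *ops, void *priv)
{
	if (ops->save_calibration)
		return ops->save_calibration(priv);
	return -EINVAL;		/* no hook registered for this device */
}

static void do_apply(struct calib_ops *ops, void *priv)
{
	if (ops->apply_calibration)
		ops->apply_calibration(priv);
}

static int fake_save(void *priv) { (void)priv; return 0; }

int main(void)
{
	struct calib_ops ops = { .save_calibration = fake_save };

	printf("save: %d\n", do_save(&ops, NULL));	/* 0: hook present */
	do_apply(&ops, NULL);				/* silently skipped: hook absent */
	return 0;
}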
diff --git a/sound/soc/codecs/tlv320adc3xxx.c b/sound/soc/codecs/tlv320adc3xxx.c
index 420bbf588e..e100cc9f5c 100644
--- a/sound/soc/codecs/tlv320adc3xxx.c
+++ b/sound/soc/codecs/tlv320adc3xxx.c
@@ -1429,7 +1429,7 @@ err_unprepare_mclk:
return ret;
}
-static void __exit adc3xxx_i2c_remove(struct i2c_client *client)
+static void adc3xxx_i2c_remove(struct i2c_client *client)
{
struct adc3xxx *adc3xxx = i2c_get_clientdata(client);
@@ -1452,7 +1452,7 @@ static struct i2c_driver adc3xxx_i2c_driver = {
.of_match_table = tlv320adc3xxx_of_match,
},
.probe = adc3xxx_i2c_probe,
- .remove = __exit_p(adc3xxx_i2c_remove),
+ .remove = adc3xxx_i2c_remove,
.id_table = adc3xxx_i2c_id,
};
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index fb90ae6a8a..7c6ed29831 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2229,6 +2229,9 @@ SND_SOC_DAPM_PGA_E("HPOUT", SND_SOC_NOPM, 0, 0, NULL, 0, hp_event,
SND_SOC_DAPM_OUTPUT("HPOUTL"),
SND_SOC_DAPM_OUTPUT("HPOUTR"),
+
+SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
};
static const struct snd_soc_dapm_widget wm8962_dapm_spk_mono_widgets[] = {
@@ -2236,7 +2239,6 @@ SND_SOC_DAPM_MIXER("Speaker Mixer", WM8962_MIXER_ENABLES, 1, 0,
spkmixl, ARRAY_SIZE(spkmixl)),
SND_SOC_DAPM_MUX_E("Speaker PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
out_pga_event, SND_SOC_DAPM_POST_PMU),
-SND_SOC_DAPM_PGA("Speaker Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
SND_SOC_DAPM_OUTPUT("SPKOUT"),
};
@@ -2251,9 +2253,6 @@ SND_SOC_DAPM_MUX_E("SPKOUTL PGA", WM8962_PWR_MGMT_2, 4, 0, &spkoutl_mux,
SND_SOC_DAPM_MUX_E("SPKOUTR PGA", WM8962_PWR_MGMT_2, 3, 0, &spkoutr_mux,
out_pga_event, SND_SOC_DAPM_POST_PMU),
-SND_SOC_DAPM_PGA("SPKOUTR Output", WM8962_CLASS_D_CONTROL_1, 7, 0, NULL, 0),
-SND_SOC_DAPM_PGA("SPKOUTL Output", WM8962_CLASS_D_CONTROL_1, 6, 0, NULL, 0),
-
SND_SOC_DAPM_OUTPUT("SPKOUTL"),
SND_SOC_DAPM_OUTPUT("SPKOUTR"),
};
@@ -2366,12 +2365,18 @@ static const struct snd_soc_dapm_route wm8962_spk_mono_intercon[] = {
{ "Speaker PGA", "Mixer", "Speaker Mixer" },
{ "Speaker PGA", "DAC", "DACL" },
- { "Speaker Output", NULL, "Speaker PGA" },
- { "Speaker Output", NULL, "SYSCLK" },
- { "Speaker Output", NULL, "TOCLK" },
- { "Speaker Output", NULL, "TEMP_SPK" },
+ { "SPKOUTL Output", NULL, "Speaker PGA" },
+ { "SPKOUTL Output", NULL, "SYSCLK" },
+ { "SPKOUTL Output", NULL, "TOCLK" },
+ { "SPKOUTL Output", NULL, "TEMP_SPK" },
- { "SPKOUT", NULL, "Speaker Output" },
+ { "SPKOUTR Output", NULL, "Speaker PGA" },
+ { "SPKOUTR Output", NULL, "SYSCLK" },
+ { "SPKOUTR Output", NULL, "TOCLK" },
+ { "SPKOUTR Output", NULL, "TEMP_SPK" },
+
+ { "SPKOUT", NULL, "SPKOUTL Output" },
+ { "SPKOUT", NULL, "SPKOUTR Output" },
};
static const struct snd_soc_dapm_route wm8962_spk_stereo_intercon[] = {
@@ -2914,8 +2919,12 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
switch (fll_id) {
case WM8962_FLL_MCLK:
case WM8962_FLL_BCLK:
+ fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
+ break;
case WM8962_FLL_OSC:
fll1 |= (fll_id - 1) << WM8962_FLL_REFCLK_SRC_SHIFT;
+ snd_soc_component_update_bits(component, WM8962_PLL2,
+ WM8962_OSC_ENA, WM8962_OSC_ENA);
break;
case WM8962_FLL_INT:
snd_soc_component_update_bits(component, WM8962_FLL_CONTROL_1,
@@ -2924,7 +2933,7 @@ static int wm8962_set_fll(struct snd_soc_component *component, int fll_id, int s
WM8962_FLL_FRC_NCO, WM8962_FLL_FRC_NCO);
break;
default:
- dev_err(component->dev, "Unknown FLL source %d\n", ret);
+ dev_err(component->dev, "Unknown FLL source %d\n", source);
return -EINVAL;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 42466b4b1c..a290f498ba 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -685,6 +685,18 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Chuwi Vi8 dual-boot (CWI506) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "i86"),
+ /* The above are too generic, also match BIOS info */
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
/* Chuwi Vi10 (CWI505) */
.matches = {
diff --git a/sound/soc/meson/aiu.c b/sound/soc/meson/aiu.c
index 7109b81cc3..5d1419ed7a 100644
--- a/sound/soc/meson/aiu.c
+++ b/sound/soc/meson/aiu.c
@@ -212,11 +212,12 @@ static const char * const aiu_spdif_ids[] = {
static int aiu_clk_get(struct device *dev)
{
struct aiu *aiu = dev_get_drvdata(dev);
+ struct clk *pclk;
int ret;
- aiu->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(aiu->pclk))
- return dev_err_probe(dev, PTR_ERR(aiu->pclk), "Can't get the aiu pclk\n");
+ pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk), "Can't get the aiu pclk\n");
aiu->spdif_mclk = devm_clk_get(dev, "spdif_mclk");
if (IS_ERR(aiu->spdif_mclk))
@@ -233,18 +234,6 @@ static int aiu_clk_get(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "Can't get the spdif clocks\n");
- ret = clk_prepare_enable(aiu->pclk);
- if (ret) {
- dev_err(dev, "peripheral clock enable failed\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- aiu->pclk);
- if (ret)
- dev_err(dev, "failed to add reset action on pclk");
-
return ret;
}
diff --git a/sound/soc/meson/aiu.h b/sound/soc/meson/aiu.h
index 393b6c2307..0f94c8bf60 100644
--- a/sound/soc/meson/aiu.h
+++ b/sound/soc/meson/aiu.h
@@ -33,7 +33,6 @@ struct aiu_platform_data {
};
struct aiu {
- struct clk *pclk;
struct clk *spdif_mclk;
struct aiu_interface i2s;
struct aiu_interface spdif;
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index 1c3d433cef..2cedbce738 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -12,6 +12,9 @@
#include "axg-tdm.h"
+/* Maximum bit clock frequency according to the datasheets */
+#define MAX_SCLK 100000000 /* Hz */
+
enum {
TDM_IFACE_PAD,
TDM_IFACE_LOOPBACK,
@@ -153,19 +156,27 @@ static int axg_tdm_iface_startup(struct snd_pcm_substream *substream,
return -EINVAL;
}
- /* Apply component wide rate symmetry */
if (snd_soc_component_active(dai->component)) {
+ /* Apply component wide rate symmetry */
ret = snd_pcm_hw_constraint_single(substream->runtime,
SNDRV_PCM_HW_PARAM_RATE,
iface->rate);
- if (ret < 0) {
- dev_err(dai->dev,
- "can't set iface rate constraint\n");
- return ret;
- }
+
+ } else {
+ /* Limit rate according to the slot number and width */
+ unsigned int max_rate =
+ MAX_SCLK / (iface->slots * iface->slot_width);
+ ret = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 0, max_rate);
}
- return 0;
+ if (ret < 0)
+ dev_err(dai->dev, "can't set iface rate constraint\n");
+ else
+ ret = 0;
+
+ return ret;
}
static int axg_tdm_iface_set_stream(struct snd_pcm_substream *substream,
@@ -264,8 +275,8 @@ static int axg_tdm_iface_set_sclk(struct snd_soc_dai *dai,
srate = iface->slots * iface->slot_width * params_rate(params);
if (!iface->mclk_rate) {
- /* If no specific mclk is requested, default to bit clock * 4 */
- clk_set_rate(iface->mclk, 4 * srate);
+ /* If no specific mclk is requested, default to bit clock * 2 */
+ clk_set_rate(iface->mclk, 2 * srate);
} else {
/* Check if we can actually get the bit clock from mclk */
if (iface->mclk_rate % srate) {
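The startup change above caps the sample rate at MAX_SCLK divided by the slot count times the slot width, and the set_sclk change defaults mclk to twice the bit clock when no rate was requested. A standalone sketch of that arithmetic follows, not part of the patch, assuming example values of 8 slots, 32-bit slots and a 48 kHz rate.

#include <stdio.h>

#define MAX_SCLK 100000000U	/* 100 MHz, per the comment in the hunk above */

int main(void)
{
	unsigned int slots = 8, slot_width = 32, rate = 48000;

	/* 100 MHz / (8 slots * 32 bits) = 390625 Hz maximum sample rate */
	unsigned int max_rate = MAX_SCLK / (slots * slot_width);
	printf("max_rate = %u Hz\n", max_rate);

	/* default mclk when none is requested: 2 * bit clock */
	unsigned int srate = slots * slot_width * rate;	/* bit clock in Hz */
	printf("mclk = %u Hz\n", 2 * srate);
	return 0;
}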
diff --git a/sound/soc/meson/t9015.c b/sound/soc/meson/t9015.c
index 9c6b4dac68..571f65788c 100644
--- a/sound/soc/meson/t9015.c
+++ b/sound/soc/meson/t9015.c
@@ -48,7 +48,6 @@
#define POWER_CFG 0x10
struct t9015 {
- struct clk *pclk;
struct regulator *avdd;
};
@@ -249,6 +248,7 @@ static int t9015_probe(struct platform_device *pdev)
struct t9015 *priv;
void __iomem *regs;
struct regmap *regmap;
+ struct clk *pclk;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -256,26 +256,14 @@ static int t9015_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, priv);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk))
- return dev_err_probe(dev, PTR_ERR(priv->pclk), "failed to get core clock\n");
+ pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk), "failed to get core clock\n");
priv->avdd = devm_regulator_get(dev, "AVDD");
if (IS_ERR(priv->avdd))
return dev_err_probe(dev, PTR_ERR(priv->avdd), "failed to get AVDD\n");
- ret = clk_prepare_enable(priv->pclk);
- if (ret) {
- dev_err(dev, "core clock enable failed\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- priv->pclk);
- if (ret)
- return ret;
-
ret = device_reset(dev);
if (ret) {
dev_err(dev, "reset failed\n");
diff --git a/sound/soc/rockchip/rockchip_i2s_tdm.c b/sound/soc/rockchip/rockchip_i2s_tdm.c
index 860e66ec85..9fa020ef7e 100644
--- a/sound/soc/rockchip/rockchip_i2s_tdm.c
+++ b/sound/soc/rockchip/rockchip_i2s_tdm.c
@@ -25,8 +25,6 @@
#define DEFAULT_MCLK_FS 256
#define CH_GRP_MAX 4 /* The max channel 8 / 2 */
#define MULTIPLEX_CH_MAX 10
-#define CLK_PPM_MIN -1000
-#define CLK_PPM_MAX 1000
#define TRCM_TXRX 0
#define TRCM_TX 1
@@ -53,20 +51,6 @@ struct rk_i2s_tdm_dev {
struct clk *hclk;
struct clk *mclk_tx;
struct clk *mclk_rx;
- /* The mclk_tx_src is parent of mclk_tx */
- struct clk *mclk_tx_src;
- /* The mclk_rx_src is parent of mclk_rx */
- struct clk *mclk_rx_src;
- /*
- * The mclk_root0 and mclk_root1 are root parent and supplies for
- * the different FS.
- *
- * e.g:
- * mclk_root0 is VPLL0, used for FS=48000Hz
- * mclk_root1 is VPLL1, used for FS=44100Hz
- */
- struct clk *mclk_root0;
- struct clk *mclk_root1;
struct regmap *regmap;
struct regmap *grf;
struct snd_dmaengine_dai_dma_data capture_dma_data;
@@ -76,19 +60,11 @@ struct rk_i2s_tdm_dev {
const struct rk_i2s_soc_data *soc_data;
bool is_master_mode;
bool io_multiplex;
- bool mclk_calibrate;
bool tdm_mode;
- unsigned int mclk_rx_freq;
- unsigned int mclk_tx_freq;
- unsigned int mclk_root0_freq;
- unsigned int mclk_root1_freq;
- unsigned int mclk_root0_initial_freq;
- unsigned int mclk_root1_initial_freq;
unsigned int frame_width;
unsigned int clk_trcm;
unsigned int i2s_sdis[CH_GRP_MAX];
unsigned int i2s_sdos[CH_GRP_MAX];
- int clk_ppm;
int refcount;
spinlock_t lock; /* xfer lock */
bool has_playback;
@@ -114,12 +90,6 @@ static void i2s_tdm_disable_unprepare_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
{
clk_disable_unprepare(i2s_tdm->mclk_tx);
clk_disable_unprepare(i2s_tdm->mclk_rx);
- if (i2s_tdm->mclk_calibrate) {
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
- clk_disable_unprepare(i2s_tdm->mclk_root0);
- clk_disable_unprepare(i2s_tdm->mclk_root1);
- }
}
/**
@@ -142,29 +112,9 @@ static int i2s_tdm_prepare_enable_mclk(struct rk_i2s_tdm_dev *i2s_tdm)
ret = clk_prepare_enable(i2s_tdm->mclk_rx);
if (ret)
goto err_mclk_rx;
- if (i2s_tdm->mclk_calibrate) {
- ret = clk_prepare_enable(i2s_tdm->mclk_tx_src);
- if (ret)
- goto err_mclk_rx;
- ret = clk_prepare_enable(i2s_tdm->mclk_rx_src);
- if (ret)
- goto err_mclk_rx_src;
- ret = clk_prepare_enable(i2s_tdm->mclk_root0);
- if (ret)
- goto err_mclk_root0;
- ret = clk_prepare_enable(i2s_tdm->mclk_root1);
- if (ret)
- goto err_mclk_root1;
- }
return 0;
-err_mclk_root1:
- clk_disable_unprepare(i2s_tdm->mclk_root0);
-err_mclk_root0:
- clk_disable_unprepare(i2s_tdm->mclk_rx_src);
-err_mclk_rx_src:
- clk_disable_unprepare(i2s_tdm->mclk_tx_src);
err_mclk_rx:
clk_disable_unprepare(i2s_tdm->mclk_tx);
err_mclk_tx:
@@ -564,159 +514,6 @@ static void rockchip_i2s_tdm_xfer_resume(struct snd_pcm_substream *substream,
I2S_XFER_RXS_START);
}
-static int rockchip_i2s_tdm_clk_set_rate(struct rk_i2s_tdm_dev *i2s_tdm,
- struct clk *clk, unsigned long rate,
- int ppm)
-{
- unsigned long rate_target;
- int delta, ret;
-
- if (ppm == i2s_tdm->clk_ppm)
- return 0;
-
- if (ppm < 0)
- delta = -1;
- else
- delta = 1;
-
- delta *= (int)div64_u64((u64)rate * (u64)abs(ppm) + 500000,
- 1000000);
-
- rate_target = rate + delta;
-
- if (!rate_target)
- return -EINVAL;
-
- ret = clk_set_rate(clk, rate_target);
- if (ret)
- return ret;
-
- i2s_tdm->clk_ppm = ppm;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_calibrate_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
- struct snd_pcm_substream *substream,
- unsigned int lrck_freq)
-{
- struct clk *mclk_root;
- struct clk *mclk_parent;
- unsigned int mclk_root_freq;
- unsigned int mclk_root_initial_freq;
- unsigned int mclk_parent_freq;
- unsigned int div, delta;
- u64 ppm;
- int ret;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- mclk_parent = i2s_tdm->mclk_tx_src;
- else
- mclk_parent = i2s_tdm->mclk_rx_src;
-
- switch (lrck_freq) {
- case 8000:
- case 16000:
- case 24000:
- case 32000:
- case 48000:
- case 64000:
- case 96000:
- case 192000:
- mclk_root = i2s_tdm->mclk_root0;
- mclk_root_freq = i2s_tdm->mclk_root0_freq;
- mclk_root_initial_freq = i2s_tdm->mclk_root0_initial_freq;
- mclk_parent_freq = DEFAULT_MCLK_FS * 192000;
- break;
- case 11025:
- case 22050:
- case 44100:
- case 88200:
- case 176400:
- mclk_root = i2s_tdm->mclk_root1;
- mclk_root_freq = i2s_tdm->mclk_root1_freq;
- mclk_root_initial_freq = i2s_tdm->mclk_root1_initial_freq;
- mclk_parent_freq = DEFAULT_MCLK_FS * 176400;
- break;
- default:
- dev_err(i2s_tdm->dev, "Invalid LRCK frequency: %u Hz\n",
- lrck_freq);
- return -EINVAL;
- }
-
- ret = clk_set_parent(mclk_parent, mclk_root);
- if (ret)
- return ret;
-
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, mclk_root,
- mclk_root_freq, 0);
- if (ret)
- return ret;
-
- delta = abs(mclk_root_freq % mclk_parent_freq - mclk_parent_freq);
- ppm = div64_u64((uint64_t)delta * 1000000, (uint64_t)mclk_root_freq);
-
- if (ppm) {
- div = DIV_ROUND_CLOSEST(mclk_root_initial_freq, mclk_parent_freq);
- if (!div)
- return -EINVAL;
-
- mclk_root_freq = mclk_parent_freq * round_up(div, 2);
-
- ret = clk_set_rate(mclk_root, mclk_root_freq);
- if (ret)
- return ret;
-
- i2s_tdm->mclk_root0_freq = clk_get_rate(i2s_tdm->mclk_root0);
- i2s_tdm->mclk_root1_freq = clk_get_rate(i2s_tdm->mclk_root1);
- }
-
- return clk_set_rate(mclk_parent, mclk_parent_freq);
-}
-
-static int rockchip_i2s_tdm_set_mclk(struct rk_i2s_tdm_dev *i2s_tdm,
- struct snd_pcm_substream *substream,
- struct clk **mclk)
-{
- unsigned int mclk_freq;
- int ret;
-
- if (i2s_tdm->clk_trcm) {
- if (i2s_tdm->mclk_tx_freq != i2s_tdm->mclk_rx_freq) {
- dev_err(i2s_tdm->dev,
- "clk_trcm, tx: %d and rx: %d should be the same\n",
- i2s_tdm->mclk_tx_freq,
- i2s_tdm->mclk_rx_freq);
- return -EINVAL;
- }
-
- ret = clk_set_rate(i2s_tdm->mclk_tx, i2s_tdm->mclk_tx_freq);
- if (ret)
- return ret;
-
- ret = clk_set_rate(i2s_tdm->mclk_rx, i2s_tdm->mclk_rx_freq);
- if (ret)
- return ret;
-
- /* mclk_rx is also ok. */
- *mclk = i2s_tdm->mclk_tx;
- } else {
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- *mclk = i2s_tdm->mclk_tx;
- mclk_freq = i2s_tdm->mclk_tx_freq;
- } else {
- *mclk = i2s_tdm->mclk_rx;
- mclk_freq = i2s_tdm->mclk_rx_freq;
- }
-
- ret = clk_set_rate(*mclk, mclk_freq);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int rockchip_i2s_ch_to_io(unsigned int ch, bool substream_capture)
{
if (substream_capture) {
@@ -853,19 +650,17 @@ static int rockchip_i2s_tdm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rk_i2s_tdm_dev *i2s_tdm = to_info(dai);
- struct clk *mclk;
- int ret = 0;
unsigned int val = 0;
unsigned int mclk_rate, bclk_rate, div_bclk = 4, div_lrck = 64;
+ int err;
if (i2s_tdm->is_master_mode) {
- if (i2s_tdm->mclk_calibrate)
- rockchip_i2s_tdm_calibrate_mclk(i2s_tdm, substream,
- params_rate(params));
+ struct clk *mclk = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
+ i2s_tdm->mclk_tx : i2s_tdm->mclk_rx;
- ret = rockchip_i2s_tdm_set_mclk(i2s_tdm, substream, &mclk);
- if (ret)
- return ret;
+ err = clk_set_rate(mclk, DEFAULT_MCLK_FS * params_rate(params));
+ if (err)
+ return err;
mclk_rate = clk_get_rate(mclk);
bclk_rate = i2s_tdm->frame_width * params_rate(params);
@@ -973,96 +768,6 @@ static int rockchip_i2s_tdm_trigger(struct snd_pcm_substream *substream,
return 0;
}
-static int rockchip_i2s_tdm_set_sysclk(struct snd_soc_dai *cpu_dai, int stream,
- unsigned int freq, int dir)
-{
- struct rk_i2s_tdm_dev *i2s_tdm = to_info(cpu_dai);
-
- /* Put set mclk rate into rockchip_i2s_tdm_set_mclk() */
- if (i2s_tdm->clk_trcm) {
- i2s_tdm->mclk_tx_freq = freq;
- i2s_tdm->mclk_rx_freq = freq;
- } else {
- if (stream == SNDRV_PCM_STREAM_PLAYBACK)
- i2s_tdm->mclk_tx_freq = freq;
- else
- i2s_tdm->mclk_rx_freq = freq;
- }
-
- dev_dbg(i2s_tdm->dev, "The target mclk_%s freq is: %d\n",
- stream ? "rx" : "tx", freq);
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = CLK_PPM_MIN;
- uinfo->value.integer.max = CLK_PPM_MAX;
- uinfo->value.integer.step = 1;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
-
- ucontrol->value.integer.value[0] = i2s_tdm->clk_ppm;
-
- return 0;
-}
-
-static int rockchip_i2s_tdm_clk_compensation_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *dai = snd_kcontrol_chip(kcontrol);
- struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
- int ret = 0, ppm = 0;
- int changed = 0;
- unsigned long old_rate;
-
- if (ucontrol->value.integer.value[0] < CLK_PPM_MIN ||
- ucontrol->value.integer.value[0] > CLK_PPM_MAX)
- return -EINVAL;
-
- ppm = ucontrol->value.integer.value[0];
-
- old_rate = clk_get_rate(i2s_tdm->mclk_root0);
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root0,
- i2s_tdm->mclk_root0_freq, ppm);
- if (ret)
- return ret;
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root0))
- changed = 1;
-
- if (clk_is_match(i2s_tdm->mclk_root0, i2s_tdm->mclk_root1))
- return changed;
-
- old_rate = clk_get_rate(i2s_tdm->mclk_root1);
- ret = rockchip_i2s_tdm_clk_set_rate(i2s_tdm, i2s_tdm->mclk_root1,
- i2s_tdm->mclk_root1_freq, ppm);
- if (ret)
- return ret;
- if (old_rate != clk_get_rate(i2s_tdm->mclk_root1))
- changed = 1;
-
- return changed;
-}
-
-static struct snd_kcontrol_new rockchip_i2s_tdm_compensation_control = {
- .iface = SNDRV_CTL_ELEM_IFACE_PCM,
- .name = "PCM Clock Compensation in PPM",
- .info = rockchip_i2s_tdm_clk_compensation_info,
- .get = rockchip_i2s_tdm_clk_compensation_get,
- .put = rockchip_i2s_tdm_clk_compensation_put,
-};
-
static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
{
struct rk_i2s_tdm_dev *i2s_tdm = snd_soc_dai_get_drvdata(dai);
@@ -1072,9 +777,6 @@ static int rockchip_i2s_tdm_dai_probe(struct snd_soc_dai *dai)
if (i2s_tdm->has_playback)
snd_soc_dai_dma_data_set_playback(dai, &i2s_tdm->playback_dma_data);
- if (i2s_tdm->mclk_calibrate)
- snd_soc_add_dai_controls(dai, &rockchip_i2s_tdm_compensation_control, 1);
-
return 0;
}
@@ -1115,7 +817,6 @@ static const struct snd_soc_dai_ops rockchip_i2s_tdm_dai_ops = {
.probe = rockchip_i2s_tdm_dai_probe,
.hw_params = rockchip_i2s_tdm_hw_params,
.set_bclk_ratio = rockchip_i2s_tdm_set_bclk_ratio,
- .set_sysclk = rockchip_i2s_tdm_set_sysclk,
.set_fmt = rockchip_i2s_tdm_set_fmt,
.set_tdm_slot = rockchip_dai_tdm_slot,
.trigger = rockchip_i2s_tdm_trigger,
@@ -1444,35 +1145,6 @@ static void rockchip_i2s_tdm_path_config(struct rk_i2s_tdm_dev *i2s_tdm,
rockchip_i2s_tdm_tx_path_config(i2s_tdm, num);
}
-static int rockchip_i2s_tdm_get_calibrate_mclks(struct rk_i2s_tdm_dev *i2s_tdm)
-{
- int num_mclks = 0;
-
- i2s_tdm->mclk_tx_src = devm_clk_get(i2s_tdm->dev, "mclk_tx_src");
- if (!IS_ERR(i2s_tdm->mclk_tx_src))
- num_mclks++;
-
- i2s_tdm->mclk_rx_src = devm_clk_get(i2s_tdm->dev, "mclk_rx_src");
- if (!IS_ERR(i2s_tdm->mclk_rx_src))
- num_mclks++;
-
- i2s_tdm->mclk_root0 = devm_clk_get(i2s_tdm->dev, "mclk_root0");
- if (!IS_ERR(i2s_tdm->mclk_root0))
- num_mclks++;
-
- i2s_tdm->mclk_root1 = devm_clk_get(i2s_tdm->dev, "mclk_root1");
- if (!IS_ERR(i2s_tdm->mclk_root1))
- num_mclks++;
-
- if (num_mclks < 4 && num_mclks != 0)
- return -ENOENT;
-
- if (num_mclks == 4)
- i2s_tdm->mclk_calibrate = 1;
-
- return 0;
-}
-
static int rockchip_i2s_tdm_path_prepare(struct rk_i2s_tdm_dev *i2s_tdm,
struct device_node *np,
bool is_rx_path)
@@ -1610,11 +1282,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
i2s_tdm->io_multiplex =
of_property_read_bool(node, "rockchip,io-multiplex");
- ret = rockchip_i2s_tdm_get_calibrate_mclks(i2s_tdm);
- if (ret)
- return dev_err_probe(i2s_tdm->dev, ret,
- "mclk-calibrate clocks missing");
-
regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs)) {
return dev_err_probe(i2s_tdm->dev, PTR_ERR(regs),
@@ -1667,13 +1334,6 @@ static int rockchip_i2s_tdm_probe(struct platform_device *pdev)
goto err_disable_hclk;
}
- if (i2s_tdm->mclk_calibrate) {
- i2s_tdm->mclk_root0_initial_freq = clk_get_rate(i2s_tdm->mclk_root0);
- i2s_tdm->mclk_root1_initial_freq = clk_get_rate(i2s_tdm->mclk_root1);
- i2s_tdm->mclk_root0_freq = i2s_tdm->mclk_root0_initial_freq;
- i2s_tdm->mclk_root1_freq = i2s_tdm->mclk_root1_initial_freq;
- }
-
pm_runtime_enable(&pdev->dev);
regmap_update_bits(i2s_tdm->regmap, I2S_DMACR, I2S_DMACR_TDL_MASK,
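With the clock-compensation paths removed, hw_params above simply sets mclk to DEFAULT_MCLK_FS times the sample rate and derives the bit-clock and LR-clock dividers from the resulting rates. The following standalone sketch, not part of the patch, works through that arithmetic with an example 48 kHz rate and a 64-bit frame width.

#include <stdio.h>

#define DEFAULT_MCLK_FS 256	/* as defined in the driver */

int main(void)
{
	unsigned int rate = 48000, frame_width = 64;
	unsigned int mclk_rate = DEFAULT_MCLK_FS * rate;	/* 12288000 Hz */
	unsigned int bclk_rate = frame_width * rate;		/* 3072000 Hz */
	unsigned int div_bclk = mclk_rate / bclk_rate;		/* 4 */
	unsigned int div_lrck = bclk_rate / rate;		/* 64 */

	printf("mclk=%u bclk=%u div_bclk=%u div_lrck=%u\n",
	       mclk_rate, bclk_rate, div_bclk, div_lrck);
	return 0;
}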
diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c
index 14cf1a41fb..9d10364697 100644
--- a/sound/soc/sh/rz-ssi.c
+++ b/sound/soc/sh/rz-ssi.c
@@ -1015,7 +1015,7 @@ static int rz_ssi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), ssi);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
- "irq request error (dma_tx)\n");
+ "irq request error (dma_rt)\n");
} else {
if (ssi->irq_tx < 0)
return ssi->irq_tx;
diff --git a/sound/soc/sof/amd/acp.c b/sound/soc/sof/amd/acp.c
index 603ea5fc0d..c6f637f298 100644
--- a/sound/soc/sof/amd/acp.c
+++ b/sound/soc/sof/amd/acp.c
@@ -547,17 +547,27 @@ int amd_sof_acp_probe(struct snd_sof_dev *sdev)
adata->signed_fw_image = false;
dmi_id = dmi_first_match(acp_sof_quirk_table);
if (dmi_id && dmi_id->driver_data) {
- adata->fw_code_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-code.bin",
- plat_data->fw_filename_prefix,
- chip->name);
- adata->fw_data_bin = kasprintf(GFP_KERNEL, "%s/sof-%s-data.bin",
- plat_data->fw_filename_prefix,
- chip->name);
- adata->signed_fw_image = dmi_id->driver_data;
+ adata->fw_code_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s/sof-%s-code.bin",
+ plat_data->fw_filename_prefix,
+ chip->name);
+ if (!adata->fw_code_bin) {
+ ret = -ENOMEM;
+ goto free_ipc_irq;
+ }
+
+ adata->fw_data_bin = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "%s/sof-%s-data.bin",
+ plat_data->fw_filename_prefix,
+ chip->name);
+ if (!adata->fw_data_bin) {
+ ret = -ENOMEM;
+ goto free_ipc_irq;
+ }
- dev_dbg(sdev->dev, "fw_code_bin:%s, fw_data_bin:%s\n", adata->fw_code_bin,
- adata->fw_data_bin);
+ adata->signed_fw_image = dmi_id->driver_data;
}
+
adata->enable_fw_debug = enable_fw_debug;
acp_memory_init(sdev);
diff --git a/sound/soc/sof/ipc3-loader.c b/sound/soc/sof/ipc3-loader.c
index 28218766d2..6e3ef06721 100644
--- a/sound/soc/sof/ipc3-loader.c
+++ b/sound/soc/sof/ipc3-loader.c
@@ -148,6 +148,8 @@ static size_t sof_ipc3_fw_parse_ext_man(struct snd_sof_dev *sdev)
head = (struct sof_ext_man_header *)fw->data;
remaining = head->full_size - head->header_size;
+ if (remaining < 0 || remaining > sdev->basefw.fw->size)
+ return -EINVAL;
ext_man_size = ipc3_fw_ext_man_size(sdev, fw);
/* Assert firmware starts with extended manifest */
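The added check above rejects an extended-manifest header whose full_size/header_size fields describe more data than the firmware image contains, before anything walks the payload. Below is a standalone sketch of that kind of size-field validation, not part of the patch; the struct layout and names are simplified stand-ins rather than the real SOF definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ext_man_header {
	uint32_t full_size;	/* size of the whole extended manifest */
	uint32_t header_size;	/* size of this header */
};

/* Return the payload size, or -1 if the header describes more data than
 * the buffer actually holds (or is self-inconsistent). */
static long payload_size(const uint8_t *buf, size_t buf_size)
{
	struct ext_man_header head;
	long remaining;

	if (buf_size < sizeof(head))
		return -1;
	memcpy(&head, buf, sizeof(head));

	remaining = (long)head.full_size - (long)head.header_size;
	if (remaining < 0 || (size_t)remaining > buf_size)
		return -1;	/* reject before anything walks the payload */
	return remaining;
}

int main(void)
{
	uint8_t fw[64] = { 0 };
	struct ext_man_header good = { .full_size = 32, .header_size = 8 };
	struct ext_man_header bad  = { .full_size = 8,  .header_size = 32 };

	memcpy(fw, &good, sizeof(good));
	printf("good: %ld\n", payload_size(fw, sizeof(fw)));	/* 24 */
	memcpy(fw, &bad, sizeof(bad));
	printf("bad:  %ld\n", payload_size(fw, sizeof(fw)));	/* -1 */
	return 0;
}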
diff --git a/sound/soc/sof/ipc4-pcm.c b/sound/soc/sof/ipc4-pcm.c
index 39039a647c..ea70c0d7cf 100644
--- a/sound/soc/sof/ipc4-pcm.c
+++ b/sound/soc/sof/ipc4-pcm.c
@@ -413,7 +413,18 @@ skip_pause_transition:
ret = sof_ipc4_set_multi_pipeline_state(sdev, state, trigger_list);
if (ret < 0) {
dev_err(sdev->dev, "failed to set final state %d for all pipelines\n", state);
- goto free;
+ /*
+	 * workaround: if the firmware crashed while setting the
+	 * pipelines to the reset state, we must ignore the error code
+	 * and reset it to 0.
+	 * Since the firmware has crashed, we will not send IPC messages
+	 * and we are going to see errors printed, but the state of the
+	 * widgets will be correct for the next boot.
+ */
+ if (sdev->fw_state != SOF_FW_CRASHED || state != SOF_IPC4_PIPE_RESET)
+ goto free;
+
+ ret = 0;
}
/* update RUNNING/RESET state for all pipelines that were just triggered */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 3d4add94e3..d5409f3879 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -300,9 +300,12 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
c = 0;
if (bits) {
- for (; bits && *maps; maps++, bits >>= 1)
+ for (; bits && *maps; maps++, bits >>= 1) {
if (bits & 1)
chmap->map[c++] = *maps;
+ if (c == chmap->channels)
+ break;
+ }
} else {
/* If we're missing wChannelConfig, then guess something
to make sure the channel map is not skipped entirely */
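The convert_chmap() fix above stops filling the channel map once chmap->channels entries have been written, so a descriptor advertising more set bits than allocated slots can no longer overrun the array. Here is a standalone sketch of that bounded bitmask walk, not part of the patch, with a hypothetical capacity of 4.

#include <stdio.h>

#define MAP_CAPACITY 4	/* hypothetical size of the output map */

int main(void)
{
	/* wChannelConfig-style bitmask: bits 0-3 and 5 set, i.e. five
	 * positions, one more than the capacity. */
	unsigned int bits = 0x2F;
	const int maps[] = { 1, 2, 3, 4, 5, 6, 0 };	/* 0 terminates */
	int out[MAP_CAPACITY];
	int c = 0;

	for (const int *m = maps; bits && *m; m++, bits >>= 1) {
		if (bits & 1)
			out[c++] = *m;
		if (c == MAP_CAPACITY)	/* the added bound: stop at capacity */
			break;
	}

	for (int i = 0; i < c; i++)
		printf("map[%d] = %d\n", i, out[i]);
	return 0;
}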
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 7ec4f5671e..865bfa869a 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -2294,7 +2294,7 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
int map_fd;
profile_perf_events = calloc(
- sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
if (!profile_perf_events) {
p_err("failed to allocate memory for perf_event array: %s",
strerror(errno));
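The bpftool change above only swaps the calloc() arguments into the conventional (nmemb, size) order; the number of bytes allocated is unchanged. A minimal example of the idiomatic call is shown below with made-up counts; newer compilers (for example GCC 14's -Wcalloc-transposed-args) warn about the transposed form, which is presumably the motivation here.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t ncpu = 4, nmetric = 8;

	/* calloc(nmemb, size): element count first, element size second. */
	int *events = calloc(ncpu * nmetric, sizeof(int));
	if (!events) {
		perror("calloc");
		return 1;
	}

	printf("allocated %zu zeroed ints\n", ncpu * nmetric);
	free(events);
	return 0;
}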
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 27a23196d5..d9520cb826 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -70,6 +70,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
+#include <linux/btf_ids.h>
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include <linux/err.h>
@@ -78,7 +79,7 @@
#include <subcmd/parse-options.h>
#define BTF_IDS_SECTION ".BTF_ids"
-#define BTF_ID "__BTF_ID__"
+#define BTF_ID_PREFIX "__BTF_ID__"
#define BTF_STRUCT "struct"
#define BTF_UNION "union"
@@ -89,6 +90,14 @@
#define ADDR_CNT 100
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define ELFDATANATIVE ELFDATA2LSB
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define ELFDATANATIVE ELFDATA2MSB
+#else
+# error "Unknown machine endianness!"
+#endif
+
struct btf_id {
struct rb_node rb_node;
char *name;
@@ -116,6 +125,7 @@ struct object {
int idlist_shndx;
size_t strtabidx;
unsigned long idlist_addr;
+ int encoding;
} efile;
struct rb_root sets;
@@ -161,7 +171,7 @@ static int eprintf(int level, int var, const char *fmt, ...)
static bool is_btf_id(const char *name)
{
- return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+ return name && !strncmp(name, BTF_ID_PREFIX, sizeof(BTF_ID_PREFIX) - 1);
}
static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
@@ -319,6 +329,7 @@ static int elf_collect(struct object *obj)
{
Elf_Scn *scn = NULL;
size_t shdrstrndx;
+ GElf_Ehdr ehdr;
int idx = 0;
Elf *elf;
int fd;
@@ -350,6 +361,13 @@ static int elf_collect(struct object *obj)
return -1;
}
+ if (gelf_getehdr(obj->efile.elf, &ehdr) == NULL) {
+ pr_err("FAILED cannot get ELF header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ obj->efile.encoding = ehdr.e_ident[EI_DATA];
+
/*
* Scan all the elf sections and look for save data
* from .BTF_ids section and symbols.
@@ -441,7 +459,7 @@ static int symbols_collect(struct object *obj)
* __BTF_ID__TYPE__vfs_truncate__0
* prefix = ^
*/
- prefix = name + sizeof(BTF_ID) - 1;
+ prefix = name + sizeof(BTF_ID_PREFIX) - 1;
/* struct */
if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
@@ -649,19 +667,18 @@ static int cmp_id(const void *pa, const void *pb)
static int sets_patch(struct object *obj)
{
Elf_Data *data = obj->efile.idlist;
- int *ptr = data->d_buf;
struct rb_node *next;
next = rb_first(&obj->sets);
while (next) {
- unsigned long addr, idx;
+ struct btf_id_set8 *set8;
+ struct btf_id_set *set;
+ unsigned long addr, off;
struct btf_id *id;
- int *base;
- int cnt;
id = rb_entry(next, struct btf_id, rb_node);
addr = id->addr[0];
- idx = addr - obj->efile.idlist_addr;
+ off = addr - obj->efile.idlist_addr;
/* sets are unique */
if (id->addr_cnt != 1) {
@@ -670,14 +687,39 @@ static int sets_patch(struct object *obj)
return -1;
}
- idx = idx / sizeof(int);
- base = &ptr[idx] + (id->is_set8 ? 2 : 1);
- cnt = ptr[idx];
+ if (id->is_set) {
+ set = data->d_buf + off;
+ qsort(set->ids, set->cnt, sizeof(set->ids[0]), cmp_id);
+ } else {
+ set8 = data->d_buf + off;
+ /*
+ * Make sure id is at the beginning of the pairs
+ * struct, otherwise the below qsort would not work.
+ */
+ BUILD_BUG_ON(set8->pairs != &set8->pairs[0].id);
+ qsort(set8->pairs, set8->cnt, sizeof(set8->pairs[0]), cmp_id);
- pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
- (idx + 1) * sizeof(int), cnt, id->name);
+ /*
+ * When ELF endianness does not match endianness of the
+ * host, libelf will do the translation when updating
+ * the ELF. This, however, corrupts SET8 flags which are
+ * already in the target endianness. So, let's bswap
+ * them to the host endianness and libelf will then
+ * correctly translate everything.
+ */
+ if (obj->efile.encoding != ELFDATANATIVE) {
+ int i;
+
+ set8->flags = bswap_32(set8->flags);
+ for (i = 0; i < set8->cnt; i++) {
+ set8->pairs[i].flags =
+ bswap_32(set8->pairs[i].flags);
+ }
+ }
+ }
- qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+ off, id->is_set ? set->cnt : set8->cnt, id->name);
next = rb_next(next);
}
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 2f882d5cb3..72ea363d43 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -3,11 +3,22 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+#include <linux/types.h> /* for u32 */
+
struct btf_id_set {
u32 cnt;
u32 ids[];
};
+struct btf_id_set8 {
+ u32 cnt;
+ u32 flags;
+ struct {
+ u32 id;
+ u32 flags;
+ } pairs[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index d0f53772bd..dad7917903 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -35,7 +35,7 @@
extern "C" {
#endif
-int libbpf_set_memlock_rlim(size_t memlock_bytes);
+LIBBPF_API int libbpf_set_memlock_rlim(size_t memlock_bytes);
struct bpf_map_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index df1b550f74..ca83af916c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -70,6 +70,7 @@
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+static int map_set_def_max_entries(struct bpf_map *map);
static const char * const attach_type_name[] = {
[BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
@@ -5214,6 +5215,9 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (bpf_map_type__is_map_in_map(def->type)) {
if (map->inner_map) {
+ err = map_set_def_max_entries(map->inner_map);
+ if (err)
+ return err;
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index f0f08635ad..57dec645d6 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -18,6 +18,20 @@
#include <libelf.h>
#include "relo_core.h"
+/* Android's libc doesn't support AT_EACCESS in faccessat() implementation
+ * ([0]), and just returns -EINVAL even if file exists and is accessible.
+ * See [1] for issues caused by this.
+ *
+ * So just redefine it to 0 on Android.
+ *
+ * [0] https://android.googlesource.com/platform/bionic/+/refs/heads/android13-release/libc/bionic/faccessat.cpp#50
+ * [1] https://github.com/libbpf/libbpf-bootstrap/issues/250#issuecomment-1911324250
+ */
+#ifdef __ANDROID__
+#undef AT_EACCESS
+#define AT_EACCESS 0
+#endif
+
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 090bcf6e3b..68a2def171 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -496,8 +496,8 @@ int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
if (err)
return libbpf_err(err);
- opts->feature_flags = md.flags;
- opts->xdp_zc_max_segs = md.xdp_zc_max_segs;
+ OPTS_SET(opts, feature_flags, md.flags);
+ OPTS_SET(opts, xdp_zc_max_segs, md.xdp_zc_max_segs);
skip_feature_flags:
return 0;
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index e94756e09c..167603f839 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3620,6 +3620,18 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
}
if (!save_insn->visited) {
+ /*
+ * If the restore hint insn is at the
+ * beginning of a basic block and was
+ * branched to from elsewhere, and the
+ * save insn hasn't been visited yet,
+ * defer following this branch for now.
+ * It will be seen later via the
+ * straight-line path.
+ */
+ if (!prev_insn)
+ return 0;
+
WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
return 1;
}
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 058c9aecf6..af22d539f3 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -1148,7 +1148,7 @@ bpf-skel:
endif # CONFIG_PERF_BPF_SKEL
bpf-skel-clean:
- $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
+ $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) $(SKEL_OUT)/vmlinux.h
clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
$(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index dcf288a4fb..5d86aa5ff5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1818,8 +1818,8 @@ static int
record__switch_output(struct record *rec, bool at_exit)
{
struct perf_data *data = &rec->data;
+ char *new_filename = NULL;
int fd, err;
- char *new_filename;
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
@@ -2216,32 +2216,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec)
}
}
-static void record__uniquify_name(struct record *rec)
-{
- struct evsel *pos;
- struct evlist *evlist = rec->evlist;
- char *new_name;
- int ret;
-
- if (perf_pmus__num_core_pmus() == 1)
- return;
-
- evlist__for_each_entry(evlist, pos) {
- if (!evsel__is_hybrid(pos))
- continue;
-
- if (strchr(pos->name, '/'))
- continue;
-
- ret = asprintf(&new_name, "%s/%s/",
- pos->pmu_name, pos->name);
- if (ret) {
- free(pos->name);
- pos->name = new_name;
- }
- }
-}
-
static int record__terminate_thread(struct record_thread *thread_data)
{
int err;
@@ -2475,7 +2449,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
- record__uniquify_name(rec);
+ if (rec->timestamp_filename && perf_data__is_pipe(data)) {
+ rec->timestamp_filename = false;
+ pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
+ }
+
+ evlist__uniquify_name(rec->evlist);
/* Debug message used by test scripts */
pr_debug3("perf record opening and mmapping events\n");
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ea8c7eca5e..cd64ae44cc 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1027,8 +1027,8 @@ static int perf_top__start_counters(struct perf_top *top)
evlist__for_each_entry(evlist, counter) {
try_again:
- if (evsel__open(counter, top->evlist->core.user_requested_cpus,
- top->evlist->core.threads) < 0) {
+ if (evsel__open(counter, counter->core.cpus,
+ counter->core.threads) < 0) {
/*
* Specially handle overwrite fall back.
@@ -1299,6 +1299,7 @@ static int __cmd_top(struct perf_top *top)
}
}
+ evlist__uniquify_name(top->evlist);
ret = perf_top__start_counters(top);
if (ret)
return ret;
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index c29d8a382b..550675ce0b 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -430,8 +430,6 @@ int perf_data__switch(struct perf_data *data,
{
int ret;
- if (check_pipe(data))
- return -EINVAL;
if (perf_data__is_read(data))
return -EINVAL;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b0ed14a6da..eb1dd29c53 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -2525,3 +2525,28 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
}
perf_cpu_map__put(user_requested_cpus);
}
+
+void evlist__uniquify_name(struct evlist *evlist)
+{
+ struct evsel *pos;
+ char *new_name;
+ int ret;
+
+ if (perf_pmus__num_core_pmus() == 1)
+ return;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (strchr(pos->name, '/'))
+ continue;
+
+ ret = asprintf(&new_name, "%s/%s/",
+ pos->pmu_name, pos->name);
+ if (ret) {
+ free(pos->name);
+ pos->name = new_name;
+ }
+ }
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 98e7ddb2bd..cb91dc9117 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -442,5 +442,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
+void evlist__uniquify_name(struct evlist *evlist);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 72a5dfc38d..1fb24ca8ae 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -2340,7 +2340,6 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->period = evsel->core.attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
data->misc = event->header.misc;
- data->id = -1ULL;
data->data_src = PERF_MEM_DATA_SRC_NONE;
data->vcpu = -1;
diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c
index 7be23b3ac0..b8875aac8f 100644
--- a/tools/perf/util/expr.c
+++ b/tools/perf/util/expr.c
@@ -500,7 +500,25 @@ double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const
tmp = evlist__new();
if (!tmp)
return NAN;
- ret = parse_event(tmp, id) ? 0 : 1;
+
+ if (strchr(id, '@')) {
+ char *tmp_id, *p;
+
+ tmp_id = strdup(id);
+ if (!tmp_id) {
+ ret = NAN;
+ goto out;
+ }
+ p = strchr(tmp_id, '@');
+ *p = '/';
+ p = strrchr(tmp_id, '@');
+ *p = '/';
+ ret = parse_event(tmp, tmp_id) ? 0 : 1;
+ free(tmp_id);
+ } else {
+ ret = parse_event(tmp, id) ? 0 : 1;
+ }
+out:
evlist__delete(tmp);
return ret;
}
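The expr__has_event() change above accepts the pmu@event@ spelling by copying the id and turning the first and last '@' into '/' before handing it to parse_event(). Below is a standalone sketch of that rewrite, not part of the patch; the metric term is only an example, and the NULL checks are added for standalone safety (the kernel path already knows an '@' is present).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	const char *id = "cpu_core@TOPDOWN.SLOTS@";	/* hypothetical term */
	char *tmp = strdup(id);
	char *p;

	if (!tmp)
		return 1;

	p = strchr(tmp, '@');	/* first '@' -> '/' */
	if (p)
		*p = '/';
	p = strrchr(tmp, '@');	/* last '@' -> '/' */
	if (p)
		*p = '/';

	printf("%s -> %s\n", id, tmp);	/* cpu_core/TOPDOWN.SLOTS/ */
	free(tmp);
	return 0;
}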
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d3c9aa4326..aaa013af52 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -1019,10 +1019,9 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
* type value and format definitions. Load both right
* now.
*/
- if (pmu_format(pmu, dirfd, name)) {
- free(pmu);
- return NULL;
- }
+ if (pmu_format(pmu, dirfd, name))
+ goto err;
+
pmu->is_core = is_pmu_core(name);
pmu->cpus = pmu_cpumask(dirfd, name, pmu->is_core);
@@ -1756,6 +1755,12 @@ bool pmu__name_match(const struct perf_pmu *pmu, const char *pmu_name)
bool perf_pmu__is_software(const struct perf_pmu *pmu)
{
+ const char *known_sw_pmus[] = {
+ "kprobe",
+ "msr",
+ "uprobe",
+ };
+
if (pmu->is_core || pmu->is_uncore || pmu->auxtrace)
return false;
switch (pmu->type) {
@@ -1767,7 +1772,11 @@ bool perf_pmu__is_software(const struct perf_pmu *pmu)
case PERF_TYPE_BREAKPOINT: return true;
default: break;
}
- return !strcmp(pmu->name, "kprobe") || !strcmp(pmu->name, "uprobe");
+ for (size_t i = 0; i < ARRAY_SIZE(known_sw_pmus); i++) {
+ if (!strcmp(pmu->name, known_sw_pmus[i]))
+ return true;
+ }
+ return false;
}
FILE *perf_pmu__open_file(const struct perf_pmu *pmu, const char *name)
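perf_pmu__is_software() above now checks the PMU name against a small known_sw_pmus[] table instead of chained strcmp() calls, which makes adding "msr" straightforward. A standalone sketch of that table lookup, separate from the patch:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int is_known_sw_pmu(const char *name)
{
	static const char *known_sw_pmus[] = { "kprobe", "msr", "uprobe" };

	for (size_t i = 0; i < ARRAY_SIZE(known_sw_pmus); i++) {
		if (!strcmp(name, known_sw_pmus[i]))
			return 1;
	}
	return 0;
}

int main(void)
{
	const char *names[] = { "msr", "cpu", "uprobe" };

	for (size_t i = 0; i < ARRAY_SIZE(names); i++)
		printf("%-7s -> %d\n", names[i], is_known_sw_pmu(names[i]));
	return 0;
}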
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index b0fc48be62..4f67e8f00a 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -232,7 +232,6 @@ void print_sdt_events(const struct print_callbacks *print_cb, void *print_state)
bool is_event_supported(u8 type, u64 config)
{
bool ret = true;
- int open_return;
struct evsel *evsel;
struct perf_event_attr attr = {
.type = type,
@@ -246,20 +245,32 @@ bool is_event_supported(u8 type, u64 config)
evsel = evsel__new(&attr);
if (evsel) {
- open_return = evsel__open(evsel, NULL, tmap);
- ret = open_return >= 0;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
- if (open_return == -EACCES) {
+ if (!ret) {
/*
- * This happens if the paranoid value
+ * The event may fail to open if the paranoid value
* /proc/sys/kernel/perf_event_paranoid is set to 2
- * Re-run with exclude_kernel set; we don't do that
- * by default as some ARM machines do not support it.
- *
+ * Re-run with exclude_kernel set; we don't do that by
+ * default as some ARM machines do not support it.
*/
evsel->core.attr.exclude_kernel = 1;
ret = evsel__open(evsel, NULL, tmap) >= 0;
}
+
+ if (!ret) {
+ /*
+ * The event may fail to open if the PMU requires
+ * exclude_guest to be set (e.g. as the Apple M1 PMU
+ * requires).
+ * Re-run with exclude_guest set; we don't do that by
+ * default as it's equally legitimate for another PMU
+ * driver to require that exclude_guest is clear.
+ */
+ evsel->core.attr.exclude_guest = 1;
+ ret = evsel__open(evsel, NULL, tmap) >= 0;
+ }
+
evsel__delete(evsel);
}
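The retries above follow a simple fallback pattern: try the open as-is, then progressively restrict the attribute (exclude_kernel, then exclude_guest) until some combination is accepted. A hedged sketch of that pattern around a raw perf_event_open() syscall, independent of perf's evsel layer; open_with_fallbacks() is an illustrative name:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Try opening the event; on failure retry with exclude_kernel, then
 * exclude_guest, mirroring the fallbacks in is_event_supported(). */
static int open_with_fallbacks(__u32 type, __u64 config)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = config;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		attr.exclude_kernel = 1;
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
	if (fd < 0) {
		attr.exclude_guest = 1;
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
	return fd;	/* caller closes the fd, or gets -1 */
}

int main(void)
{
	int fd = open_with_fallbacks(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES);

	if (fd < 0)
		return 1;
	close(fd);
	return 0;
}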
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 034b496df2..7addc34afc 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -399,6 +399,8 @@ static void addr2line_subprocess_cleanup(struct child_process *a2l)
kill(a2l->pid, SIGKILL);
finish_command(a2l); /* ignore result, we don't care */
a2l->pid = -1;
+ close(a2l->in);
+ close(a2l->out);
}
free(a2l);
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index afe6db8e7b..969ce40096 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -560,7 +560,7 @@ static void print_metric_only(struct perf_stat_config *config,
if (color)
mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
- color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+ color_snprintf(str, sizeof(str), color ?: "", fmt ?: "", val);
fprintf(out, "%*s ", mlen, str);
os->first = false;
}
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index e314261678..cf573ff3fa 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -414,12 +414,7 @@ static int prepare_metric(struct evsel **metric_events,
val = NAN;
source_count = 0;
} else {
- /*
- * If an event was scaled during stat gathering,
- * reverse the scale before computing the
- * metric.
- */
- val = aggr->counts.val * (1.0 / metric_events[i]->scale);
+ val = aggr->counts.val;
source_count = evsel__source_count(metric_events[i]);
}
}
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index e848579e61..ea3b431b97 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -280,13 +280,13 @@ struct perf_thread_map *thread_map__new_by_tid_str(const char *tid_str)
threads->nr = ntasks;
}
out:
+ strlist__delete(slist);
if (threads)
refcount_set(&threads->refcnt, 1);
return threads;
out_free_threads:
zfree(&threads);
- strlist__delete(slist);
goto out;
}
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 91907b321f..e7c9e1c7fd 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2020 Facebook */
#include <linux/btf.h>
#include <linux/btf_ids.h>
+#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -544,6 +545,14 @@ static int bpf_testmod_init(void)
static void bpf_testmod_exit(void)
{
+ /* Need to wait for all references to be dropped because
+ * bpf_kfunc_call_test_release(), which currently resides in the kernel,
+ * can be called after bpf_testmod is unloaded. Once the release function
+ * is moved into the module, this wait can be removed.
+ */
+ while (refcount_read(&prog_test_struct.cnt) > 1)
+ msleep(20);
+
return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}
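The module exit path above simply polls the reference count until only the module's own reference remains before tearing down its sysfs file. A userspace analogue of that wait-until-quiescent loop, with C11 atomics and usleep() standing in for refcount_read() and msleep(); the names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int object_refs = 3;	/* pretend three users still hold it */

static void put_ref(void)
{
	atomic_fetch_sub(&object_refs, 1);
}

/* Block until only the owner's reference is left; then it is safe to free. */
static void wait_for_last_ref(void)
{
	while (atomic_load(&object_refs) > 1)
		usleep(20 * 1000);	/* ~msleep(20) in the module */
}

int main(void)
{
	put_ref();
	put_ref();	/* drop the two extra references */
	wait_for_last_ref();
	puts("quiescent, safe to tear down");
	return 0;
}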
diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
index 59b38569f3..2bc932a18c 100644
--- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c
@@ -203,6 +203,7 @@ static int setup_redirect_target(const char *target_dev, bool need_mac)
if (!ASSERT_GE(target_index, 0, "if_nametoindex"))
goto fail;
+ SYS(fail, "sysctl -w net.ipv6.conf.all.disable_ipv6=1");
SYS(fail, "ip link add link_err type dummy");
SYS(fail, "ip link set lo up");
SYS(fail, "ip addr add dev lo " LOCAL_SRC "/32");
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 518f143c5b..dbe06aeaa2 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -188,6 +188,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
{
struct nstoken *nstoken = NULL;
char src_fwd_addr[IFADDR_STR_LEN+1] = {};
+ char src_addr[IFADDR_STR_LEN + 1] = {};
int err;
if (result->dev_mode == MODE_VETH) {
@@ -208,6 +209,9 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
if (get_ifaddr("src_fwd", src_fwd_addr))
goto fail;
+ if (get_ifaddr("src", src_addr))
+ goto fail;
+
result->ifindex_src = if_nametoindex("src");
if (!ASSERT_GT(result->ifindex_src, 0, "ifindex_src"))
goto fail;
@@ -270,6 +274,13 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip route add " IP4_DST "/32 dev dst_fwd scope global");
SYS(fail, "ip route add " IP6_DST "/128 dev dst_fwd scope global");
+ if (result->dev_mode == MODE_VETH) {
+ SYS(fail, "ip neigh add " IP4_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP6_SRC " dev src_fwd lladdr %s", src_addr);
+ SYS(fail, "ip neigh add " IP4_DST " dev dst_fwd lladdr %s", MAC_DST);
+ SYS(fail, "ip neigh add " IP6_DST " dev dst_fwd lladdr %s", MAC_DST);
+ }
+
close_netns(nstoken);
/** setup in 'dst' namespace */
@@ -280,6 +291,7 @@ static int netns_setup_links_and_routes(struct netns_setup_result *result)
SYS(fail, "ip addr add " IP4_DST "/32 dev dst");
SYS(fail, "ip addr add " IP6_DST "/128 dev dst nodad");
SYS(fail, "ip link set dev dst up");
+ SYS(fail, "ip link set dev lo up");
SYS(fail, "ip route add " IP4_SRC "/32 dev dst scope global");
SYS(fail, "ip route add " IP4_NET "/16 dev dst scope global");
@@ -457,7 +469,7 @@ static int set_forwarding(bool enable)
return 0;
}
-static void rcv_tstamp(int fd, const char *expected, size_t s)
+static int __rcv_tstamp(int fd, const char *expected, size_t s, __u64 *tstamp)
{
struct __kernel_timespec pkt_ts = {};
char ctl[CMSG_SPACE(sizeof(pkt_ts))];
@@ -478,7 +490,7 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
ret = recvmsg(fd, &msg, 0);
if (!ASSERT_EQ(ret, s, "recvmsg"))
- return;
+ return -1;
ASSERT_STRNEQ(data, expected, s, "expected rcv data");
cmsg = CMSG_FIRSTHDR(&msg);
@@ -487,6 +499,12 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
memcpy(&pkt_ts, CMSG_DATA(cmsg), sizeof(pkt_ts));
pkt_ns = pkt_ts.tv_sec * NSEC_PER_SEC + pkt_ts.tv_nsec;
+ if (tstamp) {
+ /* caller will check the tstamp itself */
+ *tstamp = pkt_ns;
+ return 0;
+ }
+
ASSERT_NEQ(pkt_ns, 0, "pkt rcv tstamp");
ret = clock_gettime(CLOCK_REALTIME, &now_ts);
@@ -496,6 +514,60 @@ static void rcv_tstamp(int fd, const char *expected, size_t s)
if (ASSERT_GE(now_ns, pkt_ns, "check rcv tstamp"))
ASSERT_LT(now_ns - pkt_ns, 5 * NSEC_PER_SEC,
"check rcv tstamp");
+ return 0;
+}
+
+static void rcv_tstamp(int fd, const char *expected, size_t s)
+{
+ __rcv_tstamp(fd, expected, s, NULL);
+}
+
+static int wait_netstamp_needed_key(void)
+{
+ int opt = 1, srv_fd = -1, cli_fd = -1, nretries = 0, err, n;
+ char buf[] = "testing testing";
+ struct nstoken *nstoken;
+ __u64 tstamp = 0;
+
+ nstoken = open_netns(NS_DST);
+ if (!nstoken)
+ return -1;
+
+ srv_fd = start_server(AF_INET6, SOCK_DGRAM, "::1", 0, 0);
+ if (!ASSERT_GE(srv_fd, 0, "start_server"))
+ goto done;
+
+ err = setsockopt(srv_fd, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
+ &opt, sizeof(opt));
+ if (!ASSERT_OK(err, "setsockopt(SO_TIMESTAMPNS_NEW)"))
+ goto done;
+
+ cli_fd = connect_to_fd(srv_fd, TIMEOUT_MILLIS);
+ if (!ASSERT_GE(cli_fd, 0, "connect_to_fd"))
+ goto done;
+
+again:
+ n = write(cli_fd, buf, sizeof(buf));
+ if (!ASSERT_EQ(n, sizeof(buf), "send to server"))
+ goto done;
+ err = __rcv_tstamp(srv_fd, buf, sizeof(buf), &tstamp);
+ if (!ASSERT_OK(err, "__rcv_tstamp"))
+ goto done;
+ if (!tstamp && nretries++ < 5) {
+ sleep(1);
+ printf("netstamp_needed_key retry#%d\n", nretries);
+ goto again;
+ }
+
+done:
+ if (!tstamp && srv_fd != -1) {
+ close(srv_fd);
+ srv_fd = -1;
+ }
+ if (cli_fd != -1)
+ close(cli_fd);
+ close_netns(nstoken);
+ return srv_fd;
}
static void snd_tstamp(int fd, char *b, size_t s)
@@ -832,11 +904,20 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
{
struct test_tc_dtime *skel;
struct nstoken *nstoken;
- int err;
+ int hold_tstamp_fd, err;
+
+ /* Hold a sk with SOCK_TIMESTAMP set to ensure there
+ * is no delay in the kernel's net_enable_timestamp().
+ * This guarantees the following tests see a
+ * non-zero rcv tstamp in recvmsg().
+ */
+ hold_tstamp_fd = wait_netstamp_needed_key();
+ if (!ASSERT_GE(hold_tstamp_fd, 0, "wait_netstamp_needed_key"))
+ return;
skel = test_tc_dtime__open();
if (!ASSERT_OK_PTR(skel, "test_tc_dtime__open"))
- return;
+ goto done;
skel->rodata->IFINDEX_SRC = setup_result->ifindex_src_fwd;
skel->rodata->IFINDEX_DST = setup_result->ifindex_dst_fwd;
@@ -881,6 +962,7 @@ static void test_tc_redirect_dtime(struct netns_setup_result *setup_result)
done:
test_tc_dtime__destroy(skel);
+ close(hold_tstamp_fd);
}
static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result)
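wait_netstamp_needed_key() above holds a socket with receive timestamping enabled so that the kernel's timestamping static key is already active when the real test runs, guaranteeing non-zero receive timestamps. A hedged sketch of the same idea; it uses the older SO_TIMESTAMPNS option for header portability, whereas the selftest uses SO_TIMESTAMPNS_NEW:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open a UDP socket with receive timestamping enabled and keep it around;
 * while such a socket exists, the kernel keeps net timestamping on, so
 * later recvmsg() callers see non-zero SCM_TIMESTAMPNS values. */
int main(void)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPNS,
				 &one, sizeof(one))) {
		perror("timestamping socket");
		return 1;
	}
	puts("holding timestamping socket; run the latency test now");
	pause();	/* keep the fd (and the static key) alive */
	close(fd);
	return 0;
}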
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
index c3b45745cb..6d8b54124c 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
@@ -511,7 +511,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
- if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ if (!ASSERT_EQ(query_opts.feature_flags, 0,
"bond query_opts.feature_flags"))
goto out;
@@ -601,7 +601,7 @@ static void test_xdp_bonding_features(struct skeletons *skeletons)
if (!ASSERT_OK(err, "bond bpf_xdp_query"))
goto out;
- ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK,
+ ASSERT_EQ(query_opts.feature_flags, 0,
"bond query_opts.feature_flags");
out:
bpf_link__destroy(link);
diff --git a/tools/testing/selftests/bpf/progs/test_map_in_map.c b/tools/testing/selftests/bpf/progs/test_map_in_map.c
index f416032ba8..b295f9b721 100644
--- a/tools/testing/selftests/bpf/progs/test_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_map_in_map.c
@@ -21,6 +21,32 @@ struct {
__type(value, __u32);
} mim_hash SEC(".maps");
+/* The following three maps are used to test that a
+ * perf_event_array map can be an inner
+ * map of hash/array_of_maps.
+ */
+struct perf_event_array {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+} inner_map0 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_array_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __array(values, struct perf_event_array);
+} mim_hash_pe SEC(".maps") = {
+ .values = {&inner_map0}};
+
SEC("xdp")
int xdp_mimtest0(struct xdp_md *ctx)
{
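The three maps added above exercise BPF_MAP_TYPE_PERF_EVENT_ARRAY as the inner map of an array-of-maps and a hash-of-maps. The same nesting can be created from userspace with libbpf's low-level API; a sketch assuming libbpf 1.x (bpf_map_create() with an inner_map_fd option) and sufficient privileges, with illustrative map names:

#include <bpf/bpf.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Inner template: a perf event array (4-byte keys and values). */
	int inner_fd = bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, "inner_pe",
				      sizeof(int), sizeof(int), 1, NULL);
	if (inner_fd < 0) {
		perror("inner bpf_map_create");
		return 1;
	}

	/* Outer map whose values are maps shaped like the inner template. */
	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
	int outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "mim_pe",
				      sizeof(int), sizeof(int), 1, &opts);
	if (outer_fd < 0) {
		perror("outer bpf_map_create");
		close(inner_fd);
		return 1;
	}

	printf("inner fd %d, outer fd %d\n", inner_fd, outer_fd);
	close(outer_fd);
	close(inner_fd);
	return 0;
}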
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 7fc00e423e..e0dd101c9f 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1190,7 +1190,11 @@ static void test_map_in_map(void)
goto out_map_in_map;
}
- bpf_object__load(obj);
+ err = bpf_object__load(obj);
+ if (err) {
+ printf("Failed to load test prog\n");
+ goto out_map_in_map;
+ }
map = bpf_object__find_map_by_name(obj, "mim_array");
if (!map) {
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 4faa898ff7..27fd7ed3e4 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -271,7 +271,7 @@ ssize_t get_uprobe_offset(const void *addr)
* addi r2,r2,XXXX
*/
{
- const u32 *insn = (const u32 *)(uintptr_t)addr;
+ const __u32 *insn = (const __u32 *)(uintptr_t)addr;
if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index ec22291363..18a49c70d4 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -50,39 +50,41 @@ static char *cmd_to_str(unsigned long cmd)
void *gup_thread(void *data)
{
struct gup_test gup = *(struct gup_test *)data;
- int i;
+ int i, status;
/* Only report timing information on the *_BENCHMARK commands: */
if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) ||
(cmd == PIN_LONGTERM_BENCHMARK)) {
for (i = 0; i < repeats; i++) {
gup.size = size;
- if (ioctl(gup_fd, cmd, &gup))
- perror("ioctl"), exit(1);
+ status = ioctl(gup_fd, cmd, &gup);
+ if (status)
+ break;
pthread_mutex_lock(&print_mutex);
- printf("%s: Time: get:%lld put:%lld us",
- cmd_to_str(cmd), gup.get_delta_usec,
- gup.put_delta_usec);
+ ksft_print_msg("%s: Time: get:%lld put:%lld us",
+ cmd_to_str(cmd), gup.get_delta_usec,
+ gup.put_delta_usec);
if (gup.size != size)
- printf(", truncated (size: %lld)", gup.size);
- printf("\n");
+ ksft_print_msg(", truncated (size: %lld)", gup.size);
+ ksft_print_msg("\n");
pthread_mutex_unlock(&print_mutex);
}
} else {
gup.size = size;
- if (ioctl(gup_fd, cmd, &gup)) {
- perror("ioctl");
- exit(1);
- }
+ status = ioctl(gup_fd, cmd, &gup);
+ if (status)
+ goto return_;
pthread_mutex_lock(&print_mutex);
- printf("%s: done\n", cmd_to_str(cmd));
+ ksft_print_msg("%s: done\n", cmd_to_str(cmd));
if (gup.size != size)
- printf("Truncated (size: %lld)\n", gup.size);
+ ksft_print_msg("Truncated (size: %lld)\n", gup.size);
pthread_mutex_unlock(&print_mutex);
}
+return_:
+ ksft_test_result(!status, "ioctl status %d\n", status);
return NULL;
}
@@ -170,7 +172,7 @@ int main(int argc, char **argv)
touch = 1;
break;
default:
- return -1;
+ ksft_exit_fail_msg("Wrong argument\n");
}
}
@@ -198,11 +200,12 @@ int main(int argc, char **argv)
}
}
- filed = open(file, O_RDWR|O_CREAT);
- if (filed < 0) {
- perror("open");
- exit(filed);
- }
+ ksft_print_header();
+ ksft_set_plan(nthreads);
+
+ filed = open(file, O_RDWR|O_CREAT, 0664);
+ if (filed < 0)
+ ksft_exit_fail_msg("Unable to open %s: %s\n", file, strerror(errno));
gup.nr_pages_per_call = nr_pages;
if (write)
@@ -213,27 +216,24 @@ int main(int argc, char **argv)
switch (errno) {
case EACCES:
if (getuid())
- printf("Please run this test as root\n");
+ ksft_print_msg("Please run this test as root\n");
break;
case ENOENT:
- if (opendir("/sys/kernel/debug") == NULL) {
- printf("mount debugfs at /sys/kernel/debug\n");
- break;
- }
- printf("check if CONFIG_GUP_TEST is enabled in kernel config\n");
+ if (opendir("/sys/kernel/debug") == NULL)
+ ksft_print_msg("mount debugfs at /sys/kernel/debug\n");
+ ksft_print_msg("check if CONFIG_GUP_TEST is enabled in kernel config\n");
break;
default:
- perror("failed to open " GUP_TEST_FILE);
+ ksft_print_msg("failed to open %s: %s\n", GUP_TEST_FILE, strerror(errno));
break;
}
- exit(KSFT_SKIP);
+ ksft_test_result_skip("Please run this test as root\n");
+ return ksft_exit_pass();
}
p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
- if (p == MAP_FAILED) {
- perror("mmap");
- exit(1);
- }
+ if (p == MAP_FAILED)
+ ksft_exit_fail_msg("mmap: %s\n", strerror(errno));
gup.addr = (unsigned long)p;
if (thp == 1)
@@ -264,7 +264,8 @@ int main(int argc, char **argv)
ret = pthread_join(tid[i], NULL);
assert(ret == 0);
}
+
free(tid);
- return 0;
+ return ksft_exit_pass();
}
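The gup_test changes above move from raw printf()/exit() to the kselftest helpers, which emit TAP output: print a header, declare a plan, report one result per planned test, and exit through the ksft_* helpers. A hedged skeleton of that reporting pattern; it assumes the file sits next to the other mm selftests so ../kselftest.h resolves:

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include "../kselftest.h"

int main(void)
{
	int fd;

	ksft_print_header();
	ksft_set_plan(1);			/* one result will be reported */

	fd = open("/dev/null", O_RDONLY);
	if (fd < 0)
		ksft_exit_fail_msg("open: %s\n", strerror(errno));

	/* ... the actual test work would go here ... */
	ksft_test_result(1, "dummy check\n");

	close(fd);
	return ksft_exit_pass();		/* prints the TAP summary */
}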
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index cc5f144430..7dbfa53d93 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -137,7 +137,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon)
if (!map)
ksft_exit_fail_msg("anon mmap failed\n");
} else {
- test_fd = open(fname, O_RDWR | O_CREAT);
+ test_fd = open(fname, O_RDWR | O_CREAT, 0664);
if (test_fd < 0) {
ksft_test_result_skip("Test %s open() file failed\n", __func__);
return;
diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c
index 0e74635c8c..dff3be2348 100644
--- a/tools/testing/selftests/mm/split_huge_page_test.c
+++ b/tools/testing/selftests/mm/split_huge_page_test.c
@@ -253,7 +253,7 @@ void split_file_backed_thp(void)
goto cleanup;
}
- fd = open(testfile, O_CREAT|O_WRONLY);
+ fd = open(testfile, O_CREAT|O_WRONLY, 0664);
if (fd == -1) {
perror("Cannot open testing file\n");
goto cleanup;
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 02b89860e1..ba6777cdf4 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -17,6 +17,7 @@ bool map_shared;
bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
+atomic_bool ready_for_fork;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -507,6 +508,8 @@ void *uffd_poll_thread(void *arg)
pollfd[1].fd = pipefd[cpu*2];
pollfd[1].events = POLLIN;
+ ready_for_fork = true;
+
for (;;) {
ret = poll(pollfd, 2, -1);
if (ret <= 0) {
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index 7c4fa964c3..1f0d573f30 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -32,6 +32,7 @@
#include <inttypes.h>
#include <stdint.h>
#include <sys/random.h>
+#include <stdatomic.h>
#include "../kselftest.h"
#include "vm_util.h"
@@ -97,6 +98,7 @@ extern bool map_shared;
extern bool test_uffdio_wp;
extern unsigned long long *count_verify;
extern volatile bool test_uffdio_copy_eexist;
+extern atomic_bool ready_for_fork;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 2c68709062..92d51768b7 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -770,6 +770,8 @@ static void uffd_sigbus_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
@@ -785,6 +787,9 @@ static void uffd_sigbus_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -824,6 +829,8 @@ static void uffd_events_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
+ ready_for_fork = false;
+
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
true, wp, false))
@@ -833,6 +840,9 @@ static void uffd_events_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
+
pid = fork();
if (pid < 0)
err("fork");
@@ -1219,7 +1229,8 @@ uffd_test_case_t uffd_tests[] = {
.uffd_fn = uffd_sigbus_wp_test,
.mem_targets = MEM_ALL,
.uffd_feature_required = UFFD_FEATURE_SIGBUS |
- UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP,
+ UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_PAGEFAULT_FLAG_WP |
+ UFFD_FEATURE_WP_HUGETLBFS_SHMEM,
},
{
.name = "events",
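The userfaultfd changes above add an atomic flag that the poll thread sets once it is actually running, and the main thread spins on it before fork()ing so the child never races with a monitor that has not started yet. A condensed sketch of that readiness handshake with C11 atomics and pthreads; the names mirror the selftest but the program is illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool ready_for_fork;

static void *monitor_thread(void *arg)
{
	/* ... set up poll fds here ... */
	ready_for_fork = true;		/* signal: safe to fork now */
	sleep(1);			/* stand-in for the poll loop */
	return NULL;
}

int main(void)
{
	pthread_t mon;

	ready_for_fork = false;
	if (pthread_create(&mon, NULL, monitor_thread, NULL))
		return 1;

	while (!ready_for_fork)
		;	/* busy-wait, mirroring the selftest */

	if (fork() == 0)	/* child only runs once the monitor is live */
		_exit(0);

	pthread_join(mon, NULL);
	puts("done");
	return 0;
}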
diff --git a/tools/testing/selftests/mqueue/setting b/tools/testing/selftests/mqueue/setting
new file mode 100644
index 0000000000..a953c96aa1
--- /dev/null
+++ b/tools/testing/selftests/mqueue/setting
@@ -0,0 +1 @@
+timeout=180
diff --git a/tools/testing/selftests/net/forwarding/config b/tools/testing/selftests/net/forwarding/config
index 697994a927..8d7a1a004b 100644
--- a/tools/testing/selftests/net/forwarding/config
+++ b/tools/testing/selftests/net/forwarding/config
@@ -6,14 +6,49 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NET_VRF=m
CONFIG_BPF_SYSCALL=y
CONFIG_CGROUP_BPF=y
+CONFIG_DUMMY=m
+CONFIG_IPV6=y
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_MACVLAN=m
CONFIG_NET_ACT_CT=m
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_MPLS=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_TUNNEL_KEY=m
CONFIG_NET_ACT_VLAN=m
CONFIG_NET_CLS_FLOWER=m
CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPIP=m
+CONFIG_NET_SCH_ETS=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_ACT_GACT=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_TEAM=y
+CONFIG_NET_TEAM_MODE_LOADBALANCE=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NF_TABLES=m
CONFIG_VETH=m
CONFIG_NAMESPACES=y
CONFIG_NET_NS=y
+CONFIG_VXLAN=m
+CONFIG_XFRM_USER=m
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
index ac97f07e5c..bd3f7d492a 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh
@@ -354,7 +354,7 @@ __ping_ipv4()
# Send 100 packets and verify that at least 100 packets hit the rule,
# to overcome ARP noise.
- PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
+ PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
check_err $? "Ping failed"
tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
@@ -410,7 +410,7 @@ __ping_ipv6()
# Send 100 packets and verify that at least 100 packets hit the rule,
# to overcome neighbor discovery noise.
- PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
+ PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
check_err $? "Ping failed"
tc_check_at_least_x_packets "dev $rp1 egress" 101 100
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
index d880df89bc..e83fde79f4 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh
@@ -457,7 +457,7 @@ __ping_ipv4()
# Send 100 packets and verify that at least 100 packets hit the rule,
# to overcome ARP noise.
- PING_COUNT=100 PING_TIMEOUT=11 ping_do $dev $dst_ip
+ PING_COUNT=100 PING_TIMEOUT=20 ping_do $dev $dst_ip
check_err $? "Ping failed"
tc_check_at_least_x_packets "dev $rp1 egress" 101 10 100
@@ -522,7 +522,7 @@ __ping_ipv6()
# Send 100 packets and verify that at least 100 packets hit the rule,
# to overcome neighbor discovery noise.
- PING_COUNT=100 PING_TIMEOUT=11 ping6_do $dev $dst_ip
+ PING_COUNT=100 PING_TIMEOUT=20 ping6_do $dev $dst_ip
check_err $? "Ping failed"
tc_check_at_least_x_packets "dev $rp1 egress" 101 100
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
index 4d8c59be1b..7f89623f10 100755
--- a/tools/testing/selftests/net/mptcp/diag.sh
+++ b/tools/testing/selftests/net/mptcp/diag.sh
@@ -69,7 +69,7 @@ __chk_nr()
else
echo "[ fail ] expected $expected found $nr"
mptcp_lib_result_fail "${msg}"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
fi
else
echo "[ ok ]"
@@ -115,11 +115,11 @@ wait_msk_nr()
if [ $i -ge $timeout ]; then
echo "[ fail ] timeout while expecting $expected max $max last $nr"
mptcp_lib_result_fail "${msg} # timeout"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
elif [ $nr != $expected ]; then
echo "[ fail ] expected $expected found $nr"
mptcp_lib_result_fail "${msg} # unexpected result"
- ret=$test_cnt
+ ret=${KSFT_FAIL}
else
echo "[ ok ]"
mptcp_lib_result_pass "${msg}"
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 9096bf5794..25693b37f8 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -302,12 +302,12 @@ done
setup
run_test 10 10 0 0 "balanced bwidth"
-run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
+run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
# we still need some additional infrastructure to pass the following test-cases
-run_test 30 10 0 0 "unbalanced bwidth"
-run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
-run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
+run_test 10 3 0 0 "unbalanced bwidth"
+run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
+run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
mptcp_lib_result_print_all_tap
exit $ret
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index f8499d4c87..36e40256ab 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -502,9 +502,22 @@ test_netlink_checks () {
wc -l) == 2 ] || \
return 1
+ info "Checking clone depth"
ERR_MSG="Flow actions may not be safe on all matching packets"
PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
ovs_add_flow "test_netlink_checks" nv0 \
+ 'in_port(1),eth(),eth_type(0x800),ipv4()' \
+ 'clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(clone(drop)))))))))))))))))' \
+ >/dev/null 2>&1 && return 1
+ POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
+
+ if [ "$PRE_TEST" == "$POST_TEST" ]; then
+ info "failed - excessive clone depth was not rejected"
+ return 1
+ fi
+
+ PRE_TEST=$(dmesg | grep -c "${ERR_MSG}")
+ ovs_add_flow "test_netlink_checks" nv0 \
'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(0),2' \
&> /dev/null && return 1
POST_TEST=$(dmesg | grep -c "${ERR_MSG}")
diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
index b97e621fac..5e0e539a32 100644
--- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
+++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py
@@ -299,7 +299,7 @@ class ovsactions(nla):
("OVS_ACTION_ATTR_PUSH_NSH", "none"),
("OVS_ACTION_ATTR_POP_NSH", "flag"),
("OVS_ACTION_ATTR_METER", "none"),
- ("OVS_ACTION_ATTR_CLONE", "none"),
+ ("OVS_ACTION_ATTR_CLONE", "recursive"),
("OVS_ACTION_ATTR_CHECK_PKT_LEN", "none"),
("OVS_ACTION_ATTR_ADD_MPLS", "none"),
("OVS_ACTION_ATTR_DEC_TTL", "none"),
@@ -465,29 +465,42 @@ class ovsactions(nla):
print_str += "pop_mpls"
else:
datum = self.get_attr(field[0])
- print_str += datum.dpstr(more)
+ if field[0] == "OVS_ACTION_ATTR_CLONE":
+ print_str += "clone("
+ print_str += datum.dpstr(more)
+ print_str += ")"
+ else:
+ print_str += datum.dpstr(more)
return print_str
def parse(self, actstr):
+ totallen = len(actstr)
while len(actstr) != 0:
parsed = False
+ parencount = 0
if actstr.startswith("drop"):
# If no reason is provided, the implicit drop is used (i.e no
# action). If some reason is given, an explicit action is used.
- actstr, reason = parse_extract_field(
- actstr,
- "drop(",
- "([0-9]+)",
- lambda x: int(x, 0),
- False,
- None,
- )
+ reason = None
+ if actstr.startswith("drop("):
+ parencount += 1
+
+ actstr, reason = parse_extract_field(
+ actstr,
+ "drop(",
+ "([0-9]+)",
+ lambda x: int(x, 0),
+ False,
+ None,
+ )
+
if reason is not None:
self["attrs"].append(["OVS_ACTION_ATTR_DROP", reason])
parsed = True
else:
- return
+ actstr = actstr[len("drop"): ]
+ return (totallen - len(actstr))
elif parse_starts_block(actstr, "^(\d+)", False, True):
actstr, output = parse_extract_field(
@@ -504,6 +517,7 @@ class ovsactions(nla):
False,
0,
)
+ parencount += 1
self["attrs"].append(["OVS_ACTION_ATTR_RECIRC", recircid])
parsed = True
@@ -516,12 +530,22 @@ class ovsactions(nla):
for flat_act in parse_flat_map:
if parse_starts_block(actstr, flat_act[0], False):
- actstr += len(flat_act[0])
+ actstr = actstr[len(flat_act[0]):]
self["attrs"].append([flat_act[1]])
actstr = actstr[strspn(actstr, ", ") :]
parsed = True
- if parse_starts_block(actstr, "ct(", False):
+ if parse_starts_block(actstr, "clone(", False):
+ parencount += 1
+ subacts = ovsactions()
+ actstr = actstr[len("clone("):]
+ parsedLen = subacts.parse(actstr)
+ lst = []
+ self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts))
+ actstr = actstr[parsedLen:]
+ parsed = True
+ elif parse_starts_block(actstr, "ct(", False):
+ parencount += 1
actstr = actstr[len("ct(") :]
ctact = ovsactions.ctact()
@@ -553,6 +577,7 @@ class ovsactions(nla):
natact = ovsactions.ctact.natattr()
if actstr.startswith("("):
+ parencount += 1
t = None
actstr = actstr[1:]
if actstr.startswith("src"):
@@ -607,15 +632,29 @@ class ovsactions(nla):
actstr = actstr[strspn(actstr, ", ") :]
ctact["attrs"].append(["OVS_CT_ATTR_NAT", natact])
- actstr = actstr[strspn(actstr, ",) ") :]
+ actstr = actstr[strspn(actstr, ", ") :]
self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact])
parsed = True
- actstr = actstr[strspn(actstr, "), ") :]
+ actstr = actstr[strspn(actstr, ", ") :]
+ while parencount > 0:
+ parencount -= 1
+ actstr = actstr[strspn(actstr, " "):]
+ if len(actstr) and actstr[0] != ")":
+ raise ValueError("Action str: '%s' unbalanced" % actstr)
+ actstr = actstr[1:]
+
+ if len(actstr) and actstr[0] == ")":
+ return (totallen - len(actstr))
+
+ actstr = actstr[strspn(actstr, ", ") :]
+
if not parsed:
raise ValueError("Action str: '%s' not supported" % actstr)
+ return (totallen - len(actstr))
+
class ovskey(nla):
nla_flags = NLA_F_NESTED
@@ -2111,6 +2150,8 @@ def main(argv):
ovsflow = OvsFlow()
ndb = NDB()
+ sys.setrecursionlimit(100000)
+
if hasattr(args, "showdp"):
found = False
for iface in ndb.interfaces:
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 464853a7f9..ad993ab3ac 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -988,12 +988,12 @@ TEST_F(tls, recv_partial)
memset(recv_mem, 0, sizeof(recv_mem));
EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_first),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_first),
+ MSG_WAITALL), strlen(test_str_first));
EXPECT_EQ(memcmp(test_str_first, recv_mem, strlen(test_str_first)), 0);
memset(recv_mem, 0, sizeof(recv_mem));
- EXPECT_NE(recv(self->cfd, recv_mem, strlen(test_str_second),
- MSG_WAITALL), -1);
+ EXPECT_EQ(recv(self->cfd, recv_mem, strlen(test_str_second),
+ MSG_WAITALL), strlen(test_str_second));
EXPECT_EQ(memcmp(test_str_second, recv_mem, strlen(test_str_second)),
0);
}
diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
index 2fc36efb16..a7f8e8a956 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/riscv32.config
@@ -3,6 +3,7 @@ CONFIG_ARCH_RV32I=y
CONFIG_MMU=y
CONFIG_FPU=y
CONFIG_SOC_VIRT=y
+CONFIG_RISCV_ISA_FALLBACK=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
diff --git a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
index dc266f3b19..daeb3e5e09 100644
--- a/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
+++ b/tools/testing/selftests/wireguard/qemu/arch/riscv64.config
@@ -2,6 +2,7 @@ CONFIG_ARCH_RV64I=y
CONFIG_MMU=y
CONFIG_FPU=y
CONFIG_SOC_VIRT=y
+CONFIG_RISCV_ISA_FALLBACK=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index e033c79d52..28658b9e0d 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -87,7 +87,27 @@ static void async_pf_execute(struct work_struct *work)
__kvm_vcpu_wake_up(vcpu);
mmput(mm);
- kvm_put_kvm(vcpu->kvm);
+}
+
+static void kvm_flush_and_free_async_pf_work(struct kvm_async_pf *work)
+{
+ /*
+ * The async #PF is "done", but KVM must wait for the work item itself,
+ * i.e. async_pf_execute(), to run to completion. If KVM is a module,
+ * KVM must ensure *no* code owned by KVM (the module) can be run
+ * after the last call to module_put(). Note, flushing the work item
+ * is always required when the item is taken off the completion queue.
+ * E.g. even if the vCPU handles the item in the "normal" path, the VM
+ * could be terminated before async_pf_execute() completes.
+ *
+ * Wake-all events skip the queue and go straight to done, i.e. they
+ * don't need to be flushed (but sanity check that the work wasn't queued).
+ */
+ if (work->wakeup_all)
+ WARN_ON_ONCE(work->work.func);
+ else
+ flush_work(&work->work);
+ kmem_cache_free(async_pf_cache, work);
}
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
@@ -114,7 +134,6 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
#else
if (cancel_work_sync(&work->work)) {
mmput(work->mm);
- kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
#endif
@@ -126,7 +145,10 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
list_first_entry(&vcpu->async_pf.done,
typeof(*work), link);
list_del(&work->link);
- kmem_cache_free(async_pf_cache, work);
+
+ spin_unlock(&vcpu->async_pf.lock);
+ kvm_flush_and_free_async_pf_work(work);
+ spin_lock(&vcpu->async_pf.lock);
}
spin_unlock(&vcpu->async_pf.lock);
@@ -151,7 +173,7 @@ void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
list_del(&work->queue);
vcpu->async_pf.queued--;
- kmem_cache_free(async_pf_cache, work);
+ kvm_flush_and_free_async_pf_work(work);
}
}
@@ -186,7 +208,6 @@ bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
work->arch = *arch;
work->mm = current->mm;
mmget(work->mm);
- kvm_get_kvm(work->vcpu->kvm);
INIT_WORK(&work->work, async_pf_execute);
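kvm_flush_and_free_async_pf_work() above encodes a general rule for work items whose handler lives in module code: flush_work() before freeing the item (and before the module can be unloaded), except for wake-all entries that were never queued. A hedged sketch of that flush-before-free discipline as a self-contained module; struct and function names here are illustrative, not KVM's:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_item {
	struct work_struct work;
	bool queued;
};

static struct demo_item *demo_item_global;

static void demo_handler(struct work_struct *work)
{
	/* module code runs here; it must not outlive the module */
}

static void demo_free_item(struct demo_item *item)
{
	/*
	 * Wait for the handler to finish before freeing; otherwise the
	 * work could still be running when the memory (or the module
	 * text) goes away.
	 */
	if (item->queued)
		flush_work(&item->work);
	kfree(item);
}

static int __init demo_init(void)
{
	demo_item_global = kzalloc(sizeof(*demo_item_global), GFP_KERNEL);
	if (!demo_item_global)
		return -ENOMEM;
	INIT_WORK(&demo_item_global->work, demo_handler);
	demo_item_global->queued = true;
	schedule_work(&demo_item_global->work);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_free_item(demo_item_global);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");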
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 89912a17f5..c0e230f4c3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -61,7 +61,7 @@ static void irqfd_resampler_notify(struct kvm_kernel_irqfd_resampler *resampler)
list_for_each_entry_srcu(irqfd, &resampler->list, resampler_link,
srcu_read_lock_held(&resampler->kvm->irq_srcu))
- eventfd_signal(irqfd->resamplefd, 1);
+ eventfd_signal(irqfd->resamplefd);
}
/*
@@ -786,7 +786,7 @@ ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
if (!ioeventfd_in_range(p, addr, len, val))
return -EOPNOTSUPP;
- eventfd_signal(p->eventfd, 1);
+ eventfd_signal(p->eventfd);
return 0;
}