-rw-r--r--.editorconfig3
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml4
-rw-r--r--Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml5
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml18
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci-host.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml16
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml4
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml92
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml52
-rw-r--r--Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml24
-rw-r--r--Documentation/devicetree/bindings/usb/realtek,rts5411.yaml1
-rw-r--r--Documentation/driver-api/fpga/fpga-bridge.rst7
-rw-r--r--Documentation/driver-api/fpga/fpga-mgr.rst34
-rw-r--r--Documentation/driver-api/fpga/fpga-region.rst13
-rw-r--r--Documentation/iio/adis16475.rst8
-rw-r--r--Documentation/mm/arch_pgtable_helpers.rst6
-rw-r--r--Documentation/networking/af_xdp.rst33
-rw-r--r--MAINTAINERS1
-rw-r--r--Makefile7
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi2
-rw-r--r--arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso6
-rw-r--r--arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts2
-rw-r--r--arch/arm/boot/dts/samsung/exynos4412-origen.dts2
-rw-r--r--arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts2
-rw-r--r--arch/arm/configs/sunxi_defconfig1
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-s4.dtsi13
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi3
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8qm-mek.dts2
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts1
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132-norrin.dts4
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra132.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/qcs404-evb.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/sc8280xp.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi2
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/include/asm/asm-bug.h1
-rw-r--r--arch/arm64/include/asm/irqflags.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h24
-rw-r--r--arch/arm64/kernel/fpsimd.c44
-rw-r--r--arch/arm64/kvm/arm.c50
-rw-r--r--arch/arm64/kvm/guest.c3
-rw-r--r--arch/arm64/kvm/hyp/aarch32.c18
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c15
-rw-r--r--arch/arm64/kvm/vgic/vgic.h2
-rw-r--r--arch/csky/kernel/probes/ftrace.c3
-rw-r--r--arch/loongarch/Kconfig5
-rw-r--r--arch/loongarch/Kconfig.debug1
-rw-r--r--arch/loongarch/include/asm/hw_breakpoint.h4
-rw-r--r--arch/loongarch/include/asm/numa.h1
-rw-r--r--arch/loongarch/include/asm/perf_event.h3
-rw-r--r--arch/loongarch/include/asm/stackframe.h2
-rw-r--r--arch/loongarch/kernel/ftrace_dyn.c3
-rw-r--r--arch/loongarch/kernel/head.S2
-rw-r--r--arch/loongarch/kernel/hw_breakpoint.c96
-rw-r--r--arch/loongarch/kernel/ptrace.c47
-rw-r--r--arch/loongarch/kernel/setup.c2
-rw-r--r--arch/loongarch/kernel/smp.c5
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S10
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/kernel/entry.S4
-rw-r--r--arch/m68k/mac/misc.c36
-rw-r--r--arch/microblaze/kernel/Makefile1
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo-static.c2
-rw-r--r--arch/mips/bmips/setup.c3
-rw-r--r--arch/mips/include/asm/mipsmtregs.h2
-rw-r--r--arch/mips/pci/ops-rc32434.c4
-rwxr-xr-x[-rw-r--r--]arch/mips/pci/pcie-octeon.c6
-rw-r--r--arch/openrisc/kernel/process.c8
-rw-r--r--arch/openrisc/kernel/traps.c48
-rw-r--r--arch/parisc/include/asm/cacheflush.h15
-rw-r--r--arch/parisc/include/asm/page.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h27
-rw-r--r--arch/parisc/include/asm/signal.h12
-rw-r--r--arch/parisc/include/uapi/asm/signal.h10
-rw-r--r--arch/parisc/kernel/cache.c413
-rw-r--r--arch/parisc/kernel/ftrace.c3
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c1
-rw-r--r--arch/powerpc/crypto/.gitignore2
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/io.h24
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h4
-rw-r--r--arch/powerpc/include/asm/uaccess.h16
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c3
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c4
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c1
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c149
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c42
-rw-r--r--arch/powerpc/platforms/85xx/smp.c9
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c6
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c10
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c2
-rw-r--r--arch/riscv/Kconfig2
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi99
-rw-r--r--arch/riscv/include/asm/csr.h2
-rw-r--r--arch/riscv/include/asm/sbi.h2
-rw-r--r--arch/riscv/kernel/cpu.c40
-rw-r--r--arch/riscv/kernel/cpu_ops_sbi.c2
-rw-r--r--arch/riscv/kernel/cpu_ops_spinwait.c3
-rw-r--r--arch/riscv/kernel/cpufeature.c10
-rw-r--r--arch/riscv/kernel/probes/ftrace.c3
-rw-r--r--arch/riscv/kernel/smpboot.c7
-rw-r--r--arch/riscv/kernel/stacktrace.c20
-rw-r--r--arch/riscv/kvm/aia_device.c7
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c4
-rw-r--r--arch/riscv/mm/init.c24
-rw-r--r--arch/riscv/mm/pageattr.c28
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c20
-rw-r--r--arch/s390/boot/startup.c1
-rw-r--r--arch/s390/include/asm/cpacf.h109
-rw-r--r--arch/s390/include/asm/ftrace.h8
-rw-r--r--arch/s390/include/asm/gmap.h2
-rw-r--r--arch/s390/include/asm/mmu.h5
-rw-r--r--arch/s390/include/asm/mmu_context.h1
-rw-r--r--arch/s390/include/asm/pgtable.h20
-rw-r--r--arch/s390/include/asm/processor.h1
-rw-r--r--arch/s390/include/asm/stacktrace.h12
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/asm-offsets.c5
-rw-r--r--arch/s390/kernel/ftrace.c3
-rw-r--r--arch/s390/kernel/ipl.c10
-rw-r--r--arch/s390/kernel/perf_event.c34
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/stacktrace.c108
-rw-r--r--arch/s390/kernel/vdso.c13
-rw-r--r--arch/s390/kernel/vdso32/Makefile4
-rw-r--r--arch/s390/kernel/vdso64/Makefile4
-rw-r--r--arch/s390/kernel/vdso64/vdso_user_wrapper.S19
-rw-r--r--arch/s390/kernel/vmlinux.lds.S2
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/mm/gmap.c165
-rw-r--r--arch/s390/net/bpf_jit_comp.c8
-rw-r--r--arch/sh/kernel/kprobes.c7
-rw-r--r--arch/sh/lib/checksum.S67
-rw-r--r--arch/sparc/include/asm/smp_64.h2
-rw-r--r--arch/sparc/include/uapi/asm/termbits.h10
-rw-r--r--arch/sparc/include/uapi/asm/termios.h9
-rw-r--r--arch/sparc/kernel/prom_64.c4
-rw-r--r--arch/sparc/kernel/setup_64.c1
-rw-r--r--arch/sparc/kernel/smp_64.c14
-rw-r--r--arch/sparc/mm/tlb.c1
-rw-r--r--arch/um/drivers/line.c14
-rw-r--r--arch/um/drivers/ubd_kern.c4
-rw-r--r--arch/um/drivers/vector_kern.c2
-rw-r--r--arch/um/include/asm/kasan.h1
-rw-r--r--arch/um/include/asm/mmu.h2
-rw-r--r--arch/um/include/asm/processor-generic.h1
-rw-r--r--arch/um/include/shared/kern_util.h2
-rw-r--r--arch/um/include/shared/skas/mm_id.h2
-rw-r--r--arch/um/os-Linux/mem.c1
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Kconfig.debug5
-rw-r--r--arch/x86/boot/compressed/Makefile4
-rw-r--r--arch/x86/boot/compressed/head_64.S5
-rw-r--r--arch/x86/boot/main.c4
-rw-r--r--arch/x86/crypto/nh-avx2-x86_64.S1
-rw-r--r--arch/x86/crypto/sha256-avx2-asm.S1
-rw-r--r--arch/x86/crypto/sha512-avx2-asm.S1
-rw-r--r--arch/x86/include/asm/alternative.h22
-rw-r--r--arch/x86/include/asm/atomic64_32.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h2
-rw-r--r--arch/x86/include/asm/cpu_device_id.h98
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/efi.h1
-rw-r--r--arch/x86/include/asm/irq_stack.h2
-rw-r--r--arch/x86/include/asm/percpu.h42
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/sparsemem.h2
-rw-r--r--arch/x86/include/asm/uaccess.h6
-rw-r--r--arch/x86/kernel/amd_nb.c9
-rw-r--r--arch/x86/kernel/apic/vector.c9
-rw-r--r--arch/x86/kernel/cpu/common.c24
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/intel.c25
-rw-r--r--arch/x86/kernel/cpu/match.c4
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c3
-rw-r--r--arch/x86/kernel/cpu/topology.c53
-rw-r--r--arch/x86/kernel/cpu/topology_amd.c4
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c3
-rw-r--r--arch/x86/kernel/machine_kexec_64.c11
-rw-r--r--arch/x86/kernel/tsc_sync.c6
-rw-r--r--arch/x86/kvm/cpuid.c21
-rw-r--r--arch/x86/kvm/svm/sev.c19
-rw-r--r--arch/x86/kvm/svm/svm.c51
-rw-r--r--arch/x86/kvm/svm/svm.h4
-rw-r--r--arch/x86/kvm/x86.c9
-rw-r--r--arch/x86/lib/getuser.S6
-rw-r--r--arch/x86/lib/x86-opcode-map.txt10
-rw-r--r--arch/x86/mm/numa.c10
-rw-r--r--arch/x86/mm/pat/set_memory.c68
-rw-r--r--arch/x86/mm/pgtable.c2
-rw-r--r--arch/x86/pci/mmconfig-shared.c40
-rw-r--r--arch/x86/platform/efi/memmap.c12
-rw-r--r--arch/x86/purgatory/Makefile3
-rw-r--r--arch/x86/tools/relocs.c9
-rw-r--r--arch/x86/um/shared/sysdep/archsetjmp.h7
-rw-r--r--arch/x86/xen/enlighten.c33
-rw-r--r--block/blk-cgroup.c87
-rw-r--r--block/blk-core.c9
-rw-r--r--block/blk-flush.c3
-rw-r--r--block/blk-merge.c2
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-settings.c2
-rw-r--r--block/blk.h1
-rw-r--r--block/fops.c2
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c2
-rw-r--r--block/partitions/cmdline.c49
-rw-r--r--block/sed-opal.c2
-rw-r--r--crypto/asymmetric_keys/Kconfig3
-rw-r--r--crypto/ecdsa.c3
-rw-r--r--crypto/ecrdsa.c1
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/acpi/acpi_lpss.c1
-rw-r--r--drivers/acpi/acpica/Makefile1
-rw-r--r--drivers/acpi/acpica/acevents.h4
-rw-r--r--drivers/acpi/acpica/evregion.c6
-rw-r--r--drivers/acpi/acpica/evxfregn.c54
-rw-r--r--drivers/acpi/acpica/exregion.c23
-rw-r--r--drivers/acpi/apei/einj-core.c2
-rw-r--r--drivers/acpi/bus.c5
-rw-r--r--drivers/acpi/ec.c28
-rw-r--r--drivers/acpi/internal.h5
-rw-r--r--drivers/acpi/mipi-disco-img.c28
-rw-r--r--drivers/acpi/numa/srat.c5
-rw-r--r--drivers/acpi/resource.c25
-rw-r--r--drivers/acpi/thermal.c8
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/acpi/x86/utils.c44
-rw-r--r--drivers/ata/ahci.c9
-rw-r--r--drivers/ata/libata-core.c9
-rw-r--r--drivers/ata/libata-scsi.c8
-rw-r--r--drivers/ata/pata_legacy.c8
-rw-r--r--drivers/base/base.h9
-rw-r--r--drivers/base/bus.c9
-rw-r--r--drivers/base/core.c3
-rw-r--r--drivers/base/module.c42
-rw-r--r--drivers/block/nbd.c34
-rw-r--r--drivers/block/null_blk/main.c44
-rw-r--r--drivers/block/null_blk/zoned.c2
-rw-r--r--drivers/bluetooth/ath3k.c25
-rw-r--r--drivers/bluetooth/btmrvl_main.c9
-rw-r--r--drivers/bluetooth/btqca.c4
-rw-r--r--drivers/bluetooth/btrsi.c1
-rw-r--r--drivers/bluetooth/btsdio.c8
-rw-r--r--drivers/bluetooth/btusb.c5
-rw-r--r--drivers/bluetooth/hci_bcm4377.c1
-rw-r--r--drivers/bluetooth/hci_ldisc.c6
-rw-r--r--drivers/bluetooth/hci_serdev.c5
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/bluetooth/hci_vhci.c10
-rw-r--r--drivers/bluetooth/virtio_bt.c2
-rw-r--r--drivers/char/hw_random/stm32-rng.c18
-rw-r--r--drivers/char/ppdev.c15
-rw-r--r--drivers/char/tpm/tpm_tis_core.c3
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c3
-rw-r--r--drivers/clk/bcm/clk-bcm2711-dvp.c3
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c2
-rw-r--r--drivers/clk/clk-renesas-pcie.c10
-rw-r--r--drivers/clk/mediatek/clk-mt8365-mm.c2
-rw-r--r--drivers/clk/mediatek/clk-pllfh.c2
-rw-r--r--drivers/clk/qcom/Kconfig2
-rw-r--r--drivers/clk/qcom/apss-ipq-pll.c33
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c3
-rw-r--r--drivers/clk/qcom/dispcc-sm6350.c11
-rw-r--r--drivers/clk/qcom/dispcc-sm8450.c20
-rw-r--r--drivers/clk/qcom/dispcc-sm8550.c20
-rw-r--r--drivers/clk/qcom/dispcc-sm8650.c20
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c8
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c2
-rw-r--r--drivers/clk/renesas/r9a07g043-cpg.c9
-rw-r--r--drivers/clk/samsung/clk-exynosautov9.c8
-rw-r--r--drivers/clk/samsung/clk-gs101.c225
-rw-r--r--drivers/clk/samsung/clk.h11
-rw-r--r--drivers/clk/sifive/sifive-prci.c8
-rw-r--r--drivers/cpufreq/amd-pstate-ut.c3
-rw-r--r--drivers/cpufreq/amd-pstate.c218
-rw-r--r--drivers/cpufreq/amd-pstate.h (renamed from include/linux/amd-pstate.h)27
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c5
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c14
-rw-r--r--drivers/cpufreq/cpufreq.c11
-rw-r--r--drivers/crypto/bcm/spu2.c2
-rw-r--r--drivers/crypto/ccp/sp-platform.c14
-rw-r--r--drivers/crypto/hisilicon/qm.c5
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c4
-rw-r--r--drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c2
-rw-r--r--drivers/crypto/intel/qat/qat_4xxx/adf_drv.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_aer.c19
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c1
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_rl.c2
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.c21
-rw-r--r--drivers/crypto/intel/qat/qat_common/adf_telemetry.h1
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c4
-rw-r--r--drivers/crypto/starfive/jh7110-rsa.c1
-rw-r--r--drivers/cxl/core/pci.c29
-rw-r--r--drivers/cxl/core/region.c19
-rw-r--r--drivers/cxl/core/trace.h4
-rw-r--r--drivers/cxl/cxl.h2
-rw-r--r--drivers/cxl/pci.c22
-rw-r--r--drivers/dax/bus.c66
-rw-r--r--drivers/dma-buf/st-dma-fence.c6
-rw-r--r--drivers/dma-buf/sync_debug.c4
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dma-axi-dmac.c2
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c6
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h1
-rw-r--r--drivers/dma/idma64.c4
-rw-r--r--drivers/dma/idxd/cdev.c1
-rw-r--r--drivers/dma/idxd/irq.c4
-rw-r--r--drivers/dma/ioat/init.c55
-rw-r--r--drivers/dma/ti/k3-udma-glue.c5
-rw-r--r--drivers/dma/xilinx/xdma.c4
-rw-r--r--drivers/dpll/dpll_core.c2
-rw-r--r--drivers/edac/amd64_edac.c8
-rw-r--r--drivers/edac/igen6_edac.c4
-rw-r--r--drivers/edac/skx_common.c2
-rw-r--r--drivers/extcon/Kconfig3
-rw-r--r--drivers/firmware/dmi-id.c7
-rw-r--r--drivers/firmware/efi/libstub/fdt.c4
-rw-r--r--drivers/firmware/efi/libstub/loongarch.c2
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c28
-rw-r--r--drivers/firmware/efi/memmap.c9
-rw-r--r--drivers/firmware/psci/psci.c4
-rw-r--r--drivers/firmware/qcom/qcom_scm.c30
-rw-r--r--drivers/firmware/raspberrypi.c7
-rw-r--r--drivers/fpga/fpga-bridge.c57
-rw-r--r--drivers/fpga/fpga-mgr.c82
-rw-r--r--drivers/fpga/fpga-region.c24
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-npcm-sgpio.c10
-rw-r--r--drivers/gpio/gpio-tqmx86.c110
-rw-r--r--drivers/gpio/gpiolib-acpi.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h43
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c20
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c15
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c3
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c6
-rw-r--r--drivers/gpu/drm/bridge/imx/Kconfig4
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c6
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c6
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c6
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c27
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c17
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c1
-rw-r--r--drivers/gpu/drm/ci/test.yml6
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c4
-rw-r--r--drivers/gpu/drm/drm_bridge.c10
-rw-r--r--drivers/gpu/drm/drm_edid.c2
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c1
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c3
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_breadcrumbs.c15
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h6
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c46
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm_mips.c4
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c12
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h3
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c8
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c18
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c9
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c3
-rw-r--r--drivers/gpu/drm/meson/meson_dw_mipi_dsi.c7
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.c25
-rw-r--r--drivers/gpu/drm/msm/dp/dp_aux.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.h3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.c22
-rw-r--r--drivers/gpu/drm/msm/dp/dp_link.h14
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c10
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c44
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c40
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c9
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c5
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c22
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c3
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c20
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c45
-rw-r--r--drivers/gpu/drm/xe/xe_bb.c3
-rw-r--r--drivers/gpu/drm/xe/xe_device.c21
-rw-r--r--drivers/gpu/drm/xe/xe_gt_idle.c9
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c70
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c1
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c12
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c115
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h6
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c36
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c18
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c7
-rw-r--r--drivers/greybus/interface.c1
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c10
-rw-r--r--drivers/hid/hid-asus.c51
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-multitouch.c6
-rw-r--r--drivers/hid/hid-nvidia-shield.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c59
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c5
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c2
-rw-r--r--drivers/hwmon/ltc2992.c4
-rw-r--r--drivers/hwmon/shtc1.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c29
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h31
-rw-r--r--drivers/hwtracing/intel_th/pci.c30
-rw-r--r--drivers/hwtracing/stm/core.c11
-rw-r--r--drivers/i2c/busses/i2c-at91-slave.c3
-rw-r--r--drivers/i2c/busses/i2c-cadence.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c2
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c19
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c20
-rw-r--r--drivers/i2c/i2c-core-acpi.c19
-rw-r--r--drivers/i3c/master/svc-i3c-master.c18
-rw-r--r--drivers/iio/adc/ad9467.c4
-rw-r--r--drivers/iio/adc/adi-axi-adc.c9
-rw-r--r--drivers/iio/adc/pac1934.c9
-rw-r--r--drivers/iio/adc/stm32-adc.c1
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c15
-rw-r--r--drivers/iio/dac/ad5592r-base.c2
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c4
-rw-r--r--drivers/iio/industrialio-core.c6
-rw-r--r--drivers/iio/pressure/bmp280-core.c10
-rw-r--r--drivers/iio/pressure/dps310.c11
-rw-r--r--drivers/iio/temperature/mcp9600.c3
-rw-r--r--drivers/iio/temperature/mlx90635.c6
-rw-r--r--drivers/infiniband/core/addr.c12
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c24
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h12
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c6
-rw-r--r--drivers/infiniband/hw/mana/cq.c54
-rw-r--r--drivers/infiniband/hw/mana/main.c43
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h14
-rw-r--r--drivers/infiniband/hw/mana/mr.c1
-rw-r--r--drivers/infiniband/hw/mana/qp.c26
-rw-r--r--drivers/infiniband/hw/mlx5/main.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c41
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c8
-rw-r--r--drivers/input/input.c104
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c7
-rw-r--r--drivers/input/mouse/cyapa.c12
-rw-r--r--drivers/input/serio/ioc3kbd.c7
-rw-r--r--drivers/interconnect/qcom/qcm2290.c2
-rw-r--r--drivers/iommu/amd/init.c13
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c2
-rw-r--r--drivers/iommu/intel/iommu.c19
-rw-r--r--drivers/iommu/iommu.c21
-rw-r--r--drivers/irqchip/irq-alpine-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c44
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-riscv-intc.c9
-rw-r--r--drivers/irqchip/irq-sifive-plic.c34
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/leds-pwm.c8
-rw-r--r--drivers/macintosh/via-macii.c11
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c2
-rw-r--r--drivers/md/bcache/bset.c44
-rw-r--r--drivers/md/bcache/bset.h28
-rw-r--r--drivers/md/bcache/btree.c40
-rw-r--r--drivers/md/bcache/super.c5
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/writeback.c10
-rw-r--r--drivers/md/dm-delay.c14
-rw-r--r--drivers/md/dm-integrity.c1
-rw-r--r--drivers/md/md-bitmap.c6
-rw-r--r--drivers/md/raid5.c15
-rw-r--r--drivers/media/cec/core/cec-adap.c24
-rw-r--r--drivers/media/cec/core/cec-api.c5
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c5
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c22
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c4
-rw-r--r--drivers/media/i2c/ov2680.c13
-rw-r--r--drivers/media/i2c/ov2740.c11
-rw-r--r--drivers/media/mc/mc-devnode.c5
-rw-r--r--drivers/media/mc/mc-entity.c6
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c66
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c10
-rw-r--r--drivers/media/pci/mgb4/mgb4_core.c7
-rw-r--r--drivers/media/pci/ngene/ngene-core.c4
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c26
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h2
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c5
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h2
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig1
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c5
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c20
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c31
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c24
-rw-r--r--drivers/misc/lkdtm/Makefile2
-rw-r--r--drivers/misc/lkdtm/perms.c2
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c9
-rw-r--r--drivers/misc/mei/pci-me.c4
-rw-r--r--drivers/misc/mei/platform-vsc.c39
-rw-r--r--drivers/misc/mei/vsc-fw-loader.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c10
-rw-r--r--drivers/mmc/core/slot-gpio.c20
-rw-r--r--drivers/mmc/host/davinci_mmc.c4
-rw-r--r--drivers/mmc/host/sdhci-acpi.c61
-rw-r--r--drivers/mmc/host/sdhci.c10
-rw-r--r--drivers/mmc/host/sdhci.h3
-rw-r--r--drivers/mmc/host/sdhci_am654.c168
-rw-r--r--drivers/mtd/mtdcore.c6
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c2
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/bonding/bond_main.c13
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c50
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c89
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/qca/qca8k-leds.c12
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c87
-rw-r--r--drivers/net/dsa/realtek/rtl83xx.c7
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_eth_com.c37
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_regs_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c11
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c12
-rw-r--r--drivers/net/ethernet/cortina/gemini.c12
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c36
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c8
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c31
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c116
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c13
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c24
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c12
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c56
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/qos.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c338
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c44
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c48
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h28
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c12
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h3
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c9
-rw-r--r--drivers/net/ethernet/smsc/smc91x.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c73
-rw-r--r--drivers/net/ethernet/sun/sungem.c14
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_classifier.c2
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c14
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c56
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c31
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h1
-rw-r--r--drivers/net/geneve.c10
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c4
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/netkit.c30
-rw-r--r--drivers/net/phy/dp83tg720.c38
-rw-r--r--drivers/net/phy/micrel.c118
-rw-r--r--drivers/net/phy/mxl-gpy.c58
-rw-r--r--drivers/net/phy/sfp.c24
-rw-r--r--drivers/net/usb/aqc111.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c18
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/smsc95xx.c26
-rw-r--r--drivers/net/usb/sr9700.c10
-rw-r--r--drivers/net/virtio_net.c34
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c10
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c14
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c26
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c47
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c1
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c64
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c32
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c11
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h9
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h79
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c22
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c23
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c12
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c2
-rw-r--r--drivers/nvme/host/core.c17
-rw-r--r--drivers/nvme/host/multipath.c3
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pr.c2
-rw-r--r--drivers/nvme/target/configfs.c8
-rw-r--r--drivers/nvme/target/passthru.c6
-rw-r--r--drivers/of/module.c7
-rw-r--r--drivers/opp/core.c31
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c120
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c3
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c6
-rw-r--r--drivers/pci/of_property.c2
-rw-r--r--drivers/pci/pci.c19
-rw-r--r--drivers/pci/pcie/edr.c28
-rw-r--r--drivers/perf/arm_dmc620_pmu.c9
-rw-r--r--drivers/perf/hisilicon/hisi_pcie_pmu.c14
-rw-r--r--drivers/perf/hisilicon/hns3_pmu.c16
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c247
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c20
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c24
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c2
-rw-r--r--drivers/platform/chrome/cros_ec.c16
-rw-r--r--drivers/platform/chrome/cros_usbpd_logger.c9
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c9
-rw-r--r--drivers/platform/x86/dell/dell-smbios-base.c92
-rw-r--r--drivers/platform/x86/intel/tpmi.c7
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c7
-rw-r--r--drivers/platform/x86/p2sb.c29
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c5
-rw-r--r--drivers/platform/x86/toshiba_acpi.c36
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c8
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c18
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c216
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h1
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c18
-rw-r--r--drivers/pmdomain/core.c10
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c20
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c11
-rw-r--r--drivers/power/supply/power_supply_sysfs.c20
-rw-r--r--drivers/ptp/ptp_chardev.c3
-rw-r--r--drivers/ptp/ptp_ocp.c6
-rw-r--r--drivers/ptp/ptp_sysfs.c3
-rw-r--r--drivers/pwm/pwm-meson.c15
-rw-r--r--drivers/pwm/pwm-sti.c39
-rw-r--r--drivers/ras/amd/atl/internal.h2
-rw-r--r--drivers/ras/amd/atl/system.c2
-rw-r--r--drivers/ras/amd/atl/umc.c160
-rw-r--r--drivers/regulator/bd71815-regulator.c2
-rw-r--r--drivers/regulator/bd71828-regulator.c58
-rw-r--r--drivers/regulator/core.c1
-rw-r--r--drivers/regulator/helpers.c43
-rw-r--r--drivers/regulator/tps6287x-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c16
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c58
-rw-r--r--drivers/s390/cio/trace.h2
-rw-r--r--drivers/s390/crypto/ap_bus.c10
-rw-r--r--drivers/s390/net/qeth_core.h9
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c4
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_app.c62
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c19
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c23
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c2
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/scsi.c14
-rw-r--r--drivers/scsi/scsi_transport_sas.c23
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c5
-rw-r--r--drivers/soc/qcom/cmd-db.c32
-rw-r--r--drivers/soc/qcom/pmic_glink.c26
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c3
-rw-r--r--drivers/soundwire/cadence_master.c2
-rw-r--r--drivers/spi/spi-cs42l43.c2
-rw-r--r--drivers/spi/spi-imx.c14
-rw-r--r--drivers/spi/spi-stm32-qspi.c12
-rw-r--r--drivers/spi/spi-stm32.c16
-rw-r--r--drivers/spi/spi.c14
-rw-r--r--drivers/spmi/hisi-spmi-controller.c1
-rw-r--r--drivers/spmi/spmi-pmic-arb.c12
-rw-r--r--drivers/ssb/main.c4
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c1
-rw-r--r--drivers/staging/greybus/arche-platform.c9
-rw-r--r--drivers/staging/greybus/light.c8
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c1
-rw-r--r--drivers/staging/media/starfive/camss/stf-camss.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c10
-rw-r--r--drivers/thermal/qcom/lmh.c3
-rw-r--r--drivers/thermal/qcom/tsens.c2
-rw-r--r--drivers/thermal/thermal_core.c27
-rw-r--r--drivers/thermal/thermal_debugfs.c27
-rw-r--r--drivers/thermal/thermal_debugfs.h4
-rw-r--r--drivers/thunderbolt/debugfs.c5
-rw-r--r--drivers/tty/n_gsm.c140
-rw-r--r--drivers/tty/n_tty.c22
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c101
-rw-r--r--drivers/tty/serial/8250/8250_dw.c36
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.c3
-rw-r--r--drivers/tty/serial/8250/8250_dwlib.h33
-rw-r--r--drivers/tty/serial/8250/8250_exar.c42
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c8
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/imx.c7
-rw-r--r--drivers/tty/serial/max3100.c22
-rw-r--r--drivers/tty/serial/sc16is7xx.c25
-rw-r--r--drivers/tty/serial/serial_port.c7
-rw-r--r--drivers/tty/serial/sh-sci.c5
-rw-r--r--drivers/tty/tty_ldisc.c6
-rw-r--r--drivers/tty/vt/vt.c10
-rw-r--r--drivers/ufs/core/ufs-mcq.c20
-rw-r--r--drivers/ufs/core/ufshcd.c13
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c2
-rw-r--r--drivers/ufs/host/ufs-qcom.c7
-rw-r--r--drivers/ufs/host/ufs-qcom.h12
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/core/hcd.c12
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c8
-rw-r--r--drivers/usb/fotg210/fotg210-core.c1
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/function/f_printer.c6
-rw-r--r--drivers/usb/gadget/function/rndis.c4
-rw-r--r--drivers/usb/gadget/function/u_audio.c21
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c14
-rw-r--r--drivers/usb/host/xhci-mem.c22
-rw-r--r--drivers/usb/host/xhci-pci.c22
-rw-r--r--drivers/usb/host/xhci-rcar.c6
-rw-r--r--drivers/usb/host/xhci-ring.c74
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/misc/uss720.c20
-rw-r--r--drivers/usb/storage/alauda.c9
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/uas.c7
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c10
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c5
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c22
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c78
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c4
-rw-r--r--drivers/video/backlight/mp3309c.c3
-rw-r--r--drivers/video/fbdev/Kconfig4
-rw-r--r--drivers/video/fbdev/core/Kconfig6
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c5
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c2
-rw-r--r--drivers/video/fbdev/sis/init301.c3
-rw-r--r--drivers/virt/acrn/mm.c61
-rw-r--r--drivers/virtio/virtio_balloon.c13
-rw-r--r--drivers/virtio/virtio_pci_common.c4
-rw-r--r--drivers/watchdog/bd9576_wdt.c12
-rw-r--r--drivers/watchdog/cpu5wdt.c2
-rw-r--r--drivers/watchdog/rti_wdt.c34
-rw-r--r--drivers/watchdog/sa1100_wdt.c5
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c36
-rw-r--r--fs/9p/vfs_dentry.c9
-rw-r--r--fs/afs/mntpt.c5
-rw-r--r--fs/btrfs/bio.c4
-rw-r--r--fs/btrfs/block-group.c11
-rw-r--r--fs/btrfs/disk-io.c10
-rw-r--r--fs/btrfs/extent_io.c70
-rw-r--r--fs/btrfs/qgroup.c34
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/tree-log.c17
-rw-r--r--fs/btrfs/zoned.c13
-rw-r--r--fs/cachefiles/daemon.c3
-rw-r--r--fs/cachefiles/internal.h5
-rw-r--r--fs/cachefiles/ondemand.c165
-rw-r--r--fs/dlm/ast.c14
-rw-r--r--fs/dlm/dlm_internal.h1
-rw-r--r--fs/dlm/user.c15
-rw-r--r--fs/ecryptfs/keystore.c4
-rw-r--r--fs/erofs/decompressor_deflate.c55
-rw-r--r--fs/exec.c11
-rw-r--r--fs/ext4/inode.c5
-rw-r--r--fs/ext4/mballoc.c5
-rw-r--r--fs/ext4/mballoc.h2
-rw-r--r--fs/ext4/namei.c2
-rw-r--r--fs/ext4/super.c22
-rw-r--r--fs/ext4/sysfs.c24
-rw-r--r--fs/ext4/xattr.c117
-rw-r--r--fs/f2fs/checkpoint.c9
-rw-r--r--fs/f2fs/data.c19
-rw-r--r--fs/f2fs/f2fs.h15
-rw-r--r--fs/f2fs/file.c92
-rw-r--r--fs/f2fs/gc.c9
-rw-r--r--fs/f2fs/inode.c6
-rw-r--r--fs/f2fs/node.c14
-rw-r--r--fs/f2fs/segment.c2
-rw-r--r--fs/f2fs/super.c12
-rw-r--r--fs/fs-writeback.c7
-rw-r--r--fs/fuse/dev.c3
-rw-r--r--fs/gfs2/glock.c91
-rw-r--r--fs/gfs2/glock.h1
-rw-r--r--fs/gfs2/glops.c3
-rw-r--r--fs/gfs2/incore.h1
-rw-r--r--fs/gfs2/lock_dlm.c32
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/super.c3
-rw-r--r--fs/gfs2/util.c1
-rw-r--r--fs/iomap/buffered-io.c2
-rw-r--r--fs/jffs2/xattr.c3
-rw-r--r--fs/jfs/xattr.c4
-rw-r--r--fs/libfs.c55
-rw-r--r--fs/netfs/buffered_write.c2
-rw-r--r--fs/nfs/dir.c47
-rw-r--r--fs/nfs/filelayout/filelayout.c4
-rw-r--r--fs/nfs/fs_context.c9
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/nfs4proc.c25
-rw-r--r--fs/nfs/nfs4state.c12
-rw-r--r--fs/nfsd/nfsctl.c4
-rw-r--r--fs/nfsd/nfsfh.c4
-rw-r--r--fs/nilfs2/dir.c2
-rw-r--r--fs/nilfs2/ioctl.c2
-rw-r--r--fs/nilfs2/segment.c66
-rw-r--r--fs/ntfs3/dir.c1
-rw-r--r--fs/ntfs3/fslog.c3
-rw-r--r--fs/ntfs3/index.c6
-rw-r--r--fs/ntfs3/inode.c24
-rw-r--r--fs/ntfs3/ntfs.h2
-rw-r--r--fs/ntfs3/record.c11
-rw-r--r--fs/ntfs3/super.c2
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/ocfs2/journal.c192
-rw-r--r--fs/ocfs2/localalloc.c19
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/ocfs2/ocfs2.h27
-rw-r--r--fs/ocfs2/reservations.c2
-rw-r--r--fs/ocfs2/suballoc.c6
-rw-r--r--fs/ocfs2/super.c4
-rw-r--r--fs/openpromfs/inode.c8
-rw-r--r--fs/overlayfs/dir.c3
-rw-r--r--fs/overlayfs/export.c6
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/fd.c42
-rw-r--r--fs/proc/task_mmu.c9
-rw-r--r--fs/proc/vmcore.c2
-rw-r--r--fs/smb/client/cifsfs.c14
-rw-r--r--fs/smb/client/cifspdu.h2
-rw-r--r--fs/smb/client/inode.c4
-rw-r--r--fs/smb/client/smb2ops.c4
-rw-r--r--fs/smb/client/smb2transport.c2
-rw-r--r--fs/smb/server/mgmt/share_config.c6
-rw-r--r--fs/smb/server/oplock.c21
-rw-r--r--fs/smb/server/smb2pdu.c22
-rw-r--r--fs/smb/server/vfs.c17
-rw-r--r--fs/smb/server/vfs.h3
-rw-r--r--fs/smb/server/vfs_cache.c3
-rw-r--r--fs/tracefs/event_inode.c64
-rw-r--r--fs/tracefs/inode.c33
-rw-r--r--fs/udf/inode.c27
-rw-r--r--fs/udf/udftime.c11
-rw-r--r--fs/verity/init.c7
-rw-r--r--include/acpi/acpixf.h4
-rw-r--r--include/drm/bridge/aux-bridge.h2
-rw-r--r--include/drm/display/drm_dp_mst_helper.h1
-rw-r--r--include/drm/drm_displayid.h1
-rw-r--r--include/drm/drm_mipi_dsi.h6
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h6
-rw-r--r--include/linux/atomic/atomic-instrumented.h8
-rw-r--r--include/linux/atomic/atomic-long.h4
-rw-r--r--include/linux/bitops.h1
-rw-r--r--include/linux/bpf_verifier.h2
-rw-r--r--include/linux/counter.h1
-rw-r--r--include/linux/cpuset.h3
-rw-r--r--include/linux/dev_printk.h25
-rw-r--r--include/linux/etherdevice.h8
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/fortify-string.h25
-rw-r--r--include/linux/fpga/fpga-bridge.h10
-rw-r--r--include/linux/fpga/fpga-mgr.h26
-rw-r--r--include/linux/fpga/fpga-region.h13
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/ieee80211.h2
-rw-r--r--include/linux/io_uring_types.h3
-rw-r--r--include/linux/iommu.h2
-rw-r--r--include/linux/kcov.h49
-rw-r--r--include/linux/kexec.h6
-rw-r--r--include/linux/kprobes.h7
-rw-r--r--include/linux/ksm.h30
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/mlx5/driver.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h4
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/slot-gpio.h1
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/numa.h7
-rw-r--r--include/linux/pagemap.h38
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/printk.h2
-rw-r--r--include/linux/pse-pd/pse.h4
-rw-r--r--include/linux/regulator/driver.h3
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/skbuff.h9
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tty_driver.h8
-rw-r--r--include/media/cec.h1
-rw-r--r--include/net/ax25.h3
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h122
-rw-r--r--include/net/bluetooth/hci_core.h86
-rw-r--r--include/net/bluetooth/l2cap.h11
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_fib.h6
-rw-r--r--include/net/ip6_route.h11
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/netns/netfilter.h3
-rw-r--r--include/net/route.h11
-rw-r--r--include/net/rtnetlink.h1
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sock.h13
-rw-r--r--include/net/tcp.h5
-rw-r--r--include/net/tcp_ao.h7
-rw-r--r--include/scsi/scsi_devinfo.h4
-rw-r--r--include/scsi/scsi_transport_sas.h2
-rw-r--r--include/soc/qcom/cmd-db.h10
-rw-r--r--include/sound/tas2781-dsp.h7
-rw-r--r--include/trace/events/asoc.h2
-rw-r--r--include/trace/events/cachefiles.h8
-rw-r--r--include/uapi/drm/nouveau_drm.h7
-rw-r--r--include/uapi/linux/bpf.h2
-rw-r--r--include/uapi/linux/virtio_bt.h1
-rw-r--r--init/Kconfig4
-rw-r--r--io_uring/cancel.h4
-rw-r--r--io_uring/io-wq.c70
-rw-r--r--io_uring/io_uring.c1
-rw-r--r--io_uring/io_uring.h4
-rw-r--r--io_uring/napi.c24
-rw-r--r--io_uring/net.c1
-rw-r--r--io_uring/nop.c2
-rw-r--r--io_uring/rsrc.c2
-rw-r--r--io_uring/sqpoll.c14
-rw-r--r--kernel/auditfilter.c5
-rw-r--r--kernel/bpf/lpm_trie.c13
-rw-r--r--kernel/bpf/syscall.c16
-rw-r--r--kernel/bpf/verifier.c53
-rw-r--r--kernel/cgroup/cpuset.c143
-rw-r--r--kernel/cpu.c48
-rw-r--r--kernel/debug/kdb/kdb_io.c99
-rw-r--r--kernel/dma/map_benchmark.c22
-rw-r--r--kernel/events/core.c13
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rwxr-xr-xkernel/gen_kheaders.sh9
-rw-r--r--kernel/irq/cpuhotplug.c16
-rw-r--r--kernel/irq/irqdesc.c5
-rw-r--r--kernel/kcov.c1
-rw-r--r--kernel/kprobes.c6
-rw-r--r--kernel/padata.c8
-rw-r--r--kernel/pid_namespace.c1
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/rcu/rcutorture.c16
-rw-r--r--kernel/rcu/tasks.h2
-rw-r--r--kernel/rcu/tree_stall.h3
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c53
-rw-r--r--kernel/sched/topology.c2
-rw-r--r--kernel/time/clocksource.c42
-rw-r--r--kernel/time/tick-common.c42
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/bpf_trace.c8
-rw-r--r--kernel/trace/ftrace.c40
-rw-r--r--kernel/trace/preemptirq_delay_test.c1
-rw-r--r--kernel/trace/ring_buffer.c9
-rw-r--r--kernel/trace/rv/rv.c2
-rw-r--r--kernel/trace/trace_events_user.c76
-rw-r--r--kernel/trace/trace_probe.c4
-rw-r--r--lib/Kconfig.ubsan1
-rw-r--r--lib/fortify_kunit.c22
-rw-r--r--lib/kunit/device.c2
-rw-r--r--lib/kunit/test.c3
-rw-r--r--lib/kunit/try-catch.c9
-rw-r--r--lib/slub_kunit.c2
-rw-r--r--lib/strcat_kunit.c12
-rw-r--r--lib/string_kunit.c155
-rw-r--r--lib/strscpy_kunit.c51
-rw-r--r--lib/test_hmm.c8
-rw-r--r--lib/ubsan.h41
-rw-r--r--mm/cma.c4
-rw-r--r--mm/huge_memory.c77
-rw-r--r--mm/hugetlb.c22
-rw-r--r--mm/kmsan/core.c15
-rw-r--r--mm/ksm.c17
-rw-r--r--mm/memblock.c4
-rw-r--r--mm/memcontrol.c3
-rw-r--r--mm/memory-failure.c11
-rw-r--r--mm/page_table_check.c11
-rw-r--r--mm/pgtable-generic.c2
-rw-r--r--mm/shmem.c5
-rw-r--r--mm/userfaultfd.c35
-rw-r--r--mm/vmalloc.c5
-rw-r--r--net/9p/client.c2
-rw-r--r--net/atm/clip.c2
-rw-r--r--net/ax25/af_ax25.c6
-rw-r--r--net/ax25/ax25_dev.c50
-rw-r--r--net/batman-adv/originator.c2
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bluetooth/hci_conn.c3
-rw-r--r--net/bluetooth/hci_core.c141
-rw-r--r--net/bluetooth/hci_event.c208
-rw-r--r--net/bluetooth/hci_sock.c5
-rw-r--r--net/bluetooth/hci_sync.c209
-rw-r--r--net/bluetooth/iso.c149
-rw-r--r--net/bluetooth/l2cap_core.c85
-rw-r--r--net/bluetooth/l2cap_sock.c91
-rw-r--r--net/bluetooth/mgmt.c84
-rw-r--r--net/bpf/test_run.c6
-rw-r--r--net/bridge/br_device.c6
-rw-r--r--net/bridge/br_mst.c29
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/drop_monitor.c20
-rw-r--r--net/core/dst_cache.c4
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/net_namespace.c9
-rw-r--r--net/core/netdev-genl.c16
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/rtnetlink.c44
-rw-r--r--net/core/sock.c3
-rw-r--r--net/core/sock_map.c16
-rw-r--r--net/devlink/core.c6
-rw-r--r--net/ethernet/eth.c4
-rw-r--r--net/ethtool/ioctl.c2
-rw-r--r--net/ipv4/af_inet.c6
-rw-r--r--net/ipv4/cipso_ipv4.c12
-rw-r--r--net/ipv4/devinet.c22
-rw-r--r--net/ipv4/fib_frontend.c7
-rw-r--r--net/ipv4/icmp.c26
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ip_tunnel.c2
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c2
-rw-r--r--net/ipv4/route.c46
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_ao.c19
-rw-r--r--net/ipv4/tcp_dctcp.c13
-rw-r--r--net/ipv4/tcp_input.c1
-rw-r--r--net/ipv4/tcp_ipv4.c13
-rw-r--r--net/ipv4/tcp_timer.c6
-rw-r--r--net/ipv4/udp.c23
-rw-r--r--net/ipv4/xfrm4_policy.c2
-rw-r--r--net/ipv6/icmp.c8
-rw-r--r--net/ipv6/ila/ila_lwt.c4
-rw-r--r--net/ipv6/ioam6_iptunnel.c8
-rw-r--r--net/ipv6/ip6_fib.c6
-rw-r--r--net/ipv6/ip6_output.c18
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c71
-rw-r--r--net/ipv6/seg6.c5
-rw-r--r--net/ipv6/seg6_hmac.c42
-rw-r--r--net/ipv6/seg6_iptunnel.c25
-rw-r--r--net/ipv6/seg6_local.c8
-rw-r--r--net/ipv6/tcp_ipv6.c7
-rw-r--r--net/ipv6/udp.c31
-rw-r--r--net/ipv6/xfrm6_policy.c10
-rw-r--r--net/l2tp/l2tp_core.c44
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/mac80211/cfg.c16
-rw-r--r--net/mac80211/driver-ops.c17
-rw-r--r--net/mac80211/he.c10
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/iface.c26
-rw-r--r--net/mac80211/mesh_pathtbl.c13
-rw-r--r--net/mac80211/mlme.c53
-rw-r--r--net/mac80211/parse.c2
-rw-r--r--net/mac80211/scan.c16
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/mpls/mpls_iptunnel.c4
-rw-r--r--net/mptcp/pm_netlink.c21
-rw-r--r--net/mptcp/protocol.c10
-rw-r--r--net/mptcp/protocol.h3
-rw-r--r--net/mptcp/sockopt.c60
-rw-r--r--net/ncsi/internal.h2
-rw-r--r--net/ncsi/ncsi-manage.c73
-rw-r--r--net/ncsi/ncsi-rsp.c4
-rw-r--r--net/netfilter/core.c13
-rw-r--r--net/netfilter/ipset/ip_set_core.c92
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c33
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c16
-rw-r--r--net/netfilter/nf_conntrack_standalone.c15
-rw-r--r--net/netfilter/nf_flow_table_core.c8
-rw-r--r--net/netfilter/nf_flow_table_ip.c8
-rw-r--r--net/netfilter/nf_hooks_lwtunnel.c67
-rw-r--r--net/netfilter/nf_internals.h6
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_fib.c8
-rw-r--r--net/netfilter/nft_meta.c3
-rw-r--r--net/netfilter/nft_payload.c99
-rw-r--r--net/netfilter/nft_rt.c4
-rw-r--r--net/netrom/nr_route.c19
-rw-r--r--net/netrom/nr_timer.c3
-rw-r--r--net/nfc/nci/core.c18
-rw-r--r--net/openvswitch/actions.c6
-rw-r--r--net/openvswitch/flow.c3
-rw-r--r--net/packet/af_packet.c29
-rw-r--r--net/qrtr/ns.c27
-rw-r--r--net/sched/act_api.c3
-rw-r--r--net/sched/act_ct.c16
-rw-r--r--net/sched/sch_api.c1
-rw-r--r--net/sched/sch_generic.c5
-rw-r--r--net/sched/sch_htb.c22
-rw-r--r--net/sched/sch_multiq.c2
-rw-r--r--net/sched/sch_taprio.c29
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/smc/af_smc.c22
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c12
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/svc.c2
-rw-r--r--net/sunrpc/xprtrdma/verbs.c6
-rw-r--r--net/tipc/node.c1
-rw-r--r--net/tipc/udp_media.c2
-rw-r--r--net/tls/tls_main.c10
-rw-r--r--net/unix/af_unix.c153
-rw-r--r--net/unix/diag.c12
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/wireless/pmsr.c8
-rw-r--r--net/wireless/scan.c50
-rw-r--r--net/wireless/sysfs.c4
-rw-r--r--net/wireless/util.c7
-rw-r--r--net/xdp/xsk.c5
-rw-r--r--net/xfrm/xfrm_policy.c14
-rw-r--r--samples/landlock/sandboxer.c5
-rw-r--r--scripts/Makefile.vdsoinst2
-rw-r--r--scripts/atomic/kerneldoc/sub_and_test2
-rw-r--r--scripts/kconfig/symbol.c6
-rw-r--r--scripts/mod/modpost.c5
-rw-r--r--scripts/module.lds.S1
-rw-r--r--security/apparmor/audit.c6
-rw-r--r--security/apparmor/include/audit.h2
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_api.c16
-rw-r--r--security/integrity/ima/ima_policy.c15
-rw-r--r--security/integrity/ima/ima_template_lib.c17
-rw-r--r--security/landlock/fs.c13
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/include/audit.h4
-rw-r--r--security/selinux/ss/services.c5
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--sound/core/init.c20
-rw-r--r--sound/core/jack.c21
-rw-r--r--sound/core/seq/seq_ump_convert.c50
-rw-r--r--sound/core/timer.c8
-rw-r--r--sound/core/ump.c7
-rw-r--r--sound/core/ump_convert.c1
-rw-r--r--sound/hda/intel-dsp-config.c2
-rw-r--r--sound/pci/hda/cs35l41_hda.c6
-rw-r--r--sound/pci/hda/cs35l41_hda_property.c4
-rw-r--r--sound/pci/hda/cs35l56_hda.c10
-rw-r--r--sound/pci/hda/hda_component.c16
-rw-r--r--sound/pci/hda/hda_component.h7
-rw-r--r--sound/pci/hda/hda_cs_dsp_ctl.c47
-rw-r--r--sound/pci/hda/patch_realtek.c23
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c4
-rw-r--r--sound/soc/amd/acp/acp-legacy-common.c96
-rw-r--r--sound/soc/amd/acp/acp-pci.c9
-rw-r--r--sound/soc/amd/acp/amd.h10
-rw-r--r--sound/soc/amd/acp/chip_offset_byte.h1
-rw-r--r--sound/soc/codecs/cs42l43.c5
-rw-r--r--sound/soc/codecs/rt715-sdca-sdw.c4
-rw-r--r--sound/soc/codecs/tas2552.c15
-rw-r--r--sound/soc/codecs/tas2781-fmwlib.c109
-rw-r--r--sound/soc/codecs/tas2781-i2c.c4
-rw-r--r--sound/soc/intel/avs/boards/ssm4567.c1
-rw-r--r--sound/soc/intel/avs/cldma.c2
-rw-r--r--sound/soc/intel/avs/icl.c5
-rw-r--r--sound/soc/intel/avs/path.c1
-rw-r--r--sound/soc/intel/avs/pcm.c4
-rw-r--r--sound/soc/intel/avs/probes.c14
-rw-r--r--sound/soc/intel/boards/bxt_da7219_max98357a.c1
-rw-r--r--sound/soc/intel/boards/bxt_rt298.c1
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c1
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c4
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c1
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c2
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c1
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c2
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c1
-rw-r--r--sound/soc/intel/boards/skl_rt286.c1
-rw-r--r--sound/soc/intel/boards/sof_sdw.c18
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c3
-rw-r--r--sound/soc/mediatek/common/mtk-soundcard-driver.c6
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-dai-tdm.c4
-rw-r--r--sound/soc/sof/debug.c23
-rw-r--r--sound/soc/sof/intel/hda-dai.c31
-rw-r--r--sound/soc/sof/intel/lnl.c3
-rw-r--r--sound/soc/sof/intel/lnl.h15
-rw-r--r--sound/soc/sof/intel/mtl.c42
-rw-r--r--sound/soc/sof/intel/mtl.h4
-rw-r--r--sound/soc/sof/ipc4-topology.c8
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h24
-rw-r--r--tools/arch/x86/intel_sdsi/intel_sdsi.c48
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt10
-rw-r--r--tools/bpf/bpftool/common.c96
-rw-r--r--tools/bpf/bpftool/iter.c2
-rw-r--r--tools/bpf/bpftool/main.h3
-rw-r--r--tools/bpf/bpftool/prog.c5
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c4
-rw-r--r--tools/bpf/bpftool/struct_ops.c2
-rw-r--r--tools/bpf/resolve_btfids/main.c2
-rw-r--r--tools/include/nolibc/stdlib.h2
-rw-r--r--tools/include/uapi/linux/bpf.h2
-rw-r--r--tools/lib/bpf/bpf.c2
-rw-r--r--tools/lib/bpf/features.c2
-rw-r--r--tools/lib/bpf/libbpf.c9
-rw-r--r--tools/lib/subcmd/parse-options.c8
-rw-r--r--tools/perf/Documentation/perf-list.txt1
-rw-r--r--tools/perf/Makefile.perf7
-rw-r--r--tools/perf/bench/inject-buildid.c2
-rw-r--r--tools/perf/bench/uprobe.c2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-daemon.c4
-rw-r--r--tools/perf/builtin-record.c14
-rw-r--r--tools/perf/builtin-report.c2
-rw-r--r--tools/perf/builtin-sched.c7
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/transaction.json28
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv2
-rw-r--r--tools/perf/tests/builtin-test.c37
-rw-r--r--tools/perf/tests/code-reading.c10
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh2
-rw-r--r--tools/perf/tests/workloads/datasym.c16
-rw-r--r--tools/perf/ui/browser.c6
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/util/annotate.c19
-rw-r--r--tools/perf/util/auxtrace.c4
-rw-r--r--tools/perf/util/dwarf-aux.c202
-rw-r--r--tools/perf/util/dwarf-aux.h17
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c2
-rw-r--r--tools/perf/util/machine.c2
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c26
-rw-r--r--tools/perf/util/pmu.c119
-rw-r--r--tools/perf/util/pmu.h7
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/python.c10
-rw-r--r--tools/perf/util/stat-display.c3
-rw-r--r--tools/perf/util/symbol.c49
-rw-r--r--tools/perf/util/thread.c14
-rw-r--r--tools/testing/cxl/test/mem.c1
-rw-r--r--tools/testing/selftests/alsa/Makefile2
-rw-r--r--tools/testing/selftests/arm64/tags/tags_test.c4
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c3
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bench_local_storage_create.c5
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c20
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup.c8
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_global_subprogs.c7
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh13
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c8
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h2
-rw-r--r--tools/testing/selftests/cgroup/test_core.c7
-rw-r--r--tools/testing/selftests/cgroup/test_cpu.c2
-rw-r--r--tools/testing/selftests/cgroup/test_cpuset.c2
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c2
-rw-r--r--tools/testing/selftests/cgroup/test_hugetlb_memcg.c2
-rw-r--r--tools/testing/selftests/cgroup/test_kill.c2
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c2
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c2
-rw-r--r--tools/testing/selftests/cgroup/test_zswap.c2
-rw-r--r--tools/testing/selftests/damon/_damon_sysfs.py2
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh1
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc2
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc2
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi.c2
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c2
-rw-r--r--tools/testing/selftests/kselftest/ktap_helpers.sh4
-rw-r--r--tools/testing/selftests/kselftest_harness.h11
-rw-r--r--tools/testing/selftests/lib.mk12
-rw-r--r--tools/testing/selftests/mm/compaction_test.c91
-rw-r--r--tools/testing/selftests/mm/cow.c2
-rw-r--r--tools/testing/selftests/mm/gup_longterm.c2
-rw-r--r--tools/testing/selftests/mm/gup_test.c5
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c2
-rw-r--r--tools/testing/selftests/mm/madv_populate.c2
-rw-r--r--tools/testing/selftests/mm/mdwe_test.c1
-rw-r--r--tools/testing/selftests/mm/mkdirty.c2
-rw-r--r--tools/testing/selftests/mm/pagemap_ioctl.c4
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c2
-rw-r--r--tools/testing/selftests/mm/uffd-common.h1
-rwxr-xr-xtools/testing/selftests/net/amt.sh20
-rwxr-xr-xtools/testing/selftests/net/arp_ndisc_untracked_subnets.sh53
-rw-r--r--tools/testing/selftests/net/cmsg_sender.c20
-rw-r--r--tools/testing/selftests/net/config1
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mld.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool_rmon.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh186
-rw-r--r--tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh2
-rw-r--r--tools/testing/selftests/net/lib.sh135
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh21
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh10
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh46
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh2
-rwxr-xr-xtools/testing/selftests/power_supply/test_power_supply_properties.sh2
-rw-r--r--tools/testing/selftests/powerpc/dexcr/Makefile2
-rw-r--r--tools/testing/selftests/resctrl/Makefile4
-rw-r--r--tools/testing/selftests/riscv/hwprobe/.gitignore2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json44
-rw-r--r--tools/tracing/latency/latency-collector.c8
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c109
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c60
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c17
-rw-r--r--virt/kvm/guest_memfd.c5
-rw-r--r--virt/kvm/kvm_main.c5
1518 files changed, 15861 insertions, 9136 deletions
diff --git a/.editorconfig b/.editorconfig
index 854773350..29a30ccfc 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -5,7 +5,6 @@ root = true
[{*.{awk,c,dts,dtsi,dtso,h,mk,s,S},Kconfig,Makefile,Makefile.*}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = tab
indent_size = 8
@@ -13,7 +12,6 @@ indent_size = 8
[*.{json,py,rs}]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = true
insert_final_newline = true
indent_style = space
indent_size = 4
@@ -26,7 +24,6 @@ indent_size = 8
[*.yaml]
charset = utf-8
end_of_line = lf
-trim_trailing_whitespace = unset
insert_final_newline = true
indent_style = space
indent_size = 2
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index aa51d278c..83e77ee48 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -48,8 +48,8 @@ properties:
- 3
dma-channels:
- minItems: 1
- maxItems: 64
+ minimum: 1
+ maximum: 64
clocks:
minItems: 1
diff --git a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
index b1c13bab2..b2d19cfb8 100644
--- a/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/atmel,at91sam-i2c.yaml
@@ -77,7 +77,7 @@ required:
- clocks
allOf:
- - $ref: i2c-controller.yaml
+ - $ref: /schemas/i2c/i2c-controller.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
index ab151c9db..580003cdf 100644
--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
@@ -21,7 +21,7 @@ description: |
google,cros-ec-spi or google,cros-ec-i2c.
allOf:
- - $ref: i2c-controller.yaml#
+ - $ref: /schemas/i2c/i2c-controller.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
index 3d49d21ad..e1f450b80 100644
--- a/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/adi,axi-adc.yaml
@@ -28,6 +28,9 @@ properties:
reg:
maxItems: 1
+ clocks:
+ maxItems: 1
+
dmas:
maxItems: 1
@@ -48,6 +51,7 @@ required:
- compatible
- dmas
- reg
+ - clocks
additionalProperties: false
@@ -58,6 +62,7 @@ examples:
reg = <0x44a00000 0x10000>;
dmas = <&rx_dma 0>;
dma-names = "rx";
+ clocks = <&axi_clk>;
#io-backend-cells = <0>;
};
...
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
index cf456f8d9..c87677f5e 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov2680.yaml
@@ -37,15 +37,15 @@ properties:
active low.
maxItems: 1
- dovdd-supply:
+ DOVDD-supply:
description:
Definition of the regulator used as interface power supply.
- avdd-supply:
+ AVDD-supply:
description:
Definition of the regulator used as analog power supply.
- dvdd-supply:
+ DVDD-supply:
description:
Definition of the regulator used as digital power supply.
@@ -59,9 +59,9 @@ required:
- reg
- clocks
- clock-names
- - dovdd-supply
- - avdd-supply
- - dvdd-supply
+ - DOVDD-supply
+ - AVDD-supply
+ - DVDD-supply
- reset-gpios
- port
@@ -82,9 +82,9 @@ examples:
clock-names = "xvclk";
reset-gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
- dovdd-supply = <&sw2_reg>;
- dvdd-supply = <&sw2_reg>;
- avdd-supply = <&reg_peri_3p15v>;
+ DOVDD-supply = <&sw2_reg>;
+ DVDD-supply = <&sw2_reg>;
+ AVDD-supply = <&reg_peri_3p15v>;
port {
ov2680_to_mipi: endpoint {
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
index b6a7cb32f..835b6db00 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/rcar-pci-host.yaml
@@ -77,6 +77,9 @@ properties:
vpcie12v-supply:
description: The 12v regulator to use for PCIe.
+ iommu-map: true
+ iommu-map-mask: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
index 531008f0b..002b728cb 100644
--- a/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/rockchip,rk3399-pcie.yaml
@@ -37,6 +37,7 @@ properties:
description: This property is needed if using 24MHz OSC for RC's PHY.
ep-gpios:
+ maxItems: 1
description: pre-reset GPIO
vpcie12v-supply:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index ba966a78a..754345686 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -198,7 +198,6 @@ allOf:
enum:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
- - qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
then:
properties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index 91a6cc38f..c4c4fb38c 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -71,7 +71,6 @@ required:
- reg
- clocks
- clock-names
- - power-domains
- resets
- reset-names
- vdda-phy-supply
@@ -127,6 +126,21 @@ allOf:
- const: ref
- const: qref
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-ufs-phy
+ - qcom,msm8998-qmp-ufs-phy
+ then:
+ properties:
+ power-domains:
+ false
+ else:
+ required:
+ - power-domains
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index 0f200e3f9..fce7f8a19 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -15,9 +15,6 @@ description: |
properties:
compatible:
oneOf:
- - enum:
- - qcom,sc8180x-usb-hs-phy
- - qcom,usb-snps-femto-v2-phy
- items:
- enum:
- qcom,sa8775p-usb-hs-phy
@@ -26,6 +23,7 @@ properties:
- items:
- enum:
- qcom,sc7280-usb-hs-phy
+ - qcom,sc8180x-usb-hs-phy
- qcom,sdx55-usb-hs-phy
- qcom,sdx65-usb-hs-phy
- qcom,sm6375-usb-hs-phy
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
index bd72a326e..60f30a59f 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
@@ -97,7 +97,8 @@ patternProperties:
then:
properties:
groups:
- enum: [emmc, emmc_rst]
+ items:
+ enum: [emmc, emmc_rst]
- if:
properties:
function:
@@ -105,8 +106,9 @@ patternProperties:
then:
properties:
groups:
- enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
- rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
+ items:
+ enum: [esw, esw_p0_p1, esw_p2_p3_p4, rgmii_via_esw,
+ rgmii_via_gmac1, rgmii_via_gmac2, mdc_mdio]
- if:
properties:
function:
@@ -123,10 +125,11 @@ patternProperties:
then:
properties:
groups:
- enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
- i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
- i2s1_out_data, i2s2_out_data, i2s3_out_data,
- i2s4_out_data]
+ items:
+ enum: [i2s_in_mclk_bclk_ws, i2s1_in_data, i2s2_in_data,
+ i2s3_in_data, i2s4_in_data, i2s_out_mclk_bclk_ws,
+ i2s1_out_data, i2s2_out_data, i2s3_out_data,
+ i2s4_out_data]
- if:
properties:
function:
@@ -159,10 +162,11 @@ patternProperties:
then:
properties:
groups:
- enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
- pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
- pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
- pcie_wake, pcie_clkreq]
+ items:
+ enum: [pcie0_0_waken, pcie0_1_waken, pcie1_0_waken,
+ pcie0_0_clkreq, pcie0_1_clkreq, pcie1_0_clkreq,
+ pcie0_pad_perst, pcie1_pad_perst, pcie_pereset,
+ pcie_wake, pcie_clkreq]
- if:
properties:
function:
@@ -178,11 +182,12 @@ patternProperties:
then:
properties:
groups:
- enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
- pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
- pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
- pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
- pwm_ch7_0, pwm_0, pwm_1]
+ items:
+ enum: [pwm_ch1_0, pwm_ch1_1, pwm_ch1_2, pwm_ch2_0, pwm_ch2_1,
+ pwm_ch2_2, pwm_ch3_0, pwm_ch3_1, pwm_ch3_2, pwm_ch4_0,
+ pwm_ch4_1, pwm_ch4_2, pwm_ch4_3, pwm_ch5_0, pwm_ch5_1,
+ pwm_ch5_2, pwm_ch6_0, pwm_ch6_1, pwm_ch6_2, pwm_ch6_3,
+ pwm_ch7_0, pwm_0, pwm_1]
- if:
properties:
function:
@@ -260,33 +265,34 @@ patternProperties:
pins:
description:
An array of strings. Each string contains the name of a pin.
- enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
- RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
- I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
- I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
- G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
- G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
- NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
- MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
- MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
- MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
- MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
- PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
- GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
- PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
- AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
- PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
- WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
- WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
- EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
- EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
- WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
- UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
- UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
- PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
- GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
- TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
- WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
+ items:
+ enum: [GPIO_A, I2S1_IN, I2S1_OUT, I2S_BCLK, I2S_WS, I2S_MCLK, TXD0,
+ RXD0, SPI_WP, SPI_HOLD, SPI_CLK, SPI_MOSI, SPI_MISO, SPI_CS,
+ I2C_SDA, I2C_SCL, I2S2_IN, I2S3_IN, I2S4_IN, I2S2_OUT,
+ I2S3_OUT, I2S4_OUT, GPIO_B, MDC, MDIO, G2_TXD0, G2_TXD1,
+ G2_TXD2, G2_TXD3, G2_TXEN, G2_TXC, G2_RXD0, G2_RXD1, G2_RXD2,
+ G2_RXD3, G2_RXDV, G2_RXC, NCEB, NWEB, NREB, NDL4, NDL5, NDL6,
+ NDL7, NRB, NCLE, NALE, NDL0, NDL1, NDL2, NDL3, MDI_TP_P0,
+ MDI_TN_P0, MDI_RP_P0, MDI_RN_P0, MDI_TP_P1, MDI_TN_P1,
+ MDI_RP_P1, MDI_RN_P1, MDI_RP_P2, MDI_RN_P2, MDI_TP_P2,
+ MDI_TN_P2, MDI_TP_P3, MDI_TN_P3, MDI_RP_P3, MDI_RN_P3,
+ MDI_RP_P4, MDI_RN_P4, MDI_TP_P4, MDI_TN_P4, PMIC_SCL,
+ PMIC_SDA, SPIC1_CLK, SPIC1_MOSI, SPIC1_MISO, SPIC1_CS,
+ GPIO_D, WATCHDOG, RTS3_N, CTS3_N, TXD3, RXD3, PERST0_N,
+ PERST1_N, WLED_N, EPHY_LED0_N, AUXIN0, AUXIN1, AUXIN2,
+ AUXIN3, TXD4, RXD4, RTS4_N, CST4_N, PWM1, PWM2, PWM3, PWM4,
+ PWM5, PWM6, PWM7, GPIO_E, TOP_5G_CLK, TOP_5G_DATA,
+ WF0_5G_HB0, WF0_5G_HB1, WF0_5G_HB2, WF0_5G_HB3, WF0_5G_HB4,
+ WF0_5G_HB5, WF0_5G_HB6, XO_REQ, TOP_RST_N, SYS_WATCHDOG,
+ EPHY_LED0_N_JTDO, EPHY_LED1_N_JTDI, EPHY_LED2_N_JTMS,
+ EPHY_LED3_N_JTCLK, EPHY_LED4_N_JTRST_N, WF2G_LED_N,
+ WF5G_LED_N, GPIO_9, GPIO_10, GPIO_11, GPIO_12, UART1_TXD,
+ UART1_RXD, UART1_CTS, UART1_RTS, UART2_TXD, UART2_RXD,
+ UART2_CTS, UART2_RTS, SMI_MDC, SMI_MDIO, PCIE_PERESET_N,
+ PWM_0, GPIO_0, GPIO_1, GPIO_2, GPIO_3, GPIO_4, GPIO_5,
+ GPIO_6, GPIO_7, GPIO_8, UART0_TXD, UART0_RXD, TOP_2G_CLK,
+ TOP_2G_DATA, WF0_2G_HB0, WF0_2G_HB1, WF0_2G_HB2, WF0_2G_HB3,
+ WF0_2G_HB4, WF0_2G_HB5, WF0_2G_HB6]
bias-disable: true
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
index bb675c8ec..1b941b276 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm4450-tlmm.yaml
@@ -72,40 +72,24 @@ $defs:
description:
Specify the alternative function to be configured for the specified
pins.
- enum: [ gpio, atest_char, atest_char0, atest_char1, atest_char2,
- atest_char3, atest_usb0, atest_usb00, atest_usb01, atest_usb02,
- atest_usb03, audio_ref, cam_mclk, cci_async, cci_i2c,
- cci_timer0, cci_timer1, cci_timer2, cci_timer3, cci_timer4,
- cmu_rng0, cmu_rng1, cmu_rng2, cmu_rng3, coex_uart1, cri_trng,
- cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
- dp0_hot, gcc_gp1, gcc_gp2, gcc_gp3, host2wlan_sol, ibi_i3c,
- jitter_bist, mdp_vsync, mdp_vsync0, mdp_vsync1, mdp_vsync2,
- mdp_vsync3, mi2s0_data0, mi2s0_data1, mi2s0_sck, mi2s0_ws,
- mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws, mi2s_mclk0,
- mi2s_mclk1, nav_gpio0, nav_gpio1, nav_gpio2, pcie0_clk,
- phase_flag0, phase_flag1, phase_flag10, phase_flag11,
- phase_flag12, phase_flag13, phase_flag14, phase_flag15,
- phase_flag16, phase_flag17, phase_flag18, phase_flag19,
- phase_flag2, phase_flag20, phase_flag21, phase_flag22,
- phase_flag23, phase_flag24, phase_flag25, phase_flag26,
- phase_flag27, phase_flag28, phase_flag29, phase_flag3,
- phase_flag30, phase_flag31, phase_flag4, phase_flag5,
- phase_flag6, phase_flag7, phase_flag8, phase_flag9,
- pll_bist, pll_clk, prng_rosc0, prng_rosc1, prng_rosc2,
- prng_rosc3, qdss_cti, qdss_gpio, qdss_gpio0, qdss_gpio1,
- qdss_gpio10, qdss_gpio11, qdss_gpio12, qdss_gpio13, qdss_gpio14,
- qdss_gpio15, qdss_gpio2, qdss_gpio3, qdss_gpio4, qdss_gpio5,
- qdss_gpio6, qdss_gpio7, qdss_gpio8, qdss_gpio9, qlink0_enable,
- qlink0_request, qlink0_wmss, qlink1_enable, qlink1_request,
- qlink1_wmss, qlink2_enable, qlink2_request, qlink2_wmss,
- qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4, qup0_se5,
- qup0_se6, qup0_se7, qup1_se0, qup1_se1, qup1_se2, qup1_se3,
- qup1_se4, qup1_se5, qup1_se6, sd_write, tb_trig, tgu_ch0,
- tgu_ch1, tgu_ch2, tgu_ch3, tmess_prng0, tmess_prng1,
- tmess_prng2, tmess_prng3, tsense_pwm1, tsense_pwm2, uim0_clk,
- uim0_data, uim0_present, uim0_reset, uim1_clk, uim1_data,
- uim1_present, uim1_reset, usb0_hs, usb0_phy, vfr_0, vfr_1,
- vsense_trigger ]
+ enum: [ gpio, atest_char, atest_usb0, audio_ref_clk, cam_mclk,
+ cci_async_in0, cci_i2c, cci, cmu_rng, coex_uart1_rx,
+ coex_uart1_tx, cri_trng, dbg_out_clk, ddr_bist,
+ ddr_pxi0_test, ddr_pxi1_test, gcc_gp1_clk, gcc_gp2_clk,
+ gcc_gp3_clk, host2wlan_sol, ibi_i3c_qup0, ibi_i3c_qup1,
+ jitter_bist_ref, mdp_vsync0_out, mdp_vsync1_out,
+ mdp_vsync2_out, mdp_vsync3_out, mdp_vsync, nav,
+ pcie0_clk_req, phase_flag, pll_bist_sync, pll_clk_aux,
+ prng_rosc, qdss_cti_trig0, qdss_cti_trig1, qdss_gpio,
+ qlink0_enable, qlink0_request, qlink0_wmss_reset,
+ qup0_se0, qup0_se1, qup0_se2, qup0_se3, qup0_se4,
+ qup1_se0, qup1_se1, qup1_se2, qup1_se2_l2, qup1_se3,
+ qup1_se4, sd_write_protect, tb_trig_sdc1, tb_trig_sdc2,
+ tgu_ch0_trigout, tgu_ch1_trigout, tgu_ch2_trigout,
+ tgu_ch3_trigout, tmess_prng, tsense_pwm1_out,
+ tsense_pwm2_out, uim0, uim1, usb0_hs_ac, usb0_phy_ps,
+ vfr_0_mira, vfr_0_mirb, vfr_1, vsense_trigger_mirnat,
+ wlan1_adc_dtest0, wlan1_adc_dtest1 ]
required:
- pins
diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
index b634f57cd..ca81c8afb 100644
--- a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
@@ -18,13 +18,15 @@ properties:
oneOf:
- enum:
- loongson,ls2k1000-thermal
+ - loongson,ls2k2000-thermal
- items:
- enum:
- - loongson,ls2k2000-thermal
+ - loongson,ls2k0500-thermal
- const: loongson,ls2k1000-thermal
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
interrupts:
maxItems: 1
@@ -38,6 +40,24 @@ required:
- interrupts
- '#thermal-sensor-cells'
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - loongson,ls2k2000-thermal
+
+then:
+ properties:
+ reg:
+ minItems: 2
+ maxItems: 2
+
+else:
+ properties:
+ reg:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
index 0874fc21f..6577a61cc 100644
--- a/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
+++ b/Documentation/devicetree/bindings/usb/realtek,rts5411.yaml
@@ -65,6 +65,7 @@ patternProperties:
description: The hard wired USB devices
type: object
$ref: /schemas/usb/usb-device.yaml
+ additionalProperties: true
required:
- peer-hub
diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
index 604208534..833f68fb0 100644
--- a/Documentation/driver-api/fpga/fpga-bridge.rst
+++ b/Documentation/driver-api/fpga/fpga-bridge.rst
@@ -6,9 +6,12 @@ API to implement a new FPGA bridge
* struct fpga_bridge - The FPGA Bridge structure
* struct fpga_bridge_ops - Low level Bridge driver ops
-* fpga_bridge_register() - Create and register a bridge
+* __fpga_bridge_register() - Create and register a bridge
* fpga_bridge_unregister() - Unregister a bridge
+The helper macro ``fpga_bridge_register()`` automatically sets
+the module that registers the FPGA bridge as the owner.
+
.. kernel-doc:: include/linux/fpga/fpga-bridge.h
:functions: fpga_bridge
@@ -16,7 +19,7 @@ API to implement a new FPGA bridge
:functions: fpga_bridge_ops
.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_register
+ :functions: __fpga_bridge_register
.. kernel-doc:: drivers/fpga/fpga-bridge.c
:functions: fpga_bridge_unregister
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 49c0a9512..8d2b79f69 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -24,7 +24,8 @@ How to support a new FPGA device
--------------------------------
To add another FPGA manager, write a driver that implements a set of ops. The
-probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
+probe function calls ``fpga_mgr_register()`` or ``fpga_mgr_register_full()``,
+such as::
static const struct fpga_manager_ops socfpga_fpga_ops = {
.write_init = socfpga_fpga_ops_configure_init,
@@ -69,10 +70,11 @@ probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
}
Alternatively, the probe function could call one of the resource managed
-register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full().
-When these functions are used, the parameter syntax is the same, but the call
-to fpga_mgr_unregister() should be removed. In the above example, the
-socfpga_fpga_remove() function would not be required.
+register functions, ``devm_fpga_mgr_register()`` or
+``devm_fpga_mgr_register_full()``. When these functions are used, the
+parameter syntax is the same, but the call to ``fpga_mgr_unregister()`` should be
+removed. In the above example, the ``socfpga_fpga_remove()`` function would not be
+required.
The ops will implement whatever device specific register writes are needed to
do the programming sequence for this particular FPGA. These ops return 0 for
@@ -125,15 +127,19 @@ API for implementing a new FPGA Manager driver
* struct fpga_manager - the FPGA manager struct
* struct fpga_manager_ops - Low level FPGA manager driver ops
* struct fpga_manager_info - Parameter structure for fpga_mgr_register_full()
-* fpga_mgr_register_full() - Create and register an FPGA manager using the
+* __fpga_mgr_register_full() - Create and register an FPGA manager using the
fpga_mgr_info structure to provide the full flexibility of options
-* fpga_mgr_register() - Create and register an FPGA manager using standard
+* __fpga_mgr_register() - Create and register an FPGA manager using standard
arguments
-* devm_fpga_mgr_register_full() - Resource managed version of
- fpga_mgr_register_full()
-* devm_fpga_mgr_register() - Resource managed version of fpga_mgr_register()
+* __devm_fpga_mgr_register_full() - Resource managed version of
+ __fpga_mgr_register_full()
+* __devm_fpga_mgr_register() - Resource managed version of __fpga_mgr_register()
* fpga_mgr_unregister() - Unregister an FPGA manager
+Helper macros ``fpga_mgr_register_full()``, ``fpga_mgr_register()``,
+``devm_fpga_mgr_register_full()``, and ``devm_fpga_mgr_register()`` are available
+to ease the registration.
+
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_mgr_states
@@ -147,16 +153,16 @@ API for implementing a new FPGA Manager driver
:functions: fpga_manager_info
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_register_full
+ :functions: __fpga_mgr_register_full
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_register
+ :functions: __fpga_mgr_register
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: devm_fpga_mgr_register_full
+ :functions: __devm_fpga_mgr_register_full
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: devm_fpga_mgr_register
+ :functions: __devm_fpga_mgr_register
.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_unregister
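
The fpga-mgr.rst update above repoints the kernel-doc directives at the new double-underscore registration functions while the prose keeps referring to the fpga_mgr_register() and devm_fpga_mgr_register() helper macros. Below is a hedged sketch of the resource-managed flow the text describes; the dummy_* names, stub ops and device name are illustrative placeholders, not taken from this diff.

/*
 * Hedged sketch only: a minimal probe using the devm_fpga_mgr_register()
 * helper that the updated text describes. The dummy_* names and stub ops
 * are illustrative placeholders, not part of this diff.
 */
#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int dummy_fpga_write_init(struct fpga_manager *mgr,
				 struct fpga_image_info *info,
				 const char *buf, size_t count)
{
	return 0;	/* prepare the device to receive a bitstream */
}

static int dummy_fpga_write(struct fpga_manager *mgr, const char *buf,
			    size_t count)
{
	return 0;	/* push the next chunk of bitstream data */
}

static int dummy_fpga_write_complete(struct fpga_manager *mgr,
				     struct fpga_image_info *info)
{
	return 0;	/* wait for the device to signal configuration done */
}

static const struct fpga_manager_ops dummy_fpga_ops = {
	.write_init	= dummy_fpga_write_init,
	.write		= dummy_fpga_write,
	.write_complete	= dummy_fpga_write_complete,
};

static int dummy_fpga_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	/*
	 * The resource-managed helper unregisters the manager automatically
	 * on driver detach, so no remove() or fpga_mgr_unregister() call is
	 * needed, exactly as the text above explains.
	 */
	mgr = devm_fpga_mgr_register(&pdev->dev, "Dummy FPGA Manager",
				     &dummy_fpga_ops, NULL);
	return PTR_ERR_OR_ZERO(mgr);
}

static struct platform_driver dummy_fpga_driver = {
	.probe	= dummy_fpga_probe,
	.driver	= { .name = "dummy-fpga-mgr" },
};
module_platform_driver(dummy_fpga_driver);
MODULE_LICENSE("GPL");

Because the devm_ variant ties the manager's lifetime to the device, the explicit unregister path shown earlier in the document is only needed when the plain fpga_mgr_register() helpers are used.
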
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index dc55d60a0..2d03b5fb7 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -46,13 +46,16 @@ API to add a new FPGA region
----------------------------
* struct fpga_region - The FPGA region struct
-* struct fpga_region_info - Parameter structure for fpga_region_register_full()
-* fpga_region_register_full() - Create and register an FPGA region using the
+* struct fpga_region_info - Parameter structure for __fpga_region_register_full()
+* __fpga_region_register_full() - Create and register an FPGA region using the
fpga_region_info structure to provide the full flexibility of options
-* fpga_region_register() - Create and register an FPGA region using standard
+* __fpga_region_register() - Create and register an FPGA region using standard
arguments
* fpga_region_unregister() - Unregister an FPGA region
+Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()``
+automatically set the module that registers the FPGA region as the owner.
+
The FPGA region's probe function will need to get a reference to the FPGA
Manager it will be using to do the programming. This usually would happen
during the region's probe function.
@@ -82,10 +85,10 @@ following APIs to handle building or tearing down that list.
:functions: fpga_region_info
.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: fpga_region_register_full
+ :functions: __fpga_region_register_full
.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: fpga_region_register
+ :functions: __fpga_region_register
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_unregister
diff --git a/Documentation/iio/adis16475.rst b/Documentation/iio/adis16475.rst
index 91cabb7d8..130f9e97c 100644
--- a/Documentation/iio/adis16475.rst
+++ b/Documentation/iio/adis16475.rst
@@ -66,11 +66,9 @@ specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+-------------------------------------------+----------------------------------------------------------+
| in_accel_x_calibbias | Calibration offset for the X-axis accelerometer channel. |
+-------------------------------------------+----------------------------------------------------------+
-| in_accel_calibbias_x | x-axis acceleration offset correction |
-+-------------------------------------------+----------------------------------------------------------+
| in_accel_x_raw | Raw X-axis accelerometer channel value. |
+-------------------------------------------+----------------------------------------------------------+
-| in_accel_calibbias_y | y-axis acceleration offset correction |
+| in_accel_y_calibbias | Calibration offset for the Y-axis accelerometer channel. |
+-------------------------------------------+----------------------------------------------------------+
| in_accel_y_raw | Raw Y-axis accelerometer channel value. |
+-------------------------------------------+----------------------------------------------------------+
@@ -94,11 +92,9 @@ specific device folder path ``/sys/bus/iio/devices/iio:deviceX``.
+---------------------------------------+------------------------------------------------------+
| in_anglvel_x_calibbias | Calibration offset for the X-axis gyroscope channel. |
+---------------------------------------+------------------------------------------------------+
-| in_anglvel_calibbias_x | x-axis gyroscope offset correction |
-+---------------------------------------+------------------------------------------------------+
| in_anglvel_x_raw | Raw X-axis gyroscope channel value. |
+---------------------------------------+------------------------------------------------------+
-| in_anglvel_calibbias_y | y-axis gyroscope offset correction |
+| in_anglvel_y_calibbias | Calibration offset for the Y-axis gyroscope channel. |
+---------------------------------------+------------------------------------------------------+
| in_anglvel_y_raw | Raw Y-axis gyroscope channel value. |
+---------------------------------------+------------------------------------------------------+
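
The adis16475.rst table fix above replaces the stale in_accel_calibbias_x and in_anglvel_calibbias_y spellings with the real sysfs attribute names. A quick userspace check of one of the corrected attributes could look like the sketch below; the iio:device0 index is an assumption and depends on probe order on the target system.

/*
 * Hedged sketch: read one of the corrected adis16475 sysfs attributes.
 * The "iio:device0" index is an assumption; it depends on probe order.
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_accel_x_calibbias", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("X-axis accelerometer calibration offset: %s", buf);
	fclose(f);
	return 0;
}
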
diff --git a/Documentation/mm/arch_pgtable_helpers.rst b/Documentation/mm/arch_pgtable_helpers.rst
index 2466d3363..ad50ca6f4 100644
--- a/Documentation/mm/arch_pgtable_helpers.rst
+++ b/Documentation/mm/arch_pgtable_helpers.rst
@@ -140,7 +140,8 @@ PMD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pmd_swp_clear_soft_dirty | Clears a soft dirty swapped PMD |
+---------------------------+--------------------------------------------------+
-| pmd_mkinvalid | Invalidates a mapped PMD [1] |
+| pmd_mkinvalid | Invalidates a present PMD; do not call for |
+| | non-present PMD [1] |
+---------------------------+--------------------------------------------------+
| pmd_set_huge | Creates a PMD huge mapping |
+---------------------------+--------------------------------------------------+
@@ -196,7 +197,8 @@ PUD Page Table Helpers
+---------------------------+--------------------------------------------------+
| pud_mkdevmap | Creates a ZONE_DEVICE mapped PUD |
+---------------------------+--------------------------------------------------+
-| pud_mkinvalid | Invalidates a mapped PUD [1] |
+| pud_mkinvalid | Invalidates a present PUD; do not call for |
+| | non-present PUD [1] |
+---------------------------+--------------------------------------------------+
| pud_set_huge | Creates a PUD huge mapping |
+---------------------------+--------------------------------------------------+
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 72da7057e..dceeb0d76 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field as you registered the UMEM on that
socket. These two sockets will now share one and the same UMEM.
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
-
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
Note that a UMEM can be shared between sockets on the same queue id
and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
- Note that if you are using the XDP_SHARED_UMEM option, it is
- possible to switch traffic between any socket bound to the same
- umem.
-
Q: My packets are sometimes corrupted. What is wrong?
A: Care has to be taken not to feed the same buffer in the UMEM into
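
The af_xdp.rst rewrite above explains that, when sockets bound to different queue ids share a UMEM through XDP_SHARED_UMEM, no extra XDP program is needed and each additional socket brings its own FILL and COMPLETION rings via xsk_socket__create_shared(). A hedged sketch of that flow follows; the interface name, queue ids and frame count are placeholders, and the header path assumes libxdp's xsk.h (pre-1.0 libbpf shipped the same API as <bpf/xsk.h>).

/*
 * Hedged sketch of the XDP_SHARED_UMEM flow described above: two sockets on
 * different queue ids of the same netdev share one UMEM. Interface name,
 * queue ids and frame count are placeholders; the header path assumes libxdp.
 */
#include <stdlib.h>
#include <xdp/xsk.h>

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fill0, fill1, tx0, tx1;
	struct xsk_ring_cons comp0, comp1, rx0, rx1;
	struct xsk_socket *xsk0, *xsk1;
	struct xsk_umem *umem;
	void *bufs;

	if (posix_memalign(&bufs, FRAME_SIZE, (size_t)NUM_FRAMES * FRAME_SIZE))
		return 1;

	/* One UMEM; its FILL/COMPLETION rings end up serving the first socket. */
	if (xsk_umem__create(&umem, bufs, (__u64)NUM_FRAMES * FRAME_SIZE,
			     &fill0, &comp0, NULL))
		return 1;

	/* First socket: queue 0 of eth0 (placeholder name). */
	if (xsk_socket__create(&xsk0, "eth0", 0, umem, &rx0, &tx0, NULL))
		return 1;

	/*
	 * Second socket on queue 1 shares the UMEM (XDP_SHARED_UMEM) and gets
	 * its own FILL and COMPLETION rings, as the text above describes.
	 */
	if (xsk_socket__create_shared(&xsk1, "eth0", 1, umem, &rx1, &tx1,
				      &fill1, &comp1, NULL))
		return 1;

	/* ... fill the rings and do I/O on rx0/tx0 and rx1/tx1, then tear down ... */
	xsk_socket__delete(xsk1);
	xsk_socket__delete(xsk0);
	xsk_umem__delete(umem);
	free(bufs);
	return 0;
}

As the updated text notes, distributing traffic between the two sockets is then a matter of NIC-level packet steering to the right queue rather than something the XDP program has to do.
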
diff --git a/MAINTAINERS b/MAINTAINERS
index 28e20975c..3121709d9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1066,7 +1066,6 @@ L: linux-pm@vger.kernel.org
S: Supported
F: Documentation/admin-guide/pm/amd-pstate.rst
F: drivers/cpufreq/amd-pstate*
-F: include/linux/amd-pstate.h
F: tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py
AMD PTDMA DRIVER
diff --git a/Makefile b/Makefile
index 8302496ee..17dc3e553 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 9
-SUBLEVEL = 2
+SUBLEVEL = 7
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth
@@ -942,7 +942,6 @@ endif
ifdef CONFIG_LTO_CLANG
ifdef CONFIG_LTO_CLANG_THIN
CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit
-KBUILD_LDFLAGS += --thinlto-cache-dir=$(extmod_prefix).thinlto-cache
else
CC_FLAGS_LTO := -flto
endif
@@ -1477,7 +1476,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache rust/test \
+ compile_commands.json rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
# Directories & files removed with 'make mrproper'
@@ -1783,7 +1782,7 @@ PHONY += compile_commands.json
clean-dirs := $(KBUILD_EXTMOD)
clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers $(KBUILD_EXTMOD)/modules.nsdeps \
- $(KBUILD_EXTMOD)/compile_commands.json $(KBUILD_EXTMOD)/.thinlto-cache
+ $(KBUILD_EXTMOD)/compile_commands.json
PHONY += prepare
# now expand this into a simple variable to reduce the cost of shell evaluations
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
index d80440446..05d7a462e 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-common.dtsi
@@ -85,7 +85,7 @@
};
};
- panel {
+ panel_dpi: panel {
compatible = "sii,43wvf1g";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_display_power>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
index c84e9b052..151e9cee3 100644
--- a/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
+++ b/arch/arm/boot/dts/nxp/imx/imx53-qsb-hdmi.dtso
@@ -10,8 +10,6 @@
/plugin/;
&{/} {
- /delete-node/ panel;
-
hdmi: connector-hdmi {
compatible = "hdmi-connector";
label = "hdmi";
@@ -82,6 +80,10 @@
};
};
+&panel_dpi {
+ status = "disabled";
+};
+
&tve {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
index b566f878e..18f4f4940 100644
--- a/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/samsung/exynos4210-smdkv310.dts
@@ -88,7 +88,7 @@
&keypad {
samsung,keypad-num-rows = <2>;
samsung,keypad-num-columns = <8>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-names = "default";
pinctrl-0 = <&keypad_rows &keypad_cols>;
diff --git a/arch/arm/boot/dts/samsung/exynos4412-origen.dts b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
index 23b151645..10ab7bc90 100644
--- a/arch/arm/boot/dts/samsung/exynos4412-origen.dts
+++ b/arch/arm/boot/dts/samsung/exynos4412-origen.dts
@@ -453,7 +453,7 @@
&keypad {
samsung,keypad-num-rows = <3>;
samsung,keypad-num-columns = <2>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-0 = <&keypad_rows &keypad_cols>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
index 715dfcba1..e16df9e75 100644
--- a/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
+++ b/arch/arm/boot/dts/samsung/exynos4412-smdk4412.dts
@@ -69,7 +69,7 @@
&keypad {
samsung,keypad-num-rows = <3>;
samsung,keypad-num-columns = <8>;
- linux,keypad-no-autorepeat;
+ linux,input-no-autorepeat;
wakeup-source;
pinctrl-0 = <&keypad_rows &keypad_cols>;
pinctrl-names = "default";
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index bddc82f78..a83d29fed 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -110,6 +110,7 @@ CONFIG_DRM_PANEL_LVDS=y
CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_EDP=y
CONFIG_DRM_SIMPLE_BRIDGE=y
+CONFIG_DRM_DW_HDMI=y
CONFIG_DRM_LIMA=y
CONFIG_FB_SIMPLE=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
index ce90b3568..10896f9df 100644
--- a/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-s4.dtsi
@@ -65,10 +65,15 @@
#clock-cells = <0>;
};
- pwrc: power-controller {
- compatible = "amlogic,meson-s4-pwrc";
- #power-domain-cells = <1>;
- status = "okay";
+ firmware {
+ sm: secure-monitor {
+ compatible = "amlogic,meson-gxbb-sm";
+
+ pwrc: power-controller {
+ compatible = "amlogic,meson-s4-pwrc";
+ #power-domain-cells = <1>;
+ };
+ };
};
soc {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index 6f0811587..1b1880c60 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/phy/phy-imx8-pcie.h>
#include <dt-bindings/pwm/pwm.h>
#include "imx8mm.dtsi"
+#include "imx8mm-overdrive.dtsi"
/ {
chosen {
@@ -929,7 +930,7 @@
/* Verdin GPIO_9_DSI (pulled-up as active-low) */
pinctrl_gpio_9_dsi: gpio9dsigrp {
fsl,pins =
- <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x146>; /* SODIMM 17 */
+ <MX8MM_IOMUXC_NAND_RE_B_GPIO3_IO15 0x1c6>; /* SODIMM 17 */
};
/* Verdin GPIO_10_DSI (pulled-up as active-low) */
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index 43f1d45cc..f5115f9e8 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -254,7 +254,7 @@
<&clk IMX8MP_CLK_CLKOUT2>,
<&clk IMX8MP_AUDIO_PLL2_OUT>;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <13000000>, <13000000>, <156000000>;
+ assigned-clock-rates = <13000000>, <13000000>, <208000000>;
reset-gpios = <&gpio4 1 GPIO_ACTIVE_HIGH>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index f5491a608..3c063f8a9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
@@ -183,7 +183,7 @@
bluetooth {
compatible = "brcm,bcm4330-bt";
- shutdown-gpios = <&gpio4 16 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
index 77ac0efdf..c5585bfc4 100644
--- a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -36,7 +36,7 @@
regulator-name = "SD1_SPWR";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
- gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ gpio = <&lsio_gpio4 7 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
index 9921ea13a..a7cb571d7 100644
--- a/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx93-11x11-evk.dts
@@ -175,7 +175,6 @@
vmmc-supply = <&reg_usdhc2_vmmc>;
bus-width = <4>;
status = "okay";
- no-sdio;
no-mmc;
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
index ed1b5a7a6..d01023401 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
+++ b/arch/arm64/boot/dts/hisilicon/hi3798cv200.dtsi
@@ -58,7 +58,7 @@
gic: interrupt-controller@f1001000 {
compatible = "arm,gic-400";
reg = <0x0 0xf1001000 0x0 0x1000>, /* GICD */
- <0x0 0xf1002000 0x0 0x100>; /* GICC */
+ <0x0 0xf1002000 0x0 0x2000>; /* GICC */
#address-cells = <0>;
#interrupt-cells = <3>;
interrupt-controller;
diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
index 14d58859b..683ac1245 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
@@ -9,8 +9,8 @@
compatible = "nvidia,norrin", "nvidia,tegra132", "nvidia,tegra124";
aliases {
- rtc0 = "/i2c@7000d000/as3722@40";
- rtc1 = "/rtc@7000e000";
+ rtc0 = &as3722;
+ rtc1 = &tegra_rtc;
serial0 = &uarta;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra132.dtsi b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
index 7e24a212c..5bcccfef3 100644
--- a/arch/arm64/boot/dts/nvidia/tegra132.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra132.dtsi
@@ -572,7 +572,7 @@
status = "disabled";
};
- rtc@7000e000 {
+ tegra_rtc: rtc@7000e000 {
compatible = "nvidia,tegra124-rtc", "nvidia,tegra20-rtc";
reg = <0x0 0x7000e000 0x0 0x100>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
index 106554015..a22b4501c 100644
--- a/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs404-evb.dtsi
@@ -62,7 +62,7 @@
vddrf-supply = <&vreg_l1_1p3>;
vddch0-supply = <&vdd_ch0_3p3>;
- local-bd-address = [ 02 00 00 00 5a ad ];
+ local-bd-address = [ 00 00 00 00 00 00 ];
max-speed = <3200000>;
};
diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
index d0f82e122..91871a6ee 100644
--- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi
@@ -1799,6 +1799,7 @@
assigned-clock-rates = <100000000>;
power-domains = <&gcc PCIE_4_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
resets = <&gcc GCC_PCIE_4_PHY_BCR>;
reset-names = "phy";
@@ -1898,6 +1899,7 @@
assigned-clock-rates = <100000000>;
power-domains = <&gcc PCIE_3B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
resets = <&gcc GCC_PCIE_3B_PHY_BCR>;
reset-names = "phy";
@@ -1998,6 +2000,7 @@
assigned-clock-rates = <100000000>;
power-domains = <&gcc PCIE_3A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
resets = <&gcc GCC_PCIE_3A_PHY_BCR>;
reset-names = "phy";
@@ -2099,6 +2102,7 @@
assigned-clock-rates = <100000000>;
power-domains = <&gcc PCIE_2B_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
resets = <&gcc GCC_PCIE_2B_PHY_BCR>;
reset-names = "phy";
@@ -2199,6 +2203,7 @@
assigned-clock-rates = <100000000>;
power-domains = <&gcc PCIE_2A_GDSC>;
+ required-opps = <&rpmhpd_opp_nom>;
resets = <&gcc GCC_PCIE_2A_PHY_BCR>;
reset-names = "phy";
diff --git a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
index e8d8857ad..8c8374670 100644
--- a/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am62-verdin.dtsi
@@ -76,7 +76,7 @@
memory@80000000 {
device_type = "memory";
- reg = <0x00000000 0x80000000 0x00000000 0x40000000>; /* 1G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>; /* 2G RAM */
};
opp-table {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2c30d617e..8d39b8632 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1585,6 +1585,7 @@ CONFIG_INTERCONNECT_QCOM_SC8180X=y
CONFIG_INTERCONNECT_QCOM_SC8280XP=y
CONFIG_INTERCONNECT_QCOM_SDM845=y
CONFIG_INTERCONNECT_QCOM_SDX75=y
+CONFIG_INTERCONNECT_QCOM_SM6115=y
CONFIG_INTERCONNECT_QCOM_SM8150=m
CONFIG_INTERCONNECT_QCOM_SM8250=y
CONFIG_INTERCONNECT_QCOM_SM8350=m
diff --git a/arch/arm64/include/asm/asm-bug.h b/arch/arm64/include/asm/asm-bug.h
index c762038ba..6e73809f6 100644
--- a/arch/arm64/include/asm/asm-bug.h
+++ b/arch/arm64/include/asm/asm-bug.h
@@ -28,6 +28,7 @@
14470: .long 14471f - .; \
_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
.short flags; \
+ .align 2; \
.popsection; \
14471:
#else
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 0a7186a93..d4d7451c2 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -5,7 +5,6 @@
#ifndef __ASM_IRQFLAGS_H
#define __ASM_IRQFLAGS_H
-#include <asm/alternative.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 9e8999592..af3b206fa 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1036,18 +1036,18 @@
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
*/
-#define PIE_NONE_O 0x0
-#define PIE_R_O 0x1
-#define PIE_X_O 0x2
-#define PIE_RX_O 0x3
-#define PIE_RW_O 0x5
-#define PIE_RWnX_O 0x6
-#define PIE_RWX_O 0x7
-#define PIE_R 0x8
-#define PIE_GCS 0x9
-#define PIE_RX 0xa
-#define PIE_RW 0xc
-#define PIE_RWX 0xe
+#define PIE_NONE_O UL(0x0)
+#define PIE_R_O UL(0x1)
+#define PIE_X_O UL(0x2)
+#define PIE_RX_O UL(0x3)
+#define PIE_RW_O UL(0x5)
+#define PIE_RWnX_O UL(0x6)
+#define PIE_RWX_O UL(0x7)
+#define PIE_R UL(0x8)
+#define PIE_GCS UL(0x9)
+#define PIE_RX UL(0xa)
+#define PIE_RW UL(0xc)
+#define PIE_RWX UL(0xe)
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index ebb015899..82e8a6017 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -1535,6 +1535,27 @@ static void fpsimd_save_kernel_state(struct task_struct *task)
task->thread.kernel_fpsimd_cpu = smp_processor_id();
}
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
+{
+ WARN_ON(!system_supports_fpsimd());
+ __this_cpu_write(fpsimd_last_state.st, NULL);
+
+ /*
+ * Leaving streaming mode enabled will cause issues for any kernel
+ * NEON and leaving streaming mode or ZA enabled may increase power
+ * consumption.
+ */
+ if (system_supports_sme())
+ sme_smstop();
+
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+}
+
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
@@ -1552,7 +1573,7 @@ void fpsimd_thread_switch(struct task_struct *next)
if (test_tsk_thread_flag(next, TIF_KERNEL_FPSTATE)) {
fpsimd_load_kernel_state(next);
- set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
+ fpsimd_flush_cpu_state();
} else {
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@@ -1843,27 +1864,6 @@ void fpsimd_flush_task_state(struct task_struct *t)
}
/*
- * Invalidate any task's FPSIMD state that is present on this cpu.
- * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
- * before calling this function.
- */
-static void fpsimd_flush_cpu_state(void)
-{
- WARN_ON(!system_supports_fpsimd());
- __this_cpu_write(fpsimd_last_state.st, NULL);
-
- /*
- * Leaving streaming mode enabled will cause issues for any kernel
- * NEON and leaving streaming mode or ZA enabled may increase power
- * consumption.
- */
- if (system_supports_sme())
- sme_smstop();
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-}
-
-/*
* Save the FPSIMD state to memory and invalidate cpu view.
* This function must be called with preemption disabled.
*/
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c4a0a35e0..6cda738a4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -195,6 +195,23 @@ void kvm_arch_create_vm_debugfs(struct kvm *kvm)
kvm_sys_regs_create_debugfs(kvm);
}
+static void kvm_destroy_mpidr_data(struct kvm *kvm)
+{
+ struct kvm_mpidr_data *data;
+
+ mutex_lock(&kvm->arch.config_lock);
+
+ data = rcu_dereference_protected(kvm->arch.mpidr_data,
+ lockdep_is_held(&kvm->arch.config_lock));
+ if (data) {
+ rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
+ synchronize_rcu();
+ kfree(data);
+ }
+
+ mutex_unlock(&kvm->arch.config_lock);
+}
+
/**
* kvm_arch_destroy_vm - destroy the VM data structure
* @kvm: pointer to the KVM struct
@@ -209,7 +226,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
- kfree(kvm->arch.mpidr_data);
+ kvm_destroy_mpidr_data(kvm);
+
kfree(kvm->arch.sysreg_masks);
kvm_destroy_vcpus(kvm);
@@ -395,6 +413,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
+ /*
+ * This vCPU may have been created after mpidr_data was initialized.
+ * Throw out the pre-computed mappings if that is the case which forces
+ * KVM to fall back to iteratively searching the vCPUs.
+ */
+ kvm_destroy_mpidr_data(vcpu->kvm);
+
err = kvm_vgic_vcpu_init(vcpu);
if (err)
return err;
@@ -594,7 +619,8 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
mutex_lock(&kvm->arch.config_lock);
- if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1)
+ if (rcu_access_pointer(kvm->arch.mpidr_data) ||
+ atomic_read(&kvm->online_vcpus) == 1)
goto out;
kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -631,7 +657,7 @@ static void kvm_init_mpidr_data(struct kvm *kvm)
data->cmpidr_to_idx[index] = c;
}
- kvm->arch.mpidr_data = data;
+ rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
mutex_unlock(&kvm->arch.config_lock);
}
@@ -2470,21 +2496,27 @@ out_err:
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
- struct kvm_vcpu *vcpu;
+ struct kvm_vcpu *vcpu = NULL;
+ struct kvm_mpidr_data *data;
unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
- if (kvm->arch.mpidr_data) {
- u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr);
+ rcu_read_lock();
+ data = rcu_dereference(kvm->arch.mpidr_data);
- vcpu = kvm_get_vcpu(kvm,
- kvm->arch.mpidr_data->cmpidr_to_idx[idx]);
+ if (data) {
+ u16 idx = kvm_mpidr_index(data, mpidr);
+
+ vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]);
if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu))
vcpu = NULL;
+ }
+ rcu_read_unlock();
+
+ if (vcpu)
return vcpu;
- }
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index e2f762d95..11098eb7e 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -251,6 +251,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ case PSR_AA32_MODE_SYS:
if (!vcpu_el1_is_32bit(vcpu))
return -EINVAL;
break;
@@ -276,7 +277,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if (*vcpu_cpsr(vcpu) & PSR_MODE32_BIT) {
int i, nr_reg;
- switch (*vcpu_cpsr(vcpu)) {
+ switch (*vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK) {
/*
* Either we are dealing with user mode, and only the
* first 15 registers (+ PC) must be narrowed to 32bit.
diff --git a/arch/arm64/kvm/hyp/aarch32.c b/arch/arm64/kvm/hyp/aarch32.c
index 8d9670e66..449fa58cf 100644
--- a/arch/arm64/kvm/hyp/aarch32.c
+++ b/arch/arm64/kvm/hyp/aarch32.c
@@ -50,9 +50,23 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
u32 cpsr_cond;
int cond;
- /* Top two bits non-zero? Unconditional. */
- if (kvm_vcpu_get_esr(vcpu) >> 30)
+ /*
+ * These are the exception classes that could fire with a
+ * conditional instruction.
+ */
+ switch (kvm_vcpu_trap_get_class(vcpu)) {
+ case ESR_ELx_EC_CP15_32:
+ case ESR_ELx_EC_CP15_64:
+ case ESR_ELx_EC_CP14_MR:
+ case ESR_ELx_EC_CP14_LS:
+ case ESR_ELx_EC_FP_ASIMD:
+ case ESR_ELx_EC_CP10_ID:
+ case ESR_ELx_EC_CP14_64:
+ case ESR_ELx_EC_SVC32:
+ break;
+ default:
return true;
+ }
/* Is condition field valid? */
cond = kvm_vcpu_get_condition(vcpu);
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index f20941f83..ce3bcff34 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -355,7 +355,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
} else {
dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index c15ee1df0..dad60b1e2 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -919,8 +919,19 @@ free:
return ret;
}
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg)
{
+ struct kvm_vcpu *vcpu;
+ unsigned long c;
+
+ lockdep_assert_held(&kvm->arch.config_lock);
+
+ /* Garbage collect the region */
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (vcpu->arch.vgic_cpu.rdreg == rdreg)
+ vcpu->arch.vgic_cpu.rdreg = NULL;
+ }
+
list_del(&rdreg->list);
kfree(rdreg);
}
@@ -945,7 +956,7 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
mutex_lock(&kvm->arch.config_lock);
rdreg = vgic_v3_rdist_region_from_index(kvm, index);
- vgic_v3_free_redist_region(rdreg);
+ vgic_v3_free_redist_region(kvm, rdreg);
mutex_unlock(&kvm->arch.config_lock);
return ret;
}
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 0c2b82de8..08b4c09a0 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -317,7 +317,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
-void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
+void vgic_v3_free_redist_region(struct kvm *kvm, struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index 834cffcfb..7ba4b9807 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -12,6 +12,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
struct pt_regs *regs;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 54ad04dac..d1b94ce58 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -138,7 +138,7 @@ config LOONGARCH
select HAVE_LIVEPATCH
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
- select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS
+ select HAVE_OBJTOOL if AS_HAS_EXPLICIT_RELOCS && AS_HAS_THIN_ADD_SUB && !CC_IS_CLANG
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -257,6 +257,9 @@ config AS_HAS_EXPLICIT_RELOCS
config AS_HAS_FCSR_CLASS
def_bool $(as-instr,movfcsr2gr \$t0$(comma)\$fcsr0)
+config AS_HAS_THIN_ADD_SUB
+ def_bool $(cc-option,-Wa$(comma)-mthin-add-sub)
+
config AS_HAS_LSX_EXTENSION
def_bool $(as-instr,vld \$vr0$(comma)\$a0$(comma)0)
diff --git a/arch/loongarch/Kconfig.debug b/arch/loongarch/Kconfig.debug
index 98d60630c..8b2ce5b5d 100644
--- a/arch/loongarch/Kconfig.debug
+++ b/arch/loongarch/Kconfig.debug
@@ -28,6 +28,7 @@ config UNWINDER_PROLOGUE
config UNWINDER_ORC
bool "ORC unwinder"
+ depends on HAVE_OBJTOOL
select OBJTOOL
help
This option enables the ORC (Oops Rewind Capability) unwinder for
diff --git a/arch/loongarch/include/asm/hw_breakpoint.h b/arch/loongarch/include/asm/hw_breakpoint.h
index 21447fb1e..d78330916 100644
--- a/arch/loongarch/include/asm/hw_breakpoint.h
+++ b/arch/loongarch/include/asm/hw_breakpoint.h
@@ -75,6 +75,8 @@ do { \
#define CSR_MWPC_NUM 0x3f
#define CTRL_PLV_ENABLE 0x1e
+#define CTRL_PLV0_ENABLE 0x02
+#define CTRL_PLV3_ENABLE 0x10
#define MWPnCFG3_LoadEn 8
#define MWPnCFG3_StoreEn 9
@@ -101,7 +103,7 @@ struct perf_event;
struct perf_event_attr;
extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset);
+ int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw);
extern int hw_breakpoint_arch_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
diff --git a/arch/loongarch/include/asm/numa.h b/arch/loongarch/include/asm/numa.h
index 27f319b49..b5f9de9f1 100644
--- a/arch/loongarch/include/asm/numa.h
+++ b/arch/loongarch/include/asm/numa.h
@@ -56,6 +56,7 @@ extern int early_cpu_to_node(int cpu);
static inline void early_numa_add_cpu(int cpuid, s16 node) { }
static inline void numa_add_cpu(unsigned int cpu) { }
static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void set_cpuid_to_node(int cpuid, s16 node) { }
static inline int early_cpu_to_node(int cpu)
{
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 52b638059..f948a0676 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -13,8 +13,7 @@
#define perf_arch_fetch_caller_regs(regs, __ip) { \
(regs)->csr_era = (__ip); \
- (regs)->regs[3] = current_stack_pointer; \
- (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+ (regs)->regs[3] = (unsigned long) __builtin_frame_address(0); \
}
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 45b507a7b..d9eafd3ee 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -42,7 +42,7 @@
.macro JUMP_VIRT_ADDR temp1 temp2
li.d \temp1, CACHE_BASE
pcaddi \temp2, 0
- or \temp1, \temp1, \temp2
+ bstrins.d \temp1, \temp2, (DMW_PABITS - 1), 0
jirl zero, \temp1, 0xc
.endm
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
index 73858c902..bff058317 100644
--- a/arch/loongarch/kernel/ftrace_dyn.c
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -287,6 +287,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index c4f7de2e2..4677ea8fa 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -22,7 +22,7 @@
_head:
.word MZ_MAGIC /* "MZ", MS-DOS header */
.org 0x8
- .dword kernel_entry /* Kernel entry point */
+ .dword _kernel_entry /* Kernel entry point (physical address) */
.dword _kernel_asize /* Kernel image effective size */
.quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
.org 0x38 /* 0x20 ~ 0x37 reserved */
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
index fc55c4de2..621ad7634 100644
--- a/arch/loongarch/kernel/hw_breakpoint.c
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -174,11 +174,21 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
static int hw_breakpoint_control(struct perf_event *bp,
enum hw_breakpoint_ops ops)
{
- u32 ctrl;
+ u32 ctrl, privilege;
int i, max_slots, enable;
+ struct pt_regs *regs;
struct perf_event **slots;
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+ if (arch_check_bp_in_kernelspace(info))
+ privilege = CTRL_PLV0_ENABLE;
+ else
+ privilege = CTRL_PLV3_ENABLE;
+
+ /* Whether bp belongs to a task. */
+ if (bp->hw.target)
+ regs = task_pt_regs(bp->hw.target);
+
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
/* Breakpoint */
slots = this_cpu_ptr(bp_on_reg);
@@ -197,31 +207,38 @@ static int hw_breakpoint_control(struct perf_event *bp,
switch (ops) {
case HW_BREAKPOINT_INSTALL:
/* Set the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
- write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
- write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
- write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
- write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, privilege);
} else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | privilege);
}
enable = csr_read64(LOONGARCH_CSR_CRMD);
csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ if (bp->hw.target)
+ regs->csr_prmd |= CSR_PRMD_PWE;
break;
case HW_BREAKPOINT_UNINSTALL:
/* Reset the FWPnCFG/MWPnCFG 1~4 register. */
- write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
- write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
- write_wb_reg(CSR_CFG_MASK, i, 0, 0);
- write_wb_reg(CSR_CFG_MASK, i, 1, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
- write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
- write_wb_reg(CSR_CFG_ASID, i, 0, 0);
- write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ } else {
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ }
+ if (bp->hw.target)
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
break;
}
@@ -283,7 +300,7 @@ int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
* to generic breakpoint descriptions.
*/
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
- int *gen_len, int *gen_type, int *offset)
+ int *gen_len, int *gen_type)
{
/* Type */
switch (ctrl.type) {
@@ -303,11 +320,6 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
return -EINVAL;
}
- if (!ctrl.len)
- return -EINVAL;
-
- *offset = __ffs(ctrl.len);
-
/* Len */
switch (ctrl.len) {
case LOONGARCH_BREAKPOINT_LEN_1:
@@ -386,21 +398,17 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
struct arch_hw_breakpoint *hw)
{
int ret;
- u64 alignment_mask, offset;
+ u64 alignment_mask;
/* Build the arch_hw_breakpoint. */
ret = arch_build_bp_info(bp, attr, hw);
if (ret)
return ret;
- if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
- alignment_mask = 0x7;
- else
+ if (hw->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
alignment_mask = 0x3;
- offset = hw->address & alignment_mask;
-
- hw->address &= ~alignment_mask;
- hw->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ }
return 0;
}
@@ -471,12 +479,15 @@ void breakpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(bp_on_reg);
for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
- bp = slots[i];
- if (bp == NULL)
- continue;
- perf_bp_event(bp, regs);
+ if ((csr_read32(LOONGARCH_CSR_FWPS) & (0x1 << i))) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_FWPS);
+ update_bp_registers(regs, 0, 0);
+ }
}
- update_bp_registers(regs, 0, 0);
}
NOKPROBE_SYMBOL(breakpoint_handler);
@@ -488,12 +499,15 @@ void watchpoint_handler(struct pt_regs *regs)
slots = this_cpu_ptr(wp_on_reg);
for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
- wp = slots[i];
- if (wp == NULL)
- continue;
- perf_bp_event(wp, regs);
+ if ((csr_read32(LOONGARCH_CSR_MWPS) & (0x1 << i))) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ csr_write32(0x1 << i, LOONGARCH_CSR_MWPS);
+ update_bp_registers(regs, 0, 1);
+ }
}
- update_bp_registers(regs, 0, 1);
}
NOKPROBE_SYMBOL(watchpoint_handler);
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index c114c5ef1..200109de1 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -494,28 +494,14 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type, offset;
+ int err, len, type;
- err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ err = arch_bp_generic_fields(ctrl, &len, &type);
if (err)
return err;
- switch (note_type) {
- case NT_LOONGARCH_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_LOONGARCH_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
attr->bp_len = len;
attr->bp_type = type;
- attr->bp_addr += offset;
return 0;
}
@@ -609,10 +595,27 @@ static int ptrace_hbp_set_ctrl(unsigned int note_type,
return PTR_ERR(bp);
attr = bp->attr;
- decode_ctrl_reg(uctrl, &ctrl);
- err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
- if (err)
- return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ decode_ctrl_reg(uctrl, &ctrl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (uctrl & CTRL_PLV_ENABLE) {
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+ attr.disabled = 0;
+ } else {
+ attr.disabled = 1;
+ }
return modify_user_hw_breakpoint(bp, &attr);
}
@@ -643,6 +646,10 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
struct perf_event *bp;
struct perf_event_attr attr;
+ /* Kernel-space address cannot be monitored by user-space */
+ if ((unsigned long)addr >= XKPRANGE)
+ return -EINVAL;
+
bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
if (IS_ERR(bp))
return PTR_ERR(bp);
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 60e0fe97f..e8f7a54ff 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -282,7 +282,7 @@ static void __init fdt_setup(void)
return;
/* Prefer to use built-in dtb, checking its legality first. */
- if (!fdt_check_header(__dtb_start))
+ if (IS_ENABLED(CONFIG_BUILTIN_DTB) && !fdt_check_header(__dtb_start))
fdt_pointer = __dtb_start;
else
fdt_pointer = efi_fdt_pointer(); /* Fallback to firmware dtb */
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index aabee0b28..fd22a3275 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -262,7 +262,6 @@ static void __init fdt_smp_setup(void)
if (cpuid == loongson_sysconf.boot_cpu_id) {
cpu = 0;
- numa_add_cpu(cpu);
} else {
cpu = cpumask_next_zero(-1, cpu_present_mask);
}
@@ -272,6 +271,9 @@ static void __init fdt_smp_setup(void)
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
+
+ early_numa_add_cpu(cpu, 0);
+ set_cpuid_to_node(cpuid, 0);
}
loongson_sysconf.nr_cpus = num_processors;
@@ -456,6 +458,7 @@ void smp_prepare_boot_cpu(void)
set_cpu_possible(0, true);
set_cpu_online(0, true);
set_my_cpu_offset(per_cpu_offset(0));
+ numa_add_cpu(0);
rr_node = first_node(node_online_map);
for_each_possible_cpu(cpu) {
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index e8e97dbf9..3c7595342 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -6,6 +6,7 @@
#define PAGE_SIZE _PAGE_SIZE
#define RO_EXCEPTION_TABLE_ALIGN 4
+#define PHYSADDR_MASK 0xffffffffffff /* 48-bit */
/*
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
@@ -142,10 +143,11 @@ SECTIONS
#ifdef CONFIG_EFI_STUB
/* header symbols */
- _kernel_asize = _end - _text;
- _kernel_fsize = _edata - _text;
- _kernel_vsize = _end - __initdata_begin;
- _kernel_rsize = _edata - __initdata_begin;
+ _kernel_entry = ABSOLUTE(kernel_entry & PHYSADDR_MASK);
+ _kernel_asize = ABSOLUTE(_end - _text);
+ _kernel_fsize = ABSOLUTE(_edata - _text);
+ _kernel_vsize = ABSOLUTE(_end - __initdata_begin);
+ _kernel_rsize = ABSOLUTE(_edata - __initdata_begin);
#endif
.gptab.sdata : {
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 6ffa29585..99837d4e8 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -3,8 +3,8 @@ config M68K
bool
default y
select ARCH_32BIT_OFF_T
- select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_HAS_CPU_FINALIZE_INIT if MMU
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DMA_PREP_COHERENT if M68K_NONCOHERENT_DMA && !COLDFIRE
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 3bcdd32a6..338b47491 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -430,7 +430,9 @@ resume:
movec %a0,%dfc
/* restore status register */
- movew %a1@(TASK_THREAD+THREAD_SR),%sr
+ movew %a1@(TASK_THREAD+THREAD_SR),%d0
+ oriw #0x0700,%d0
+ movew %d0,%sr
rts
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 4c8f8cbfa..e7f0f72c1 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -453,30 +453,18 @@ void mac_poweroff(void)
void mac_reset(void)
{
- if (macintosh_config->adb_type == MAC_ADB_II &&
- macintosh_config->ident != MAC_MODEL_SE30) {
- /* need ROMBASE in booter */
- /* indeed, plus need to MAP THE ROM !! */
-
- if (mac_bi_data.rombase == 0)
- mac_bi_data.rombase = 0x40800000;
-
- /* works on some */
- rom_reset = (void *) (mac_bi_data.rombase + 0xa);
-
- local_irq_disable();
- rom_reset();
#ifdef CONFIG_ADB_CUDA
- } else if (macintosh_config->adb_type == MAC_ADB_EGRET ||
- macintosh_config->adb_type == MAC_ADB_CUDA) {
+ if (macintosh_config->adb_type == MAC_ADB_EGRET ||
+ macintosh_config->adb_type == MAC_ADB_CUDA) {
cuda_restart();
+ } else
#endif
#ifdef CONFIG_ADB_PMU
- } else if (macintosh_config->adb_type == MAC_ADB_PB2) {
+ if (macintosh_config->adb_type == MAC_ADB_PB2) {
pmu_restart();
+ } else
#endif
- } else if (CPU_IS_030) {
-
+ if (CPU_IS_030) {
/* 030-specific reset routine. The idea is general, but the
* specific registers to reset are '030-specific. Until I
* have a non-030 machine, I can't test anything else.
@@ -524,6 +512,18 @@ void mac_reset(void)
"jmp %/a0@\n\t" /* jump to the reset vector */
".chip 68k"
: : "r" (offset), "a" (rombase) : "a0");
+ } else {
+ /* need ROMBASE in booter */
+ /* indeed, plus need to MAP THE ROM !! */
+
+ if (mac_bi_data.rombase == 0)
+ mac_bi_data.rombase = 0x40800000;
+
+ /* works on some */
+ rom_reset = (void *)(mac_bi_data.rombase + 0xa);
+
+ local_irq_disable();
+ rom_reset();
}
/* should never get here */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 4393bee64..85c4d29ef 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code and low level code
CFLAGS_REMOVE_timer.o = -pg
CFLAGS_REMOVE_intc.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_process.o = -pg
endif
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
index 85dbda4a0..03da36dc6 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo-static.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -18,7 +18,7 @@ static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
#define err_printk(x) \
- early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
+ pr_err("ERROR: Microblaze " x "-different for kernel and DTS\n");
void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
{
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index ec180ab92..66a8ba19c 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -110,7 +110,8 @@ static void bcm6358_quirks(void)
* RAC flush causes kernel panics on BCM6358 when booting from TP1
* because the bootloader is not initializing it properly.
*/
- bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ bmips_rac_flush_disable = !!(read_c0_brcm_cmt_local() & (1 << 31)) ||
+ !!BMIPS_GET_CBR();
}
static void bcm6368_quirks(void)
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 30e86861c..b1ee3c48e 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -322,7 +322,7 @@ static inline void ehb(void)
" .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
_ASM_SET_MFTC0 \
- " mftc0 $1, " #rt ", " #sel " \n" \
+ " mftc0 %0, " #rt ", " #sel " \n" \
_ASM_UNSET_MFTC0 \
" .set pop \n" \
: "=r" (__res)); \
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index 874ed6df9..34b9323bd 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -112,8 +112,8 @@ retry:
* gives them time to settle
*/
if (where == PCI_VENDOR_ID) {
- if (ret == 0xffffffff || ret == 0x00000000 ||
- ret == 0x0000ffff || ret == 0xffff0000) {
+ if (*val == 0xffffffff || *val == 0x00000000 ||
+ *val == 0x0000ffff || *val == 0xffff0000) {
if (delay > 4)
return 0;
delay *= 2;
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 2583e318e..b080c7c6c 100644..100755
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -230,12 +230,18 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
{
union cvmx_pcie_address pcie_addr;
union cvmx_pciercx_cfg006 pciercx_cfg006;
+ union cvmx_pciercx_cfg032 pciercx_cfg032;
pciercx_cfg006.u32 =
cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG006(pcie_port));
if ((bus <= pciercx_cfg006.s.pbnum) && (dev != 0))
return 0;
+ pciercx_cfg032.u32 =
+ cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
+ if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
+ return 0;
+
pcie_addr.u64 = 0;
pcie_addr.config.upper = 2;
pcie_addr.config.io = 1;
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 86e02929f..3c27d1c72 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -65,7 +65,7 @@ void machine_restart(char *cmd)
}
/*
- * This is used if pm_power_off has not been set by a power management
+ * This is used if a sys-off handler was not set by a power management
* driver, in this case we can assume we are on a simulator. On
* OpenRISC simulators l.nop 1 will trigger the simulator exit.
*/
@@ -89,10 +89,8 @@ void machine_halt(void)
void machine_power_off(void)
{
printk(KERN_INFO "*** MACHINE POWER OFF ***\n");
- if (pm_power_off != NULL)
- pm_power_off();
- else
- default_power_off();
+ do_kernel_power_off();
+ default_power_off();
}
/*
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 9370888c9..90554a555 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -180,29 +180,39 @@ asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector)
asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address)
{
- int code = FPE_FLTUNK;
- unsigned long fpcsr = regs->fpcsr;
-
- if (fpcsr & SPR_FPCSR_IVF)
- code = FPE_FLTINV;
- else if (fpcsr & SPR_FPCSR_OVF)
- code = FPE_FLTOVF;
- else if (fpcsr & SPR_FPCSR_UNF)
- code = FPE_FLTUND;
- else if (fpcsr & SPR_FPCSR_DZF)
- code = FPE_FLTDIV;
- else if (fpcsr & SPR_FPCSR_IXF)
- code = FPE_FLTRES;
-
- /* Clear all flags */
- regs->fpcsr &= ~SPR_FPCSR_ALLF;
-
- force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
+ if (user_mode(regs)) {
+ int code = FPE_FLTUNK;
+ unsigned long fpcsr = regs->fpcsr;
+
+ if (fpcsr & SPR_FPCSR_IVF)
+ code = FPE_FLTINV;
+ else if (fpcsr & SPR_FPCSR_OVF)
+ code = FPE_FLTOVF;
+ else if (fpcsr & SPR_FPCSR_UNF)
+ code = FPE_FLTUND;
+ else if (fpcsr & SPR_FPCSR_DZF)
+ code = FPE_FLTDIV;
+ else if (fpcsr & SPR_FPCSR_IXF)
+ code = FPE_FLTRES;
+
+ /* Clear all flags */
+ regs->fpcsr &= ~SPR_FPCSR_ALLF;
+
+ force_sig_fault(SIGFPE, code, (void __user *)regs->pc);
+ } else {
+ pr_emerg("KERNEL: Illegal fpe exception 0x%.8lx\n", regs->pc);
+ die("Die:", regs, SIGFPE);
+ }
}
asmlinkage void do_trap(struct pt_regs *regs, unsigned long address)
{
- force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
+ if (user_mode(regs)) {
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc);
+ } else {
+ pr_emerg("KERNEL: Illegal trap exception 0x%.8lx\n", regs->pc);
+ die("Die:", regs, SIGILL);
+ }
}
asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index ba4c05bc2..839471887 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -31,18 +31,17 @@ void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);
-void flush_kernel_dcache_page_addr(const void *addr);
-
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
+/* The only way to flush a vmap range is to flush the whole cache */
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
void flush_kernel_vmap_range(void *vaddr, int size);
void invalidate_kernel_vmap_range(void *vaddr, int size);
-#define flush_cache_vmap(start, end) flush_cache_all()
+void flush_cache_vmap(unsigned long start, unsigned long end);
#define flush_cache_vmap_early(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) flush_cache_all()
+void flush_cache_vunmap(unsigned long start, unsigned long end);
void flush_dcache_folio(struct folio *folio);
#define flush_dcache_folio flush_dcache_folio
@@ -77,17 +76,11 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
-/* defined in pacache.S exported in cache.c used by flush_anon_page */
-void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
#define ARCH_HAS_FLUSH_ON_KUNMAP
-static inline void kunmap_flush_on_unmap(const void *addr)
-{
- flush_kernel_dcache_page_addr(addr);
-}
+void kunmap_flush_on_unmap(const void *addr);
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index ad4e15d12..4bea2e957 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -8,6 +8,7 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#ifndef __ASSEMBLY__
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 974accac0..babf65751 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -448,14 +448,17 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
return pte;
}
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ return READ_ONCE(*ptep);
+}
+#define ptep_get ptep_get
+
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
pte_t pte;
- if (!pte_young(*ptep))
- return 0;
-
- pte = *ptep;
+ pte = ptep_get(ptep);
if (!pte_young(pte)) {
return 0;
}
@@ -463,17 +466,10 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
return 1;
}
-struct mm_struct;
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t old_pte;
-
- old_pte = *ptep;
- set_pte(ptep, __pte(0));
-
- return old_pte;
-}
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep);
+struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
set_pte(ptep, pte_wrprotect(*ptep));
@@ -511,7 +507,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index 715c96ba2..e84883c6b 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -4,23 +4,11 @@
#include <uapi/asm/signal.h>
-#define _NSIG 64
-/* bits-per-word, where word apparently means 'long' not 'int' */
-#define _NSIG_BPW BITS_PER_LONG
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
# ifndef __ASSEMBLY__
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- /* next_signal() assumes this is a long - no choice */
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
#include <asm/sigcontext.h>
#endif /* !__ASSEMBLY */
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 8e4895c5e..40d7a574c 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -57,10 +57,20 @@
#include <asm-generic/signal-defs.h>
+#define _NSIG 64
+#define _NSIG_BPW (sizeof(unsigned long) * 8)
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
# ifndef __ASSEMBLY__
# include <linux/types.h>
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
/* Avoid too many header ordering problems. */
struct siginfo;
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 422f3e1e6..483bfafd9 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
+#include <linux/vmalloc.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
@@ -31,20 +32,31 @@
#include <asm/mmu_context.h>
#include <asm/cachectl.h>
+#define PTR_PAGE_ALIGN_DOWN(addr) PTR_ALIGN_DOWN(addr, PAGE_SIZE)
+
+/*
+ * When nonzero, use _PAGE_ACCESSED bit to try to reduce the number
+ * of page flushes done flush_cache_page_if_present. There are some
+ * pros and cons in using this option. It may increase the risk of
+ * random segmentation faults.
+ */
+#define CONFIG_FLUSH_PAGE_ACCESSED 0
+
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
+/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
-
-/* Internal implementation in arch/parisc/kernel/pacache.S */
void flush_data_cache_local(void *); /* flushes local data-cache only */
void flush_instruction_cache_local(void); /* flushes local code-cache only */
+static void flush_kernel_dcache_page_addr(const void *addr);
+
/* On some machines (i.e., ones with the Merced bus), there can be
* only a single PxTLB broadcast at a time; this must be guaranteed
* by software. We need a spinlock around all TLB flushes to ensure
@@ -321,6 +333,18 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
{
if (!static_branch_likely(&parisc_has_cache))
return;
+
+ /*
+ * The TLB is the engine of coherence on parisc. The CPU is
+ * entitled to speculate any page with a TLB mapping, so here
+ * we kill the mapping then flush the page along a special flush
+ * only alias mapping. This guarantees that the page is no longer
+ * in the cache for any process and nor may it be speculatively
+ * read in (until the user or kernel specifically accesses it,
+ * of course).
+ */
+ flush_tlb_page(vma, vmaddr);
+
preempt_disable();
flush_dcache_page_asm(physaddr, vmaddr);
if (vma->vm_flags & VM_EXEC)
@@ -328,46 +352,44 @@ __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
preempt_enable();
}
-static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
+static void flush_kernel_dcache_page_addr(const void *addr)
{
- unsigned long flags, space, pgd, prot;
-#ifdef CONFIG_TLB_PTLOCK
- unsigned long pgd_lock;
-#endif
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- vmaddr &= PAGE_MASK;
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent data cache move-in */
preempt_disable();
+ flush_dcache_page_asm(__pa(vaddr), vaddr);
+ preempt_enable();
+}
- /* Set context for flush */
- local_irq_save(flags);
- prot = mfctl(8);
- space = mfsp(SR_USER);
- pgd = mfctl(25);
-#ifdef CONFIG_TLB_PTLOCK
- pgd_lock = mfctl(28);
-#endif
- switch_mm_irqs_off(NULL, vma->vm_mm, NULL);
- local_irq_restore(flags);
-
- flush_user_dcache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(vmaddr, vmaddr + PAGE_SIZE);
- flush_tlb_page(vma, vmaddr);
+static void flush_kernel_icache_page_addr(const void *addr)
+{
+ unsigned long vaddr = (unsigned long)addr;
+ unsigned long flags;
- /* Restore previous context */
- local_irq_save(flags);
-#ifdef CONFIG_TLB_PTLOCK
- mtctl(pgd_lock, 28);
-#endif
- mtctl(pgd, 25);
- mtsp(space, SR_USER);
- mtctl(prot, 8);
- local_irq_restore(flags);
+ /* Purge TLB entry to remove translation on all CPUs */
+ purge_tlb_start(flags);
+ pdtlb(SR_KERNEL, addr);
+ purge_tlb_end(flags);
+ /* Use tmpalias flush to prevent instruction cache move-in */
+ preempt_disable();
+ flush_icache_page_asm(__pa(vaddr), vaddr);
preempt_enable();
}
+void kunmap_flush_on_unmap(const void *addr)
+{
+ flush_kernel_dcache_page_addr(addr);
+}
+EXPORT_SYMBOL(kunmap_flush_on_unmap);
+
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
unsigned int nr)
{
@@ -375,13 +397,16 @@ void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
for (;;) {
flush_kernel_dcache_page_addr(kaddr);
- flush_kernel_icache_page(kaddr);
+ flush_kernel_icache_page_addr(kaddr);
if (--nr == 0)
break;
kaddr += PAGE_SIZE;
}
}
+/*
+ * Walk page directory for MM to find PTEP pointer for address ADDR.
+ */
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -410,6 +435,41 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
+/*
+ * Return user physical address. Returns 0 if page is not present.
+ */
+static inline unsigned long get_upa(struct mm_struct *mm, unsigned long addr)
+{
+ unsigned long flags, space, pgd, prot, pa;
+#ifdef CONFIG_TLB_PTLOCK
+ unsigned long pgd_lock;
+#endif
+
+ /* Save context */
+ local_irq_save(flags);
+ prot = mfctl(8);
+ space = mfsp(SR_USER);
+ pgd = mfctl(25);
+#ifdef CONFIG_TLB_PTLOCK
+ pgd_lock = mfctl(28);
+#endif
+
+ /* Set context for lpa_user */
+ switch_mm_irqs_off(NULL, mm, NULL);
+ pa = lpa_user(addr);
+
+ /* Restore previous context */
+#ifdef CONFIG_TLB_PTLOCK
+ mtctl(pgd_lock, 28);
+#endif
+ mtctl(pgd, 25);
+ mtsp(space, SR_USER);
+ mtctl(prot, 8);
+ local_irq_restore(flags);
+
+ return pa;
+}
+
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping = folio_flush_mapping(folio);
@@ -458,50 +518,23 @@ void flush_dcache_folio(struct folio *folio)
if (addr + nr * PAGE_SIZE > vma->vm_end)
nr = (vma->vm_end - addr) / PAGE_SIZE;
- if (parisc_requires_coherency()) {
- for (i = 0; i < nr; i++) {
- pte_t *ptep = get_ptep(vma->vm_mm,
- addr + i * PAGE_SIZE);
- if (!ptep)
- continue;
- if (pte_needs_flush(*ptep))
- flush_user_cache_page(vma,
- addr + i * PAGE_SIZE);
- /* Optimise accesses to the same table? */
- pte_unmap(ptep);
- }
- } else {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
- * The TLB is the engine of coherence on parisc:
- * The CPU is entitled to speculate any page
- * with a TLB mapping, so here we kill the
- * mapping then flush the page along a special
- * flush only alias mapping. This guarantees that
- * the page is no-longer in the cache for any
- * process and nor may it be speculatively read
- * in (until the user or kernel specifically
- * accesses it, of course)
+ * Software is allowed to have any number
+ * of private mappings to a page.
*/
- for (i = 0; i < nr; i++)
- flush_tlb_page(vma, addr + i * PAGE_SIZE);
- if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
- != (addr & (SHM_COLOUR - 1))) {
- for (i = 0; i < nr; i++)
- __flush_cache_page(vma,
- addr + i * PAGE_SIZE,
- (pfn + i) * PAGE_SIZE);
- /*
- * Software is allowed to have any number
- * of private mappings to a page.
- */
- if (!(vma->vm_flags & VM_SHARED))
- continue;
- if (old_addr)
- pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, vma->vm_file);
- if (nr == folio_nr_pages(folio))
- old_addr = addr;
- }
+ if (!(vma->vm_flags & VM_SHARED))
+ continue;
+ if (old_addr)
+ pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
+ old_addr = addr;
}
WARN_ON(++count == 4096);
}
@@ -591,35 +624,28 @@ extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
-void flush_kernel_dcache_page_addr(const void *addr)
-{
- unsigned long flags;
-
- flush_kernel_dcache_page_asm(addr);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-
static void flush_cache_page_if_present(struct vm_area_struct *vma,
- unsigned long vmaddr, unsigned long pfn)
+ unsigned long vmaddr)
{
+#if CONFIG_FLUSH_PAGE_ACCESSED
bool needs_flush = false;
- pte_t *ptep;
+ pte_t *ptep, pte;
- /*
- * The pte check is racy and sometimes the flush will trigger
- * a non-access TLB miss. Hopefully, the page has already been
- * flushed.
- */
ptep = get_ptep(vma->vm_mm, vmaddr);
if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
+ pte = ptep_get(ptep);
+ needs_flush = pte_needs_flush(pte);
pte_unmap(ptep);
}
if (needs_flush)
- flush_cache_page(vma, vmaddr, pfn);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pte_pfn(pte)));
+#else
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long physaddr = get_upa(mm, vmaddr);
+
+ if (physaddr)
+ __flush_cache_page(vma, vmaddr, PAGE_ALIGN_DOWN(physaddr));
+#endif
}
void copy_user_highpage(struct page *to, struct page *from,
@@ -629,7 +655,7 @@ void copy_user_highpage(struct page *to, struct page *from,
kfrom = kmap_local_page(from);
kto = kmap_local_page(to);
- flush_cache_page_if_present(vma, vaddr, page_to_pfn(from));
+ __flush_cache_page(vma, vaddr, PFN_PHYS(page_to_pfn(from)));
copy_page_asm(kto, kfrom);
kunmap_local(kto);
kunmap_local(kfrom);
@@ -638,16 +664,17 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
- flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(dst));
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long user_vaddr, void *dst, void *src, int len)
{
- flush_cache_page_if_present(vma, user_vaddr, page_to_pfn(page));
+ __flush_cache_page(vma, user_vaddr, PFN_PHYS(page_to_pfn(page)));
memcpy(dst, src, len);
+ flush_kernel_dcache_page_addr(PTR_PAGE_ALIGN_DOWN(src));
}
/* __flush_tlb_range()
@@ -681,32 +708,10 @@ int __flush_tlb_range(unsigned long sid, unsigned long start,
static void flush_cache_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- pte_t *ptep;
-
- for (addr = start; addr < end; addr += PAGE_SIZE) {
- bool needs_flush = false;
- /*
- * The vma can contain pages that aren't present. Although
- * the pte search is expensive, we need the pte to find the
- * page pfn and to check whether the page should be flushed.
- */
- ptep = get_ptep(vma->vm_mm, addr);
- if (ptep) {
- needs_flush = pte_needs_flush(*ptep);
- pfn = pte_pfn(*ptep);
- pte_unmap(ptep);
- }
- if (needs_flush) {
- if (parisc_requires_coherency()) {
- flush_user_cache_page(vma, addr);
- } else {
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
- }
- }
- }
+ unsigned long addr;
+
+ for (addr = start; addr < end; addr += PAGE_SIZE)
+ flush_cache_page_if_present(vma, addr);
}
static inline unsigned long mm_total_size(struct mm_struct *mm)
@@ -757,21 +762,19 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled()))
return;
flush_tlb_range(vma, start, end);
- flush_cache_all();
+ if (vma->vm_flags & VM_EXEC)
+ flush_cache_all();
+ else
+ flush_data_cache();
return;
}
- flush_cache_pages(vma, start, end);
+ flush_cache_pages(vma, start & PAGE_MASK, end);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
- if (WARN_ON(!pfn_valid(pfn)))
- return;
- if (parisc_requires_coherency())
- flush_user_cache_page(vma, vmaddr);
- else
- __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
@@ -779,34 +782,133 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned lon
if (!PageAnon(page))
return;
- if (parisc_requires_coherency()) {
- if (vma->vm_flags & VM_SHARED)
- flush_data_cache();
- else
- flush_user_cache_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(page_to_pfn(page)));
+}
+
+int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t pte = ptep_get(ptep);
+
+ if (!pte_young(pte))
+ return 0;
+ set_pte(ptep, pte_mkold(pte));
+#if CONFIG_FLUSH_PAGE_ACCESSED
+ __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(pte)));
+#endif
+ return 1;
+}
+
+/*
+ * After a PTE is cleared, we have no way to flush the cache for
+ * the physical page. On PA8800 and PA8900 processors, these lines
+ * can cause random cache corruption. Thus, we must flush the cache
+ * as well as the TLB when clearing a PTE that's valid.
+ */
+pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
+{
+ struct mm_struct *mm = (vma)->vm_mm;
+ pte_t pte = ptep_get_and_clear(mm, addr, ptep);
+ unsigned long pfn = pte_pfn(pte);
+
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ else if (pte_accessible(mm, pte))
+ flush_tlb_page(vma, addr);
+
+ return pte;
+}
+
+/*
+ * The physical address for pages in the ioremap case can be obtained
+ * from the vm_struct struct. I wasn't able to successfully handle the
+ * vmalloc and vmap cases. We have an array of struct page pointers in
+ * the uninitialized vmalloc case but the flush failed using page_to_pfn.
+ */
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long addr, physaddr;
+ struct vm_struct *vm;
+
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+
+ if (end - start >= parisc_cache_flush_threshold) {
+ flush_cache_all();
return;
}
- flush_tlb_page(vma, vmaddr);
- preempt_disable();
- flush_dcache_page_asm(page_to_phys(page), vmaddr);
- preempt_enable();
+ if (WARN_ON_ONCE(!is_vmalloc_addr((void *)start))) {
+ flush_cache_all();
+ return;
+ }
+
+ vm = find_vm_area((void *)start);
+ if (WARN_ON_ONCE(!vm)) {
+ flush_cache_all();
+ return;
+ }
+
+ /* The physical addresses of IOREMAP regions are contiguous */
+ if (vm->flags & VM_IOREMAP) {
+ physaddr = vm->phys_addr;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ preempt_disable();
+ flush_dcache_page_asm(physaddr, start);
+ flush_icache_page_asm(physaddr, start);
+ preempt_enable();
+ physaddr += PAGE_SIZE;
+ }
+ return;
+ }
+
+ flush_cache_all();
}
+EXPORT_SYMBOL(flush_cache_vmap);
+/*
+ * The vm_struct has been retired and the page table is set up. The
+ * last page in the range is a guard page. Its physical address can't
+ * be determined using lpa, so there is no way to flush the range
+ * using flush_dcache_page_asm.
+ */
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ /* Prevent cache move-in */
+ flush_tlb_kernel_range(start, end);
+ flush_data_cache();
+}
+EXPORT_SYMBOL(flush_cache_vunmap);
+
+/*
+ * On systems with PA8800/PA8900 processors, there is no way to flush
+ * a vmap range other than using the architected loop to flush the
+ * entire cache. The page directory is not set up, so we can't use
+ * fdc, etc. FDCE/FICE don't work to flush a portion of the cache.
+ * L2 is physically indexed but FDCE/FICE instructions in virtual
+ * mode output their virtual address on the core bus, not their
+ * real address. As a result, the L2 cache index formed from the
+ * virtual address will most likely not be the same as the L2 index
+ * formed from the real address.
+ */
void flush_kernel_vmap_range(void *vaddr, int size)
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- flush_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
@@ -818,15 +920,18 @@ void invalidate_kernel_vmap_range(void *vaddr, int size)
/* Ensure DMA is complete */
asm_syncdma();
- if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
- (unsigned long)size >= parisc_cache_flush_threshold) {
- flush_tlb_kernel_range(start, end);
- flush_data_cache();
+ flush_tlb_kernel_range(start, end);
+
+ if (!static_branch_likely(&parisc_has_dcache))
+ return;
+
+ /* If interrupts are disabled, we can only do local flush */
+ if (WARN_ON(IS_ENABLED(CONFIG_SMP) && arch_irqs_disabled())) {
+ flush_data_cache_local(NULL);
return;
}
- purge_kernel_dcache_range_asm(start, end);
- flush_tlb_kernel_range(start, end);
+ flush_data_cache();
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 621a4b386..c91f9c2e6 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -206,6 +206,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 6f0c92e81..dcf61cbd3 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -22,6 +22,7 @@ EXPORT_SYMBOL(memset);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
EXPORT_SYMBOL(__cmpxchg_u32);
EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/crypto/.gitignore b/arch/powerpc/crypto/.gitignore
index e1094f08f..e9fe73aac 100644
--- a/arch/powerpc/crypto/.gitignore
+++ b/arch/powerpc/crypto/.gitignore
@@ -1,3 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
aesp10-ppc.S
+aesp8-ppc.S
ghashp10-ppc.S
+ghashp8-ppc.S
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a41e542ba..7a8495660 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -524,7 +524,7 @@ long plpar_hcall_norets_notrace(unsigned long opcode, ...);
* Used for all but the craziest of phyp interfaces (see plpar_hcall9)
*/
#define PLPAR_HCALL_BUFSIZE 4
-long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall_raw: - Make a hypervisor call without calculating hcall stats
@@ -538,7 +538,7 @@ long plpar_hcall(unsigned long opcode, unsigned long *retbuf, ...);
* plpar_hcall, but plpar_hcall_raw works in real mode and does not
* calculate hypervisor call statistics.
*/
-long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL_BUFSIZE], ...);
/**
* plpar_hcall9: - Make a pseries hypervisor call with up to 9 return arguments
@@ -549,8 +549,8 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
* PLPAR_HCALL9_BUFSIZE to size the return argument buffer.
*/
#define PLPAR_HCALL9_BUFSIZE 9
-long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
-long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long retbuf[static PLPAR_HCALL9_BUFSIZE], ...);
/* pseries hcall tracing */
extern struct static_key hcall_tracepoint_key;
@@ -570,7 +570,7 @@ struct hvcall_mpp_data {
unsigned long backing_mem;
};
-int h_get_mpp(struct hvcall_mpp_data *);
+long h_get_mpp(struct hvcall_mpp_data *mpp_data);
struct hvcall_mpp_x_data {
unsigned long coalesced_bytes;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 08c550ed4..ba2e13bb8 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -585,12 +585,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_inw(port) _rec_inw(port)
#define __do_inl(port) _rec_inl(port)
#else /* CONFIG_PPC32 */
-#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)_IO_BASE+port);
-#define __do_inb(port) readb((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inw(port) readw((PCI_IO_ADDR)_IO_BASE + port);
-#define __do_inl(port) readl((PCI_IO_ADDR)_IO_BASE + port);
+#define __do_outb(val, port) writeb(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outw(val, port) writew(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_outl(val, port) writel(val,(PCI_IO_ADDR)(_IO_BASE+port));
+#define __do_inb(port) readb((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inw(port) readw((PCI_IO_ADDR)(_IO_BASE + port));
+#define __do_inl(port) readl((PCI_IO_ADDR)(_IO_BASE + port));
#endif /* !CONFIG_PPC32 */
#ifdef CONFIG_EEH
@@ -606,12 +606,12 @@ __do_out_asm(_rec_outl, "stwbrx")
#define __do_writesw(a, b, n) _outsw(PCI_FIX_ADDR(a),(b),(n))
#define __do_writesl(a, b, n) _outsl(PCI_FIX_ADDR(a),(b),(n))
-#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)_IO_BASE+(p), (b), (n))
-#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
-#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)_IO_BASE+(p),(b),(n))
+#define __do_insb(p, b, n) readsb((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insw(p, b, n) readsw((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_insl(p, b, n) readsl((PCI_IO_ADDR)(_IO_BASE+(p)), (b), (n))
+#define __do_outsb(p, b, n) writesb((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsw(p, b, n) writesw((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
+#define __do_outsl(p, b, n) writesl((PCI_IO_ADDR)(_IO_BASE+(p)),(b),(n))
#define __do_memset_io(addr, c, n) \
_memset_io(PCI_FIX_ADDR(addr), c, n)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 005601243..076ae60b4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -510,6 +510,7 @@
#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LHA(r, base, i) (0xa8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
@@ -532,6 +533,7 @@
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_DIVW(d, a, b) (0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -550,6 +552,8 @@
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_EXTSB(d, a) (0x7c000774 | ___PPC_RA(d) | ___PPC_RS(a))
+#define PPC_RAW_EXTSH(d, a) (0x7c000734 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index de10437fd..4cba724c8 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -92,9 +92,25 @@ __pu_failed: \
: label)
#endif
+#ifdef CONFIG_CC_IS_CLANG
+#define DS_FORM_CONSTRAINT "Z<>"
+#else
+#define DS_FORM_CONSTRAINT "YZ<>"
+#endif
+
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PREFIXED
#define __put_user_asm2_goto(x, ptr, label) \
__put_user_asm_goto(x, ptr, label, "std")
+#else
+#define __put_user_asm2_goto(x, addr, label) \
+ asm goto ("1: std%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), DS_FORM_CONSTRAINT (*addr) \
+ : \
+ : label)
+#endif // CONFIG_PPC_KERNEL_PREFIXED
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label) \
asm goto( \
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 072ebe7f2..f8208c027 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
struct pt_regs *regs;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(nip, parent_nip);
if (bit < 0)
return;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8e86eb577..692a7c6f5 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4857,7 +4857,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* entering a nested guest in which case the decrementer is now owned
* by L2 and the L1 decrementer is provided in hdec_expires
*/
- if (!kvmhv_is_nestedv2() && kvmppc_core_pending_dec(vcpu) &&
+ if (kvmppc_core_pending_dec(vcpu) &&
((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
index 8e6f5355f..1091f7a83 100644
--- a/arch/powerpc/kvm/book3s_hv_nestedv2.c
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -71,8 +71,8 @@ gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
}
if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
- kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
- cfg->vcpu_run_output_cfg);
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
if (rc < 0)
return rc;
}
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 83823db34..2975ea084 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -170,6 +170,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
unsigned long old_pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return __pmd(old_pmd);
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 2f39c50ca..a0c4f1bde 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -450,10 +450,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
}
break;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
break;
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, src_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
break;
@@ -467,10 +473,16 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (imm == 1) {
EMIT(PPC_RAW_MR(dst_reg, src2_reg));
} else if (is_power_of_2((u32)imm)) {
- EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
+ if (off)
+ EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, ilog2(imm)));
+ else
+ EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
} else {
PPC_LI32(_R0, imm);
- EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
+ if (off)
+ EMIT(PPC_RAW_DIVW(dst_reg, src2_reg, _R0));
+ else
+ EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
}
break;
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
@@ -480,11 +492,19 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
if (!is_power_of_2((u32)imm)) {
bpf_set_seen_register(ctx, tmp_reg);
PPC_LI32(tmp_reg, imm);
- EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
+ if (off)
+ EMIT(PPC_RAW_DIVW(_R0, src2_reg, tmp_reg));
+ else
+ EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(_R0, src2_reg, ilog2(imm)));
+ EMIT(PPC_RAW_ADDZE(_R0, _R0));
+ EMIT(PPC_RAW_SLWI(_R0, _R0, ilog2(imm)));
+ EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
} else {
imm = ilog2((u32)imm);
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
@@ -497,11 +517,21 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
imm = -imm;
if (!is_power_of_2(imm))
return -EOPNOTSUPP;
- if (imm == 1)
+ if (imm == 1) {
EMIT(PPC_RAW_LI(dst_reg, 0));
- else
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else if (off) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
+ EMIT(PPC_RAW_XOR(dst_reg, src2_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, dst_reg_h));
+ EMIT(PPC_RAW_SUBFC(dst_reg, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_SUBFE(dst_reg_h, dst_reg_h, dst_reg_h));
+ } else {
EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
break;
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (!imm)
@@ -727,15 +757,30 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* MOV
*/
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
- if (dst_reg == src_reg)
- break;
- EMIT(PPC_RAW_MR(dst_reg, src_reg));
- EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ if (off == 8) {
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 16) {
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
+ } else if (off == 32 && dst_reg == src_reg) {
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (off == 32) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, src_reg, 31));
+ } else if (dst_reg != src_reg) {
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ }
break;
case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
/* special mov32 for zext */
if (imm == 1)
EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else if (off == 8)
+ EMIT(PPC_RAW_EXTSB(dst_reg, src_reg));
+ else if (off == 16)
+ EMIT(PPC_RAW_EXTSH(dst_reg, src_reg));
else if (dst_reg != src_reg)
EMIT(PPC_RAW_MR(dst_reg, src_reg));
break;
@@ -751,6 +796,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_FROM_BE/LE
*/
case BPF_ALU | BPF_END | BPF_FROM_LE:
+ case BPF_ALU64 | BPF_END | BPF_FROM_LE:
switch (imm) {
case 16:
/* Copy 16 bits to upper part */
@@ -785,6 +831,8 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
break;
}
+ if (BPF_CLASS(code) == BPF_ALU64 && imm != 64)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
switch (imm) {
@@ -852,6 +900,15 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* Get offset into TMP_REG */
EMIT(PPC_RAW_LI(tmp_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u32() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into r0 */
EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
@@ -905,6 +962,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
@@ -918,11 +978,17 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_MEMSX | BPF_W:
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
/*
@@ -931,7 +997,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* load only if addr is kernel address (see is_kernel_addr()), otherwise
* set dst_reg=0 and move on.
*/
- if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ if (BPF_MODE(code) == BPF_PROBE_MEM || BPF_MODE(code) == BPF_PROBE_MEMSX) {
PPC_LI32(_R0, TASK_SIZE - off);
EMIT(PPC_RAW_CMPLW(src_reg, _R0));
PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
@@ -953,30 +1019,48 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
* as there are two load instructions for dst_reg_h & dst_reg
* respectively.
*/
- if (size == BPF_DW)
+ if (size == BPF_DW ||
+ (size == BPF_B && BPF_MODE(code) == BPF_PROBE_MEMSX))
PPC_JMP((ctx->idx + 3) * 4);
else
PPC_JMP((ctx->idx + 2) * 4);
}
- switch (size) {
- case BPF_B:
- EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
- break;
- case BPF_H:
- EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
- break;
- case BPF_W:
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
- break;
- case BPF_DW:
- EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
- break;
- }
+ if (BPF_MODE(code) == BPF_MEMSX || BPF_MODE(code) == BPF_PROBE_MEMSX) {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ EMIT(PPC_RAW_EXTSB(dst_reg, dst_reg));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHA(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ }
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg, 31));
- if (size != BPF_DW && !fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else {
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+ }
+ if (size != BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
if (BPF_MODE(code) == BPF_PROBE_MEM) {
int insn_idx = ctx->idx - 1;
@@ -1068,6 +1152,9 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct code
case BPF_JMP | BPF_JA:
PPC_JMP(addrs[i + 1 + off]);
break;
+ case BPF_JMP32 | BPF_JA:
+ PPC_JMP(addrs[i + 1 + imm]);
+ break;
case BPF_JMP | BPF_JGT | BPF_K:
case BPF_JMP | BPF_JGT | BPF_X:
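The bpf_jit_comp32.c hunks above select PPC_RAW_DIVW over PPC_RAW_DIVWU when the instruction's off field flags BPF v4 signed division. A minimal userspace sketch of why the two opcodes cannot be interchanged (an illustration, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		int32_t  s = -6;           /* dividend as the BPF program sees it */
		uint32_t u = (uint32_t)s;  /* same bit pattern, unsigned view */

		/* divw-style (signed) and divwu-style (unsigned) results diverge
		 * as soon as the dividend is negative. */
		printf("signed:   -6 / 4 = %d\n", s / 4);              /* -1 */
		printf("unsigned: -6 / 4 = %u\n", (unsigned)(u / 4U)); /* 1073741822 */
		return 0;
	}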
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 79f23974a..58522de61 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -202,7 +202,8 @@ void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
EMIT(PPC_RAW_BLR());
}
-static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
+static int
+bpf_jit_emit_func_call_hlp(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
long reladdr;
@@ -211,19 +212,20 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
return -EINVAL;
if (IS_ENABLED(CONFIG_PPC_KERNEL_PCREL)) {
- reladdr = func_addr - CTX_NIA(ctx);
+ reladdr = func_addr - local_paca->kernelbase;
if (reladdr >= (long)SZ_8G || reladdr < -(long)SZ_8G) {
- pr_err("eBPF: address of %ps out of range of pcrel address.\n",
- (void *)func);
+ pr_err("eBPF: address of %ps out of range of 34-bit relative address.\n",
+ (void *)func);
return -ERANGE;
}
- /* pla r12,addr */
- EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(1) | IMM_H18(reladdr));
- EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | IMM_L(reladdr));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTR());
-
+ EMIT(PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernelbase)));
+ /* Align for subsequent prefix instruction */
+ if (!IS_ALIGNED((unsigned long)fimage + CTX_NIA(ctx), 8))
+ EMIT(PPC_RAW_NOP());
+ /* paddi r12,r12,addr */
+ EMIT(PPC_PREFIX_MLS | __PPC_PRFX_R(0) | IMM_H18(reladdr));
+ EMIT(PPC_INST_PADDI | ___PPC_RT(_R12) | ___PPC_RA(_R12) | IMM_L(reladdr));
} else {
reladdr = func_addr - kernel_toc_addr();
if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
@@ -233,9 +235,9 @@ static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u
EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
- EMIT(PPC_RAW_MTCTR(_R12));
- EMIT(PPC_RAW_BCTRL());
}
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
return 0;
}
@@ -285,7 +287,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
int b2p_index = bpf_to_ppc(BPF_REG_3);
int bpf_tailcall_prologue_size = 8;
- if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ if (!IS_ENABLED(CONFIG_PPC_KERNEL_PCREL) && IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
@@ -803,6 +805,15 @@ emit_clear:
/* Get offset into TMP_REG_1 */
EMIT(PPC_RAW_LI(tmp1_reg, off));
+ /*
+ * Enforce full ordering for operations with BPF_FETCH by emitting a 'sync'
+ * before and after the operation.
+ *
+ * This is a requirement in the Linux Kernel Memory Model.
+ * See __cmpxchg_u64() in asm/cmpxchg.h as an example.
+ */
+ if ((imm & BPF_FETCH) && IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
if (size == BPF_DW)
@@ -865,6 +876,9 @@ emit_clear:
PPC_BCC_SHORT(COND_NE, tmp_idx);
if (imm & BPF_FETCH) {
+ /* Emit 'sync' to enforce full ordering */
+ if (IS_ENABLED(CONFIG_SMP))
+ EMIT(PPC_RAW_SYNC());
EMIT(PPC_RAW_MR(ret_reg, _R0));
/*
* Skip unnecessary zero-extension for 32-bit cmpxchg.
@@ -993,7 +1007,7 @@ emit_clear:
return ret;
if (func_addr_fixed)
- ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, fimage, ctx, func_addr);
else
ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
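Both JIT hunks above bracket BPF_FETCH atomics with a sync before and after the load-reserve/store-conditional loop, so the result is a fully ordered read-modify-write as the Linux Kernel Memory Model requires. A rough userspace analogue of the ordering strength being provided (an assumption for illustration, not the kernel primitive):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Fully ordered fetch-and-add: roughly how BPF_ADD | BPF_FETCH must
	 * appear to other CPUs once the JIT brackets the ll/sc loop with sync. */
	static int fetch_add_fully_ordered(_Atomic int *p, int v)
	{
		return atomic_fetch_add_explicit(p, v, memory_order_seq_cst);
	}

	int main(void)
	{
		_Atomic int counter = 40;

		printf("old value: %d\n", fetch_add_fully_ordered(&counter, 2)); /* 40 */
		printf("new value: %d\n", atomic_load(&counter));                /* 42 */
		return 0;
	}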
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 40aa58206..e52b848b6 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -398,6 +398,7 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
hard_irq_disable();
mpic_teardown_this_cpu(secondary);
+#ifdef CONFIG_CRASH_DUMP
if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
/*
* We enter the crash kernel on whatever cpu crashed,
@@ -406,9 +407,11 @@ static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
*/
disable_threadbit = 1;
disable_cpu = cpu_first_thread_sibling(cpu);
- } else if (sibling != crashing_cpu &&
- cpu_thread_in_core(cpu) == 0 &&
- cpu_thread_in_core(sibling) != 0) {
+ } else if (sibling == crashing_cpu) {
+ return;
+ }
+#endif
+ if (cpu_thread_in_core(cpu) == 0 && cpu_thread_in_core(sibling) != 0) {
disable_threadbit = 2;
disable_cpu = sibling;
}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 4e9916bb0..c1d8bee8f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -1886,10 +1886,10 @@ out:
* h_get_mpp
* H_GET_MPP hcall returns info in 7 parms
*/
-int h_get_mpp(struct hvcall_mpp_data *mpp_data)
+long h_get_mpp(struct hvcall_mpp_data *mpp_data)
{
- int rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_MPP, retbuf);
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index f73c4d1c2..0ed56e562 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -113,8 +113,8 @@ struct hvcall_ppp_data {
*/
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
- unsigned long rc;
- unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
rc = plpar_hcall9(H_GET_PPP, retbuf);
@@ -193,7 +193,7 @@ static void parse_ppp_data(struct seq_file *m)
struct hvcall_ppp_data ppp_data;
struct device_node *root;
const __be32 *perf_level;
- int rc;
+ long rc;
rc = h_get_ppp(&ppp_data);
if (rc)
@@ -361,8 +361,8 @@ static int read_dt_lpar_name(struct seq_file *m)
static void read_lpar_name(struct seq_file *m)
{
- if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
- pr_err_once("Error can't get the LPAR name");
+ if (read_rtas_lpar_name(m))
+ read_dt_lpar_name(m);
}
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 8e6c84df4..e205135ae 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -564,10 +564,12 @@ static const struct fsl_msi_feature ipic_msi_feature = {
.msiir_offset = 0x38,
};
+#ifdef CONFIG_EPAPR_PARAVIRT
static const struct fsl_msi_feature vmpic_msi_feature = {
.fsl_pic_ip = FSL_PIC_IP_VMPIC,
.msiir_offset = 0,
};
+#endif
static const struct of_device_id fsl_of_msi_ids[] = {
{
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index be09c8836..8025ca7ef 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -103,7 +103,7 @@ config RISCV
select HAS_IOPORT if MMU
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
- select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL
+ select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index 45b58b6f3..ccd0ce55a 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -238,7 +238,6 @@
axp15060: pmic@36 {
compatible = "x-powers,axp15060";
reg = <0x36>;
- interrupts = <0>;
interrupt-controller;
#interrupt-cells = <1>;
@@ -279,24 +278,6 @@
status = "okay";
};
-&i2srx {
- pinctrl-names = "default";
- pinctrl-0 = <&i2srx_pins>;
- status = "okay";
-};
-
-&i2stx0 {
- pinctrl-names = "default";
- pinctrl-0 = <&mclk_ext_pins>;
- status = "okay";
-};
-
-&i2stx1 {
- pinctrl-names = "default";
- pinctrl-0 = <&i2stx1_pins>;
- status = "okay";
-};
-
&mmc0 {
max-frequency = <100000000>;
assigned-clocks = <&syscrg JH7110_SYSCLK_SDIO0_SDCARD>;
@@ -447,46 +428,6 @@
};
};
- i2srx_pins: i2srx-0 {
- clk-sd-pins {
- pinmux = <GPIOMUX(38, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_BCLK)>,
- <GPIOMUX(63, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_LRCK)>,
- <GPIOMUX(38, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2STX1_BCLK)>,
- <GPIOMUX(63, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2STX1_LRCK)>,
- <GPIOMUX(61, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_I2SRX_SDIN0)>;
- input-enable;
- };
- };
-
- i2stx1_pins: i2stx1-0 {
- sd-pins {
- pinmux = <GPIOMUX(44, GPOUT_SYS_I2STX1_SDO0,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- input-disable;
- };
- };
-
- mclk_ext_pins: mclk-ext-0 {
- mclk-ext-pins {
- pinmux = <GPIOMUX(4, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_MCLK_EXT)>;
- input-enable;
- };
- };
-
mmc0_pins: mmc0-0 {
rst-pins {
pinmux = <GPIOMUX(62, GPOUT_SYS_SDIO0_RST,
@@ -622,40 +563,6 @@
};
};
- tdm_pins: tdm-0 {
- tx-pins {
- pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-pull-up;
- drive-strength = <2>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- rx-pins {
- pinmux = <GPIOMUX(61, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_RXD)>;
- input-enable;
- };
-
- sync-pins {
- pinmux = <GPIOMUX(63, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_SYNC)>;
- input-enable;
- };
-
- pcmclk-pins {
- pinmux = <GPIOMUX(38, GPOUT_HIGH,
- GPOEN_DISABLE,
- GPI_SYS_TDM_CLK)>;
- input-enable;
- };
- };
-
uart0_pins: uart0-0 {
tx-pins {
pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
@@ -681,12 +588,6 @@
};
};
-&tdm {
- pinctrl-names = "default";
- pinctrl-0 = <&tdm_pins>;
- status = "okay";
-};
-
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart0_pins>;
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 2468c5593..9d1b07932 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -281,7 +281,7 @@
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_SSCOUNTOVF 0xda0
+#define CSR_SCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6e68f8dff..0fab508a6 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -370,6 +370,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
+unsigned long riscv_get_mvendorid(void);
+unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index d11d6320f..c1f365523 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -139,6 +139,34 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
return -1;
}
+unsigned long __init riscv_get_marchid(void)
+{
+ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+ ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+ ci->marchid = csr_read(CSR_MARCHID);
+#else
+ ci->marchid = 0;
+#endif
+ return ci->marchid;
+}
+
+unsigned long __init riscv_get_mvendorid(void)
+{
+ struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+ ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
+ ci->mvendorid = csr_read(CSR_MVENDORID);
+#else
+ ci->mvendorid = 0;
+#endif
+ return ci->mvendorid;
+}
+
DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
@@ -170,12 +198,16 @@ static int riscv_cpuinfo_starting(unsigned int cpu)
struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
#if IS_ENABLED(CONFIG_RISCV_SBI)
- ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
- ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
+ if (!ci->mvendorid)
+ ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
+ if (!ci->marchid)
+ ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
- ci->mvendorid = csr_read(CSR_MVENDORID);
- ci->marchid = csr_read(CSR_MARCHID);
+ if (!ci->mvendorid)
+ ci->mvendorid = csr_read(CSR_MVENDORID);
+ if (!ci->marchid)
+ ci->marchid = csr_read(CSR_MARCHID);
ci->mimpid = csr_read(CSR_MIMPID);
#else
ci->mvendorid = 0;
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 1cc7df740..e6fbaaf54 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
/* Make sure tidle is updated */
smp_mb();
bdata->task_ptr = tidle;
- bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+ bdata->stack_ptr = task_pt_regs(tidle);
/* Make sure boot data is updated */
smp_mb();
hsm_data = __pa(bdata);
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index 613872b0a..24869eb88 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
/* Make sure tidle is updated */
smp_mb();
- WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
- task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 3ed2359ea..5ef48cb20 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -490,6 +490,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
struct acpi_table_header *rhct;
acpi_status status;
unsigned int cpu;
+ u64 boot_vendorid;
+ u64 boot_archid;
if (!acpi_disabled) {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
@@ -497,6 +499,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
return;
}
+ boot_vendorid = riscv_get_mvendorid();
+ boot_archid = riscv_get_marchid();
+
for_each_possible_cpu(cpu) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
unsigned long this_hwcap = 0;
@@ -544,8 +549,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
* CPU cores with the ratified spec will contain non-zero
* marchid.
*/
- if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
- riscv_cached_marchid(cpu) == 0x0) {
+ if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
}
@@ -599,7 +603,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
if (ext->subset_ext_size) {
for (int j = 0; j < ext->subset_ext_size; j++) {
- if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
+ if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
set_bit(ext->subset_ext_ids[j], isainfo->isa);
}
}
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
index 7142ec42e..a69dfa610 100644
--- a/arch/riscv/kernel/probes/ftrace.c
+++ b/arch/riscv/kernel/probes/ftrace.c
@@ -11,6 +11,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index d41090fc3..4b3c50da4 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -26,7 +26,7 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
-#include <asm/cpufeature.h>
+#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
@@ -234,9 +234,10 @@ asmlinkage __visible void smp_callin(void)
riscv_user_isa_enable();
/*
- * Remote TLB flushes are ignored while the CPU is offline, so emit
- * a local TLB flush right now just in case.
+ * Remote cache and TLB flushes are ignored while the CPU is offline,
+ * so flush them both right now just in case.
*/
+ local_flush_icache_all();
local_flush_tlb_all();
complete(&cpu_running);
/*
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 64a9c093a..528ec7cc9 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -18,6 +18,16 @@
extern asmlinkage void ret_from_exception(void);
+static inline int fp_is_valid(unsigned long fp, unsigned long sp)
+{
+ unsigned long low, high;
+
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+
+ return !(fp < low || fp > high || fp & 0x07);
+}
+
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
@@ -41,21 +51,19 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
}
for (;;) {
- unsigned long low, high;
struct stackframe *frame;
if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break;
- /* Validate frame pointer */
- low = sp + sizeof(struct stackframe);
- high = ALIGN(sp, THREAD_SIZE);
- if (unlikely(fp < low || fp > high || fp & 0x7))
+ if (unlikely(!fp_is_valid(fp, sp)))
break;
+
/* Unwind stack frame */
frame = (struct stackframe *)fp - 1;
sp = fp;
- if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
+ if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
+ /* We hit a function where ra is not saved on the stack */
fp = frame->ra;
pc = regs->ra;
} else {
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index 0eb689351..5cd407c6a 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -237,10 +237,11 @@ static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
- u32 hart, group = 0;
+ u32 hart = 0, group = 0;
- hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
- GENMASK_ULL(aia->nr_hart_bits - 1, 0);
+ if (aia->nr_hart_bits)
+ hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
+ GENMASK_ULL(aia->nr_hart_bits - 1, 0);
if (aia->nr_group_bits)
group = (addr >> aia->nr_group_shift) &
GENMASK_ULL(aia->nr_group_bits - 1, 0);
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 994adc26d..e5706f5f2 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -718,9 +718,9 @@ static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
switch (reg_subtype) {
case KVM_REG_RISCV_ISA_SINGLE:
return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
- case KVM_REG_RISCV_SBI_MULTI_EN:
+ case KVM_REG_RISCV_ISA_MULTI_EN:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
- case KVM_REG_RISCV_SBI_MULTI_DIS:
+ case KVM_REG_RISCV_ISA_MULTI_DIS:
return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
default:
return -ENOENT;
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 968761843..46b4ad418 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -235,18 +235,19 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
- * memblock allocator is not aware of the fact that last 4K bytes of
- * the addressable memory can not be mapped because of IS_ERR_VALUE
- * macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory. For 64-bit
- * kernel, this problem can't happen here as the end of the virtual
- * address space is occupied by the kernel mapping then this check must
- * be done as soon as the kernel mapping base address is determined.
+ * Reserve physical address space that would be mapped to virtual
+ * addresses greater than (void *)(-PAGE_SIZE) because:
+ * - This memory would overlap with ERR_PTR
+ * - This memory belongs to high memory, which is not supported
+ *
+ * This is not applicable to 64-bit kernel, because virtual addresses
+ * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
+ * occupied by kernel mapping. Also it is unrealistic for high memory
+ * to exist on 64-bit platforms.
*/
if (!IS_ENABLED(CONFIG_64BIT)) {
- max_mapped_addr = __pa(~(ulong)0);
- if (max_mapped_addr == (phys_ram_end - 1))
- memblock_set_current_limit(max_mapped_addr - 4096);
+ max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
+ memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
min_low_pfn = PFN_UP(phys_ram_base);
@@ -668,6 +669,9 @@ void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
phys_addr_t size)
{
+ if (debug_pagealloc_enabled())
+ return PAGE_SIZE;
+
if (pgtable_l5_enabled &&
!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE;
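The setup_bootmem() comment above refers to the ERR_PTR convention: pointer values in the top page of the address space are indistinguishable from encoded error codes, so the matching physical range must never be linearly mapped on 32-bit. A small self-contained sketch of the collision, simplified from the kernel's IS_ERR_VALUE() (an assumption for illustration):

	#include <stdio.h>

	#define MAX_ERRNO	4095UL
	#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	int main(void)
	{
		/* A hypothetical linear-map address inside the last page of a
		 * 32-bit virtual address space: it falls in the range that
		 * IS_ERR_VALUE() treats as an error code. */
		unsigned long va_in_top_page = (unsigned long)-64;

		printf("address collides with error pointers: %s\n",
		       IS_ERR_VALUE(va_in_top_page) ? "yes" : "no");
		return 0;
	}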
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 410056a50..271d01a5b 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page)
}
#ifdef CONFIG_DEBUG_PAGEALLOC
+static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
+{
+ int enable = *(int *)data;
+
+ unsigned long val = pte_val(ptep_get(pte));
+
+ if (enable)
+ val |= _PAGE_PRESENT;
+ else
+ val &= ~_PAGE_PRESENT;
+
+ set_pte(pte, __pte(val));
+
+ return 0;
+}
+
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!debug_pagealloc_enabled())
return;
- if (enable)
- __set_memory((unsigned long)page_address(page), numpages,
- __pgprot(_PAGE_PRESENT), __pgprot(0));
- else
- __set_memory((unsigned long)page_address(page), numpages,
- __pgprot(0), __pgprot(_PAGE_PRESENT));
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long size = PAGE_SIZE * numpages;
+
+ apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
+
+ flush_tlb_kernel_range(start, start + size);
}
#endif
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index ec9d69283..fb5d19500 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -498,33 +498,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
break;
/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
case BPF_ADD | BPF_FETCH:
- emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
- rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
+ rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_AND | BPF_FETCH:
- emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
- rv_amoand_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
+ rv_amoand_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_OR | BPF_FETCH:
- emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
- rv_amoor_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
+ rv_amoor_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
case BPF_XOR | BPF_FETCH:
- emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
- rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
+ rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
case BPF_XCHG:
- emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
- rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
+ emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
+ rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
if (!is64)
emit_zextw(rs, rs, ctx);
break;
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 6cf893142..9e2c9027e 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -32,7 +32,6 @@ unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
u64 __bootdata_preserved(stfle_fac_list[16]);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);
struct machine_info machine;
diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h
index b378e2b57..c786538e3 100644
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -166,28 +166,86 @@
typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
-/**
- * cpacf_query() - check if a specific CPACF function is available
- * @opcode: the opcode of the crypto instruction
- * @func: the function code to test for
- *
- * Executes the query function for the given crypto instruction @opcode
- * and checks if @func is available
- *
- * Returns 1 if @func is available for @opcode, 0 otherwise
+/*
+ * Prototype for a nonexistent function, used to produce a link
+ * error if __cpacf_query() or __cpacf_check_opcode() is used
+ * with an invalid compile-time const opcode.
*/
-static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+void __cpacf_bad_opcode(void);
+
+static __always_inline void __cpacf_query_rre(u32 opc, u8 r1, u8 r2,
+ cpacf_mask_t *mask)
{
asm volatile(
- " lghi 0,0\n" /* query function */
- " lgr 1,%[mask]\n"
- " spm 0\n" /* pckmo doesn't change the cc */
- /* Parameter regs are ignored, but must be nonzero and unique */
- "0: .insn rrf,%[opc] << 16,2,4,6,0\n"
- " brc 1,0b\n" /* handle partial completion */
- : "=m" (*mask)
- : [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
- : "cc", "0", "1");
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rre,%[opc] << 16,%[r1],%[r2]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc),
+ [r1] "i" (r1), [r2] "i" (r2)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query_rrf(u32 opc,
+ u8 r1, u8 r2, u8 r3, u8 m4,
+ cpacf_mask_t *mask)
+{
+ asm volatile(
+ " la %%r1,%[mask]\n"
+ " xgr %%r0,%%r0\n"
+ " .insn rrf,%[opc] << 16,%[r1],%[r2],%[r3],%[m4]\n"
+ : [mask] "=R" (*mask)
+ : [opc] "i" (opc), [r1] "i" (r1), [r2] "i" (r2),
+ [r3] "i" (r3), [m4] "i" (m4)
+ : "cc", "r0", "r1");
+}
+
+static __always_inline void __cpacf_query(unsigned int opcode,
+ cpacf_mask_t *mask)
+{
+ switch (opcode) {
+ case CPACF_KDSA:
+ __cpacf_query_rre(CPACF_KDSA, 0, 2, mask);
+ break;
+ case CPACF_KIMD:
+ __cpacf_query_rre(CPACF_KIMD, 0, 2, mask);
+ break;
+ case CPACF_KLMD:
+ __cpacf_query_rre(CPACF_KLMD, 0, 2, mask);
+ break;
+ case CPACF_KM:
+ __cpacf_query_rre(CPACF_KM, 2, 4, mask);
+ break;
+ case CPACF_KMA:
+ __cpacf_query_rrf(CPACF_KMA, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMAC:
+ __cpacf_query_rre(CPACF_KMAC, 0, 2, mask);
+ break;
+ case CPACF_KMC:
+ __cpacf_query_rre(CPACF_KMC, 2, 4, mask);
+ break;
+ case CPACF_KMCTR:
+ __cpacf_query_rrf(CPACF_KMCTR, 2, 4, 6, 0, mask);
+ break;
+ case CPACF_KMF:
+ __cpacf_query_rre(CPACF_KMF, 2, 4, mask);
+ break;
+ case CPACF_KMO:
+ __cpacf_query_rre(CPACF_KMO, 2, 4, mask);
+ break;
+ case CPACF_PCC:
+ __cpacf_query_rre(CPACF_PCC, 0, 0, mask);
+ break;
+ case CPACF_PCKMO:
+ __cpacf_query_rre(CPACF_PCKMO, 0, 0, mask);
+ break;
+ case CPACF_PRNO:
+ __cpacf_query_rre(CPACF_PRNO, 2, 4, mask);
+ break;
+ default:
+ __cpacf_bad_opcode();
+ }
}
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
@@ -211,10 +269,21 @@ static __always_inline int __cpacf_check_opcode(unsigned int opcode)
case CPACF_KMA:
return test_facility(146); /* check for MSA8 */
default:
- BUG();
+ __cpacf_bad_opcode();
+ return 0;
}
}
+/**
+ * cpacf_query() - check if a specific CPACF function is available
+ * @opcode: the opcode of the crypto instruction
+ * @func: the function code to test for
+ *
+ * Executes the query function for the given crypto instruction @opcode
+ * and checks if @func is available
+ *
+ * Returns 1 if @func is available for @opcode, 0 otherwise
+ */
static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
{
if (__cpacf_check_opcode(opcode)) {
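The __cpacf_bad_opcode() declaration above relies on a common kernel idiom: declare a function that is never defined, so any call that survives constant folding fails at link time instead of at run time. A minimal sketch of the idiom with hypothetical names (not the CPACF code); build with optimization enabled, mirroring the kernel's reliance on __always_inline:

	#include <stdio.h>

	/* Intentionally never defined: any surviving reference breaks the link. */
	void bad_width(void);

	static inline unsigned long load_width(const void *p, int width)
	{
		switch (width) {
		case 1: return *(const unsigned char *)p;
		case 4: return *(const unsigned int *)p;
		default:
			/* For a valid compile-time constant width this branch is
			 * dead code and the call disappears; an invalid width
			 * leaves an undefined reference and the link fails. */
			bad_width();
			return 0;
		}
	}

	int main(void)
	{
		unsigned int v = 0x11223344;

		printf("%#lx\n", load_width(&v, 4));	/* links and runs */
		return 0;
	}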
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 621f23d5a..77e479d44 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -8,12 +8,8 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CC_IS_CLANG
-/* https://llvm.org/pr41424 */
-#define ftrace_return_address(n) 0UL
-#else
-#define ftrace_return_address(n) __builtin_return_address(n)
-#endif
+unsigned long return_address(unsigned int n);
+#define ftrace_return_address(n) return_address(n)
void ftrace_caller(void);
diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 5cc46e0dd..9725586f4 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -146,7 +146,7 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
unsigned long gaddr, unsigned long vmaddr);
-int gmap_mark_unmergeable(void);
+int s390_disable_cow_sharing(void);
void s390_unlist_old_asce(struct gmap *gmap);
int s390_replace_asce(struct gmap *gmap);
void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index bb1b4bef1..4c2dc7abc 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -32,6 +32,11 @@ typedef struct {
unsigned int uses_skeys:1;
/* The mmu context uses CMM. */
unsigned int uses_cmm:1;
+ /*
+ * The mmu context allows COW-sharing of memory pages (KSM, zeropage).
+ * Note that COW-sharing during fork() is currently always allowed.
+ */
+ unsigned int allow_cow_sharing:1;
/* The gmaps associated with this context are allowed to use huge pages. */
unsigned int allow_gmap_hpage_1m:1;
} mm_context_t;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 929af18b0..a7789a9f6 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -35,6 +35,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.uses_skeys = 0;
mm->context.uses_cmm = 0;
+ mm->context.allow_cow_sharing = 1;
mm->context.allow_gmap_hpage_1m = 0;
#endif
switch (mm->context.asce_limit) {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 60950e7a2..1077c1dae 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -566,10 +566,20 @@ static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
}
/*
- * In the case that a guest uses storage keys
- * faults should no longer be backed by zero pages
+ * As soon as the guest uses storage keys or enables PV, we deduplicate all
+ * mapped shared zeropages and prevent new shared zeropages from getting
+ * mapped.
*/
-#define mm_forbids_zeropage mm_has_pgste
+#define mm_forbids_zeropage mm_forbids_zeropage
+static inline int mm_forbids_zeropage(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (!mm->context.allow_cow_sharing)
+ return 1;
+#endif
+ return 0;
+}
+
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
@@ -1768,8 +1778,10 @@ static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
+ pmd_t pmd;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+ pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index db9982f0e..bbbdc5abe 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
extern void execve_tail(void);
+unsigned long vdso_text_size(void);
unsigned long vdso_size(void);
/*
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 433fde85b..85b6738b8 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -2,6 +2,7 @@
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H
+#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
@@ -12,6 +13,17 @@ struct stack_frame_user {
unsigned long empty2[4];
};
+struct stack_frame_vdso_wrapper {
+ struct stack_frame_user sf;
+ unsigned long return_address;
+};
+
+struct perf_callchain_entry_ctx;
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf);
+
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index fa029d0dc..db2d9ba5a 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -11,6 +11,8 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_stacktrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_unwind_bc.o = $(CC_FLAGS_FTRACE)
endif
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index fa5f6885c..2f65bca2f 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -66,6 +66,11 @@ int main(void)
OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
BLANK();
+ OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
+ DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
+ OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
+ DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
+ BLANK();
/* idle data offsets */
OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter);
OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index c46381ea0..7f6f8c438 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -296,6 +296,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe *p;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 1486350a4..469e8d3fb 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -962,8 +962,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj,
scpdata_len += padding;
}
- reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len;
- reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len;
+ reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len;
+ reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len;
reipl_block_nvme->nvme.scp_data_len = scpdata_len;
return count;
@@ -1858,9 +1858,9 @@ static int __init dump_nvme_init(void)
}
dump_block_nvme->hdr.len = IPL_BP_NVME_LEN;
dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION;
- dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN;
- dump_block_nvme->fcp.pbt = IPL_PBT_NVME;
- dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP;
+ dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN;
+ dump_block_nvme->nvme.pbt = IPL_PBT_NVME;
+ dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP;
dump_capabilities |= DUMP_TYPE_NVME;
return 0;
}
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index dfa77da2f..5fff629b1 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- struct stack_frame_user __user *sf;
- unsigned long ip, sp;
- bool first = true;
-
- if (is_compat_task())
- return;
- perf_callchain_store(entry, instruction_pointer(regs));
- sf = (void __user *)user_stack_pointer(regs);
- pagefault_disable();
- while (entry->nr < entry->max_stack) {
- if (__get_user(sp, &sf->back_chain))
- break;
- if (__get_user(ip, &sf->gprs[8]))
- break;
- if (ip & 0x1) {
- /*
- * If the instruction address is invalid, and this
- * is the first stack frame, assume r14 has not
- * been written to the stack yet. Otherwise exit.
- */
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
- break;
- }
- perf_callchain_store(entry, ip);
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
- first = false;
- }
- pagefault_enable();
+ arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
}
/* Perf definitions for PMU event attributes in sysfs */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 24ed33f04..7ecd27c62 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+u64 alt_stfle_fac_list[16];
struct oldmem_data __bootdata_preserved(oldmem_data);
unsigned long VMALLOC_START;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 94f440e38..640363b2a 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2006
*/
+#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
@@ -62,42 +63,121 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
return 0;
}
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
- const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry, bool perf,
+ unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+ if (perf) {
+ if (perf_callchain_store(entry, ip))
+ return false;
+ return true;
+ }
+#endif
+ return consume_entry(cookie, ip);
+}
+
+static inline bool ip_invalid(unsigned long ip)
+{
+ /*
+ * Perform some basic checks to tell whether an instruction address
+ * taken from an unreliable source is invalid.
+ */
+ if (ip & 1)
+ return true;
+ if (ip < mmap_min_addr)
+ return true;
+ if (ip >= current->mm->context.asce_limit)
+ return true;
+ return false;
+}
+
+static inline bool ip_within_vdso(unsigned long ip)
{
+ return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+ struct perf_callchain_entry_ctx *entry,
+ const struct pt_regs *regs, bool perf)
+{
+ struct stack_frame_vdso_wrapper __user *sf_vdso;
struct stack_frame_user __user *sf;
unsigned long ip, sp;
bool first = true;
if (is_compat_task())
return;
- if (!consume_entry(cookie, instruction_pointer(regs)))
+ if (!current->mm)
+ return;
+ ip = instruction_pointer(regs);
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
return;
sf = (void __user *)user_stack_pointer(regs);
pagefault_disable();
while (1) {
if (__get_user(sp, &sf->back_chain))
break;
- if (__get_user(ip, &sf->gprs[8]))
+ /*
+ * VDSO entry code has a non-standard stack frame layout.
+ * See VDSO user wrapper code for details.
+ */
+ if (!sp && ip_within_vdso(ip)) {
+ sf_vdso = (void __user *)sf;
+ if (__get_user(ip, &sf_vdso->return_address))
+ break;
+ sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+ sf = (void __user *)sp;
+ if (__get_user(sp, &sf->back_chain))
+ break;
+ } else {
+ sf = (void __user *)sp;
+ if (__get_user(ip, &sf->gprs[8]))
+ break;
+ }
+ /* Sanity check: ABI requires SP to be 8 byte aligned. */
+ if (sp & 0x7)
break;
- if (ip & 0x1) {
+ if (ip_invalid(ip)) {
/*
* If the instruction address is invalid, and this
* is the first stack frame, assume r14 has not
* been written to the stack yet. Otherwise exit.
*/
- if (first && !(regs->gprs[14] & 0x1))
- ip = regs->gprs[14];
- else
+ if (!first)
+ break;
+ ip = regs->gprs[14];
+ if (ip_invalid(ip))
break;
}
- if (!consume_entry(cookie, ip))
- break;
- /* Sanity check: ABI requires SP to be aligned 8 bytes. */
- if (!sp || sp & 0x7)
- break;
- sf = (void __user *)sp;
+ if (!store_ip(consume_entry, cookie, entry, perf, ip))
+ return;
first = false;
}
pagefault_enable();
}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
+unsigned long return_address(unsigned int n)
+{
+ struct unwind_state state;
+ unsigned long addr;
+
+ /* Increment to skip current stack entry */
+ n++;
+
+ unwind_for_each_frame(&state, NULL, NULL, 0) {
+ addr = unwind_get_return_address(&state);
+ if (!addr)
+ break;
+ if (!n--)
+ return addr;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a45b3a4c9..2f967ac2b 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
return addr;
}
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
{
- unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+ unsigned long size;
if (is_compat_task())
- size += vdso32_end - vdso32_start;
+ size = vdso32_end - vdso32_start;
else
- size += vdso64_end - vdso64_start;
+ size = vdso64_end - vdso64_start;
return PAGE_ALIGN(size);
}
+unsigned long vdso_size(void)
+{
+ return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
unsigned long addr = VDSO_BASE;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index b12a274cb..4800d80de 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -19,8 +19,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
--hash-style=both --build-id=sha1 -melf_s390 -T
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index ef9832726..2f2e4e997 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,9 +24,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
KBUILD_AFLAGS_64 += -m64
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
+KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
ldflags-y := -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 85247ef5a..e26e68675 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -6,8 +6,6 @@
#include <asm/dwarf.h>
#include <asm/ptrace.h>
-#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
-
/*
* Older glibc version called vdso without allocating a stackframe. This wrapper
* is just used to allocate a stackframe. See
@@ -20,16 +18,17 @@
__ALIGN
__kernel_\func:
CFI_STARTPROC
- aghi %r15,-WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
- CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
- stg %r14,STACK_FRAME_OVERHEAD(%r15)
- CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+ aghi %r15,-STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
+ CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
+ stg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
+ CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+ xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
brasl %r14,__s390_vdso_\func
- lg %r14,STACK_FRAME_OVERHEAD(%r15)
+ lg %r14,__SFVDSO_RETURN_ADDRESS(%r15)
CFI_RESTORE 14
- aghi %r15,WRAPPER_FRAME_SIZE
- CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+ aghi %r15,STACK_FRAME_VDSO_OVERHEAD
+ CFI_DEF_CFA_OFFSET STACK_FRAME_USER_OVERHEAD
CFI_RESTORE 15
br %r14
CFI_ENDPROC
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 48de296e8..fb9b32f93 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -209,13 +209,13 @@ SECTIONS
.dynstr ALIGN(8) : {
*(.dynstr)
}
-#endif
.hash ALIGN(8) : {
*(.hash)
}
.gnu.hash ALIGN(8) : {
*(.gnu.hash)
}
+#endif
. = ALIGN(PAGE_SIZE);
__init_end = .; /* freed after init ends here */
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 7721eb522..82e9631cd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2631,9 +2631,7 @@ static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
if (r)
break;
- mmap_write_lock(current->mm);
- r = gmap_mark_unmergeable();
- mmap_write_unlock(current->mm);
+ r = s390_disable_cow_sharing();
if (r)
break;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 12d22a7fa..474a25ca5 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2550,41 +2550,6 @@ static inline void thp_split_mm(struct mm_struct *mm)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * Remove all empty zero pages from the mapping for lazy refaulting
- * - This must be called after mm->context.has_pgste is set, to avoid
- * future creation of zero pages
- * - This must be called after THP was disabled.
- *
- * mm contracts with s390, that even if mm were to remove a page table,
- * racing with the loop below and so causing pte_offset_map_lock() to fail,
- * it will never insert a page table containing empty zero pages once
- * mm_forbids_zeropage(mm) i.e. mm->context.has_pgste is set.
- */
-static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
- unsigned long end, struct mm_walk *walk)
-{
- unsigned long addr;
-
- for (addr = start; addr != end; addr += PAGE_SIZE) {
- pte_t *ptep;
- spinlock_t *ptl;
-
- ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
- if (!ptep)
- break;
- if (is_zero_pfn(pte_pfn(*ptep)))
- ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
- pte_unmap_unlock(ptep, ptl);
- }
- return 0;
-}
-
-static const struct mm_walk_ops zap_zero_walk_ops = {
- .pmd_entry = __zap_zero_pages,
- .walk_lock = PGWALK_WRLOCK,
-};
-
-/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
@@ -2601,22 +2566,142 @@ int s390_enable_sie(void)
mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
mmap_write_unlock(mm);
return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
-int gmap_mark_unmergeable(void)
+static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ unsigned long *found_addr = walk->private;
+
+ /* Return 1 if the page is a zeropage. */
+ if (is_zero_pfn(pte_pfn(*pte))) {
+ /*
+ * Shared zeropage in e.g., a FS DAX mapping? We cannot do the
+ * right thing and likely don't care: FAULT_FLAG_UNSHARE
+ * currently only works in COW mappings, which is also where
+ * mm_forbids_zeropage() is checked.
+ */
+ if (!is_cow_mapping(walk->vma->vm_flags))
+ return -EFAULT;
+
+ *found_addr = addr;
+ return 1;
+ }
+ return 0;
+}
+
+static const struct mm_walk_ops find_zeropage_ops = {
+ .pte_entry = find_zeropage_pte_entry,
+ .walk_lock = PGWALK_WRLOCK,
+};
+
+/*
+ * Unshare all shared zeropages, replacing them by anonymous pages. Note that
+ * we cannot simply zap all shared zeropages, because this could later
+ * trigger unexpected userfaultfd missing events.
+ *
+ * This must be called after mm->context.allow_cow_sharing was
+ * set to 0, to avoid future mappings of shared zeropages.
+ *
+ * mm contracts with s390, that even if mm were to remove a page table,
+ * and racing with walk_page_range_vma() calling pte_offset_map_lock()
+ * would fail, it will never insert a page table containing empty zero
+ * pages once mm_forbids_zeropage(mm) i.e.
+ * mm->context.allow_cow_sharing is set to 0.
+ */
+static int __s390_unshare_zeropages(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, 0);
+ unsigned long addr;
+ vm_fault_t fault;
+ int rc;
+
+ for_each_vma(vmi, vma) {
+ /*
+ * We could only look at COW mappings, but it's more future
+ * proof to catch unexpected zeropages in other mappings and
+ * fail.
+ */
+ if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma))
+ continue;
+ addr = vma->vm_start;
+
+retry:
+ rc = walk_page_range_vma(vma, addr, vma->vm_end,
+ &find_zeropage_ops, &addr);
+ if (rc < 0)
+ return rc;
+ else if (!rc)
+ continue;
+
+ /* addr was updated by find_zeropage_pte_entry() */
+ fault = handle_mm_fault(vma, addr,
+ FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
+ NULL);
+ if (fault & VM_FAULT_OOM)
+ return -ENOMEM;
+ /*
+ * See break_ksm(): even after handle_mm_fault() returned 0, we
+ * must start the lookup from the current address, because
+ * handle_mm_fault() may back out if there's any difficulty.
+ *
+ * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but
+ * maybe they could trigger in the future on concurrent
+ * truncation. In that case, the shared zeropage would be gone
+ * and we can simply retry and make progress.
+ */
+ cond_resched();
+ goto retry;
+ }
+
+ return 0;
+}
+
+static int __s390_disable_cow_sharing(struct mm_struct *mm)
{
+ int rc;
+
+ if (!mm->context.allow_cow_sharing)
+ return 0;
+
+ mm->context.allow_cow_sharing = 0;
+
+ /* Replace all shared zeropages by anonymous pages. */
+ rc = __s390_unshare_zeropages(mm);
/*
* Make sure to disable KSM (if enabled for the whole process or
* individual VMAs). Note that nothing currently hinders user space
* from re-enabling it.
*/
- return ksm_disable(current->mm);
+ if (!rc)
+ rc = ksm_disable(mm);
+ if (rc)
+ mm->context.allow_cow_sharing = 1;
+ return rc;
+}
+
+/*
+ * Disable most COW-sharing of memory pages for the whole process:
+ * (1) Disable KSM and unmerge/unshare any KSM pages.
+ * (2) Disallow shared zeropages and unshare any zeropages that are mapped.
+ *
+ * Note that we currently don't bother with COW-shared pages that are shared
+ * with parent/child processes due to fork().
+ */
+int s390_disable_cow_sharing(void)
+{
+ int rc;
+
+ mmap_write_lock(current->mm);
+ rc = __s390_disable_cow_sharing(current->mm);
+ mmap_write_unlock(current->mm);
+ return rc;
}
-EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
+EXPORT_SYMBOL_GPL(s390_disable_cow_sharing);
/*
* Enable storage key handling from now on and initialize the storage
@@ -2685,7 +2770,7 @@ int s390_enable_skey(void)
goto out_up;
mm->context.uses_skeys = 1;
- rc = gmap_mark_unmergeable();
+ rc = __s390_disable_cow_sharing(mm);
if (rc) {
mm->context.uses_skeys = 0;
goto out_up;
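The new unshare path relies on the usual page-walk callback convention: a negative return from the pte_entry hook aborts the walk with that error, zero keeps walking, and a positive value stops the walk and is handed back to the caller, which retries from the reported address once the hit has been resolved. A standalone C sketch of that convention and retry loop follows; find_next() and the array merely stand in for the real page walk and are not kernel APIs.

```c
#include <stdio.h>

/*
 * Hypothetical "walker": returns <0 on error, 0 when nothing is left to
 * handle, and >0 when an entry was found, reporting it via *found_idx.
 */
static int find_next(const int *vals, int n, int start, int *found_idx)
{
	int i;

	for (i = start; i < n; i++) {
		if (vals[i] < 0)
			return -1;	/* abort the walk with an error */
		if (vals[i] == 0) {
			*found_idx = i;	/* like find_zeropage_pte_entry() */
			return 1;	/* positive: stop, let the caller act */
		}
	}
	return 0;
}

int main(void)
{
	int vals[] = { 3, 0, 7, 0, 5 };
	int idx = 0, rc;

	/* Retry loop in the style of __s390_unshare_zeropages(): restart
	 * the lookup from the reported position after handling each hit. */
	while ((rc = find_next(vals, 5, idx, &idx)) > 0) {
		printf("handling entry at index %d\n", idx);
		vals[idx] = 1;	/* "unshare" it so the walk makes progress */
	}
	return rc < 0 ? 1 : 0;
}
```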
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5af0402e9..1d168a98a 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1427,8 +1427,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
(insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
src_reg, dst_reg, off); \
- if (is32 && (insn->imm & BPF_FETCH)) \
- EMIT_ZERO(src_reg); \
+ if (insn->imm & BPF_FETCH) { \
+ /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \
+ _EMIT2(0x07e0); \
+ if (is32) \
+ EMIT_ZERO(src_reg); \
+ } \
} while (0)
case BPF_ADD:
case BPF_ADD | BPF_FETCH:
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index aed1ea8e2..74051b8dd 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -44,17 +44,12 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = opcode;
return 0;
}
-void __kprobes arch_copy_kprobe(struct kprobe *p)
-{
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
-}
-
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 3e07074e0..06fed5a21 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -33,7 +33,8 @@
*/
/*
- * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
+ * unsigned int csum_partial(const unsigned char *buf, int len,
+ * unsigned int sum);
*/
.text
@@ -45,31 +46,11 @@ ENTRY(csum_partial)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
+ mov r5, r1
mov r4, r0
- tst #3, r0 ! Check alignment.
- bt/s 2f ! Jump if alignment is ok.
- mov r4, r7 ! Keep a copy to check for alignment
+ tst #2, r0 ! Check alignment.
+ bt 2f ! Jump if alignment is ok.
!
- tst #1, r0 ! Check alignment.
- bt 21f ! Jump if alignment is boundary of 2bytes.
-
- ! buf is odd
- tst r5, r5
- add #-1, r5
- bt 9f
- mov.b @r4+, r0
- extu.b r0, r0
- addc r0, r6 ! t=0 from previous tst
- mov r6, r0
- shll8 r6
- shlr16 r0
- shlr8 r0
- or r0, r6
- mov r4, r0
- tst #2, r0
- bt 2f
-21:
- ! buf is 2 byte aligned (len could be 0)
add #-2, r5 ! Alignment uses up two bytes.
cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
@@ -77,17 +58,16 @@ ENTRY(csum_partial)
bra 6f
add #2, r5 ! r5 was < 2. Deal with it.
1:
+ mov r5, r1 ! Save new len for later use.
mov.w @r4+, r0
extu.w r0, r0
addc r0, r6
bf 2f
add #1, r6
2:
- ! buf is 4 byte aligned (len could be 0)
- mov r5, r1
mov #-5, r0
- shld r0, r1
- tst r1, r1
+ shld r0, r5
+ tst r5, r5
bt/s 4f ! if it's =0, go to 4f
clrt
.align 2
@@ -109,31 +89,30 @@ ENTRY(csum_partial)
addc r0, r6
addc r2, r6
movt r0
- dt r1
+ dt r5
bf/s 3b
cmp/eq #1, r0
- ! here, we know r1==0
- addc r1, r6 ! add carry to r6
+ ! here, we know r5==0
+ addc r5, r6 ! add carry to r6
4:
- mov r5, r0
+ mov r1, r0
and #0x1c, r0
tst r0, r0
- bt 6f
- ! 4 bytes or more remaining
- mov r0, r1
- shlr2 r1
+ bt/s 6f
+ mov r0, r5
+ shlr2 r5
mov #0, r2
5:
addc r2, r6
mov.l @r4+, r2
movt r0
- dt r1
+ dt r5
bf/s 5b
cmp/eq #1, r0
addc r2, r6
- addc r1, r6 ! r1==0 here, so it means add carry-bit
+ addc r5, r6 ! r5==0 here, so it means add carry-bit
6:
- ! 3 bytes or less remaining
+ mov r1, r5
mov #3, r0
and r0, r5
tst r5, r5
@@ -159,16 +138,6 @@ ENTRY(csum_partial)
mov #0, r0
addc r0, r6
9:
- ! Check if the buffer was misaligned, if so realign sum
- mov r7, r0
- tst #1, r0
- bt 10f
- mov r6, r0
- shll8 r6
- shlr16 r0
- shlr8 r0
- or r0, r6
-10:
rts
mov r6, r0
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 505b67008..0964fede0 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)
-void smp_fill_in_cpu_possible_map(void);
void smp_fill_in_sib_core_maps(void);
void __noreturn cpu_play_dead(void);
@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
#define smp_fill_in_sib_core_maps() do { } while (0)
#define smp_fetch_global_regs() do { } while (0)
#define smp_fetch_global_pmu() do { } while (0)
-#define smp_fill_in_cpu_possible_map() do { } while (0)
#define smp_init_cpu_poke() do { } while (0)
#define scheduler_poke() do { } while (0)
diff --git a/arch/sparc/include/uapi/asm/termbits.h b/arch/sparc/include/uapi/asm/termbits.h
index 432132270..0da2b1adc 100644
--- a/arch/sparc/include/uapi/asm/termbits.h
+++ b/arch/sparc/include/uapi/asm/termbits.h
@@ -10,16 +10,6 @@ typedef unsigned int tcflag_t;
typedef unsigned long tcflag_t;
#endif
-#define NCC 8
-struct termio {
- unsigned short c_iflag; /* input mode flags */
- unsigned short c_oflag; /* output mode flags */
- unsigned short c_cflag; /* control mode flags */
- unsigned short c_lflag; /* local mode flags */
- unsigned char c_line; /* line discipline */
- unsigned char c_cc[NCC]; /* control characters */
-};
-
#define NCCS 17
struct termios {
tcflag_t c_iflag; /* input mode flags */
diff --git a/arch/sparc/include/uapi/asm/termios.h b/arch/sparc/include/uapi/asm/termios.h
index ee86f4093..cceb32260 100644
--- a/arch/sparc/include/uapi/asm/termios.h
+++ b/arch/sparc/include/uapi/asm/termios.h
@@ -40,5 +40,14 @@ struct winsize {
unsigned short ws_ypixel;
};
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
#endif /* _UAPI_SPARC_TERMIOS_H */
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 998aa693d..ba82884cb 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -483,7 +483,9 @@ static void *record_one_cpu(struct device_node *dp, int cpuid, int arg)
ncpus_probed++;
#ifdef CONFIG_SMP
set_cpu_present(cpuid, true);
- set_cpu_possible(cpuid, true);
+
+ if (num_possible_cpus() < nr_cpu_ids)
+ set_cpu_possible(cpuid, true);
#endif
return NULL;
}
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 6a4797dec..6bbe8e394 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -671,7 +671,6 @@ void __init setup_arch(char **cmdline_p)
paging_init();
init_sparc64_elf_hwcap();
- smp_fill_in_cpu_possible_map();
/*
* Once the OF device tree and MDESC have been setup and nr_cpus has
* been parsed, we know the list of possible cpus. Therefore we can
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a0cc9bb41..e40c395db 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1216,20 +1216,6 @@ void __init smp_setup_processor_id(void)
xcall_deliver_impl = hypervisor_xcall_deliver;
}
-void __init smp_fill_in_cpu_possible_map(void)
-{
- int possible_cpus = num_possible_cpus();
- int i;
-
- if (possible_cpus > nr_cpu_ids)
- possible_cpus = nr_cpu_ids;
-
- for (i = 0; i < possible_cpus; i++)
- set_cpu_possible(i, true);
- for (; i < NR_CPUS; i++)
- set_cpu_possible(i, false);
-}
-
void smp_fill_in_sib_core_maps(void)
{
unsigned int i;
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b44d79d77..ef69127d7 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -249,6 +249,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
pmd_t old, entry;
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
old = pmdp_establish(vma, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index ffc5cb92f..d82bc3fdb 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -676,24 +676,26 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
goto cleanup;
}
- *winch = ((struct winch) { .list = LIST_HEAD_INIT(winch->list),
- .fd = fd,
+ *winch = ((struct winch) { .fd = fd,
.tty_fd = tty_fd,
.pid = pid,
.port = port,
.stack = stack });
+ spin_lock(&winch_handler_lock);
+ list_add(&winch->list, &winch_handlers);
+ spin_unlock(&winch_handler_lock);
+
if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,
IRQF_SHARED, "winch", winch) < 0) {
printk(KERN_ERR "register_winch_irq - failed to register "
"IRQ\n");
+ spin_lock(&winch_handler_lock);
+ list_del(&winch->list);
+ spin_unlock(&winch_handler_lock);
goto out_free;
}
- spin_lock(&winch_handler_lock);
- list_add(&winch->list, &winch_handlers);
- spin_unlock(&winch_handler_lock);
-
return;
out_free:
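The reordering above follows the general publish-before-enable rule: the handler must already be on the shared list before the interrupt that traverses that list can fire, and it must be unlinked again if registration fails. A userspace sketch of the same pattern, assuming made-up enable_irq()/register_handler() helpers and a pthread mutex in place of winch_handler_lock:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct handler {
	struct handler *next;
	int fd;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct handler *handlers;

/* Made-up stand-in for um_request_irq(): fails for invalid fds. */
static int enable_irq(struct handler *h)
{
	return h->fd >= 0 ? 0 : -1;
}

static int register_handler(int fd)
{
	struct handler *h = malloc(sizeof(*h));

	if (!h)
		return -1;
	h->fd = fd;

	/* Publish first: once enable_irq() succeeds, the handler may fire
	 * immediately and must find its entry on the list. */
	pthread_mutex_lock(&lock);
	h->next = handlers;
	handlers = h;
	pthread_mutex_unlock(&lock);

	if (enable_irq(h) < 0) {
		struct handler **pp;

		/* Unpublish again on failure, then free. */
		pthread_mutex_lock(&lock);
		for (pp = &handlers; *pp; pp = &(*pp)->next) {
			if (*pp == h) {
				*pp = h->next;
				break;
			}
		}
		pthread_mutex_unlock(&lock);
		free(h);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d %d\n", register_handler(3), register_handler(-1));
	return 0;
}
```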
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 63fc062ad..ef805eaa9 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1092,7 +1092,7 @@ static int __init ubd_init(void)
if (irq_req_buffer == NULL) {
printk(KERN_ERR "Failed to initialize ubd buffering\n");
- return -1;
+ return -ENOMEM;
}
io_req_buffer = kmalloc_array(UBD_REQ_BUFFER_SIZE,
sizeof(struct io_thread_req *),
@@ -1103,7 +1103,7 @@ static int __init ubd_init(void)
if (io_req_buffer == NULL) {
printk(KERN_ERR "Failed to initialize ubd buffering\n");
- return -1;
+ return -ENOMEM;
}
platform_driver_register(&ubd_driver);
mutex_lock(&ubd_lock);
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index dc2feae78..63e5f108a 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -141,7 +141,7 @@ static bool get_bpf_flash(struct arglist *def)
if (allow != NULL) {
if (kstrtoul(allow, 10, &result) == 0)
- return (allow > 0);
+ return result > 0;
}
return false;
}
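The one-word fix above corrects a parse-then-test slip: the code tested the (always non-NULL) string pointer instead of the value kstrtoul() parsed, so "0" still enabled the flag. A userspace sketch of the bug and the fix, with strtoul() standing in for kstrtoul():

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

static bool flag_enabled(const char *allow)
{
	unsigned long result;
	char *end;

	if (allow != NULL) {
		result = strtoul(allow, &end, 10);
		if (end != allow && *end == '\0')
			/* The buggy variant returned (allow > 0): a non-NULL
			 * pointer is always "true", so "0" enabled the flag. */
			return result > 0;
	}
	return false;
}

int main(void)
{
	printf("%d %d %d\n", flag_enabled("0"), flag_enabled("1"), flag_enabled(NULL));
	return 0;	/* prints: 0 1 0 */
}
```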
diff --git a/arch/um/include/asm/kasan.h b/arch/um/include/asm/kasan.h
index 0d6547f4e..f97bb1f7b 100644
--- a/arch/um/include/asm/kasan.h
+++ b/arch/um/include/asm/kasan.h
@@ -24,7 +24,6 @@
#ifdef CONFIG_KASAN
void kasan_init(void);
-void kasan_map_memory(void *start, unsigned long len);
extern int kasan_um_is_ready;
#ifdef CONFIG_STATIC_LINK
diff --git a/arch/um/include/asm/mmu.h b/arch/um/include/asm/mmu.h
index a7555e43e..f2923c767 100644
--- a/arch/um/include/asm/mmu.h
+++ b/arch/um/include/asm/mmu.h
@@ -14,8 +14,6 @@ typedef struct mm_context {
struct uml_arch_mm_context arch;
} mm_context_t;
-extern void __switch_mm(struct mm_id * mm_idp);
-
/* Avoid tangled inclusion with asm/ldt.h */
extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
extern void free_ldt(struct mm_context *mm);
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 6c3779541..5a7c05275 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -94,7 +94,6 @@ extern struct cpuinfo_um boot_cpu_data;
#define current_cpu_data boot_cpu_data
#define cache_line_size() (boot_cpu_data.cache_alignment)
-extern unsigned long get_thread_reg(int reg, jmp_buf *buf);
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
extern unsigned long __get_wchan(struct task_struct *p);
diff --git a/arch/um/include/shared/kern_util.h b/arch/um/include/shared/kern_util.h
index 789b83013..af8cdfc75 100644
--- a/arch/um/include/shared/kern_util.h
+++ b/arch/um/include/shared/kern_util.h
@@ -66,4 +66,6 @@ extern void fatal_sigsegv(void) __attribute__ ((noreturn));
void um_idle_sleep(void);
+void kasan_map_memory(void *start, size_t len);
+
#endif
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index e82e203f5..92dbf727e 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -15,4 +15,6 @@ struct mm_id {
int kill;
};
+void __switch_mm(struct mm_id *mm_idp);
+
#endif
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index 8530b2e08..c6c9495b1 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -15,6 +15,7 @@
#include <sys/vfs.h>
#include <linux/magic.h>
#include <init.h>
+#include <kern_util.h>
#include <os.h>
/*
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 928820e61..f5b3d14ff 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -501,7 +501,7 @@ config X86_FRED
When enabled, try to use Flexible Return and Event Delivery
instead of the legacy SYSCALL/SYSENTER/IDT architecture for
ring transitions and exception/interrupt handling if the
- system supports.
+ system supports it.
if X86_32
config X86_BIGSMP
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index c5d614d28..74777a97e 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -248,6 +248,7 @@ config UNWINDER_ORC
config UNWINDER_FRAME_POINTER
bool "Frame pointer unwinder"
+ select ARCH_WANT_FRAME_POINTERS
select FRAME_POINTER
help
This option enables the frame pointer unwinder for unwinding kernel
@@ -271,7 +272,3 @@ config UNWINDER_GUESS
overhead.
endchoice
-
-config FRAME_POINTER
- depends on !UNWINDER_ORC && !UNWINDER_GUESS
- bool
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index e9522c689..8da346677 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -116,9 +116,9 @@ vmlinux-objs-$(CONFIG_UNACCEPTED_MEMORY) += $(obj)/mem.o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
-vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
-$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+$(obj)/vmlinux: $(vmlinux-objs-y) $(vmlinux-libs-y) FORCE
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index bf4a10a57..1dcb794c5 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
call sev_enable
#endif
+ /* Preserve only the CR4 bits that must be preserved, and clear the rest */
+ movq %cr4, %rax
+ andl $(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
+ movq %rax, %cr4
+
/*
* configure_5level_paging() updates the number of paging levels using
* a trampoline in 32-bit addressable memory if the current number does
diff --git a/arch/x86/boot/main.c b/arch/x86/boot/main.c
index c4ea5258a..9049f390d 100644
--- a/arch/x86/boot/main.c
+++ b/arch/x86/boot/main.c
@@ -119,8 +119,8 @@ static void init_heap(void)
char *stack_end;
if (boot_params.hdr.loadflags & CAN_USE_HEAP) {
- asm("leal %P1(%%esp),%0"
- : "=r" (stack_end) : "i" (-STACK_SIZE));
+ asm("leal %n1(%%esp),%0"
+ : "=r" (stack_end) : "i" (STACK_SIZE));
heap_end = (char *)
((size_t)boot_params.hdr.heap_end_ptr + 0x200);
diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
index ef73a3ab8..791386d9a 100644
--- a/arch/x86/crypto/nh-avx2-x86_64.S
+++ b/arch/x86/crypto/nh-avx2-x86_64.S
@@ -154,5 +154,6 @@ SYM_TYPED_FUNC_START(nh_avx2)
vpaddq T1, T0, T0
vpaddq T4, T0, T0
vmovdqu T0, (HASH)
+ vzeroupper
RET
SYM_FUNC_END(nh_avx2)
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 9918212fa..0ffb072be 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -716,6 +716,7 @@ SYM_TYPED_FUNC_START(sha256_transform_rorx)
popq %r13
popq %r12
popq %rbx
+ vzeroupper
RET
SYM_FUNC_END(sha256_transform_rorx)
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index f08496cd6..24973f42c 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -680,6 +680,7 @@ SYM_TYPED_FUNC_START(sha512_transform_rorx)
pop %r12
pop %rbx
+ vzeroupper
RET
SYM_FUNC_END(sha512_transform_rorx)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 67b68d0d1..0cb2396de 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -294,10 +294,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, newinstr1 is used.
* Otherwise, oldinstr is used.
*/
-#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
- ft_flags2, input...) \
- asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
- newinstr2, ft_flags2) \
+#define alternative_input_2(oldinstr, newinstr1, ft_flags1, newinstr2, \
+ ft_flags2, input...) \
+ asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, \
+ newinstr2, ft_flags2) \
: : "i" (0), ## input)
/* Like alternative_input, but with a single output argument */
@@ -307,7 +307,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
/* Like alternative_io, but for replacing a direct call with another one. */
#define alternative_call(oldfunc, newfunc, ft_flags, output, input...) \
- asm_inline volatile (ALTERNATIVE("call %P[old]", "call %P[new]", ft_flags) \
+ asm_inline volatile (ALTERNATIVE("call %c[old]", "call %c[new]", ft_flags) \
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
@@ -316,12 +316,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
* Otherwise, if CPU has feature1, function1 is used.
* Otherwise, old function is used.
*/
-#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
- output, input...) \
- asm_inline volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", ft_flags1,\
- "call %P[new2]", ft_flags2) \
- : output, ASM_CALL_CONSTRAINT \
- : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+#define alternative_call_2(oldfunc, newfunc1, ft_flags1, newfunc2, ft_flags2, \
+ output, input...) \
+ asm_inline volatile (ALTERNATIVE_2("call %c[old]", "call %c[new1]", ft_flags1, \
+ "call %c[new2]", ft_flags2) \
+ : output, ASM_CALL_CONSTRAINT \
+ : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
[new2] "i" (newfunc2), ## input)
/*
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 3486d91b8..d510405e4 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -24,7 +24,7 @@ typedef struct {
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
- asm volatile("call %P[func]" \
+ asm volatile("call %c[func]" \
: out : [func] "i" (atomic64_##g##_cx8), ## in)
#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 44b08b53a..c1d6cd58f 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -62,7 +62,7 @@ static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old,
asm volatile(_lock "cmpxchg16b %[ptr]" \
CC_SET(e) \
: CC_OUT(e) (ret), \
- [ptr] "+m" (*ptr), \
+ [ptr] "+m" (*(_ptr)), \
"+a" (o.low), "+d" (o.high) \
: "b" (n.low), "c" (n.high) \
: "memory"); \
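The *(_ptr) change is standard macro hygiene: the memory constraint should name the macro's own _ptr parameter, parenthesized so that an expression argument keeps its grouping after expansion. A minimal userspace illustration of the parenthesization half (not the kernel macro itself):

```c
#include <stdio.h>

/* Unsafe: DEREF_BAD(p + 1) expands to (*p + 1). */
#define DEREF_BAD(_ptr)		(*_ptr)
/* Safe: parenthesizing the argument keeps the intended grouping. */
#define DEREF_GOOD(_ptr)	(*(_ptr))

int main(void)
{
	int a[2] = { 10, 20 };
	int *p = a;

	printf("%d\n", DEREF_BAD(p + 1));	/* 11, i.e. *p + 1 */
	printf("%d\n", DEREF_GOOD(p + 1));	/* 20, i.e. *(p + 1) */
	return 0;
}
```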
diff --git a/arch/x86/include/asm/cpu_device_id.h b/arch/x86/include/asm/cpu_device_id.h
index eb8fcede9..e8e3dbe7f 100644
--- a/arch/x86/include/asm/cpu_device_id.h
+++ b/arch/x86/include/asm/cpu_device_id.h
@@ -3,6 +3,39 @@
#define _ASM_X86_CPU_DEVICE_ID
/*
+ * Can't use <linux/bitfield.h> because it generates expressions that
+ * cannot be used in structure initializers. Bitfield construction
+ * here must match the union in struct cpuinfo_x86:
+ * union {
+ * struct {
+ * __u8 x86_model;
+ * __u8 x86;
+ * __u8 x86_vendor;
+ * __u8 x86_reserved;
+ * };
+ * __u32 x86_vfm;
+ * };
+ */
+#define VFM_MODEL_BIT 0
+#define VFM_FAMILY_BIT 8
+#define VFM_VENDOR_BIT 16
+#define VFM_RSVD_BIT 24
+
+#define VFM_MODEL_MASK GENMASK(VFM_FAMILY_BIT - 1, VFM_MODEL_BIT)
+#define VFM_FAMILY_MASK GENMASK(VFM_VENDOR_BIT - 1, VFM_FAMILY_BIT)
+#define VFM_VENDOR_MASK GENMASK(VFM_RSVD_BIT - 1, VFM_VENDOR_BIT)
+
+#define VFM_MODEL(vfm) (((vfm) & VFM_MODEL_MASK) >> VFM_MODEL_BIT)
+#define VFM_FAMILY(vfm) (((vfm) & VFM_FAMILY_MASK) >> VFM_FAMILY_BIT)
+#define VFM_VENDOR(vfm) (((vfm) & VFM_VENDOR_MASK) >> VFM_VENDOR_BIT)
+
+#define VFM_MAKE(_vendor, _family, _model) ( \
+ ((_model) << VFM_MODEL_BIT) | \
+ ((_family) << VFM_FAMILY_BIT) | \
+ ((_vendor) << VFM_VENDOR_BIT) \
+)
+
+/*
* Declare drivers belonging to specific x86 CPUs
* Similar in spirit to pci_device_id and related PCI functions
*
@@ -20,6 +53,9 @@
#define X86_CENTAUR_FAM6_C7_D 0xd
#define X86_CENTAUR_FAM6_NANO 0xf
+/* x86_cpu_id::flags */
+#define X86_CPU_ID_FLAG_ENTRY_VALID BIT(0)
+
#define X86_STEPPINGS(mins, maxs) GENMASK(maxs, mins)
/**
* X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
@@ -46,6 +82,18 @@
.model = _model, \
.steppings = _steppings, \
.feature = _feature, \
+ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
+ .driver_data = (unsigned long) _data \
+}
+
+#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
+ _steppings, _feature, _data) { \
+ .vendor = _vendor, \
+ .family = _family, \
+ .model = _model, \
+ .steppings = _steppings, \
+ .feature = _feature, \
+ .flags = X86_CPU_ID_FLAG_ENTRY_VALID, \
.driver_data = (unsigned long) _data \
}
@@ -164,6 +212,56 @@
X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
steppings, X86_FEATURE_ANY, data)
+/**
+ * X86_MATCH_VFM - Match encoded vendor/family/model
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Stepping and feature are set to wildcards
+ */
+#define X86_MATCH_VFM(vfm, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @steppings: Bitmask of steppings to match
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * feature is set to wildcard
+ */
+#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ steppings, X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
+ * @vfm: Encoded 8-bits each for vendor, family, model
+ * @feature: A X86_FEATURE bit
+ * @data: Driver specific data or NULL. The internal storage
+ * format is unsigned long. The supplied value, pointer
+ * etc. is cast to unsigned long internally.
+ *
+ * Steppings is set to wildcard
+ */
+#define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
+ X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
+ VFM_VENDOR(vfm), \
+ VFM_FAMILY(vfm), \
+ VFM_MODEL(vfm), \
+ X86_STEPPING_ANY, feature, data)
+
/*
* Match specific microcode revisions.
*
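The VFM_* helpers above pack vendor, family and model into one 32-bit value, 8 bits each, mirroring the x86_vfm union quoted in the comment. A self-contained sketch of the same packing and extraction, using plain shifts and masks so it builds outside the kernel; the vendor/family/model values are examples only:

```c
#include <stdio.h>
#include <stdint.h>

#define VFM_MODEL_BIT	0
#define VFM_FAMILY_BIT	8
#define VFM_VENDOR_BIT	16

#define VFM_MAKE(v, f, m) (((uint32_t)(m) << VFM_MODEL_BIT) | \
			   ((uint32_t)(f) << VFM_FAMILY_BIT) | \
			   ((uint32_t)(v) << VFM_VENDOR_BIT))

#define VFM_MODEL(vfm)	(((vfm) >> VFM_MODEL_BIT) & 0xff)
#define VFM_FAMILY(vfm)	(((vfm) >> VFM_FAMILY_BIT) & 0xff)
#define VFM_VENDOR(vfm)	(((vfm) >> VFM_VENDOR_BIT) & 0xff)

int main(void)
{
	/* Example values only: vendor 0, family 6, model 0x8e. */
	uint32_t vfm = VFM_MAKE(0, 6, 0x8e);

	printf("vfm=0x%06x vendor=%u family=%u model=0x%x\n",
	       vfm, VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm));
	return 0;
}
```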
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 686e92d26..3508f3fc9 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -173,7 +173,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
static __always_inline bool _static_cpu_has(u16 bit)
{
asm goto(
- ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+ ALTERNATIVE_TERNARY("jmp 6f", %c[feature], "", "jmp %l[t_no]")
".pushsection .altinstr_aux,\"ax\"\n"
"6:\n"
" testb %[bitnum]," _ASM_RIP(%P[cap_byte]) "\n"
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 1dc600fa3..481096177 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -401,7 +401,6 @@ extern int __init efi_memmap_alloc(unsigned int num_entries,
struct efi_memory_map_data *data);
extern void __efi_memmap_free(u64 phys, unsigned long size,
unsigned long flags);
-#define __efi_memmap_free __efi_memmap_free
extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 798183867..b71ad173f 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -100,7 +100,7 @@
}
#define ASM_CALL_ARG0 \
- "call %P[__func] \n" \
+ "call %c[__func] \n" \
ASM_REACHABLE
#define ASM_CALL_ARG1 \
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index b77bbb67e..ce5111cec 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -59,36 +59,30 @@
#define __force_percpu_prefix "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset this_cpu_read(this_cpu_off)
-#ifdef CONFIG_USE_X86_SEG_SUPPORT
-/*
- * Efficient implementation for cases in which the compiler supports
- * named address spaces. Allows the compiler to perform additional
- * optimizations that can save more instructions.
- */
-#define arch_raw_cpu_ptr(ptr) \
-({ \
- unsigned long tcp_ptr__; \
- tcp_ptr__ = __raw_cpu_read(, this_cpu_off); \
- \
- tcp_ptr__ += (__force unsigned long)(ptr); \
- (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
-})
-#else /* CONFIG_USE_X86_SEG_SUPPORT */
+#ifdef CONFIG_X86_64
+#define __raw_my_cpu_offset raw_cpu_read_8(this_cpu_off);
+#else
+#define __raw_my_cpu_offset raw_cpu_read_4(this_cpu_off);
+#endif
+
/*
* Compared to the generic __my_cpu_offset version, the following
* saves one instruction and avoids clobbering a temp register.
+ *
+ * arch_raw_cpu_ptr should not be used in 32-bit VDSO for a 64-bit
+ * kernel, because games are played with CONFIG_X86_64 there and
+ * sizeof(this_cpu_off) becomes 4.
*/
-#define arch_raw_cpu_ptr(ptr) \
+#ifndef BUILD_VDSO32_64
+#define arch_raw_cpu_ptr(_ptr) \
({ \
- unsigned long tcp_ptr__; \
- asm ("mov " __percpu_arg(1) ", %0" \
- : "=r" (tcp_ptr__) \
- : "m" (__my_cpu_var(this_cpu_off))); \
- \
- tcp_ptr__ += (unsigned long)(ptr); \
- (typeof(*(ptr)) __kernel __force *)tcp_ptr__; \
+ unsigned long tcp_ptr__ = __raw_my_cpu_offset; \
+ tcp_ptr__ += (__force unsigned long)(_ptr); \
+ (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \
})
-#endif /* CONFIG_USE_X86_SEG_SUPPORT */
+#else
+#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; })
+#endif
#define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 9abb8cc4c..b78644962 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -567,6 +567,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+ unsigned int *level, bool *nx, bool *rw);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h
index 1be13b2df..64df897c0 100644
--- a/arch/x86/include/asm/sparsemem.h
+++ b/arch/x86/include/asm/sparsemem.h
@@ -37,8 +37,6 @@ extern int phys_to_target_node(phys_addr_t start);
#define phys_to_target_node phys_to_target_node
extern int memory_add_physaddr_to_nid(u64 start);
#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
-extern int numa_fill_memblks(u64 start, u64 end);
-#define numa_fill_memblks numa_fill_memblks
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 237dc8cdd..3a7755c1a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -78,10 +78,10 @@ extern int __get_user_bad(void);
int __ret_gu; \
register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
- asm volatile("call __" #fn "_%P4" \
+ asm volatile("call __" #fn "_%c[size]" \
: "=a" (__ret_gu), "=r" (__val_gu), \
ASM_CALL_CONSTRAINT \
- : "0" (ptr), "i" (sizeof(*(ptr)))); \
+ : "0" (ptr), [size] "i" (sizeof(*(ptr)))); \
instrument_get_user(__val_gu); \
(x) = (__force __typeof__(*(ptr))) __val_gu; \
__builtin_expect(__ret_gu, 0); \
@@ -177,7 +177,7 @@ extern void __put_user_nocheck_8(void);
__chk_user_ptr(__ptr); \
__ptr_pu = __ptr; \
__val_pu = __x; \
- asm volatile("call __" #fn "_%P[size]" \
+ asm volatile("call __" #fn "_%c[size]" \
: "=c" (__ret_pu), \
ASM_CALL_CONSTRAINT \
: "0" (__ptr_pu), \
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3cf156f70..027a8c7a2 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -215,7 +215,14 @@ out:
int amd_smn_read(u16 node, u32 address, u32 *value)
{
- return __amd_smn_rw(node, address, value, false);
+ int err = __amd_smn_rw(node, address, value, false);
+
+ if (PCI_POSSIBLE_ERROR(*value)) {
+ err = -ENODEV;
+ *value = 0;
+ }
+
+ return err;
}
EXPORT_SYMBOL_GPL(amd_smn_read);
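amd_smn_read() now treats an all-ones readback as a failed access, which is the condition PCI_POSSIBLE_ERROR() tests for, and hands the caller a zeroed value together with -ENODEV. A hedged userspace sketch of that convention; read_reg() is a made-up stand-in for the SMN access:

```c
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

/* Hypothetical register read: returns 0 and fills *value. A device
 * that did not respond typically reads back as all ones. */
static int read_reg(int present, uint32_t *value)
{
	*value = present ? 0x1234 : 0xffffffff;
	return 0;
}

static int checked_read(int present, uint32_t *value)
{
	int err = read_reg(present, value);

	/* Mirror the fix: an all-ones value means the access failed,
	 * so report -ENODEV and hand the caller a harmless zero. */
	if (*value == 0xffffffff) {
		err = -ENODEV;
		*value = 0;
	}
	return err;
}

int main(void)
{
	uint32_t v;

	printf("ok:  %d val=0x%x\n", checked_read(1, &v), v);
	printf("bad: %d val=0x%x\n", checked_read(0, &v), v);
	return 0;
}
```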
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 185738c72..29cd0543f 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -1036,7 +1036,8 @@ static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
add_timer_on(&cl->timer, cpu);
}
} else {
- apicd->prev_vector = 0;
+ pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
+ free_moved_vector(apicd);
}
raw_spin_unlock(&vector_lock);
}
@@ -1073,6 +1074,7 @@ void irq_complete_move(struct irq_cfg *cfg)
*/
void irq_force_complete_move(struct irq_desc *desc)
{
+ unsigned int cpu = smp_processor_id();
struct apic_chip_data *apicd;
struct irq_data *irqd;
unsigned int vector;
@@ -1097,10 +1099,11 @@ void irq_force_complete_move(struct irq_desc *desc)
goto unlock;
/*
- * If prev_vector is empty, no action required.
+ * If prev_vector is empty or the descriptor is neither currently
+ * nor previously on the outgoing CPU, no action is required.
*/
vector = apicd->prev_vector;
- if (!vector)
+ if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
goto unlock;
/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 605c26c00..198200782 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1053,18 +1053,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
- bool vp_bits_from_cpuid = true;
if (!cpu_has(c, X86_FEATURE_CPUID) ||
- (c->extended_cpuid_level < 0x80000008))
- vp_bits_from_cpuid = false;
-
- if (vp_bits_from_cpuid) {
- cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
-
- c->x86_virt_bits = (eax >> 8) & 0xff;
- c->x86_phys_bits = eax & 0xff;
- } else {
+ (c->extended_cpuid_level < 0x80000008)) {
if (IS_ENABLED(CONFIG_X86_64)) {
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;
@@ -1078,7 +1069,17 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
}
+ } else {
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+
+ c->x86_virt_bits = (eax >> 8) & 0xff;
+ c->x86_phys_bits = eax & 0xff;
+
+ /* Provide a sane default if not enumerated: */
+ if (!c->x86_clflush_size)
+ c->x86_clflush_size = 32;
}
+
c->x86_cache_bits = c->x86_phys_bits;
c->x86_cache_alignment = c->x86_clflush_size;
}
@@ -1589,6 +1590,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (have_cpuid_p()) {
cpu_detect(c);
get_cpu_vendor(c);
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
setup_force_cpu_cap(X86_FEATURE_CPUID);
get_cpu_address_sizes(c);
@@ -1748,7 +1750,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
cpu_detect(c);
get_cpu_vendor(c);
-
+ intel_unlock_cpuid_leafs(c);
get_cpu_cap(c);
get_cpu_address_sizes(c);
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index ea9e07d57..1beccefba 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -61,9 +61,11 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
void tsx_ap_init(void);
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c);
#else
static inline void tsx_init(void) { }
static inline void tsx_ap_init(void) { }
+static inline void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c) { }
#endif /* CONFIG_CPU_SUP_INTEL */
extern void init_spectral_chicken(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index be30d7fa2..93efd9edc 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -268,19 +268,26 @@ detect_keyid_bits:
c->x86_phys_bits -= keyid_bits;
}
+void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
+{
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
+
+ if (c->x86 < 6 || (c->x86 == 6 && c->x86_model < 0xd))
+ return;
+
+ /*
+ * The BIOS can have limited CPUID to leaf 2, which breaks feature
+ * enumeration. Unlock it and update the maximum leaf info.
+ */
+ if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
+ c->cpuid_level = cpuid_eax(0);
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
- /* Unmask CPUID levels if masked: */
- if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
- if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
- MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
- c->cpuid_level = cpuid_eax(0);
- get_cpu_cap(c);
- }
- }
-
if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index ad6776081..ae71b8ef9 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -39,9 +39,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
const struct x86_cpu_id *m;
struct cpuinfo_x86 *c = &boot_cpu_data;
- for (m = match;
- m->vendor | m->family | m->model | m->steppings | m->feature;
- m++) {
+ for (m = match; m->flags & X86_CPU_ID_FLAG_ENTRY_VALID; m++) {
if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
continue;
if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
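x86_match_cpu() previously stopped at the first entry whose vendor, family, model, steppings and feature fields were all zero, which no longer works as a terminator once an entry can legitimately encode zero everywhere (X86_VENDOR_INTEL is 0, and the stepping/feature wildcards are 0 as well); the loop now stops when X86_CPU_ID_FLAG_ENTRY_VALID is absent. A small sketch of that table-walk convention with made-up types:

```c
#include <stdio.h>

#define ENTRY_VALID 0x1

struct cpu_id {
	unsigned int vendor, family, model;
	unsigned int flags;
};

static const struct cpu_id *match_cpu(const struct cpu_id *table,
				      unsigned int vendor, unsigned int family)
{
	const struct cpu_id *m;

	/* Terminate on the missing flag, not on all-zero fields, so an
	 * entry that legitimately encodes zeroes still gets compared. */
	for (m = table; m->flags & ENTRY_VALID; m++) {
		if (m->vendor == vendor && m->family == family)
			return m;
	}
	return NULL;
}

int main(void)
{
	static const struct cpu_id ids[] = {
		{ .vendor = 0, .family = 6,  .model = 0, .flags = ENTRY_VALID },
		{ .vendor = 2, .family = 25, .model = 1, .flags = ENTRY_VALID },
		{ }	/* terminator: flags == 0 */
	};

	printf("%s\n", match_cpu(ids, 0, 6) ? "matched" : "no match");
	return 0;
}
```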
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 13b45b9c8..620f0af71 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -465,7 +465,7 @@ static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, siz
return !__apply_microcode_amd(mc);
}
-static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
+static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
{
char fw_name[36] = "amd-ucode/microcode_amd.bin";
struct firmware fw;
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index c34a35ec0..2ce5f4913 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -508,7 +508,8 @@ void free_rmid(u32 closid, u32 rmid)
* allows architectures that ignore the closid parameter to avoid an
* unnecessary check.
*/
- if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
+ if (!resctrl_arch_mon_capable() ||
+ idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
RESCTRL_RESERVED_RMID))
return;
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index d17c9b71e..621a151cc 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -128,6 +128,9 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
static __init bool check_for_real_bsp(u32 apic_id)
{
+ bool is_bsp = false, has_apic_base = boot_cpu_data.x86 >= 6;
+ u64 msr;
+
/*
 * There is no real good way to detect whether this is a kdump()
* kernel, but except on the Voyager SMP monstrosity which is not
@@ -144,17 +147,61 @@ static __init bool check_for_real_bsp(u32 apic_id)
if (topo_info.real_bsp_apic_id != BAD_APICID)
return false;
+ /*
+ * Check whether the enumeration order is broken by evaluating the
+ * BSP bit in the APICBASE MSR. If the CPU does not have the
+ * APICBASE MSR then the BSP detection is not possible and the
+ * kernel must rely on the firmware enumeration order.
+ */
+ if (has_apic_base) {
+ rdmsrl(MSR_IA32_APICBASE, msr);
+ is_bsp = !!(msr & MSR_IA32_APICBASE_BSP);
+ }
+
if (apic_id == topo_info.boot_cpu_apic_id) {
- topo_info.real_bsp_apic_id = apic_id;
- return false;
+ /*
+ * If the boot CPU has the APIC BSP bit set then the
+ * firmware enumeration is agreeing. If the CPU does not
+ * have the APICBASE MSR then the only choice is to trust
+ * the enumeration order.
+ */
+ if (is_bsp || !has_apic_base) {
+ topo_info.real_bsp_apic_id = apic_id;
+ return false;
+ }
+ /*
+ * If the boot APIC is enumerated first, but the APICBASE
+ * MSR does not have the BSP bit set, then there is no way
+ * to discover the real BSP here. Assume a crash kernel and
+ * limit the number of CPUs to 1 as an INIT to the real BSP
+ * would reset the machine.
+ */
+ pr_warn("Enumerated BSP APIC %x is not marked in APICBASE MSR\n", apic_id);
+ pr_warn("Assuming crash kernel. Limiting to one CPU to prevent machine INIT\n");
+ set_nr_cpu_ids(1);
+ goto fwbug;
}
- pr_warn("Boot CPU APIC ID not the first enumerated APIC ID: %x > %x\n",
+ pr_warn("Boot CPU APIC ID not the first enumerated APIC ID: %x != %x\n",
topo_info.boot_cpu_apic_id, apic_id);
+
+ if (is_bsp) {
+ /*
+ * The boot CPU has the APIC BSP bit set. Use it and complain
+ * about the broken firmware enumeration.
+ */
+ topo_info.real_bsp_apic_id = topo_info.boot_cpu_apic_id;
+ goto fwbug;
+ }
+
pr_warn("Crash kernel detected. Disabling real BSP to prevent machine INIT\n");
topo_info.real_bsp_apic_id = apic_id;
return true;
+
+fwbug:
+ pr_warn(FW_BUG "APIC enumeration order not specification compliant\n");
+ return false;
}
static unsigned int topo_unit_count(u32 lvlid, enum x86_topology_domains at_level,
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index ce2d507c3..5ee6373d4 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -84,9 +84,9 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
/*
* If leaf 0xb is available, then the domain shifts are set
- * already and nothing to do here.
+ * already and nothing to do here. Only valid for family >= 0x17.
*/
- if (!has_0xb) {
+ if (!has_0xb && tscan->c->x86 >= 0x17) {
/*
* Leaf 0x80000008 set the CORE domain shift already.
* Update the SMT domain, but do not propagate it.
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index dd2ec14ad..15af7e98e 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -21,6 +21,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct kprobe_ctlblk *kcb;
int bit;
+ if (unlikely(kprobe_ftrace_disabled))
+ return;
+
bit = ftrace_test_recursion_trylock(ip, parent_ip);
if (bit < 0)
return;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index b180d8e49..cc0f7f70b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -295,8 +295,15 @@ void machine_kexec_cleanup(struct kimage *image)
void machine_kexec(struct kimage *image)
{
unsigned long page_list[PAGES_NR];
- void *control_page;
+ unsigned int host_mem_enc_active;
int save_ftrace_enabled;
+ void *control_page;
+
+ /*
+ * This must be done before load_segments() since if call depth tracking
+ * is used then GS must be valid to make any function calls.
+ */
+ host_mem_enc_active = cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
@@ -358,7 +365,7 @@ void machine_kexec(struct kimage *image)
(unsigned long)page_list,
image->start,
image->preserve_context,
- cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));
+ host_mem_enc_active);
#ifdef CONFIG_KEXEC_JUMP
if (image->preserve_context)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 1123ef3cc..433403365 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -193,11 +193,9 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu)
cur->warned = false;
/*
- * If a non-zero TSC value for socket 0 may be valid then the default
- * adjusted value cannot assumed to be zero either.
+ * The default adjust value cannot be assumed to be zero on any socket.
*/
- if (tsc_async_resets)
- cur->adjusted = bootval;
+ cur->adjusted = bootval;
/*
* Check whether this CPU is the first in a package to come up. In
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 77352a4ab..b1002b798 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -1232,9 +1232,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = 0;
break;
case 0x80000008: {
- unsigned g_phys_as = (entry->eax >> 16) & 0xff;
- unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
- unsigned phys_as = entry->eax & 0xff;
+ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U);
+ unsigned int phys_as;
/*
* If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as
@@ -1242,16 +1241,16 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
* reductions in MAXPHYADDR for memory encryption affect shadow
* paging, too.
*
- * If TDP is enabled but an explicit guest MAXPHYADDR is not
- * provided, use the raw bare metal MAXPHYADDR as reductions to
- * the HPAs do not affect GPAs.
+ * If TDP is enabled, use the raw bare metal MAXPHYADDR as
+ * reductions to the HPAs do not affect GPAs.
*/
- if (!tdp_enabled)
- g_phys_as = boot_cpu_data.x86_phys_bits;
- else if (!g_phys_as)
- g_phys_as = phys_as;
+ if (!tdp_enabled) {
+ phys_as = boot_cpu_data.x86_phys_bits;
+ } else {
+ phys_as = entry->eax & 0xff;
+ }
- entry->eax = g_phys_as | (virt_as << 8);
+ entry->eax = phys_as | (virt_as << 8);
entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0008_EBX);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 759581bb2..4471b4e08 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -666,6 +666,14 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
return ret;
vcpu->arch.guest_state_protected = true;
+
+ /*
+ * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it
+ * only after setting guest_state_protected because KVM_SET_MSRS allows
+ * dynamic toggling of LBRV (for performance reasons) on write access to
+ * MSR_IA32_DEBUGCTLMSR when guest_state_protected is not set.
+ */
+ svm_enable_lbrv(vcpu);
return 0;
}
@@ -2269,6 +2277,12 @@ void __init sev_hardware_setup(void)
if (!boot_cpu_has(X86_FEATURE_SEV_ES))
goto out;
+ if (!lbrv) {
+ WARN_ONCE(!boot_cpu_has(X86_FEATURE_LBRV),
+ "LBRV must be present for SEV-ES support");
+ goto out;
+ }
+
/* Has the system been allocated ASIDs for SEV-ES? */
if (min_sev_asid == 1)
goto out;
@@ -3034,7 +3048,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
- svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
/*
* An SEV-ES guest requires a VMSA area that is a separate from the
@@ -3086,10 +3099,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Clear intercepts on selected MSRs */
set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
- set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
void sev_init_vmcb(struct vcpu_svm *svm)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9aaf83c8d..4650153af 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -99,6 +99,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_SPEC_CTRL, .always = false },
{ .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_FLUSH_CMD, .always = false },
+ { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -215,7 +216,7 @@ int vgif = true;
module_param(vgif, int, 0444);
/* enable/disable LBR virtualization */
-static int lbrv = true;
+int lbrv = true;
module_param(lbrv, int, 0444);
static int tsc_scaling = true;
@@ -990,7 +991,7 @@ void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
vmcb_mark_dirty(to_vmcb, VMCB_LBR);
}
-static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
+void svm_enable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1000,6 +1001,9 @@ static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+ if (sev_es_guest(vcpu->kvm))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_DEBUGCTLMSR, 1, 1);
+
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
if (is_guest_mode(vcpu))
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
@@ -1009,6 +1013,8 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
+
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
@@ -3843,16 +3849,27 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
/*
- * KVM should never request an NMI window when vNMI is enabled, as KVM
- * allows at most one to-be-injected NMI and one pending NMI, i.e. if
- * two NMIs arrive simultaneously, KVM will inject one and set
- * V_NMI_PENDING for the other. WARN, but continue with the standard
- * single-step approach to try and salvage the pending NMI.
+ * If NMIs are outright masked, i.e. the vCPU is already handling an
+ * NMI, and KVM has not yet intercepted an IRET, then there is nothing
+ * more to do at this time as KVM has already enabled IRET intercepts.
+ * If KVM has already intercepted IRET, then single-step over the IRET,
+ * as NMIs aren't architecturally unmasked until the IRET completes.
+ *
+ * If vNMI is enabled, KVM should never request an NMI window if NMIs
+ * are masked, as KVM allows at most one to-be-injected NMI and one
+ * pending NMI. If two NMIs arrive simultaneously, KVM will inject one
+ * NMI and set V_NMI_PENDING for the other, but if and only if NMIs are
+ * unmasked. KVM _will_ request an NMI window in some situations, e.g.
+ * if the vCPU is in an STI shadow or if GIF=0, KVM can't immediately
+ * inject the NMI. In those situations, KVM needs to single-step over
+ * the STI shadow or intercept STGI.
*/
- WARN_ON_ONCE(is_vnmi_enabled(svm));
+ if (svm_get_nmi_mask(vcpu)) {
+ WARN_ON_ONCE(is_vnmi_enabled(svm));
- if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
- return; /* IRET will cause a vm exit */
+ if (!svm->awaiting_iret_completion)
+ return; /* IRET will cause a vm exit */
+ }
/*
* SEV-ES guests are responsible for signaling when a vCPU is ready to
@@ -5249,6 +5266,12 @@ static __init int svm_hardware_setup(void)
nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+ if (lbrv) {
+ if (!boot_cpu_has(X86_FEATURE_LBRV))
+ lbrv = false;
+ else
+ pr_info("LBR virtualization supported\n");
+ }
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
* may be modified by svm_adjust_mmio_mask()), as well as nrips.
@@ -5302,14 +5325,6 @@ static __init int svm_hardware_setup(void)
svm_x86_ops.set_vnmi_pending = NULL;
}
-
- if (lbrv) {
- if (!boot_cpu_has(X86_FEATURE_LBRV))
- lbrv = false;
- else
- pr_info("LBR virtualization supported\n");
- }
-
if (!enable_pmu)
pr_info("PMU virtualization is disabled\n");
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 33878efde..2ed3015e0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 47
+#define MAX_DIRECT_ACCESS_MSRS 48
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
@@ -39,6 +39,7 @@ extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
+extern int lbrv;
/*
* Clean bits in VMCB.
@@ -543,6 +544,7 @@ u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
+void svm_enable_lbrv(struct kvm_vcpu *vcpu);
void svm_update_lbrv(struct kvm_vcpu *vcpu);
int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91478b769..4dbd9d99f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10677,13 +10677,12 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
+
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
- else {
- static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
- if (ioapic_in_kernel(vcpu->kvm))
- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
- }
+ else if (ioapic_in_kernel(vcpu->kvm))
+ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
if (is_guest_mode(vcpu))
vcpu->arch.load_eoi_exitmap_pending = true;
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 10d5ed8b5..a1cb3a4e6 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -44,7 +44,11 @@
or %rdx, %rax
.else
cmp $TASK_SIZE_MAX-\size+1, %eax
+.if \size != 8
jae .Lbad_get_user
+.else
+ jae .Lbad_get_user_8
+.endif
sbb %edx, %edx /* array_index_mask_nospec() */
and %edx, %eax
.endif
@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
-bad_get_user_8:
+.Lbad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 12af57220..da9347552 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -148,7 +148,7 @@ AVXcode:
65: SEG=GS (Prefix)
66: Operand-Size (Prefix)
67: Address-Size (Prefix)
-68: PUSH Iz (d64)
+68: PUSH Iz
69: IMUL Gv,Ev,Iz
6a: PUSH Ib (d64)
6b: IMUL Gv,Ev,Ib
@@ -698,10 +698,10 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-50: vpdpbusd Vx,Hx,Wx (66),(ev)
-51: vpdpbusds Vx,Hx,Wx (66),(ev)
-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+50: vpdpbusd Vx,Hx,Wx (66)
+51: vpdpbusds Vx,Hx,Wx (66)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
54: vpopcntb/w Vx,Wx (66),(ev)
55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 65e9a6e39..6ce10e3c6 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,7 +493,7 @@ static void __init numa_clear_kernel_node_hotplug(void)
for_each_reserved_mem_region(mb_region) {
int nid = memblock_get_region_node(mb_region);
- if (nid != MAX_NUMNODES)
+ if (nid != NUMA_NO_NODE)
node_set(nid, reserved_nodemask);
}
@@ -614,9 +614,9 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
- MAX_NUMNODES));
+ NUMA_NO_NODE));
/* In case that parsing SRAT failed. */
WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
numa_reset_distance();
@@ -929,6 +929,8 @@ int memory_add_physaddr_to_nid(u64 start)
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
static int __init cmp_memblk(const void *a, const void *b)
{
const struct numa_memblk *ma = *(const struct numa_memblk **)a;
@@ -1001,5 +1003,3 @@ int __init numa_fill_memblks(u64 start, u64 end)
}
return 0;
}
-
-#endif
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 80c9037ff..19fdfbb17 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
* Validate strict W^X semantics.
*/
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
- unsigned long pfn, unsigned long npg)
+ unsigned long pfn, unsigned long npg,
+ bool nx, bool rw)
{
unsigned long end;
@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
return new;
+ /* Non-leaf translation entries can disable writing or execution. */
+ if (!rw || nx)
+ return new;
+
end = start + npg * PAGE_SIZE - 1;
WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
(unsigned long long)pgprot_val(old),
@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
/*
* Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry and the level of the mapping.
+ * Return a pointer to the entry, the level of the mapping, and the effective
+ * NX and RW bits of all page table levels.
*/
-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
- unsigned int *level)
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+ unsigned int *level, bool *nx, bool *rw)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
*level = PG_LEVEL_NONE;
+ *nx = false;
+ *rw = true;
if (pgd_none(*pgd))
return NULL;
+ *nx |= pgd_flags(*pgd) & _PAGE_NX;
+ *rw &= pgd_flags(*pgd) & _PAGE_RW;
+
p4d = p4d_offset(pgd, address);
if (p4d_none(*p4d))
return NULL;
@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d;
+ *nx |= p4d_flags(*p4d) & _PAGE_NX;
+ *rw &= p4d_flags(*p4d) & _PAGE_RW;
+
pud = pud_offset(p4d, address);
if (pud_none(*pud))
return NULL;
@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud;
+ *nx |= pud_flags(*pud) & _PAGE_NX;
+ *rw &= pud_flags(*pud) & _PAGE_RW;
+
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
@@ -695,12 +712,27 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
+ *nx |= pmd_flags(*pmd) & _PAGE_NX;
+ *rw &= pmd_flags(*pmd) & _PAGE_RW;
+
*level = PG_LEVEL_4K;
return pte_offset_kernel(pmd, address);
}
/*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
+{
+ bool nx, rw;
+
+ return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
+}
+
+/*
* Lookup the page table entry for a virtual address. Return a pointer
* to the entry and the level of the mapping.
*
@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
EXPORT_SYMBOL_GPL(lookup_address);
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
- unsigned int *level)
+ unsigned int *level, bool *nx, bool *rw)
{
- if (cpa->pgd)
- return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
- address, level);
+ pgd_t *pgd;
+
+ if (!cpa->pgd)
+ pgd = pgd_offset_k(address);
+ else
+ pgd = cpa->pgd + pgd_index(address);
- return lookup_address(address, level);
+ return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
}
/*
@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, *tmp;
enum pg_level level;
+ bool nx, rw;
/*
* Check for races, another CPU might have split this page
* up already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte)
return 1;
@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
psize, CPA_DETECT);
- new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
+ new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
+ nx, rw);
/*
* If there is a conflict, split the large page.
@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
pte_t *pbase = (pte_t *)page_address(base);
unsigned int i, level;
pgprot_t ref_prot;
+ bool nx, rw;
pte_t *tmp;
spin_lock(&pgd_lock);
@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page
* up for us already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (tmp != kpte) {
spin_unlock(&pgd_lock);
return 1;
@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
int do_split, err;
unsigned int level;
pte_t *kpte, old_pte;
+ bool nx, rw;
address = __cpa_addr(cpa, cpa->curpage);
repeat:
- kpte = _lookup_address_cpa(cpa, address, &level);
+ kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
if (!kpte)
return __cpa_process_fault(cpa, address, primary);
@@ -1619,7 +1658,8 @@ repeat:
new_prot = static_protections(new_prot, address, pfn, 1, 0,
CPA_PROTECT);
- new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
+ new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
+ nx, rw);
new_prot = pgprot_clear_protnone_bits(new_prot);
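The verify_rwx() rework above takes its nx/rw arguments from every non-leaf level walked by lookup_address_in_pgd_attr(): a mapping is only an effective W+X candidate if no level sets NX and every level grants RW. A small user-space sketch of that accumulation (the flag constants and function names are made up for illustration, not the kernel's _PAGE_* definitions):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_NX 0x1	/* hypothetical per-level no-execute bit */
#define PAGE_RW 0x2	/* hypothetical per-level writable bit */

/*
 * Accumulate effective permissions over the non-leaf levels: execution is
 * forbidden if any level sets NX, writing is allowed only if every level
 * sets RW.
 */
static void effective_perms(const unsigned int *levels, int n,
			    bool *nx, bool *rw)
{
	*nx = false;
	*rw = true;
	for (int i = 0; i < n; i++) {
		*nx |= (levels[i] & PAGE_NX) != 0;	/* any level can forbid exec */
		*rw &= (levels[i] & PAGE_RW) != 0;	/* every level must allow write */
	}
}

int main(void)
{
	/* pgd allows RW, the middle level sets NX, pmd allows RW */
	unsigned int levels[] = { PAGE_RW, PAGE_RW | PAGE_NX, PAGE_RW };
	bool nx, rw;

	effective_perms(levels, 3, &nx, &rw);
	printf("nx=%d rw=%d\n", nx, rw);	/* nx=1 rw=1 */
	return 0;
}

With the middle level setting NX in this example, the walk reports nx=1, so the stricter W^X warning path in verify_rwx() would not trigger for that mapping.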
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index d007591b8..103cbccf1 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -631,6 +631,8 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
+
/*
* No flush is necessary. Once an invalid PTE is established, the PTE's
* access and dirty bits cannot be updated.
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 0cc952066..39255f0eb 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -518,7 +518,34 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
{
struct resource *conflict;
- if (!early && !acpi_disabled) {
+ if (early) {
+
+ /*
+ * Don't try to do this check unless configuration type 1
+ * is available. How about type 2?
+ */
+
+ /*
+ * 946f2ee5c731 ("Check that MCFG points to an e820
+ * reserved area") added this E820 check in 2006 to work
+ * around BIOS defects.
+ *
+ * Per PCI Firmware r3.3, sec 4.1.2, ECAM space must be
+ * reserved by a PNP0C02 resource, but it need not be
+ * mentioned in E820. Before the ACPI interpreter is
+ * available, we can't check for PNP0C02 resources, so
+ * there's no reliable way to verify the region in this
+ * early check. Keep it only for the old machines that
+ * motivated 946f2ee5c731.
+ */
+ if (dmi_get_bios_year() < 2016 && raw_pci_ops)
+ return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+ "E820 entry");
+
+ return true;
+ }
+
+ if (!acpi_disabled) {
if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
"ACPI motherboard resource"))
return true;
@@ -551,16 +578,7 @@ static bool __ref pci_mmcfg_reserved(struct device *dev,
* For MCFG information constructed from hotpluggable host bridge's
* _CBA method, just assume it's reserved.
*/
- if (pci_mmcfg_running_state)
- return true;
-
- /* Don't try to do this check unless configuration
- type 1 is available. how about type 2 ?*/
- if (raw_pci_ops)
- return is_mmconf_reserved(e820__mapped_all, cfg, dev,
- "E820 entry");
-
- return false;
+ return pci_mmcfg_running_state;
}
static void __init pci_mmcfg_reject_broken(int early)
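The new early-boot branch above keeps the legacy E820 requirement only for machines old enough to have motivated it, since newer firmware is allowed to reserve ECAM via a PNP0C02 resource without listing it in E820. A hedged, stand-alone restatement of just that early branch (parameter names are illustrative):

#include <stdbool.h>

/* Illustrative stand-ins for the kernel's DMI year, raw_pci_ops and E820 checks. */
static bool ecam_reserved_early(int bios_year, bool have_type1_config,
				bool e820_reserved)
{
	/*
	 * Before the ACPI interpreter is up there is no way to check for a
	 * PNP0C02 reservation, so only the pre-2016 machines keep the strict
	 * E820 requirement; everything newer is trusted at this stage.
	 */
	if (bios_year < 2016 && have_type1_config)
		return e820_reserved;
	return true;
}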
diff --git a/arch/x86/platform/efi/memmap.c b/arch/x86/platform/efi/memmap.c
index 4ef20b49e..6ed193550 100644
--- a/arch/x86/platform/efi/memmap.c
+++ b/arch/x86/platform/efi/memmap.c
@@ -92,12 +92,22 @@ int __init efi_memmap_alloc(unsigned int num_entries,
*/
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
+ unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
+ unsigned long flags = efi.memmap.flags;
+ u64 phys = efi.memmap.phys_map;
+ int ret;
+
efi_memmap_unmap();
if (efi_enabled(EFI_PARAVIRT))
return 0;
- return __efi_memmap_init(data);
+ ret = __efi_memmap_init(data);
+ if (ret)
+ return ret;
+
+ __efi_memmap_free(phys, size, flags);
+ return 0;
}
/**
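efi_memmap_install() above now snapshots the old map's physical address, size and flags before installing the new one, and frees the old allocation only after __efi_memmap_init() succeeds, so a failed install leaves the previous map intact. A minimal user-space sketch of that install-then-free ordering (names and types are illustrative):

#include <stdlib.h>
#include <string.h>

struct map {
	void *data;
	size_t size;
};

/* Install a new buffer; on failure the old one stays valid, on success it is freed. */
static int map_install(struct map *m, const void *src, size_t size)
{
	void *old = m->data;
	void *new = malloc(size);

	if (!new)
		return -1;	/* failure: keep the existing mapping */

	memcpy(new, src, size);
	m->data = new;
	m->size = size;
	free(old);		/* success: the old mapping can now go */
	return 0;
}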
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index bc31863c5..a18591f6e 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -42,7 +42,8 @@ KCOV_INSTRUMENT := n
# make up the standalone purgatory.ro
PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
-PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS := -mcmodel=small -ffreestanding -fno-zero-initialized-in-bss -g0
+PURGATORY_CFLAGS += -fpic -fvisibility=hidden
PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
PURGATORY_CFLAGS += -fno-stack-protector
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b029fb81e..e7a44a7f6 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -746,6 +746,15 @@ static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) {
continue;
}
+
+ /*
+ * Do not perform relocations in .notes sections; any
+ * values there are meant for pre-boot consumption (e.g.
+ * startup_xen).
+ */
+ if (sec_applies->shdr.sh_type == SHT_NOTE)
+ continue;
+
sh_symtab = sec_symtab->symtab;
sym_strtab = sec_symtab->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
diff --git a/arch/x86/um/shared/sysdep/archsetjmp.h b/arch/x86/um/shared/sysdep/archsetjmp.h
index 166cedbab..8c81d1a60 100644
--- a/arch/x86/um/shared/sysdep/archsetjmp.h
+++ b/arch/x86/um/shared/sysdep/archsetjmp.h
@@ -1,6 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __X86_UM_SYSDEP_ARCHSETJMP_H
+#define __X86_UM_SYSDEP_ARCHSETJMP_H
+
#ifdef __i386__
#include "archsetjmp_32.h"
#else
#include "archsetjmp_64.h"
#endif
+
+unsigned long get_thread_reg(int reg, jmp_buf *buf);
+
+#endif /* __X86_UM_SYSDEP_ARCHSETJMP_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a01ca255b..b88722dfc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -382,3 +382,36 @@ void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)
memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}
+
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+ unsigned int i;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ /* Must be set strictly before calling xen_free_unpopulated_pages(). */
+ *res = &iomem_resource;
+
+ /*
+ * Initialize with pages from the extra memory regions (see
+ * arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ unsigned int j;
+
+ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+ struct page *pg =
+ pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+ xen_free_unpopulated_pages(1, &pg);
+ }
+
+ /* Zero so region is not also added to the balloon driver. */
+ xen_extra_mem[i].n_pfns = 0;
+ }
+
+ return 0;
+}
+#endif
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 059467086..96af82249 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -323,6 +323,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
+ blkg->iostat.blkg = blkg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
@@ -619,12 +620,45 @@ restart:
spin_unlock_irq(&q->queue_lock);
}
+static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
+{
+ int i;
+
+ for (i = 0; i < BLKG_IOSTAT_NR; i++) {
+ dst->bytes[i] = src->bytes[i];
+ dst->ios[i] = src->ios[i];
+ }
+}
+
+static void __blkg_clear_stat(struct blkg_iostat_set *bis)
+{
+ struct blkg_iostat cur = {0};
+ unsigned long flags;
+
+ flags = u64_stats_update_begin_irqsave(&bis->sync);
+ blkg_iostat_set(&bis->cur, &cur);
+ blkg_iostat_set(&bis->last, &cur);
+ u64_stats_update_end_irqrestore(&bis->sync, flags);
+}
+
+static void blkg_clear_stat(struct blkcg_gq *blkg)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct blkg_iostat_set *s = per_cpu_ptr(blkg->iostat_cpu, cpu);
+
+ __blkg_clear_stat(s);
+ }
+ __blkg_clear_stat(&blkg->iostat);
+}
+
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 val)
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
- int i, cpu;
+ int i;
mutex_lock(&blkcg_pol_mutex);
spin_lock_irq(&blkcg->lock);
@@ -635,18 +669,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* anyway. If you get hit by a race, retry.
*/
hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- for_each_possible_cpu(cpu) {
- struct blkg_iostat_set *bis =
- per_cpu_ptr(blkg->iostat_cpu, cpu);
- memset(bis, 0, sizeof(*bis));
-
- /* Re-initialize the cleared blkg_iostat_set */
- u64_stats_init(&bis->sync);
- bis->blkg = blkg;
- }
- memset(&blkg->iostat, 0, sizeof(blkg->iostat));
- u64_stats_init(&blkg->iostat.sync);
-
+ blkg_clear_stat(blkg);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -949,16 +972,6 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
}
EXPORT_SYMBOL_GPL(blkg_conf_exit);
-static void blkg_iostat_set(struct blkg_iostat *dst, struct blkg_iostat *src)
-{
- int i;
-
- for (i = 0; i < BLKG_IOSTAT_NR; i++) {
- dst->bytes[i] = src->bytes[i];
- dst->ios[i] = src->ios[i];
- }
-}
-
static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
@@ -1024,7 +1037,19 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
struct blkg_iostat cur;
unsigned int seq;
+ /*
+  * Order the load of `next_bisc` from `bisc->lnode.next` in
+  * llist_for_each_entry_safe before the clearing of `bisc->lqueued`,
+  * so that a reordered load cannot pick up a new next pointer added
+  * by blk_cgroup_bio_start() once the entry is re-queued.
+  *
+  * The paired barrier is implied by llist_add() in blk_cgroup_bio_start().
+  */
+ smp_mb();
+
WRITE_ONCE(bisc->lqueued, false);
+ if (bisc == &blkg->iostat)
+ goto propagate_up; /* propagate up to parent only */
/* fetch the current per-cpu values */
do {
@@ -1034,10 +1059,24 @@ static void __blkcg_rstat_flush(struct blkcg *blkcg, int cpu)
blkcg_iostat_update(blkg, &cur, &bisc->last);
+propagate_up:
/* propagate global delta to parent (unless that's root) */
- if (parent && parent->parent)
+ if (parent && parent->parent) {
blkcg_iostat_update(parent, &blkg->iostat.cur,
&blkg->iostat.last);
+ /*
+ * Queue parent->iostat to its blkcg's lockless
+ * list to propagate up to the grandparent if the
+ * iostat hasn't been queued yet.
+ */
+ if (!parent->iostat.lqueued) {
+ struct llist_head *plhead;
+
+ plhead = per_cpu_ptr(parent->blkcg->lhead, cpu);
+ llist_add(&parent->iostat.lnode, plhead);
+ parent->iostat.lqueued = true;
+ }
+ }
}
raw_spin_unlock_irqrestore(&blkg_stat_lock, flags);
out:
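The flush path above relies on each blkg_iostat_set carrying both a running total (cur) and the value last pushed to the parent (last), so only the delta moves up one level per flush; the new hunk additionally queues the parent's iostat so the delta keeps climbing toward the root on later flushes. A single-threaded sketch of the cur/last delta propagation (the struct and function are illustrative and ignore the lockless-list and locking details):

#include <stdio.h>

struct group {
	long cur;		/* running total for this level */
	long last;		/* value already pushed to the parent */
	struct group *parent;
};

/* Push only the delta since the previous flush up one level. */
static void flush_one(struct group *g)
{
	long delta = g->cur - g->last;

	if (g->parent)
		g->parent->cur += delta;
	g->last = g->cur;
}

int main(void)
{
	struct group root = { 0 }, parent = { .parent = &root }, child = { .parent = &parent };

	child.cur = 10;
	flush_one(&child);	/* child's 10 reaches the parent ...            */
	flush_one(&parent);	/* ... and the parent forwards it to the root   */
	child.cur = 15;
	flush_one(&child);	/* only the delta (5) moves the second time     */
	flush_one(&parent);
	printf("root=%ld\n", root.cur);	/* 15 */
	return 0;
}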
diff --git a/block/blk-core.c b/block/blk-core.c
index b795ac177..6fcf8ed5f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -987,10 +987,11 @@ void update_io_ticks(struct block_device *part, unsigned long now, bool end)
unsigned long stamp;
again:
stamp = READ_ONCE(part->bd_stamp);
- if (unlikely(time_after(now, stamp))) {
- if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now)))
- __part_stat_add(part, io_ticks, end ? now - stamp : 1);
- }
+ if (unlikely(time_after(now, stamp)) &&
+ likely(try_cmpxchg(&part->bd_stamp, &stamp, now)) &&
+ (end || part_in_flight(part)))
+ __part_stat_add(part, io_ticks, now - stamp);
+
if (part->bd_partno) {
part = bdev_whole(part);
goto again;
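update_io_ticks() above now charges the whole now - stamp window, but only when a request completes or the partition still has I/O in flight, and only on the CPU that wins the cmpxchg on bd_stamp. A user-space sketch of that claim-the-timestamp pattern (single-threaded here; the kernel's try_cmpxchg() covers the concurrent case):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long stamp;
static unsigned long io_ticks;

/* Whoever wins the compare-and-exchange accounts the whole elapsed window. */
static void update_ticks(unsigned long now, bool end, bool in_flight)
{
	unsigned long old = atomic_load(&stamp);

	if (now > old &&
	    atomic_compare_exchange_strong(&stamp, &old, now) &&
	    (end || in_flight))
		io_ticks += now - old;	/* charge the full now - old window */
}

int main(void)
{
	update_ticks(100, false, true);		/* claims 100 while I/O is in flight */
	update_ticks(130, true, false);		/* a completion claims 30 more */
	printf("io_ticks=%lu\n", io_ticks);	/* 130 */
	return 0;
}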
diff --git a/block/blk-flush.c b/block/blk-flush.c
index b0f314f4b..9944414bf 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -183,7 +183,7 @@ static void blk_flush_complete_seq(struct request *rq,
/* queue for flush */
if (list_empty(pending))
fq->flush_pending_since = jiffies;
- list_move_tail(&rq->queuelist, pending);
+ list_add_tail(&rq->queuelist, pending);
break;
case REQ_FSEQ_DATA:
@@ -261,6 +261,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+ list_del_init(&rq->queuelist);
blk_flush_complete_seq(rq, fq, seq, error);
}
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 4e3483a16..ae61a9c2f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -779,6 +779,8 @@ static void blk_account_io_merge_request(struct request *req)
if (blk_do_io_stat(req)) {
part_stat_lock();
part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
+ part_stat_local_dec(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 32afb87ef..5ac2087b0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -997,6 +997,8 @@ static inline void blk_account_io_done(struct request *req, u64 now)
update_io_ticks(req->part, jiffies, true);
part_stat_inc(req->part, ios[sgrp]);
part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+ part_stat_local_dec(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
@@ -1019,6 +1021,8 @@ static inline void blk_account_io_start(struct request *req)
part_stat_lock();
update_io_ticks(req->part, jiffies, false);
+ part_stat_local_inc(req->part,
+ in_flight[op_is_write(req_op(req))]);
part_stat_unlock();
}
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9d6033e01..15319b217 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -751,6 +751,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_user_sectors = min_not_zero(t->max_user_sectors,
+ b->max_user_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
diff --git a/block/blk.h b/block/blk.h
index d9f584984..da5dbc6b1 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -357,6 +357,7 @@ static inline bool blk_do_io_stat(struct request *rq)
}
void update_io_ticks(struct block_device *part, unsigned long now, bool end);
+unsigned int part_in_flight(struct block_device *part);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
diff --git a/block/fops.c b/block/fops.c
index 679d9b752..df2c68d3f 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -390,7 +390,7 @@ static int blkdev_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
iomap->bdev = bdev;
iomap->offset = ALIGN_DOWN(offset, bdev_logical_block_size(bdev));
- if (iomap->offset >= isize)
+ if (offset >= isize)
return -EIO;
iomap->type = IOMAP_MAPPED;
iomap->addr = iomap->offset;
diff --git a/block/genhd.c b/block/genhd.c
index 52a4521df..345d80ab9 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -118,7 +118,7 @@ static void part_stat_read_all(struct block_device *part,
}
}
-static unsigned int part_in_flight(struct block_device *part)
+unsigned int part_in_flight(struct block_device *part)
{
unsigned int inflight = 0;
int cpu;
diff --git a/block/ioctl.c b/block/ioctl.c
index f505f9c34..2639ce9df 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -33,7 +33,7 @@ static int blkpg_do_ioctl(struct block_device *bdev,
if (op == BLKPG_DEL_PARTITION)
return bdev_del_partition(disk, p.pno);
- if (p.start < 0 || p.length <= 0 || p.start + p.length < 0)
+ if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
return -EINVAL;
/* Check that the partition is aligned to the block size */
if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
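The blkpg check above avoids computing p.start + p.length, which could overflow a signed 64-bit value for hostile input; instead it rejects the request when the length would not fit above the start. A quick illustration of the overflow-safe form:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Valid iff start >= 0, length > 0 and start + length cannot overflow. */
static bool range_valid(long long start, long long length)
{
	if (start < 0 || length <= 0)
		return false;
	return LLONG_MAX - length >= start;
}

int main(void)
{
	printf("%d\n", range_valid(0, 512));			/* 1 */
	printf("%d\n", range_valid(LLONG_MAX - 100, 512));	/* 0: would overflow */
	return 0;
}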
diff --git a/block/partitions/cmdline.c b/block/partitions/cmdline.c
index c03bc105e..152c85df9 100644
--- a/block/partitions/cmdline.c
+++ b/block/partitions/cmdline.c
@@ -70,8 +70,8 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
}
if (*partdef == '(') {
- int length;
- char *next = strchr(++partdef, ')');
+ partdef++;
+ char *next = strsep(&partdef, ")");
if (!next) {
pr_warn("cmdline partition format is invalid.");
@@ -79,11 +79,7 @@ static int parse_subpart(struct cmdline_subpart **subpart, char *partdef)
goto fail;
}
- length = min_t(int, next - partdef,
- sizeof(new_subpart->name) - 1);
- strscpy(new_subpart->name, partdef, length);
-
- partdef = ++next;
+ strscpy(new_subpart->name, next, sizeof(new_subpart->name));
} else
new_subpart->name[0] = '\0';
@@ -117,14 +113,12 @@ static void free_subpart(struct cmdline_parts *parts)
}
}
-static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
+static int parse_parts(struct cmdline_parts **parts, char *bdevdef)
{
int ret = -EINVAL;
char *next;
- int length;
struct cmdline_subpart **next_subpart;
struct cmdline_parts *newparts;
- char buf[BDEVNAME_SIZE + 32 + 4];
*parts = NULL;
@@ -132,28 +126,19 @@ static int parse_parts(struct cmdline_parts **parts, const char *bdevdef)
if (!newparts)
return -ENOMEM;
- next = strchr(bdevdef, ':');
+ next = strsep(&bdevdef, ":");
if (!next) {
pr_warn("cmdline partition has no block device.");
goto fail;
}
- length = min_t(int, next - bdevdef, sizeof(newparts->name) - 1);
- strscpy(newparts->name, bdevdef, length);
+ strscpy(newparts->name, next, sizeof(newparts->name));
newparts->nr_subparts = 0;
next_subpart = &newparts->subpart;
- while (next && *(++next)) {
- bdevdef = next;
- next = strchr(bdevdef, ',');
-
- length = (!next) ? (sizeof(buf) - 1) :
- min_t(int, next - bdevdef, sizeof(buf) - 1);
-
- strscpy(buf, bdevdef, length);
-
- ret = parse_subpart(next_subpart, buf);
+ while ((next = strsep(&bdevdef, ","))) {
+ ret = parse_subpart(next_subpart, next);
if (ret)
goto fail;
@@ -199,24 +184,17 @@ static int cmdline_parts_parse(struct cmdline_parts **parts,
*parts = NULL;
- next = pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
+ pbuf = buf = kstrdup(cmdline, GFP_KERNEL);
if (!buf)
return -ENOMEM;
next_parts = parts;
- while (next && *pbuf) {
- next = strchr(pbuf, ';');
- if (next)
- *next = '\0';
-
- ret = parse_parts(next_parts, pbuf);
+ while ((next = strsep(&pbuf, ";"))) {
+ ret = parse_parts(next_parts, next);
if (ret)
goto fail;
- if (next)
- pbuf = ++next;
-
next_parts = &(*next_parts)->next_parts;
}
@@ -250,7 +228,6 @@ static struct cmdline_parts *bdev_parts;
static int add_part(int slot, struct cmdline_subpart *subpart,
struct parsed_partitions *state)
{
- int label_min;
struct partition_meta_info *info;
char tmp[sizeof(info->volname) + 4];
@@ -262,9 +239,7 @@ static int add_part(int slot, struct cmdline_subpart *subpart,
info = &state->parts[slot].info;
- label_min = min_t(int, sizeof(info->volname) - 1,
- sizeof(subpart->name));
- strscpy(info->volname, subpart->name, label_min);
+ strscpy(info->volname, subpart->name, sizeof(info->volname));
snprintf(tmp, sizeof(tmp), "(%s)", info->volname);
strlcat(state->pp_buf, tmp, PAGE_SIZE);
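The cmdline partition parser above is rewritten around strsep(), which splits a writable buffer in place and advances the cursor past each separator, removing the manual strchr() arithmetic and the intermediate copy buffer. A user-space illustration of the same tokenizing style (the command-line format here is only an example):

#define _DEFAULT_SOURCE		/* strsep()/strdup() prototypes on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = strdup("sda:p1,p2;sdb:p3");
	char *cursor = buf, *bdev;

	/* Split on ';' per device, then ':' for the name, then ',' per partition. */
	while ((bdev = strsep(&cursor, ";"))) {
		char *name = strsep(&bdev, ":");
		char *part;

		printf("device %s\n", name);
		while ((part = strsep(&bdev, ",")))
			printf("  partition %s\n", part);
	}
	free(buf);
	return 0;
}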
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 14fe0fef8..598fd3e7f 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
&key_type_user, key_name, true);
if (IS_ERR(kref))
- ret = PTR_ERR(kref);
+ return PTR_ERR(kref);
key = key_ref_to_ptr(kref);
down_read(&key->sem);
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index 59ec726b7..684767ab2 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
select MPILIB
select CRYPTO_HASH_INFO
select CRYPTO_AKCIPHER
+ select CRYPTO_SIG
select CRYPTO_HASH
help
This option provides support for asymmetric public key type handling.
@@ -85,5 +86,7 @@ config FIPS_SIGNATURE_SELFTEST
depends on ASYMMETRIC_KEY_TYPE
depends on PKCS7_MESSAGE_PARSER=X509_CERTIFICATE_PARSER
depends on X509_CERTIFICATE_PARSER
+ depends on CRYPTO_RSA
+ depends on CRYPTO_SHA256
endif # ASYMMETRIC_KEY_TYPE
diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
index fbd76498a..3f9ec273a 100644
--- a/crypto/ecdsa.c
+++ b/crypto/ecdsa.c
@@ -373,4 +373,7 @@ module_exit(ecdsa_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stefan Berger <stefanb@linux.ibm.com>");
MODULE_DESCRIPTION("ECDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p192");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdsa-nist-p384");
MODULE_ALIAS_CRYPTO("ecdsa-generic");
diff --git a/crypto/ecrdsa.c b/crypto/ecrdsa.c
index f3c6b5e15..3811f3805 100644
--- a/crypto/ecrdsa.c
+++ b/crypto/ecrdsa.c
@@ -294,4 +294,5 @@ module_exit(ecrdsa_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
MODULE_DESCRIPTION("EC-RDSA generic algorithm");
+MODULE_ALIAS_CRYPTO("ecrdsa");
MODULE_ALIAS_CRYPTO("ecrdsa-generic");
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 736c2eb8c..f677ad217 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -574,7 +574,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < ARRAY_SIZE(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 04e273167..8e0179222 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -325,6 +325,7 @@ static const struct lpss_device_desc bsw_i2c_dev_desc = {
static const struct property_entry bsw_spi_properties[] = {
PROPERTY_ENTRY_U32("intel,spi-pxa2xx-type", LPSS_BSW_SSP),
+ PROPERTY_ENTRY_U32("num-cs", 2),
{ }
};
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 30f3fc13c..8d18af396 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
+CFLAGS_tbfind.o += $(call cc-disable-warning, stringop-truncation)
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index ddd072cbc..2133085de 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -191,6 +191,10 @@ void
acpi_ev_execute_reg_methods(struct acpi_namespace_node *node,
acpi_adr_space_type space_id, u32 function);
+void
+acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *node,
+ acpi_adr_space_type space_id);
+
acpi_status
acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function);
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 18fdf2bc2..dc6004daf 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -20,10 +20,6 @@ extern u8 acpi_gbl_default_address_spaces[];
/* Local prototypes */
-static void
-acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
- acpi_adr_space_type space_id);
-
static acpi_status
acpi_ev_reg_run(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
@@ -818,7 +814,7 @@ acpi_ev_reg_run(acpi_handle obj_handle,
*
******************************************************************************/
-static void
+void
acpi_ev_execute_orphan_reg_method(struct acpi_namespace_node *device_node,
acpi_adr_space_type space_id)
{
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 3197e6303..624361a5f 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -306,3 +306,57 @@ acpi_execute_reg_methods(acpi_handle device, acpi_adr_space_type space_id)
}
ACPI_EXPORT_SYMBOL(acpi_execute_reg_methods)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_execute_orphan_reg_method
+ *
+ * PARAMETERS: device - Handle for the device
+ * space_id - The address space ID
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Execute an "orphan" _REG method that appears under an ACPI
+ * device. This is a _REG method that has no corresponding region
+ * within the device's scope.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_execute_orphan_reg_method(acpi_handle device, acpi_adr_space_type space_id)
+{
+ struct acpi_namespace_node *node;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(acpi_execute_orphan_reg_method);
+
+ /* Parameter validation */
+
+ if (!device) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /* Convert and validate the device handle */
+
+ node = acpi_ns_validate_handle(device);
+ if (node) {
+
+ /*
+ * If an "orphan" _REG method is present in the device's scope
+ * for the given address space ID, run it.
+ */
+
+ acpi_ev_execute_orphan_reg_method(node, space_id);
+ } else {
+ status = AE_BAD_PARAMETER;
+ }
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_execute_orphan_reg_method)
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 8907b8bf4..c49b9f8de 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -44,7 +44,6 @@ acpi_ex_system_memory_space_handler(u32 function,
struct acpi_mem_mapping *mm = mem_info->cur_mm;
u32 length;
acpi_size map_length;
- acpi_size page_boundary_map_length;
#ifdef ACPI_MISALIGNMENT_NOT_SUPPORTED
u32 remainder;
#endif
@@ -138,26 +137,8 @@ acpi_ex_system_memory_space_handler(u32 function,
map_length = (acpi_size)
((mem_info->address + mem_info->length) - address);
- /*
- * If mapping the entire remaining portion of the region will cross
- * a page boundary, just map up to the page boundary, do not cross.
- * On some systems, crossing a page boundary while mapping regions
- * can cause warnings if the pages have different attributes
- * due to resource management.
- *
- * This has the added benefit of constraining a single mapping to
- * one page, which is similar to the original code that used a 4k
- * maximum window.
- */
- page_boundary_map_length = (acpi_size)
- (ACPI_ROUND_UP(address, ACPI_DEFAULT_PAGE_SIZE) - address);
- if (page_boundary_map_length == 0) {
- page_boundary_map_length = ACPI_DEFAULT_PAGE_SIZE;
- }
-
- if (map_length > page_boundary_map_length) {
- map_length = page_boundary_map_length;
- }
+ if (map_length > ACPI_DEFAULT_PAGE_SIZE)
+ map_length = ACPI_DEFAULT_PAGE_SIZE;
/* Create a new mapping starting at the address given */
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 01faca3a2..bb9f8475c 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -903,7 +903,7 @@ static void __exit einj_exit(void)
if (einj_initialized)
platform_driver_unregister(&einj_driver);
- platform_device_del(einj_dev);
+ platform_device_unregister(einj_dev);
}
module_init(einj_init);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index d9fa73041..a87b10eef 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -316,9 +316,14 @@ static void acpi_bus_osc_negotiate_platform_control(void)
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_THERMAL))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT;
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_PRMT))
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT;
if (IS_ENABLED(CONFIG_ACPI_FFH))
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 02255795b..1cec29ab6 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1482,13 +1482,14 @@ static bool install_gpio_irq_event_handler(struct acpi_ec *ec)
static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
bool call_reg)
{
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
acpi_status status;
acpi_ec_start(ec, false);
if (!test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
acpi_ec_enter_noirq(ec);
- status = acpi_install_address_space_handler_no_reg(ec->handle,
+ status = acpi_install_address_space_handler_no_reg(scope_handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
NULL, ec);
@@ -1497,11 +1498,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
return -ENODEV;
}
set_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
- ec->address_space_handler_holder = ec->handle;
}
if (call_reg && !test_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags)) {
- acpi_execute_reg_methods(ec->handle, ACPI_ADR_SPACE_EC);
+ acpi_execute_reg_methods(scope_handle, ACPI_ADR_SPACE_EC);
+ if (scope_handle != ec->handle)
+ acpi_execute_orphan_reg_method(ec->handle, ACPI_ADR_SPACE_EC);
+
set_bit(EC_FLAGS_EC_REG_CALLED, &ec->flags);
}
@@ -1553,10 +1556,13 @@ static int ec_install_handlers(struct acpi_ec *ec, struct acpi_device *device,
static void ec_remove_handlers(struct acpi_ec *ec)
{
+ acpi_handle scope_handle = ec == first_ec ? ACPI_ROOT_OBJECT : ec->handle;
+
if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
if (ACPI_FAILURE(acpi_remove_address_space_handler(
- ec->address_space_handler_holder,
- ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
+ scope_handle,
+ ACPI_ADR_SPACE_EC,
+ &acpi_ec_space_handler)))
pr_err("failed to remove space handler\n");
clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
}
@@ -1595,14 +1601,18 @@ static int acpi_ec_setup(struct acpi_ec *ec, struct acpi_device *device, bool ca
{
int ret;
- ret = ec_install_handlers(ec, device, call_reg);
- if (ret)
- return ret;
-
/* First EC capable of handling transactions */
if (!first_ec)
first_ec = ec;
+ ret = ec_install_handlers(ec, device, call_reg);
+ if (ret) {
+ if (ec == first_ec)
+ first_ec = NULL;
+
+ return ret;
+ }
+
pr_info("EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", ec->command_addr,
ec->data_addr);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ca72a0dc5..a96d1bc66 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ enum acpi_ec_event_state {
struct acpi_ec {
acpi_handle handle;
- acpi_handle address_space_handler_holder;
int gpe;
int irq;
unsigned long command_addr;
@@ -302,6 +301,10 @@ void acpi_mipi_check_crs_csi2(acpi_handle handle);
void acpi_mipi_scan_crs_csi2(void);
void acpi_mipi_init_crs_csi2_swnodes(void);
void acpi_mipi_crs_csi2_cleanup(void);
+#ifdef CONFIG_X86
bool acpi_graph_ignore_port(acpi_handle handle);
+#else
+static inline bool acpi_graph_ignore_port(acpi_handle handle) { return false; }
+#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/mipi-disco-img.c b/drivers/acpi/mipi-disco-img.c
index d05413a06..0ab13751f 100644
--- a/drivers/acpi/mipi-disco-img.c
+++ b/drivers/acpi/mipi-disco-img.c
@@ -725,14 +725,20 @@ void acpi_mipi_crs_csi2_cleanup(void)
acpi_mipi_del_crs_csi2(csi2);
}
-static const struct dmi_system_id dmi_ignore_port_nodes[] = {
- {
- .matches = {
- DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS 9315"),
- },
- },
- { }
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+/* CPU matches for Dell generations with broken ACPI MIPI DISCO info */
+static const struct x86_cpu_id dell_broken_mipi_disco_cpu_gens[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
+ {}
};
static const char *strnext(const char *s1, const char *s2)
@@ -761,7 +767,10 @@ bool acpi_graph_ignore_port(acpi_handle handle)
static bool dmi_tested, ignore_port;
if (!dmi_tested) {
- ignore_port = dmi_first_match(dmi_ignore_port_nodes);
+ if (dmi_name_in_vendors("Dell Inc.") &&
+ x86_match_cpu(dell_broken_mipi_disco_cpu_gens))
+ ignore_port = true;
+
dmi_tested = true;
}
@@ -794,3 +803,4 @@ out_free:
kfree(orig_path);
return false;
}
+#endif
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index e45e64993..3b09fd39e 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -208,6 +208,11 @@ int __init srat_disabled(void)
return acpi_numa < 0;
}
+__weak int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+
#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH)
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 59423fe9d..b5bf8b81a 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -518,6 +518,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506MV */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506MV"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -534,6 +541,12 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
*/
static const struct dmi_system_id irq1_edge_low_force_override[] = {
{
+ /* XMG APEX 17 (M23) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxBGxx"),
+ },
+ },
+ {
/* TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
@@ -630,6 +643,18 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
DMI_MATCH(DMI_BOARD_NAME, "X565"),
},
},
+ {
+ /* TongFang GXxHRXx/TUXEDO InfinityBook Pro Gen9 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
+ {
+ /* TongFang GMxHGxx/TUXEDO Stellaris Slim Gen1 AMD */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ },
{ }
};
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d67881b50..a0cfc857f 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -168,11 +168,17 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_temp(struct acpi_thermal *tz, int temp_deci_k)
{
+ int temp;
+
if (temp_deci_k == THERMAL_TEMP_INVALID)
return THERMAL_TEMP_INVALID;
- return deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
+ temp = deci_kelvin_to_millicelsius_with_offset(temp_deci_k,
tz->kelvin_offset);
+ if (temp <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ return temp;
}
static bool acpi_thermal_trip_valid(struct acpi_thermal_trip *acpi_trip)
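acpi_thermal_temp() above now treats any reading that converts to 0 millicelsius or below as invalid, guarding against firmware that reports bogus low values. A sketch of the conversion plus clamp (the sentinel value and the 2732 deci-kelvin offset are assumed here for illustration; the kernel derives its offset at probe time):

#include <stdio.h>

#define THERMAL_TEMP_INVALID	(-274000)	/* illustrative sentinel */

/* Deci-kelvin minus offset, times 100, clamped like the hunk above. */
static int thermal_temp(long deci_kelvin, long offset_deci_kelvin)
{
	long mc = (deci_kelvin - offset_deci_kelvin) * 100;

	return mc <= 0 ? THERMAL_TEMP_INVALID : (int)mc;
}

int main(void)
{
	printf("%d\n", thermal_temp(3282, 2732));	/* 55000 mC, i.e. 55 C */
	printf("%d\n", thermal_temp(0, 2732));		/* bogus reading -> invalid */
	return 0;
}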
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 9fdcc620c..2cc3821b2 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -499,6 +499,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
},
{
.callback = video_detect_force_native,
+ /* Lenovo Slim 7 16ARH7 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82UX"),
+ },
+ },
+ {
+ .callback = video_detect_force_native,
/* Lenovo ThinkPad X131e (3371 AMD version) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 90c3d2eab..448e0d14f 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -197,16 +197,16 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
}
/*
- * AMD systems from Renoir and Lucienne *require* that the NVME controller
+ * AMD systems from Renoir onwards *require* that the NVME controller
* is put into D3 over a Modern Standby / suspend-to-idle cycle.
*
* This is "typically" accomplished using the `StorageD3Enable`
* property in the _DSD that is checked via the `acpi_storage_d3` function
- * but this property was introduced after many of these systems launched
- * and most OEM systems don't have it in their BIOS.
+ * but some OEM systems still don't have it in their BIOS.
*
* The Microsoft documentation for StorageD3Enable mentioned that Windows has
- * a hardcoded allowlist for D3 support, which was used for these platforms.
+ * a hardcoded allowlist for D3 support as well as a registry key to override
+ * the BIOS, which has been used for these cases.
*
* This allows quirking on Linux in a similar fashion.
*
@@ -219,19 +219,15 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* https://bugzilla.kernel.org/show_bug.cgi?id=216773
* https://bugzilla.kernel.org/show_bug.cgi?id=217003
* 2) On at least one HP system StorageD3Enable is missing on the second NVME
- disk in the system.
+ * disk in the system.
+ * 3) On at least one HP Rembrandt system StorageD3Enable is missing on the only
+ * NVME device.
*/
-static const struct x86_cpu_id storage_d3_cpu_ids[] = {
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 24, NULL), /* Picasso */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
- {}
-};
-
bool force_storage_d3(void)
{
- return x86_match_cpu(storage_d3_cpu_ids);
+ if (!cpu_feature_enabled(X86_FEATURE_ZEN))
+ return false;
+ return acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0;
}
/*
@@ -260,9 +256,10 @@ bool force_storage_d3(void)
#define ACPI_QUIRK_SKIP_I2C_CLIENTS BIT(0)
#define ACPI_QUIRK_UART1_SKIP BIT(1)
#define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(2)
-#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(3)
-#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(4)
-#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(5)
+#define ACPI_QUIRK_PNP_UART1_SKIP BIT(3)
+#define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(4)
+#define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(5)
+#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(6)
static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
/*
@@ -342,6 +339,7 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS |
+ ACPI_QUIRK_PNP_UART1_SKIP |
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY),
},
{
@@ -440,14 +438,18 @@ static int acpi_dmi_skip_serdev_enumeration(struct device *controller_parent, bo
if (ret)
return 0;
- /* to not match on PNP enumerated debug UARTs */
- if (!dev_is_platform(controller_parent))
- return 0;
-
dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids);
if (dmi_id)
quirks = (unsigned long)dmi_id->driver_data;
+ if (!dev_is_platform(controller_parent)) {
+ /* PNP enumerated UARTs */
+ if ((quirks & ACPI_QUIRK_PNP_UART1_SKIP) && uid == 1)
+ *skip = true;
+
+ return 0;
+ }
+
if ((quirks & ACPI_QUIRK_UART1_SKIP) && uid == 1)
*skip = true;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6548f10e6..5eb38fbbb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -429,7 +429,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_pcs_quirk }, /* Comet Lake PCH RAID */
/* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
{ PCI_VDEVICE(INTEL, 0x4b63), board_ahci_pcs_quirk }, /* Elkhart Lake AHCI */
- { PCI_VDEVICE(INTEL, 0x7ae2), board_ahci_pcs_quirk }, /* Alder Lake-P AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1736,6 +1735,14 @@ static void ahci_update_initial_lpm_policy(struct ata_port *ap)
if (ap->pflags & ATA_PFLAG_EXTERNAL)
return;
+ /* If no LPM states are supported by the HBA, do not bother with LPM */
+ if ((ap->host->flags & ATA_HOST_NO_PART) &&
+ (ap->host->flags & ATA_HOST_NO_SSC) &&
+ (ap->host->flags & ATA_HOST_NO_DEVSLP)) {
+ ata_port_dbg(ap, "no LPM states supported, not enabling LPM\n");
+ return;
+ }
+
/* user modified policy via module param */
if (mobile_lpm_policy != -1) {
policy = mobile_lpm_policy;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c449d60d9..28caed151 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4180,8 +4180,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER BD-RW BDR-207M", NULL, ATA_HORKAGE_NOLPM },
{ "PIONEER BD-RW BDR-205", NULL, ATA_HORKAGE_NOLPM },
- /* Crucial BX100 SSD 500GB has broken LPM support */
+ /* Crucial devices with broken LPM support */
{ "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
+ { "CT240BX500SSD1", NULL, ATA_HORKAGE_NOLPM },
/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
{ "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4199,6 +4200,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM },
+ /* Apacer models with LPM issues */
+ { "Apacer AS340*", NULL, ATA_HORKAGE_NOLPM },
+
+ /* AMD Radeon devices with broken LPM support */
+ { "R3SL240G", NULL, ATA_HORKAGE_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e95497689..9c3daa7d1 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1828,11 +1828,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- /* set scsi removable (RMB) bit per ata bit, or if the
- * AHCI port says it's external (Hotplug-capable, eSATA).
+ /*
+ * Set the SCSI Removable Media Bit (RMB) if the ATA removable media
+ * device bit (obsolete since ATA-8 ACS) is set.
*/
- if (ata_id_removable(args->id) ||
- (args->dev->link->ap->pflags & ATA_PFLAG_EXTERNAL))
+ if (ata_id_removable(args->id))
hdr[1] |= (1 << 7);
if (args->dev->class == ATA_DEV_ZAC) {
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 448a511cb..e7ac142c2 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -173,8 +173,6 @@ static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static struct legacy_probe probe_list[NR_HOST];
static struct legacy_data legacy_data[NR_HOST];
static struct ata_host *legacy_host[NR_HOST];
-static int nr_legacy_host;
-
/**
* legacy_probe_add - Add interface to probe list
@@ -1276,9 +1274,11 @@ static __exit void legacy_exit(void)
{
int i;
- for (i = 0; i < nr_legacy_host; i++) {
+ for (i = 0; i < NR_HOST; i++) {
struct legacy_data *ld = &legacy_data[i];
- ata_host_detach(legacy_host[i]);
+
+ if (legacy_host[i])
+ ata_host_detach(legacy_host[i]);
platform_device_unregister(ld->platform_dev);
}
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 0738ccad0..db4f910e8 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -192,11 +192,14 @@ extern struct kset *devices_kset;
void devices_kset_move_last(struct device *dev);
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
-void module_add_driver(struct module *mod, struct device_driver *drv);
+int module_add_driver(struct module *mod, struct device_driver *drv);
void module_remove_driver(struct device_driver *drv);
#else
-static inline void module_add_driver(struct module *mod,
- struct device_driver *drv) { }
+static inline int module_add_driver(struct module *mod,
+ struct device_driver *drv)
+{
+ return 0;
+}
static inline void module_remove_driver(struct device_driver *drv) { }
#endif
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index daee55c9b..ffea0728b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -674,7 +674,12 @@ int bus_add_driver(struct device_driver *drv)
if (error)
goto out_del_list;
}
- module_add_driver(drv->owner, drv);
+ error = module_add_driver(drv->owner, drv);
+ if (error) {
+ printk(KERN_ERR "%s: failed to create module links for %s\n",
+ __func__, drv->name);
+ goto out_detach;
+ }
error = driver_create_file(drv, &driver_attr_uevent);
if (error) {
@@ -699,6 +704,8 @@ int bus_add_driver(struct device_driver *drv)
return 0;
+out_detach:
+ driver_detach(drv);
out_del_list:
klist_del(&priv->knode_bus);
out_unregister:
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5f4e03336..8e3bd230b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2738,8 +2738,11 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
if (!env)
return -ENOMEM;
+ /* Synchronize with really_probe() */
+ device_lock(dev);
/* let the kset specific function add its keys */
retval = kset->uevent_ops->uevent(&dev->kobj, env);
+ device_unlock(dev);
if (retval)
goto out;
diff --git a/drivers/base/module.c b/drivers/base/module.c
index 46ad4d636..a1b55da07 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -30,14 +30,14 @@ static void module_create_drivers_dir(struct module_kobject *mk)
mutex_unlock(&drivers_dir_mutex);
}
-void module_add_driver(struct module *mod, struct device_driver *drv)
+int module_add_driver(struct module *mod, struct device_driver *drv)
{
char *driver_name;
- int no_warn;
struct module_kobject *mk = NULL;
+ int ret;
if (!drv)
- return;
+ return 0;
if (mod)
mk = &mod->mkobj;
@@ -56,17 +56,37 @@ void module_add_driver(struct module *mod, struct device_driver *drv)
}
if (!mk)
- return;
+ return 0;
+
+ ret = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
+ if (ret)
+ return ret;
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->p->kobj, &mk->kobj, "module");
driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->p->kobj,
- driver_name);
- kfree(driver_name);
+ if (!driver_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ module_create_drivers_dir(mk);
+ if (!mk->drivers_dir) {
+ ret = -EINVAL;
+ goto out;
}
+
+ ret = sysfs_create_link(mk->drivers_dir, &drv->p->kobj, driver_name);
+ if (ret)
+ goto out;
+
+ kfree(driver_name);
+
+ return 0;
+out:
+ sysfs_remove_link(&drv->p->kobj, "module");
+ sysfs_remove_link(mk->drivers_dir, driver_name);
+ kfree(driver_name);
+
+ return ret;
}
void module_remove_driver(struct device_driver *drv)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9d4ec9273..1ddd3e549 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -588,7 +588,10 @@ static inline int was_interrupted(int result)
return result == -ERESTARTSYS || result == -EINTR;
}
-/* always call with the tx_lock held */
+/*
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
+ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
@@ -605,6 +608,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
u32 nbd_cmd_flags = 0;
int sent = nsock->sent, skip = 0;
+ lockdep_assert_held(&cmd->lock);
+ lockdep_assert_held(&nsock->tx_lock);
+
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
type = req_to_nbd_cmd_type(req);
@@ -669,7 +675,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->sent = sent;
}
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ return (__force int)BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Send control failed (result %d)\n", result);
@@ -710,7 +716,7 @@ send_pages:
nsock->pending = req;
nsock->sent = sent;
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
- return BLK_STS_RESOURCE;
+ return (__force int)BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
"Send data failed (result %d)\n",
@@ -1007,7 +1013,7 @@ static int wait_for_reconnect(struct nbd_device *nbd)
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
}
-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
struct nbd_device *nbd = cmd->nbd;
@@ -1015,18 +1021,20 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
struct nbd_sock *nsock;
int ret;
+ lockdep_assert_held(&cmd->lock);
+
config = nbd_get_config_unlocked(nbd);
if (!config) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- return -EINVAL;
+ return BLK_STS_IOERR;
}
if (index >= config->num_connections) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- return -EINVAL;
+ return BLK_STS_IOERR;
}
cmd->status = BLK_STS_OK;
again:
@@ -1049,7 +1057,7 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- return -EIO;
+ return BLK_STS_IOERR;
}
goto again;
}
@@ -1062,7 +1070,7 @@ again:
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
nbd_requeue_cmd(cmd);
- ret = 0;
+ ret = BLK_STS_OK;
goto out;
}
/*
@@ -1081,19 +1089,19 @@ again:
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
nbd_requeue_cmd(cmd);
- ret = 0;
+ ret = BLK_STS_OK;
}
out:
mutex_unlock(&nsock->tx_lock);
nbd_config_put(nbd);
- return ret;
+ return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
}
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
- int ret;
+ blk_status_t ret;
/*
* Since we look at the bio's to send the request over the network we
@@ -1113,10 +1121,6 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
- if (ret < 0)
- ret = BLK_STS_IOERR;
- else if (!ret)
- ret = BLK_STS_OK;
mutex_unlock(&cmd->lock);
return ret;
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index ed33cf719..620679a0a 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -404,13 +404,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
static int nullb_apply_submit_queues(struct nullb_device *dev,
unsigned int submit_queues)
{
- return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+ int ret;
+
+ mutex_lock(&lock);
+ ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+ mutex_unlock(&lock);
+
+ return ret;
}
static int nullb_apply_poll_queues(struct nullb_device *dev,
unsigned int poll_queues)
{
- return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+ int ret;
+
+ mutex_lock(&lock);
+ ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
+ mutex_unlock(&lock);
+
+ return ret;
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
@@ -457,28 +469,32 @@ static ssize_t nullb_device_power_store(struct config_item *item,
if (ret < 0)
return ret;
+ ret = count;
+ mutex_lock(&lock);
if (!dev->power && newp) {
if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
- return count;
+ goto out;
+
ret = null_add_dev(dev);
if (ret) {
clear_bit(NULLB_DEV_FL_UP, &dev->flags);
- return ret;
+ goto out;
}
set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
dev->power = newp;
+ ret = count;
} else if (dev->power && !newp) {
if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
- mutex_lock(&lock);
dev->power = newp;
null_del_dev(dev->nullb);
- mutex_unlock(&lock);
}
clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
}
- return count;
+out:
+ mutex_unlock(&lock);
+ return ret;
}
CONFIGFS_ATTR(nullb_device_, power);
@@ -1918,15 +1934,12 @@ static int null_add_dev(struct nullb_device *dev)
nullb->q->queuedata = nullb;
blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
- mutex_lock(&lock);
rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
- if (rv < 0) {
- mutex_unlock(&lock);
+ if (rv < 0)
goto out_cleanup_disk;
- }
+
nullb->index = rv;
dev->index = rv;
- mutex_unlock(&lock);
if (config_item_name(&dev->group.cg_item)) {
/* Use configfs dir name as the device name */
@@ -1955,9 +1968,7 @@ static int null_add_dev(struct nullb_device *dev)
if (rv)
goto out_ida_free;
- mutex_lock(&lock);
list_add_tail(&nullb->list, &nullb_list);
- mutex_unlock(&lock);
pr_info("disk %s created\n", nullb->disk_name);
@@ -2006,7 +2017,9 @@ static int null_create_dev(void)
if (!dev)
return -ENOMEM;
+ mutex_lock(&lock);
ret = null_add_dev(dev);
+ mutex_unlock(&lock);
if (ret) {
null_free_dev(dev);
return ret;
@@ -2113,10 +2126,13 @@ static void __exit null_exit(void)
if (tag_set.ops)
blk_mq_free_tag_set(&tag_set);
+
+ mutex_destroy(&lock);
}
module_init(null_init);
module_exit(null_exit);
MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
+MODULE_DESCRIPTION("multi queue aware block test driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 1689e2584..27928decc 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -113,7 +113,7 @@ int null_init_zoned_dev(struct nullb_device *dev,
if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
dev->zone_max_open = dev->zone_max_active;
pr_info("changed the maximum number of open zones to %u\n",
- dev->nr_zones);
+ dev->zone_max_open);
} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
dev->zone_max_open = 0;
pr_info("zone_max_open limit disabled, limit >= zone count\n");
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 88262d3a9..ce97b336f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -3,7 +3,6 @@
* Copyright (c) 2008-2009 Atheros Communications Inc.
*/
-
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -128,7 +127,6 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
* for AR3012
*/
static const struct usb_device_id ath3k_blist_tbl[] = {
-
/* Atheros AR3012 with sflash firmware*/
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
@@ -202,7 +200,7 @@ static inline void ath3k_log_failed_loading(int err, int len, int size,
#define TIMEGAP_USEC_MAX 100
static int ath3k_load_firmware(struct usb_device *udev,
- const struct firmware *firmware)
+ const struct firmware *firmware)
{
u8 *send_buf;
int len = 0;
@@ -237,9 +235,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
- &len, 3000);
+ &len, 3000);
- if (err || (len != size)) {
+ if (err || len != size) {
ath3k_log_failed_loading(err, len, size, count);
goto error;
}
@@ -262,7 +260,7 @@ static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
}
static int ath3k_get_version(struct usb_device *udev,
- struct ath3k_version *version)
+ struct ath3k_version *version)
{
return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION,
USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
@@ -271,7 +269,7 @@ static int ath3k_get_version(struct usb_device *udev,
}
static int ath3k_load_fwfile(struct usb_device *udev,
- const struct firmware *firmware)
+ const struct firmware *firmware)
{
u8 *send_buf;
int len = 0;
@@ -310,8 +308,8 @@ static int ath3k_load_fwfile(struct usb_device *udev,
memcpy(send_buf, firmware->data + sent, size);
err = usb_bulk_msg(udev, pipe, send_buf, size,
- &len, 3000);
- if (err || (len != size)) {
+ &len, 3000);
+ if (err || len != size) {
ath3k_log_failed_loading(err, len, size, count);
kfree(send_buf);
return err;
@@ -425,7 +423,6 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
switch (fw_version.ref_clock) {
-
case ATH3K_XTAL_FREQ_26M:
clk_value = 26;
break;
@@ -441,7 +438,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s",
- le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
+ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu");
ret = request_firmware(&firmware, filename, &udev->dev);
if (ret < 0) {
@@ -456,7 +453,7 @@ static int ath3k_load_syscfg(struct usb_device *udev)
}
static int ath3k_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id)
{
const struct firmware *firmware;
struct usb_device *udev = interface_to_usbdev(intf);
@@ -505,10 +502,10 @@ static int ath3k_probe(struct usb_interface *intf,
if (ret < 0) {
if (ret == -ENOENT)
BT_ERR("Firmware file \"%s\" not found",
- ATH3K_FIRMWARE);
+ ATH3K_FIRMWARE);
else
BT_ERR("Firmware file \"%s\" request failed (err=%d)",
- ATH3K_FIRMWARE, ret);
+ ATH3K_FIRMWARE, ret);
return ret;
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 9658b33c8..18f34998a 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -121,13 +121,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
((event->data[2] == MODULE_BROUGHT_UP) ||
(event->data[2] == MODULE_ALREADY_UP)) ?
"Bring-up succeed" : "Bring-up failed");
-
- if (event->length > 3 && event->data[3])
- priv->btmrvl_dev.dev_type = HCI_AMP;
- else
- priv->btmrvl_dev.dev_type = HCI_PRIMARY;
-
- BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type);
} else if (priv->btmrvl_dev.sendcmdflag &&
event->data[1] == MODULE_SHUTDOWN_REQ) {
BT_DBG("EVENT:%s", (event->data[2]) ?
@@ -686,8 +679,6 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
- hdev->dev_type = priv->btmrvl_dev.dev_type;
-
ret = hci_register_dev(hdev);
if (ret < 0) {
BT_ERR("Can not register HCI device");
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 638074992..35fb26cbf 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -148,8 +148,10 @@ static int qca_read_fw_build_info(struct hci_dev *hdev)
}
build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL);
- if (!build_label)
+ if (!build_label) {
+ err = -ENOMEM;
goto out;
+ }
hci_set_fw_info(hdev, "%s", build_label);
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 634cf8f5e..0c91d7635 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -134,7 +134,6 @@ static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, h_adapter);
- hdev->dev_type = HCI_PRIMARY;
hdev->open = rsi_hci_open;
hdev->close = rsi_hci_close;
hdev->flush = rsi_hci_flush;
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index f19d31ee3..fdcfe9c50 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -32,9 +32,6 @@ static const struct sdio_device_id btsdio_table[] = {
/* Generic Bluetooth Type-B SDIO device */
{ SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) },
- /* Generic Bluetooth AMP controller */
- { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) },
-
{ } /* Terminating entry */
};
@@ -319,11 +316,6 @@ static int btsdio_probe(struct sdio_func *func,
hdev->bus = HCI_SDIO;
hci_set_drvdata(hdev, data);
- if (id->class == SDIO_CLASS_BT_AMP)
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &func->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 01b99471d..fb716849b 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4332,11 +4332,6 @@ static int btusb_probe(struct usb_interface *intf,
hdev->bus = HCI_USB;
hci_set_drvdata(hdev, data);
- if (id->driver_info & BTUSB_AMP)
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
data->hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c
index 9a7243d5d..0c2f15235 100644
--- a/drivers/bluetooth/hci_bcm4377.c
+++ b/drivers/bluetooth/hci_bcm4377.c
@@ -2361,7 +2361,6 @@ static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id)
bcm4377->hdev = hdev;
hdev->bus = HCI_PCI;
- hdev->dev_type = HCI_PRIMARY;
hdev->open = bcm4377_hci_open;
hdev->close = bcm4377_hci_close;
hdev->send = bcm4377_hci_send_frame;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index a26367e9f..17a2f158a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -667,11 +667,6 @@ static int hci_uart_register_dev(struct hci_uart *hu)
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
/* Only call open() for the protocol after hdev is fully initialized as
* open() (or a timer/workqueue it starts) may attempt to reference it.
*/
@@ -722,7 +717,6 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
{
unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) |
BIT(HCI_UART_RESET_ON_INIT) |
- BIT(HCI_UART_CREATE_AMP) |
BIT(HCI_UART_INIT_PENDING) |
BIT(HCI_UART_EXT_CONFIG) |
BIT(HCI_UART_VND_DETECT);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 85c0d9b68..89a22e9b3 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -366,11 +366,6 @@ int hci_uart_register_device_priv(struct hci_uart *hu,
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
- if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
- hdev->dev_type = HCI_AMP;
- else
- hdev->dev_type = HCI_PRIMARY;
-
if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
return 0;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 68c8c7e95..00bf7ae82 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -37,7 +37,6 @@
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
-#define HCI_UART_CREATE_AMP 2
#define HCI_UART_INIT_PENDING 3
#define HCI_UART_EXT_CONFIG 4
#define HCI_UART_VND_DETECT 5
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 572d68d52..28750a40f 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -384,17 +384,10 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
struct sk_buff *skb;
- __u8 dev_type;
if (data->hdev)
return -EBADFD;
- /* bits 0-1 are dev_type (Primary or AMP) */
- dev_type = opcode & 0x03;
-
- if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP)
- return -EINVAL;
-
/* bits 2-5 are reserved (must be zero) */
if (opcode & 0x3c)
return -EINVAL;
@@ -412,7 +405,6 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
data->hdev = hdev;
hdev->bus = HCI_VIRTUAL;
- hdev->dev_type = dev_type;
hci_set_drvdata(hdev, data);
hdev->open = vhci_open_dev;
@@ -634,7 +626,7 @@ static void vhci_open_timeout(struct work_struct *work)
struct vhci_data *data = container_of(work, struct vhci_data,
open_timeout.work);
- vhci_create_device(data, amp ? HCI_AMP : HCI_PRIMARY);
+ vhci_create_device(data, 0x00);
}
static int vhci_open(struct inode *inode, struct file *file)
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 2ac70b560..18208e152 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -274,7 +274,6 @@ static int virtbt_probe(struct virtio_device *vdev)
switch (type) {
case VIRTIO_BT_CONFIG_TYPE_PRIMARY:
- case VIRTIO_BT_CONFIG_TYPE_AMP:
break;
default:
return -EINVAL;
@@ -303,7 +302,6 @@ static int virtbt_probe(struct virtio_device *vdev)
vbt->hdev = hdev;
hdev->bus = HCI_VIRTIO;
- hdev->dev_type = type;
hci_set_drvdata(hdev, vbt);
hdev->open = virtbt_open;
diff --git a/drivers/char/hw_random/stm32-rng.c b/drivers/char/hw_random/stm32-rng.c
index 379bc245c..0e903d6e2 100644
--- a/drivers/char/hw_random/stm32-rng.c
+++ b/drivers/char/hw_random/stm32-rng.c
@@ -220,7 +220,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error\n");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -238,7 +239,8 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
if (err && i > RNG_NB_RECOVER_TRIES) {
dev_err((struct device *)priv->rng.priv,
"Couldn't recover from seed error");
- return -ENOTRECOVERABLE;
+ retval = -ENOTRECOVERABLE;
+ goto exit_rpm;
}
continue;
@@ -250,6 +252,7 @@ static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
max -= sizeof(u32);
}
+exit_rpm:
pm_runtime_mark_last_busy((struct device *) priv->rng.priv);
pm_runtime_put_sync_autosuspend((struct device *) priv->rng.priv);
@@ -353,13 +356,15 @@ static int stm32_rng_init(struct hwrng *rng)
err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
reg & RNG_SR_DRDY,
10, 100000);
- if (err | (reg & ~RNG_SR_DRDY)) {
+ if (err || (reg & ~RNG_SR_DRDY)) {
clk_disable_unprepare(priv->clk);
dev_err((struct device *)priv->rng.priv,
"%s: timeout:%x SR: %x!\n", __func__, err, reg);
return -EINVAL;
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -384,6 +389,11 @@ static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
static int __maybe_unused stm32_rng_suspend(struct device *dev)
{
struct stm32_rng_private *priv = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
if (priv->data->has_cond_reset) {
priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
@@ -465,6 +475,8 @@ static int __maybe_unused stm32_rng_resume(struct device *dev)
writel_relaxed(reg, priv->base + RNG_CR);
}
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index ee951b265..58e9dcc2a 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -296,28 +296,35 @@ static int register_device(int minor, struct pp_struct *pp)
if (!port) {
pr_warn("%s: no associated port!\n", name);
rc = -ENXIO;
- goto err;
+ goto err_free_name;
}
index = ida_alloc(&ida_index, GFP_KERNEL);
+ if (index < 0) {
+ pr_warn("%s: failed to get index!\n", name);
+ rc = index;
+ goto err_put_port;
+ }
+
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
ppdev_cb.irq_func = pp_irq;
ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0;
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, index);
- parport_put_port(port);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
rc = -ENXIO;
ida_free(&ida_index, index);
- goto err;
+ goto err_put_port;
}
pp->pdev = pdev;
pp->index = index;
dev_dbg(&pdev->dev, "registered pardevice\n");
-err:
+err_put_port:
+ parport_put_port(port);
+err_free_name:
kfree(name);
return rc;
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 714070ebb..0c20fbc08 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1020,7 +1020,8 @@ void tpm_tis_remove(struct tpm_chip *chip)
interrupt = 0;
tpm_tis_write32(priv, reg, ~TPM_GLOBAL_INT_ENABLE & interrupt);
- flush_work(&priv->free_irq_work);
+ if (priv->free_irq_work.func)
+ flush_work(&priv->free_irq_work);
tpm_tis_clkrun_enable(chip, false);
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 3f9eaf27b..c9eca24bb 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -37,6 +37,7 @@
#include "tpm_tis_spi.h"
#define MAX_SPI_FRAMESIZE 64
+#define SPI_HDRSIZE 4
/*
* TCG SPI flow control is documented in section 6.4 of the spec[1]. In short,
@@ -247,7 +248,7 @@ static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
int tpm_tis_spi_init(struct spi_device *spi, struct tpm_tis_spi_phy *phy,
int irq, const struct tpm_tis_phy_ops *phy_ops)
{
- phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+ phy->iobuf = devm_kmalloc(&spi->dev, SPI_HDRSIZE + MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index e4fbbf3c4..3cb235df9 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -56,6 +56,8 @@ static int clk_dvp_probe(struct platform_device *pdev)
if (ret)
return ret;
+ data->num = NR_CLOCKS;
+
data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev,
"hdmi0-108MHz",
&clk_dvp_parent, 0,
@@ -76,7 +78,6 @@ static int clk_dvp_probe(struct platform_device *pdev)
goto unregister_clk0;
}
- data->num = NR_CLOCKS;
ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
data);
if (ret)
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 829406dc4..4d411408e 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -371,8 +371,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
if (IS_ERR(hw))
return PTR_ERR(hw);
- data->hws[clks->id] = hw;
data->num = clks->id + 1;
+ data->hws[clks->id] = hw;
}
clks++;
diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c
index 53e21ac30..4c3a5e4eb 100644
--- a/drivers/clk/clk-renesas-pcie.c
+++ b/drivers/clk/clk-renesas-pcie.c
@@ -25,10 +25,12 @@
#define RS9_REG_SS_AMP_0V7 0x1
#define RS9_REG_SS_AMP_0V8 0x2
#define RS9_REG_SS_AMP_0V9 0x3
+#define RS9_REG_SS_AMP_DEFAULT RS9_REG_SS_AMP_0V8
#define RS9_REG_SS_AMP_MASK 0x3
#define RS9_REG_SS_SSC_100 0
#define RS9_REG_SS_SSC_M025 (1 << 3)
#define RS9_REG_SS_SSC_M050 (3 << 3)
+#define RS9_REG_SS_SSC_DEFAULT RS9_REG_SS_SSC_100
#define RS9_REG_SS_SSC_MASK (3 << 3)
#define RS9_REG_SS_SSC_LOCK BIT(5)
#define RS9_REG_SR 0x2
@@ -205,8 +207,8 @@ static int rs9_get_common_config(struct rs9_driver_data *rs9)
int ret;
/* Set defaults */
- rs9->pll_amplitude = RS9_REG_SS_AMP_0V7;
- rs9->pll_ssc = RS9_REG_SS_SSC_100;
+ rs9->pll_amplitude = RS9_REG_SS_AMP_DEFAULT;
+ rs9->pll_ssc = RS9_REG_SS_SSC_DEFAULT;
/* Output clock amplitude */
ret = of_property_read_u32(np, "renesas,out-amplitude-microvolt",
@@ -247,13 +249,13 @@ static void rs9_update_config(struct rs9_driver_data *rs9)
int i;
/* If amplitude is non-default, update it. */
- if (rs9->pll_amplitude != RS9_REG_SS_AMP_0V7) {
+ if (rs9->pll_amplitude != RS9_REG_SS_AMP_DEFAULT) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_AMP_MASK,
rs9->pll_amplitude);
}
/* If SSC is non-default, update it. */
- if (rs9->pll_ssc != RS9_REG_SS_SSC_100) {
+ if (rs9->pll_ssc != RS9_REG_SS_SSC_DEFAULT) {
regmap_update_bits(rs9->regmap, RS9_REG_SS, RS9_REG_SS_SSC_MASK,
rs9->pll_ssc);
}
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 01a2ef8f5..3f62ec750 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -53,7 +53,7 @@ static const struct mtk_gate mm_clks[] = {
GATE_MM0(CLK_MM_MM_DSI0, "mm_dsi0", "mm_sel", 17),
GATE_MM0(CLK_MM_MM_DISP_RDMA1, "mm_disp_rdma1", "mm_sel", 18),
GATE_MM0(CLK_MM_MM_MDP_RDMA1, "mm_mdp_rdma1", "mm_sel", 19),
- GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "vpll_dpix", 20),
+ GATE_MM0(CLK_MM_DPI0_DPI0, "mm_dpi0_dpi0", "dpi0_sel", 20),
GATE_MM0(CLK_MM_MM_FAKE, "mm_fake", "mm_sel", 21),
GATE_MM0(CLK_MM_MM_SMI_COMMON, "mm_smi_common", "mm_sel", 22),
GATE_MM0(CLK_MM_MM_SMI_LARB0, "mm_smi_larb0", "mm_sel", 23),
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 3a2b3f90b..094ec8a26 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -68,7 +68,7 @@ void fhctl_parse_dt(const u8 *compatible_node, struct mtk_pllfh_data *pllfhs,
node = of_find_compatible_node(NULL, NULL, compatible_node);
if (!node) {
- pr_err("cannot find \"%s\"\n", compatible_node);
+ pr_warn("cannot find \"%s\"\n", compatible_node);
return;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 8ab08e7b5..1bb51a058 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -474,6 +474,7 @@ config SC_CAMCC_7280
config SC_CAMCC_8280XP
tristate "SC8280XP Camera Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SC_GCC_8280XP
help
Support for the camera clock controller on Qualcomm Technologies, Inc
@@ -1094,6 +1095,7 @@ config SM_GPUCC_8550
config SM_GPUCC_8650
tristate "SM8650 Graphics Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_8650
help
Support for the graphics clock controller on SM8650 devices.
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index 678b805f1..d7ab5bd5d 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -55,6 +55,29 @@ static struct clk_alpha_pll ipq_pll_huayra = {
},
};
+static struct clk_alpha_pll ipq_pll_stromer = {
+ .offset = 0x0,
+ /*
+ * Reuse CLK_ALPHA_PLL_TYPE_STROMER_PLUS register offsets.
+ * Although this is a bit confusing, the offset values
+ * are correct nevertheless.
+ */
+ .regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "a53pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_stromer_ops,
+ },
+ },
+};
+
static struct clk_alpha_pll ipq_pll_stromer_plus = {
.offset = 0x0,
.regs = ipq_pll_offsets[CLK_ALPHA_PLL_TYPE_STROMER_PLUS],
@@ -73,8 +96,9 @@ static struct clk_alpha_pll ipq_pll_stromer_plus = {
},
};
+/* 1.008 GHz configuration */
static const struct alpha_pll_config ipq5018_pll_config = {
- .l = 0x32,
+ .l = 0x2a,
.config_ctl_val = 0x4001075b,
.config_ctl_hi_val = 0x304,
.main_output_mask = BIT(0),
@@ -144,8 +168,8 @@ struct apss_pll_data {
};
static const struct apss_pll_data ipq5018_pll_data = {
- .pll_type = CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
- .pll = &ipq_pll_stromer_plus,
+ .pll_type = CLK_ALPHA_PLL_TYPE_STROMER,
+ .pll = &ipq_pll_stromer,
.pll_config = &ipq5018_pll_config,
};
@@ -203,7 +227,8 @@ static int apss_ipq_pll_probe(struct platform_device *pdev)
if (data->pll_type == CLK_ALPHA_PLL_TYPE_HUAYRA)
clk_alpha_pll_configure(data->pll, regmap, data->pll_config);
- else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
+ else if (data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER ||
+ data->pll_type == CLK_ALPHA_PLL_TYPE_STROMER_PLUS)
clk_stromer_pll_configure(data->pll, regmap, data->pll_config);
ret = devm_clk_register_regmap(dev, &data->pll->clkr);
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 8a412ef47..be18ff983 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -213,7 +213,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_USER_CTL] = 0x18,
[PLL_OFF_USER_CTL_U] = 0x1c,
[PLL_OFF_CONFIG_CTL] = 0x20,
- [PLL_OFF_CONFIG_CTL_U] = 0xff,
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
[PLL_OFF_STATUS] = 0x28,
@@ -2490,6 +2489,8 @@ static int clk_alpha_pll_stromer_set_rate(struct clk_hw *hw, unsigned long rate,
rate = alpha_pll_round_rate(rate, prate, &l, &a, ALPHA_REG_BITWIDTH);
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+
+ a <<= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
a >> ALPHA_BITWIDTH);
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index 839435362..e4b7464c4 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -221,26 +221,17 @@ static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dp_link_clk_src[] = {
- F(162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dp_link_clk_src = {
.cmd_rcgr = 0x10f8,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_link_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "disp_cc_mdss_dp_link_clk_src",
.parent_data = disp_cc_parent_data_0,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8450.c b/drivers/clk/qcom/dispcc-sm8450.c
index 92e9c4e7b..49bb4f58c 100644
--- a/drivers/clk/qcom/dispcc-sm8450.c
+++ b/drivers/clk/qcom/dispcc-sm8450.c
@@ -309,26 +309,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x819c,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -382,13 +373,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -442,13 +432,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -502,13 +491,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8550.c b/drivers/clk/qcom/dispcc-sm8550.c
index 3672c73ac..38ecea805 100644
--- a/drivers/clk/qcom/dispcc-sm8550.c
+++ b/drivers/clk/qcom/dispcc-sm8550.c
@@ -345,26 +345,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x8170,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -418,13 +409,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -478,13 +468,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -538,13 +527,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/dispcc-sm8650.c b/drivers/clk/qcom/dispcc-sm8650.c
index 9539db0d9..3eb64bcad 100644
--- a/drivers/clk/qcom/dispcc-sm8650.c
+++ b/drivers/clk/qcom/dispcc-sm8650.c
@@ -343,26 +343,17 @@ static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dptx0_link_clk_src[] = {
- F(162000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(270000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(540000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- F(810000, P_DP0_PHY_PLL_LINK_CLK, 1, 0, 0),
- { }
-};
-
static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
.cmd_rcgr = 0x8170,
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_7,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx0_link_clk_src",
.parent_data = disp_cc_parent_data_7,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -416,13 +407,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx1_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -476,13 +466,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx2_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
@@ -536,13 +525,12 @@ static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
.mnd_width = 0,
.hid_width = 5,
.parent_map = disp_cc_parent_map_3,
- .freq_tbl = ftbl_disp_cc_mdss_dptx0_link_clk_src,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "disp_cc_mdss_dptx3_link_clk_src",
.parent_data = disp_cc_parent_data_3,
.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_byte2_ops,
},
};
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 1180e48c6..275fb3b71 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -2535,6 +2535,8 @@ static struct clk_branch vmem_ahb_clk = {
static struct gdsc video_top_gdsc = {
.gdscr = 0x1024,
+ .cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
+ .cxc_count = 3,
.pd = {
.name = "video_top",
},
@@ -2543,20 +2545,26 @@ static struct gdsc video_top_gdsc = {
static struct gdsc video_subcore0_gdsc = {
.gdscr = 0x1040,
+ .cxcs = (unsigned int []){ 0x1048 },
+ .cxc_count = 1,
.pd = {
.name = "video_subcore0",
},
.parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc video_subcore1_gdsc = {
.gdscr = 0x1044,
+ .cxcs = (unsigned int []){ 0x104c },
+ .cxc_count = 1,
.pd = {
.name = "video_subcore1",
},
.parent = &video_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc mdss_gdsc = {
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 4c2872f45..ff3f85e90 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -139,7 +139,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("avb3", 214, R8A779A0_CLK_S3D2),
DEF_MOD("avb4", 215, R8A779A0_CLK_S3D2),
DEF_MOD("avb5", 216, R8A779A0_CLK_S3D2),
- DEF_MOD("canfd0", 328, R8A779A0_CLK_CANFD),
+ DEF_MOD("canfd0", 328, R8A779A0_CLK_S3D2),
DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 33532673d..26b71547f 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -280,6 +280,10 @@ static struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
0x5a8, 1),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
0x5ac, 0),
+#ifdef CONFIG_RISCV
+ DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
+ 0x608, 0),
+#endif
};
static struct rzg2l_reset r9a07g043_resets[] = {
@@ -338,6 +342,10 @@ static struct rzg2l_reset r9a07g043_resets[] = {
DEF_RST(R9A07G043_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G043_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A07G043_TSU_PRESETN, 0x8ac, 0),
+#ifdef CONFIG_RISCV
+ DEF_RST(R9A07G043_NCEPLIC_ARESETN, 0x908, 0),
+#endif
};
static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
@@ -347,6 +355,7 @@ static const unsigned int r9a07g043_crit_mod_clks[] __initconst = {
#endif
#ifdef CONFIG_RISCV
MOD_CLK_BASE + R9A07G043_IAX45_CLK,
+ MOD_CLK_BASE + R9A07G043_NCEPLIC_ACLK,
#endif
MOD_CLK_BASE + R9A07G043_DMAC_ACLK,
};
diff --git a/drivers/clk/samsung/clk-exynosautov9.c b/drivers/clk/samsung/clk-exynosautov9.c
index e9c06eb93..f04bacaca 100644
--- a/drivers/clk/samsung/clk-exynosautov9.c
+++ b/drivers/clk/samsung/clk-exynosautov9.c
@@ -352,13 +352,13 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
/* CMU_TOP_PURECLKCOMP */
PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared1_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared2_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED2_PLL, "fout_shared2_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED2, PLL_CON3_PLL_SHARED2, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared3_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED3_PLL, "fout_shared3_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED3, PLL_CON3_PLL_SHARED3, NULL),
- PLL(pll_0822x, FOUT_SHARED0_PLL, "fout_shared4_pll", "oscclk",
+ PLL(pll_0822x, FOUT_SHARED4_PLL, "fout_shared4_pll", "oscclk",
PLL_LOCKTIME_PLL_SHARED4, PLL_CON3_PLL_SHARED4, NULL),
};
diff --git a/drivers/clk/samsung/clk-gs101.c b/drivers/clk/samsung/clk-gs101.c
index d065e343a..bd3c1b027 100644
--- a/drivers/clk/samsung/clk-gs101.c
+++ b/drivers/clk/samsung/clk-gs101.c
@@ -2763,33 +2763,33 @@ static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
MUX(CLK_MOUT_PERIC0_USI0_UART_USER,
"mout_peric0_usi0_uart_user", mout_peric0_usi0_uart_user_p,
PLL_CON0_MUX_CLKCMU_PERIC0_USI0_UART_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI14_USI_USER,
- "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI1_USI_USER,
- "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI2_USI_USER,
- "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI3_USI_USER,
- "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI4_USI_USER,
- "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI5_USI_USER,
- "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI6_USI_USER,
- "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI7_USI_USER,
- "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC0_USI8_USI_USER,
- "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
- PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI14_USI_USER,
+ "mout_peric0_usi14_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI1_USI_USER,
+ "mout_peric0_usi1_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI1_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI2_USI_USER,
+ "mout_peric0_usi2_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI2_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI3_USI_USER,
+ "mout_peric0_usi3_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI3_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI4_USI_USER,
+ "mout_peric0_usi4_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI4_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI5_USI_USER,
+ "mout_peric0_usi5_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI5_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI6_USI_USER,
+ "mout_peric0_usi6_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI6_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI7_USI_USER,
+ "mout_peric0_usi7_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI7_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC0_USI8_USI_USER,
+ "mout_peric0_usi8_usi_user", mout_peric0_usi_usi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI8_USI_USER, 4, 1),
};
static const struct samsung_div_clock peric0_div_clks[] __initconst = {
@@ -2798,33 +2798,42 @@ static const struct samsung_div_clock peric0_div_clks[] __initconst = {
DIV(CLK_DOUT_PERIC0_USI0_UART,
"dout_peric0_usi0_uart", "mout_peric0_usi0_uart_user",
CLK_CON_DIV_DIV_CLK_PERIC0_USI0_UART, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI14_USI,
- "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI1_USI,
- "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI2_USI,
- "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI3_USI,
- "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI4_USI,
- "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI5_USI,
- "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI6_USI,
- "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI7_USI,
- "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4),
- DIV(CLK_DOUT_PERIC0_USI8_USI,
- "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4),
+ DIV_F(CLK_DOUT_PERIC0_USI14_USI,
+ "dout_peric0_usi14_usi", "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI1_USI,
+ "dout_peric0_usi1_usi", "mout_peric0_usi1_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI1_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI2_USI,
+ "dout_peric0_usi2_usi", "mout_peric0_usi2_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI2_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI3_USI,
+ "dout_peric0_usi3_usi", "mout_peric0_usi3_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI3_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI4_USI,
+ "dout_peric0_usi4_usi", "mout_peric0_usi4_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI4_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI5_USI,
+ "dout_peric0_usi5_usi", "mout_peric0_usi5_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI5_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI6_USI,
+ "dout_peric0_usi6_usi", "mout_peric0_usi6_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI6_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI7_USI,
+ "dout_peric0_usi7_usi", "mout_peric0_usi7_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI7_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC0_USI8_USI,
+ "dout_peric0_usi8_usi", "mout_peric0_usi8_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI8_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
@@ -2857,11 +2866,11 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_0,
"gout_peric0_peric0_top0_ipclk_0", "dout_peric0_usi1_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_0,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_1,
"gout_peric0_peric0_top0_ipclk_1", "dout_peric0_usi2_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_1,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_10,
"gout_peric0_peric0_top0_ipclk_10", "dout_peric0_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
@@ -2889,27 +2898,27 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_2,
"gout_peric0_peric0_top0_ipclk_2", "dout_peric0_usi3_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_3,
"gout_peric0_peric0_top0_ipclk_3", "dout_peric0_usi4_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_3,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_4,
"gout_peric0_peric0_top0_ipclk_4", "dout_peric0_usi5_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_5,
"gout_peric0_peric0_top0_ipclk_5", "dout_peric0_usi6_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_6,
"gout_peric0_peric0_top0_ipclk_6", "dout_peric0_usi7_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_7,
"gout_peric0_peric0_top0_ipclk_7", "dout_peric0_usi8_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC0_PERIC0_TOP0_IPCLK_8,
"gout_peric0_peric0_top0_ipclk_8", "dout_peric0_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
@@ -2990,7 +2999,7 @@ static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_IPCLK_2,
"gout_peric0_peric0_top1_ipclk_2", "dout_peric0_usi14_usi",
CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
/* Disabling this clock makes the system hang. Mark the clock as critical. */
GATE(CLK_GOUT_PERIC0_PERIC0_TOP1_PCLK_0,
"gout_peric0_peric0_top1_pclk_0", "mout_peric0_bus_user",
@@ -3230,47 +3239,53 @@ static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
MUX(CLK_MOUT_PERIC1_I3C_USER,
"mout_peric1_i3c_user", mout_peric1_nonbususer_p,
PLL_CON0_MUX_CLKCMU_PERIC1_I3C_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI0_USI_USER,
- "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI10_USI_USER,
- "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI11_USI_USER,
- "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI12_USI_USER,
- "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI13_USI_USER,
- "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
- MUX(CLK_MOUT_PERIC1_USI9_USI_USER,
- "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
- PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI0_USI_USER,
+ "mout_peric1_usi0_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI0_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI10_USI_USER,
+ "mout_peric1_usi10_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI11_USI_USER,
+ "mout_peric1_usi11_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI12_USI_USER,
+ "mout_peric1_usi12_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI13_USI_USER,
+ "mout_peric1_usi13_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI13_USI_USER, 4, 1),
+ nMUX(CLK_MOUT_PERIC1_USI9_USI_USER,
+ "mout_peric1_usi9_usi_user", mout_peric1_nonbususer_p,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI9_USI_USER, 4, 1),
};
static const struct samsung_div_clock peric1_div_clks[] __initconst = {
DIV(CLK_DOUT_PERIC1_I3C, "dout_peric1_i3c", "mout_peric1_i3c_user",
CLK_CON_DIV_DIV_CLK_PERIC1_I3C, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI0_USI,
- "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI10_USI,
- "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI11_USI,
- "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI12_USI,
- "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI13_USI,
- "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4),
- DIV(CLK_DOUT_PERIC1_USI9_USI,
- "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
- CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4),
+ DIV_F(CLK_DOUT_PERIC1_USI0_USI,
+ "dout_peric1_usi0_usi", "mout_peric1_usi0_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI0_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI10_USI,
+ "dout_peric1_usi10_usi", "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI11_USI,
+ "dout_peric1_usi11_usi", "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI12_USI,
+ "dout_peric1_usi12_usi", "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI13_USI,
+ "dout_peric1_usi13_usi", "mout_peric1_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI13_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_PERIC1_USI9_USI,
+ "dout_peric1_usi9_usi", "mout_peric1_usi9_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI9_USI, 0, 4,
+ CLK_SET_RATE_PARENT, 0),
};
static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
@@ -3305,27 +3320,27 @@ static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_1,
"gout_peric1_peric1_top0_ipclk_1", "dout_peric1_usi0_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_1,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_2,
"gout_peric1_peric1_top0_ipclk_2", "dout_peric1_usi9_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_2,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_3,
"gout_peric1_peric1_top0_ipclk_3", "dout_peric1_usi10_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_3,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_4,
"gout_peric1_peric1_top0_ipclk_4", "dout_peric1_usi11_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_5,
"gout_peric1_peric1_top0_ipclk_5", "dout_peric1_usi12_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_5,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_6,
"gout_peric1_peric1_top0_ipclk_6", "dout_peric1_usi13_usi",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_6,
- 21, 0, 0),
+ 21, CLK_SET_RATE_PARENT, 0),
GATE(CLK_GOUT_PERIC1_PERIC1_TOP0_IPCLK_8,
"gout_peric1_peric1_top0_ipclk_8", "dout_peric1_i3c",
CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_8,
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index a763309e6..556167350 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -133,7 +133,7 @@ struct samsung_mux_clock {
.name = cname, \
.parent_names = pnames, \
.num_parents = ARRAY_SIZE(pnames), \
- .flags = (f) | CLK_SET_RATE_NO_REPARENT, \
+ .flags = f, \
.offset = o, \
.shift = s, \
.width = w, \
@@ -141,9 +141,16 @@ struct samsung_mux_clock {
}
#define MUX(_id, cname, pnames, o, s, w) \
- __MUX(_id, cname, pnames, o, s, w, 0, 0)
+ __MUX(_id, cname, pnames, o, s, w, CLK_SET_RATE_NO_REPARENT, 0)
#define MUX_F(_id, cname, pnames, o, s, w, f, mf) \
+ __MUX(_id, cname, pnames, o, s, w, (f) | CLK_SET_RATE_NO_REPARENT, mf)
+
+/* Used by MUX clocks where reparenting on clock rate change is allowed. */
+#define nMUX(_id, cname, pnames, o, s, w) \
+ __MUX(_id, cname, pnames, o, s, w, 0, 0)
+
+#define nMUX_F(_id, cname, pnames, o, s, w, f, mf) \
__MUX(_id, cname, pnames, o, s, w, f, mf)
/**
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index 25b8e1a80..b32a59fe5 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -4,7 +4,6 @@
* Copyright (C) 2020 Zong Li
*/
-#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -537,13 +536,6 @@ static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
return r;
}
- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
- if (r) {
- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
- init.name, r);
- return r;
- }
-
pd->hw_clks.hws[i] = &pic->hw;
}
diff --git a/drivers/cpufreq/amd-pstate-ut.c b/drivers/cpufreq/amd-pstate-ut.c
index f04ae67dd..fc275d41d 100644
--- a/drivers/cpufreq/amd-pstate-ut.c
+++ b/drivers/cpufreq/amd-pstate-ut.c
@@ -26,10 +26,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
-#include <linux/amd-pstate.h>
#include <acpi/cppc_acpi.h>
+#include "amd-pstate.h"
+
/*
* Abbreviations:
* amd_pstate_ut: used as a shortform for AMD P-State unit test.
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index 28166df81..6af175e6c 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>
-#include <linux/amd-pstate.h>
#include <linux/topology.h>
#include <acpi/processor.h>
@@ -46,6 +45,8 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
+
+#include "amd-pstate.h"
#include "amd-pstate-trace.h"
#define AMD_PSTATE_TRANSITION_LATENCY 20000
@@ -53,6 +54,37 @@
#define CPPC_HIGHEST_PERF_PERFORMANCE 196
#define CPPC_HIGHEST_PERF_DEFAULT 166
+#define AMD_CPPC_EPP_PERFORMANCE 0x00
+#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
+#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
+#define AMD_CPPC_EPP_POWERSAVE 0xFF
+
+/*
+ * enum amd_pstate_mode - driver working mode of amd pstate
+ */
+enum amd_pstate_mode {
+ AMD_PSTATE_UNDEFINED = 0,
+ AMD_PSTATE_DISABLE,
+ AMD_PSTATE_PASSIVE,
+ AMD_PSTATE_ACTIVE,
+ AMD_PSTATE_GUIDED,
+ AMD_PSTATE_MAX,
+};
+
+static const char * const amd_pstate_mode_string[] = {
+ [AMD_PSTATE_UNDEFINED] = "undefined",
+ [AMD_PSTATE_DISABLE] = "disable",
+ [AMD_PSTATE_PASSIVE] = "passive",
+ [AMD_PSTATE_ACTIVE] = "active",
+ [AMD_PSTATE_GUIDED] = "guided",
+ NULL,
+};
+
+struct quirk_entry {
+ u32 nominal_freq;
+ u32 lowest_freq;
+};
+
/*
* TODO: We need more time to fine tune processors with shared memory solution
* with community together.
@@ -68,6 +100,7 @@ static struct cpufreq_driver amd_pstate_epp_driver;
static int cppc_state = AMD_PSTATE_UNDEFINED;
static bool cppc_enabled;
static bool amd_pstate_prefcore = true;
+static struct quirk_entry *quirks;
/*
* AMD Energy Preference Performance (EPP)
@@ -112,6 +145,41 @@ static unsigned int epp_values[] = {
typedef int (*cppc_mode_transition_fn)(int);
+static struct quirk_entry quirk_amd_7k62 = {
+ .nominal_freq = 2600,
+ .lowest_freq = 550,
+};
+
+static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi)
+{
+ /**
+ * Match the broken BIOS for family 17h processors that support CPPC V2;
+ * these BIOSes lack the nominal_freq and lowest_freq capability
+ * definitions in their ACPI tables.
+ */
+ if (boot_cpu_has(X86_FEATURE_ZEN2)) {
+ quirks = dmi->driver_data;
+ pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident);
+ return 1;
+ }
+
+ return 0;
+}
+
+static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = {
+ {
+ .callback = dmi_matched_7k62_bios_bug,
+ .ident = "AMD EPYC 7K62",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VERSION, "5.14"),
+ DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"),
+ },
+ .driver_data = &quirk_amd_7k62,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table);
+
static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
@@ -622,74 +690,22 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.lowest_freq * 1000;
+ return READ_ONCE(cpudata->min_freq);
}
static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
- u32 max_perf, max_freq, nominal_freq, nominal_perf;
- u64 boost_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
- max_perf = READ_ONCE(cpudata->highest_perf);
-
- boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return max_freq * 1000;
+ return READ_ONCE(cpudata->max_freq);
}
static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- /* Switch to khz */
- return cppc_perf.nominal_freq * 1000;
+ return READ_ONCE(cpudata->nominal_freq);
}
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
- struct cppc_perf_caps cppc_perf;
- u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
- nominal_freq, nominal_perf;
- u64 lowest_nonlinear_ratio;
-
- int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
- if (ret)
- return ret;
-
- nominal_freq = cppc_perf.nominal_freq;
- nominal_perf = READ_ONCE(cpudata->nominal_perf);
-
- lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
-
- lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
- nominal_perf);
-
- lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;
-
- /* Switch to khz */
- return lowest_nonlinear_freq * 1000;
+ return READ_ONCE(cpudata->lowest_nonlinear_freq);
}
static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
@@ -705,7 +721,7 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
if (state)
policy->cpuinfo.max_freq = cpudata->max_freq;
else
- policy->cpuinfo.max_freq = cpudata->nominal_freq;
+ policy->cpuinfo.max_freq = cpudata->nominal_freq * 1000;
policy->max = policy->cpuinfo.max_freq;
@@ -844,6 +860,61 @@ free_cpufreq_put:
mutex_unlock(&amd_pstate_driver_lock);
}
+/**
+ * amd_pstate_init_freq() - Initialize the max_freq, min_freq,
+ * nominal_freq and lowest_nonlinear_freq
+ * for the @cpudata object.
+ *
+ * Requires: highest_perf, lowest_perf, nominal_perf and
+ * lowest_nonlinear_perf members of @cpudata to be
+ * initialized.
+ *
+ * Returns 0 on success, non-zero value on failure.
+ */
+static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
+{
+ int ret;
+ u32 min_freq;
+ u32 highest_perf, max_freq;
+ u32 nominal_perf, nominal_freq;
+ u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
+ u32 boost_ratio, lowest_nonlinear_ratio;
+ struct cppc_perf_caps cppc_perf;
+
+
+ ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
+ if (ret)
+ return ret;
+
+ if (quirks && quirks->lowest_freq)
+ min_freq = quirks->lowest_freq * 1000;
+ else
+ min_freq = cppc_perf.lowest_freq * 1000;
+
+ if (quirks && quirks->nominal_freq)
+ nominal_freq = quirks->nominal_freq;
+ else
+ nominal_freq = cppc_perf.nominal_freq;
+
+ nominal_perf = READ_ONCE(cpudata->nominal_perf);
+
+ highest_perf = READ_ONCE(cpudata->highest_perf);
+ boost_ratio = div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+ max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+ lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
+ nominal_perf);
+ lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
+
+ WRITE_ONCE(cpudata->min_freq, min_freq);
+ WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
+ WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
+ WRITE_ONCE(cpudata->max_freq, max_freq);
+
+ return 0;
+}
+
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
@@ -871,6 +942,10 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = amd_get_min_freq(cpudata);
max_freq = amd_get_max_freq(cpudata);
nominal_freq = amd_get_nominal_freq(cpudata);
@@ -912,13 +987,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
goto free_cpudata2;
}
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
cpudata->max_limit_freq = max_freq;
cpudata->min_limit_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
policy->driver_data = cpudata;
@@ -1333,6 +1403,10 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
if (ret)
goto free_cpudata1;
+ ret = amd_pstate_init_freq(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
min_freq = amd_get_min_freq(cpudata);
max_freq = amd_get_max_freq(cpudata);
nominal_freq = amd_get_nominal_freq(cpudata);
@@ -1349,12 +1423,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
/* It will be updated by governor */
policy->cur = policy->cpuinfo.min_freq;
- /* Initial processor data capability frequencies */
- cpudata->max_freq = max_freq;
- cpudata->min_freq = min_freq;
- cpudata->nominal_freq = nominal_freq;
- cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
-
policy->driver_data = cpudata;
cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
@@ -1394,6 +1462,13 @@ free_cpudata1:
static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata) {
+ kfree(cpudata);
+ policy->driver_data = NULL;
+ }
+
pr_debug("CPU %d exiting\n", policy->cpu);
return 0;
}
@@ -1672,6 +1747,11 @@ static int __init amd_pstate_init(void)
if (cpufreq_get_current_driver())
return -EEXIST;
+ quirks = NULL;
+
+ /* Check if this machine needs CPPC quirks. */
+ dmi_check_system(amd_pstate_quirks_table);
+
switch (cppc_state) {
case AMD_PSTATE_UNDEFINED:
/* Disable on the following configs by default:
diff --git a/include/linux/amd-pstate.h b/drivers/cpufreq/amd-pstate.h
index d21838835..bc341f359 100644
--- a/include/linux/amd-pstate.h
+++ b/drivers/cpufreq/amd-pstate.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * linux/include/linux/amd-pstate.h
- *
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*
* Author: Meng Li <li.meng@amd.com>
@@ -12,11 +10,6 @@
#include <linux/pm_qos.h>
-#define AMD_CPPC_EPP_PERFORMANCE 0x00
-#define AMD_CPPC_EPP_BALANCE_PERFORMANCE 0x80
-#define AMD_CPPC_EPP_BALANCE_POWERSAVE 0xBF
-#define AMD_CPPC_EPP_POWERSAVE 0xFF
-
/*********************************************************************
* AMD P-state INTERFACE *
*********************************************************************/
@@ -104,24 +97,4 @@ struct amd_cpudata {
bool suspended;
};
-/*
- * enum amd_pstate_mode - driver working mode of amd pstate
- */
-enum amd_pstate_mode {
- AMD_PSTATE_UNDEFINED = 0,
- AMD_PSTATE_DISABLE,
- AMD_PSTATE_PASSIVE,
- AMD_PSTATE_ACTIVE,
- AMD_PSTATE_GUIDED,
- AMD_PSTATE_MAX,
-};
-
-static const char * const amd_pstate_mode_string[] = {
- [AMD_PSTATE_UNDEFINED] = "undefined",
- [AMD_PSTATE_DISABLE] = "disable",
- [AMD_PSTATE_PASSIVE] = "passive",
- [AMD_PSTATE_ACTIVE] = "active",
- [AMD_PSTATE_GUIDED] = "guided",
- NULL,
-};
#endif /* _LINUX_AMD_PSTATE_H */
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 1a1857b0a..ea8438550 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -481,9 +481,12 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct private_data *priv;
+
if (!policy)
return 0;
- struct private_data *priv = policy->driver_data;
+
+ priv = policy->driver_data;
cpufreq_cpu_put(policy);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 64420d9cf..15f1d4192 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,10 +741,15 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
@@ -822,10 +827,15 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- struct cppc_cpudata *cpu_data = policy->driver_data;
+ struct cppc_cpudata *cpu_data;
u64 desired_perf;
int ret;
+ if (!policy)
+ return -ENODEV;
+
+ cpu_data = policy->driver_data;
+
cpufreq_cpu_put(policy);
ret = cppc_get_desired_perf(cpu, &desired_perf);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 66e10a19d..fd9c3ed21 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1679,10 +1679,13 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
*/
if (cpufreq_driver->offline) {
cpufreq_driver->offline(policy);
- } else if (cpufreq_driver->exit) {
- cpufreq_driver->exit(policy);
- policy->freq_table = NULL;
+ return;
}
+
+ if (cpufreq_driver->exit)
+ cpufreq_driver->exit(policy);
+
+ policy->freq_table = NULL;
}
static int cpufreq_offline(unsigned int cpu)
@@ -1740,7 +1743,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
}
/* We did light-weight exit earlier, do full tear down now */
- if (cpufreq_driver->offline)
+ if (cpufreq_driver->offline && cpufreq_driver->exit)
cpufreq_driver->exit(policy);
up_write(&policy->rwsem);
diff --git a/drivers/crypto/bcm/spu2.c b/drivers/crypto/bcm/spu2.c
index 07989bb8c..3fdc64b5a 100644
--- a/drivers/crypto/bcm/spu2.c
+++ b/drivers/crypto/bcm/spu2.c
@@ -495,7 +495,7 @@ static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len,
if (hash_iv_len) {
packet_log(" Hash IV Length %u bytes\n", hash_iv_len);
packet_dump(" hash IV: ", ptr, hash_iv_len);
- ptr += ciph_key_len;
+ ptr += hash_iv_len;
}
if (ciph_iv_len) {
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 473301237..ff6ceb4fe 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -39,44 +39,38 @@ static const struct sp_dev_vdata dev_vdata[] = {
},
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id sp_acpi_match[] = {
{ "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id sp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&dev_vdata[0] },
{ },
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
-#ifdef CONFIG_OF
const struct of_device_id *match;
match = of_match_node(sp_of_match, pdev->dev.of_node);
if (match && match->data)
return (struct sp_dev_vdata *)match->data;
-#endif
+
return NULL;
}
static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
-#ifdef CONFIG_ACPI
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
return (struct sp_dev_vdata *)match->driver_data;
-#endif
+
return NULL;
}
@@ -212,12 +206,8 @@ static int sp_platform_resume(struct platform_device *pdev)
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
-#ifdef CONFIG_ACPI
.acpi_match_table = sp_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = sp_of_match,
-#endif
},
.probe = sp_platform_probe,
.remove_new = sp_platform_remove,
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 92f0a1d9b..13e413533 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -2893,12 +2893,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
+ qm_remove_uacce(qm);
qm_irqs_unregister(qm);
hisi_qm_pci_uninit(qm);
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
}
EXPORT_SYMBOL_GPL(hisi_qm_uninit);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index 93a972fcb..0558f98e2 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -481,8 +481,10 @@ static void sec_alg_resource_free(struct sec_ctx *ctx,
if (ctx->pbuf_supported)
sec_free_pbuf_resource(dev, qp_ctx->res);
- if (ctx->alg_type == SEC_AEAD)
+ if (ctx->alg_type == SEC_AEAD) {
sec_free_mac_resource(dev, qp_ctx->res);
+ sec_free_aiv_resource(dev, qp_ctx->res);
+ }
}
static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
index 1102c47f8..1d0ef47a9 100644
--- a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -296,7 +296,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index 927506cf2..fb34fd7f0 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -208,7 +208,7 @@ static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
if (adf_gen4_init_thd2arb_map(accel_dev))
dev_warn(&GET_DEV(accel_dev),
- "Generate of the thread to arbiter map failed");
+ "Failed to generate thread to arbiter mapping");
return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 9762f2bf7..d26564ceb 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -197,7 +197,9 @@ module_pci_driver(adf_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_4XXX_FW);
+MODULE_FIRMWARE(ADF_402XX_FW);
MODULE_FIRMWARE(ADF_4XXX_MMP);
+MODULE_FIRMWARE(ADF_402XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 9da2278bd..04260f61d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -130,8 +130,7 @@ static void adf_device_reset_worker(struct work_struct *work)
if (adf_dev_restart(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
WARN(1, "QAT: device restart failed. Device is unusable\n");
return;
@@ -147,16 +146,8 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarted_notify(accel_dev);
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
- /*
- * The dev is back alive. Notify the caller if in sync mode
- *
- * If device restart will take a more time than expected,
- * the schedule_reset() function can timeout and exit. This can be
- * detected by calling the completion_done() function. In this case
- * the reset_data structure needs to be freed here.
- */
- if (reset_data->mode == ADF_DEV_RESET_ASYNC ||
- completion_done(&reset_data->compl))
+ /* The dev is back alive. Notify the caller if in sync mode */
+ if (reset_data->mode == ADF_DEV_RESET_ASYNC)
kfree(reset_data);
else
complete(&reset_data->compl);
@@ -191,10 +182,10 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
if (!timeout) {
dev_err(&GET_DEV(accel_dev),
"Reset device timeout expired\n");
+ cancel_work_sync(&reset_data->reset_work);
ret = -EFAULT;
- } else {
- kfree(reset_data);
}
+ kfree(reset_data);
return ret;
}
return 0;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
index 7fc7a77f6..c7ad8cf07 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -149,5 +149,6 @@ void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->sl_exec_counters = sl_exec_counters;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+ tl_data->max_sl_cnt = ADF_GEN4_TL_MAX_SLICES_PER_TYPE;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
index d4f2db3c5..e10f0024f 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_rl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -1125,7 +1125,7 @@ int adf_rl_start(struct adf_accel_dev *accel_dev)
}
if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
- dev_info(&GET_DEV(accel_dev), "not supported\n");
+ dev_info(&GET_DEV(accel_dev), "feature not supported by FW\n");
ret = -EOPNOTSUPP;
goto ret_free;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 2ff714d11..74fb0c2ed 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -41,6 +41,20 @@ static int validate_tl_data(struct adf_tl_hw_data *tl_data)
return 0;
}
+static int validate_tl_slice_counters(struct icp_qat_fw_init_admin_slice_cnt *slice_count,
+ u8 max_slices_per_type)
+{
+ u8 *sl_counter = (u8 *)slice_count;
+ int i;
+
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ if (sl_counter[i] > max_slices_per_type)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -214,6 +228,13 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ ret = validate_tl_slice_counters(&telemetry->slice_cnt, tl_data->max_sl_cnt);
+ if (ret) {
+ dev_err(dev, "invalid value returned by FW\n");
+ adf_send_admin_tl_stop(accel_dev);
+ return ret;
+ }
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index 9be81cd3b..e54a406cc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -40,6 +40,7 @@ struct adf_tl_hw_data {
u8 num_dev_counters;
u8 num_rp_counters;
u8 max_rp;
+ u8 max_sl_cnt;
};
struct adf_telemetry {
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 79b4e7480..6bfc59e67 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -138,6 +138,10 @@ int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
return -ENOMEM;
cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, cptr_dma)) {
+ kfree(hctx);
+ return -ENOMEM;
+ }
cn10k_cpt_hw_ctx_set(hctx, 1);
er_ctx->hw_ctx = hctx;
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index cf8bda7f0..7ec14b5b8 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -273,7 +273,6 @@ static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc)
err_rsa_crypt:
writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- kfree(rctx->rsa_data);
return ret;
}
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 0df09bd79..2773f05ad 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1045,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev)
return cxl_flit_size(pdev) * MEGA / bw;
}
+
+static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
+{
+ struct cxl_port *port = data;
+ struct cxl_decoder *cxld;
+ struct cxl_hdm *cxlhdm;
+ void __iomem *hdm;
+ u32 ctrl;
+
+ if (!is_endpoint_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+ return 0;
+
+ cxlhdm = dev_get_drvdata(&port->dev);
+ hdm = cxlhdm->regs.hdm_decoder;
+ ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+
+ return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
+}
+
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
+{
+ return device_for_each_child(&port->dev, port,
+ __cxl_endpoint_decoder_reset_detected);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 5c186e0a3..18b951496 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2352,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
struct device *dev;
int rc;
- switch (mode) {
- case CXL_DECODER_RAM:
- case CXL_DECODER_PMEM:
- break;
- default:
- dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
- return ERR_PTR(-EINVAL);
- }
-
cxlr = cxl_region_alloc(cxlrd, id);
if (IS_ERR(cxlr))
return cxlr;
@@ -2415,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
{
int rc;
+ switch (mode) {
+ case CXL_DECODER_RAM:
+ case CXL_DECODER_PMEM:
+ break;
+ default:
+ dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+ return ERR_PTR(-EINVAL);
+ }
+
rc = memregion_alloc(GFP_KERNEL);
if (rc < 0)
return ERR_PTR(rc);
@@ -2719,6 +2719,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
if (i == 0) {
cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
if (!cxl_nvb) {
+ kfree(cxlr_pmem);
cxlr_pmem = ERR_PTR(-ENODEV);
goto out;
}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index e5f13260f..7c5cd069f 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
-#define CXL_DPA_FLAGS_MASK 0x3F
-#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK GENMASK(1, 0)
+#define CXL_DPA_MASK GENMASK_ULL(63, 6)
#define CXL_DPA_VOLATILE BIT(0)
#define CXL_DPA_NOT_REPAIRABLE BIT(1)
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 036d17db6..72fa47740 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -891,6 +891,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c1,
struct access_coordinate *c2);
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 2ff361e75..659f9d46b 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -957,11 +957,33 @@ static void cxl_error_resume(struct pci_dev *pdev)
dev->driver ? "successful" : "failed");
}
+static void cxl_reset_done(struct pci_dev *pdev)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct cxl_memdev *cxlmd = cxlds->cxlmd;
+ struct device *dev = &pdev->dev;
+
+ /*
+ * FLR does not expect to touch the HDM decoders and related
+ * registers. SBR, however, will wipe all device configurations.
+ * Issue a warning if there was an active decoder before the reset
+ * that no longer exists.
+ */
+ guard(device)(&cxlmd->dev);
+ if (cxlmd->endpoint &&
+ cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
+ dev_crit(dev, "SBR happened without memory regions removal.\n");
+ dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ }
+}
+
static const struct pci_error_handlers cxl_error_handlers = {
.error_detected = cxl_error_detected,
.slot_reset = cxl_slot_reset,
.resume = cxl_error_resume,
.cor_error_detected = cxl_cor_error_detected,
+ .reset_done = cxl_reset_done,
};
static struct pci_driver cxl_pci_driver = {
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 797e1ebff..f24b67c64 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -192,7 +192,7 @@ static u64 dev_dax_size(struct dev_dax *dev_dax)
u64 size = 0;
int i;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
+ lockdep_assert_held(&dax_dev_rwsem);
for (i = 0; i < dev_dax->nr_range; i++)
size += range_len(&dev_dax->ranges[i].range);
@@ -302,7 +302,7 @@ static unsigned long long dax_region_avail_size(struct dax_region *dax_region)
resource_size_t size = resource_size(&dax_region->res);
struct resource *res;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held(&dax_region_rwsem);
for_each_dax_region_resource(dax_region, res)
size -= resource_size(res);
@@ -447,7 +447,7 @@ static void trim_dev_dax_range(struct dev_dax *dev_dax)
struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
(unsigned long long)range->start,
(unsigned long long)range->end);
@@ -465,26 +465,17 @@ static void free_dev_dax_ranges(struct dev_dax *dev_dax)
trim_dev_dax_range(dev_dax);
}
-static void __unregister_dev_dax(void *dev)
+static void unregister_dev_dax(void *dev)
{
struct dev_dax *dev_dax = to_dev_dax(dev);
dev_dbg(dev, "%s\n", __func__);
+ down_write(&dax_region_rwsem);
kill_dev_dax(dev_dax);
device_del(dev);
free_dev_dax_ranges(dev_dax);
put_device(dev);
-}
-
-static void unregister_dev_dax(void *dev)
-{
- if (rwsem_is_locked(&dax_region_rwsem))
- return __unregister_dev_dax(dev);
-
- if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
- return;
- __unregister_dev_dax(dev);
up_write(&dax_region_rwsem);
}
@@ -507,7 +498,7 @@ static int __free_dev_dax_id(struct dev_dax *dev_dax)
struct dax_region *dax_region;
int rc = dev_dax->id;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_dev_rwsem));
+ lockdep_assert_held_write(&dax_dev_rwsem);
if (!dev_dax->dyn_id || dev_dax->id < 0)
return -1;
@@ -560,15 +551,10 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
if (!victim)
return -ENXIO;
- rc = down_write_killable(&dax_region_rwsem);
- if (rc)
- return rc;
- rc = down_write_killable(&dax_dev_rwsem);
- if (rc) {
- up_write(&dax_region_rwsem);
- return rc;
- }
+ device_lock(dev);
+ device_lock(victim);
dev_dax = to_dev_dax(victim);
+ down_write(&dax_dev_rwsem);
if (victim->driver || dev_dax_size(dev_dax))
rc = -EBUSY;
else {
@@ -589,11 +575,12 @@ static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
rc = -EBUSY;
}
up_write(&dax_dev_rwsem);
+ device_unlock(victim);
/* won the race to invalidate the device, clean it up */
if (do_del)
devm_release_action(dev, unregister_dev_dax, victim);
- up_write(&dax_region_rwsem);
+ device_unlock(dev);
put_device(victim);
return rc;
@@ -705,7 +692,7 @@ static void dax_mapping_release(struct device *dev)
put_device(parent);
}
-static void __unregister_dax_mapping(void *data)
+static void unregister_dax_mapping(void *data)
{
struct device *dev = data;
struct dax_mapping *mapping = to_dax_mapping(dev);
@@ -713,25 +700,12 @@ static void __unregister_dax_mapping(void *data)
dev_dbg(dev, "%s\n", __func__);
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
-
dev_dax->ranges[mapping->range_id].mapping = NULL;
mapping->range_id = -1;
device_unregister(dev);
}
-static void unregister_dax_mapping(void *data)
-{
- if (rwsem_is_locked(&dax_region_rwsem))
- return __unregister_dax_mapping(data);
-
- if (WARN_ON_ONCE(down_write_killable(&dax_region_rwsem) != 0))
- return;
- __unregister_dax_mapping(data);
- up_write(&dax_region_rwsem);
-}
-
static struct dev_dax_range *get_dax_range(struct device *dev)
{
struct dax_mapping *mapping = to_dax_mapping(dev);
@@ -830,7 +804,7 @@ static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
struct device *dev;
int rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
"region disabled\n"))
@@ -876,7 +850,7 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
struct resource *alloc;
int i, rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
/* handle the seed alloc special case */
if (!size) {
@@ -935,7 +909,7 @@ static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, r
struct device *dev = &dev_dax->dev;
int rc;
- WARN_ON_ONCE(!rwsem_is_locked(&dax_region_rwsem));
+ lockdep_assert_held_write(&dax_region_rwsem);
if (dev_WARN_ONCE(dev, !size, "deletion is handled by dev_dax_shrink\n"))
return -EINVAL;
@@ -963,11 +937,11 @@ static ssize_t size_show(struct device *dev,
unsigned long long size;
int rc;
- rc = down_write_killable(&dax_dev_rwsem);
+ rc = down_read_interruptible(&dax_dev_rwsem);
if (rc)
return rc;
size = dev_dax_size(dev_dax);
- up_write(&dax_dev_rwsem);
+ up_read(&dax_dev_rwsem);
return sysfs_emit(buf, "%llu\n", size);
}
@@ -1566,12 +1540,8 @@ err_id:
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
{
struct dev_dax *dev_dax;
- int rc;
-
- rc = down_write_killable(&dax_region_rwsem);
- if (rc)
- return ERR_PTR(rc);
+ down_write(&dax_region_rwsem);
dev_dax = __devm_create_dev_dax(data);
up_write(&dax_region_rwsem);
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
index b7c6f7ea9..6a1bfcd0c 100644
--- a/drivers/dma-buf/st-dma-fence.c
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
t[i].before = pass;
t[i].task = kthread_run(thread_signal_callback, &t[i],
"dma-fence:%d", i);
+ if (IS_ERR(t[i].task)) {
+ ret = PTR_ERR(t[i].task);
+ while (--i >= 0)
+ kthread_stop_put(t[i].task);
+ return ret;
+ }
get_task_struct(t[i].task);
}
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 101394f16..237bce21d 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
seq_printf(s, "%s: %d\n", obj->name, obj->value);
- spin_lock_irq(&obj->lock);
+ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
list_for_each(pos, &obj->pt_list) {
struct sync_pt *pt = container_of(pos, struct sync_pt, link);
sync_print_fence(s, &pt->base, false);
}
- spin_unlock_irq(&obj->lock);
+ spin_unlock(&obj->lock);
}
static void sync_print_sync_file(struct seq_file *s,
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 002a5ec80..9fc99cfbe 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -394,7 +394,7 @@ config LS2X_APB_DMA
config MCF_EDMA
tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
- depends on M5441x || COMPILE_TEST
+ depends on M5441x || (COMPILE_TEST && FSL_EDMA=n)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index 4e339c04f..d5a33e4a9 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -1134,8 +1134,8 @@ static void axi_dmac_remove(struct platform_device *pdev)
{
struct axi_dmac *dmac = platform_get_drvdata(pdev);
- of_dma_controller_free(pdev->dev.of_node);
free_irq(dmac->irq, dmac);
+ of_dma_controller_free(pdev->dev.of_node);
tasklet_kill(&dmac->chan.vchan.task);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index a86a81ff0..321446fdd 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc);
return NULL;
}
+ desc->nr_hw_descs = num;
return desc;
}
@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc)
{
struct axi_dma_chan *chan = desc->chan;
- int count = atomic_read(&chan->descs_allocated);
+ int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc;
int descs_put;
@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */
list_del(&vd->node);
vchan_cookie_complete(vd);
-
- /* Submit queued descriptors after processing the completed ones */
- axi_chan_start_first_queued(chan);
}
out:
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
index 454904d99..ac571b413 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -104,6 +104,7 @@ struct axi_dma_desc {
u32 completed_blocks;
u32 length;
u32 period_len;
+ u32 nr_hw_descs;
};
struct axi_dma_chan_config {
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1398814d8..e3505e567 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev;
- dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
+ if (ret)
+ return ret;
ret = dma_async_device_register(&idma64->dma);
if (ret)
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 399350711..fd9bbee4c 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev;
- ida_destroy(&file_ida);
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev));
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 8dc029c86..fc049c9c9 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -611,11 +611,13 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
spin_unlock(&irq_entry->list_lock);
- list_for_each_entry(desc, &flist, list) {
+ list_for_each_entry_safe(desc, n, &flist, list) {
/*
* Check against the original status as ABORT is software defined
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
+ list_del(&desc->list);
+
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
idxd_desc_complete(desc, IDXD_COMPLETE_ABORT, true);
continue;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 9c364e92c..e8f45a7fd 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -534,18 +534,6 @@ err_out:
return err;
}
-static int ioat_register(struct ioatdma_device *ioat_dma)
-{
- int err = dma_async_device_register(&ioat_dma->dma_dev);
-
- if (err) {
- ioat_disable_interrupts(ioat_dma);
- dma_pool_destroy(ioat_dma->completion_pool);
- }
-
- return err;
-}
-
static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
struct dma_device *dma = &ioat_dma->dma_dev;
@@ -1181,9 +1169,9 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
}
- err = ioat_register(ioat_dma);
+ err = dma_async_device_register(&ioat_dma->dma_dev);
if (err)
- return err;
+ goto err_disable_interrupts;
ioat_kobject_add(ioat_dma, &ioat_ktype);
@@ -1192,20 +1180,29 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca)
/* disable relaxed ordering */
err = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
/* clear relaxed ordering enable */
val16 &= ~PCI_EXP_DEVCTL_RELAX_EN;
err = pcie_capability_write_word(pdev, PCI_EXP_DEVCTL, val16);
- if (err)
- return pcibios_err_to_errno(err);
+ if (err) {
+ err = pcibios_err_to_errno(err);
+ goto err_disable_interrupts;
+ }
if (ioat_dma->cap & IOAT_CAP_DPS)
writeb(ioat_pending_level + 1,
ioat_dma->reg_base + IOAT_PREFETCH_LIMIT_OFFSET);
return 0;
+
+err_disable_interrupts:
+ ioat_disable_interrupts(ioat_dma);
+ dma_pool_destroy(ioat_dma->completion_pool);
+ return err;
}
static void ioat_shutdown(struct pci_dev *pdev)
@@ -1350,6 +1347,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct ioatdma_device *device;
+ unsigned int i;
+ u8 version;
int err;
err = pcim_enable_device(pdev);
@@ -1363,6 +1362,10 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!iomap)
return -ENOMEM;
+ version = readb(iomap[IOAT_MMIO_BAR] + IOAT_VER_OFFSET);
+ if (version < IOAT_VER_3_0)
+ return -ENODEV;
+
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err)
return err;
@@ -1373,17 +1376,18 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
pci_set_drvdata(pdev, device);
- device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+ device->version = version;
if (device->version >= IOAT_VER_3_4)
ioat_dca_enabled = 0;
- if (device->version >= IOAT_VER_3_0) {
- if (is_skx_ioat(pdev))
- device->version = IOAT_VER_3_2;
- err = ioat3_dma_probe(device, ioat_dca_enabled);
- } else
- return -ENODEV;
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
+
+ err = ioat3_dma_probe(device, ioat_dca_enabled);
if (err) {
+ for (i = 0; i < IOAT_MAX_CHANS; i++)
+ kfree(device->idx[i]);
+ kfree(device);
dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n");
return -ENODEV;
}
@@ -1445,6 +1449,7 @@ module_init(ioat_init_module);
static void __exit ioat_exit_module(void)
{
pci_unregister_driver(&ioat_pci_driver);
+ kmem_cache_destroy(ioat_sed_cache);
kmem_cache_destroy(ioat_cache);
}
module_exit(ioat_exit_module);
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c9b93055d..f0a399cf4 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -200,12 +200,9 @@ of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glu
ret = of_k3_udma_glue_parse(udmax_np, common);
if (ret)
- goto out_put_spec;
+ return ret;
ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
-
-out_put_spec:
- of_node_put(udmax_np);
return ret;
}
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
index 313b21738..ae6e06057 100644
--- a/drivers/dma/xilinx/xdma.c
+++ b/drivers/dma/xilinx/xdma.c
@@ -885,11 +885,11 @@ static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
u32 st;
bool repeat_tx;
+ spin_lock(&xchan->vchan.lock);
+
if (xchan->stop_requested)
complete(&xchan->last_interrupt);
- spin_lock(&xchan->vchan.lock);
-
/* get submitted request */
vd = vchan_next_desc(&xchan->vchan);
if (!vd)
diff --git a/drivers/dpll/dpll_core.c b/drivers/dpll/dpll_core.c
index d0f6693ca..32019dc33 100644
--- a/drivers/dpll/dpll_core.c
+++ b/drivers/dpll/dpll_core.c
@@ -449,7 +449,7 @@ static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
sizeof(*src->freq_supported);
dst->freq_supported = kmemdup(src->freq_supported,
freq_size, GFP_KERNEL);
- if (!src->freq_supported)
+ if (!dst->freq_supported)
return -ENOMEM;
}
if (src->board_label) {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 1f3520d76..a17f3c0cd 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -81,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
@@ -94,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
/*
@@ -1025,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt)
}
ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
- if (ret)
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
goto out;
+ }
gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index cdd8480e7..dbe9fe5f2 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -800,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
if (enable)
errcmd |= ERRCMD_CE | ERRSTS_UE;
@@ -809,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
return 0;
}
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 9c5b6f8bd..27996b792 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -648,7 +648,7 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
memset(&res, 0, sizeof(res));
res.mce = mce;
res.addr = mce->addr & MCI_ADDR_PHYSADDR;
- if (!pfn_to_online_page(res.addr >> PAGE_SHIFT)) {
+ if (!pfn_to_online_page(res.addr >> PAGE_SHIFT) && !arch_is_platform_page(res.addr)) {
pr_err("Invalid address 0x%llx in IA32_MC%d_ADDR\n", mce->addr, mce->bank);
return NOTIFY_DONE;
}
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 5f869eacd..3da94b382 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -116,7 +116,8 @@ config EXTCON_MAX77843
config EXTCON_MAX8997
tristate "Maxim MAX8997 EXTCON Support"
- depends on MFD_MAX8997 && IRQ_DOMAIN
+ depends on MFD_MAX8997
+ select IRQ_DOMAIN
help
If you say yes here you get support for the MUIC device of
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 5f3a3e913..d19c78a78 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -169,9 +169,14 @@ static int dmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static void dmi_dev_release(struct device *dev)
+{
+ kfree(dev);
+}
+
static struct class dmi_class = {
.name = "dmi",
- .dev_release = (void(*)(struct device *)) kfree,
+ .dev_release = dmi_dev_release,
.dev_uevent = dmi_dev_uevent,
};
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 70e9789ff..6a337f1f8 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -335,8 +335,8 @@ fail_free_new_fdt:
fail:
efi_free(fdt_size, fdt_addr);
-
- efi_bs_call(free_pool, priv.runtime_map);
+ if (!efi_novamap)
+ efi_bs_call(free_pool, priv.runtime_map);
return EFI_LOAD_ERROR;
}
diff --git a/drivers/firmware/efi/libstub/loongarch.c b/drivers/firmware/efi/libstub/loongarch.c
index 684c93546..d0ef93551 100644
--- a/drivers/firmware/efi/libstub/loongarch.c
+++ b/drivers/firmware/efi/libstub/loongarch.c
@@ -41,7 +41,7 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map, void *priv)
unsigned long __weak kernel_entry_address(unsigned long kernel_addr,
efi_loaded_image_t *image)
{
- return *(unsigned long *)(kernel_addr + 8) - VMLINUX_LOAD_ADDRESS + kernel_addr;
+ return *(unsigned long *)(kernel_addr + 8) - PHYSADDR(VMLINUX_LOAD_ADDRESS) + kernel_addr;
}
efi_status_t efi_boot_kernel(void *handle, efi_loaded_image_t *image,
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index d5a8182cf..1983fd3bf 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -776,6 +776,26 @@ static void error(char *str)
efi_warn("Decompression failed: %s\n", str);
}
+static const char *cmdline_memmap_override;
+
+static efi_status_t parse_options(const char *cmdline)
+{
+ static const char opts[][14] = {
+ "mem=", "memmap=", "efi_fake_mem=", "hugepages="
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(opts); i++) {
+ const char *p = strstr(cmdline, opts[i]);
+
+ if (p == cmdline || (p > cmdline && isspace(p[-1]))) {
+ cmdline_memmap_override = opts[i];
+ break;
+ }
+ }
+
+ return efi_parse_options(cmdline);
+}
+
static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
{
unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
@@ -807,6 +827,10 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
!memcmp(efistub_fw_vendor(), ami, sizeof(ami))) {
efi_debug("AMI firmware v2.0 or older detected - disabling physical KASLR\n");
seed[0] = 0;
+ } else if (cmdline_memmap_override) {
+ efi_info("%s detected on the kernel command line - disabling physical KASLR\n",
+ cmdline_memmap_override);
+ seed[0] = 0;
}
boot_params_ptr->hdr.loadflags |= KASLR_FLAG;
@@ -883,7 +907,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
}
#ifdef CONFIG_CMDLINE_BOOL
- status = efi_parse_options(CONFIG_CMDLINE);
+ status = parse_options(CONFIG_CMDLINE);
if (status != EFI_SUCCESS) {
efi_err("Failed to parse options\n");
goto fail;
@@ -892,7 +916,7 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
if (!IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
unsigned long cmdline_paddr = ((u64)hdr->cmd_line_ptr |
((u64)boot_params->ext_cmd_line_ptr << 32));
- status = efi_parse_options((char *)cmdline_paddr);
+ status = parse_options((char *)cmdline_paddr);
if (status != EFI_SUCCESS) {
efi_err("Failed to parse options\n");
goto fail;
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index 3365944f7..34109fd86 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -15,10 +15,6 @@
#include <asm/early_ioremap.h>
#include <asm/efi.h>
-#ifndef __efi_memmap_free
-#define __efi_memmap_free(phys, size, flags) do { } while (0)
-#endif
-
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
* @data: EFI memory map data
@@ -51,11 +47,6 @@ int __init __efi_memmap_init(struct efi_memory_map_data *data)
return -ENOMEM;
}
- if (efi.memmap.flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB))
- __efi_memmap_free(efi.memmap.phys_map,
- efi.memmap.desc_size * efi.memmap.nr_map,
- efi.memmap.flags);
-
map.phys_map = data->phys_map;
map.nr_map = data->size / data->desc_size;
map.map_end = map.map + data->size;
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index d9629ff87..2328ca58b 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -497,10 +497,12 @@ int psci_cpu_suspend_enter(u32 state)
static int psci_system_suspend(unsigned long unused)
{
+ int err;
phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
- return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
+ err = invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
pa_cpu_resume, 0, 0);
+ return psci_to_linux_errno(err);
}
static int psci_system_suspend_enter(suspend_state_t state)
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 90283f160..2ad85052b 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -569,13 +569,14 @@ int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
desc.args[1] = mdata_phys;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
out:
@@ -637,10 +638,12 @@ int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -672,10 +675,12 @@ int qcom_scm_pas_auth_and_reset(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -706,11 +711,12 @@ int qcom_scm_pas_shutdown(u32 peripheral)
ret = qcom_scm_bw_enable();
if (ret)
- return ret;
+ goto disable_clk;
ret = qcom_scm_call(__scm->dev, &desc, &res);
-
qcom_scm_bw_disable();
+
+disable_clk:
qcom_scm_clk_disable();
return ret ? : res.result[0];
@@ -1624,7 +1630,7 @@ EXPORT_SYMBOL_GPL(qcom_scm_qseecom_app_send);
* We do not yet support re-entrant calls via the qseecom interface. To prevent
* any potential issues with this, only allow validated machines for now.
*/
-static const struct of_device_id qcom_scm_qseecom_allowlist[] = {
+static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "lenovo,thinkpad-x13s", },
{ }
};
@@ -1713,7 +1719,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
*/
bool qcom_scm_is_available(void)
{
- return !!__scm;
+ return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
@@ -1794,10 +1800,12 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (!scm)
return -ENOMEM;
+ scm->dev = &pdev->dev;
ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
if (ret < 0)
return ret;
+ init_completion(&scm->waitq_comp);
mutex_init(&scm->scm_bw_lock);
scm->path = devm_of_icc_get(&pdev->dev, NULL);
@@ -1829,10 +1837,8 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (ret)
return ret;
- __scm = scm;
- __scm->dev = &pdev->dev;
-
- init_completion(&__scm->waitq_comp);
+ /* Let all above stores be available after this */
+ smp_store_release(&__scm, scm);
irq = platform_get_irq_optional(pdev, 0);
if (irq < 0) {
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index 322aada20..ac34876a9 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -9,6 +9,7 @@
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/mailbox_client.h>
+#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -97,8 +98,8 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
if (size & 3)
return -EINVAL;
- buf = dma_alloc_coherent(fw->cl.dev, PAGE_ALIGN(size), &bus_addr,
- GFP_ATOMIC);
+ buf = dma_alloc_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size),
+ &bus_addr, GFP_ATOMIC);
if (!buf)
return -ENOMEM;
@@ -126,7 +127,7 @@ int rpi_firmware_property_list(struct rpi_firmware *fw,
ret = -EINVAL;
}
- dma_free_coherent(fw->cl.dev, PAGE_ALIGN(size), buf, bus_addr);
+ dma_free_coherent(fw->chan->mbox->dev, PAGE_ALIGN(size), buf, bus_addr);
return ret;
}
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 79c473b3c..8ef395b49 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
}
EXPORT_SYMBOL_GPL(fpga_bridge_disable);
-static struct fpga_bridge *__fpga_bridge_get(struct device *dev,
+static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev,
struct fpga_image_info *info)
{
struct fpga_bridge *bridge;
- int ret = -ENODEV;
- bridge = to_fpga_bridge(dev);
+ bridge = to_fpga_bridge(bridge_dev);
bridge->info = info;
- if (!mutex_trylock(&bridge->mutex)) {
- ret = -EBUSY;
- goto err_dev;
- }
+ if (!mutex_trylock(&bridge->mutex))
+ return ERR_PTR(-EBUSY);
- if (!try_module_get(dev->parent->driver->owner))
- goto err_ll_mod;
+ if (!try_module_get(bridge->br_ops_owner)) {
+ mutex_unlock(&bridge->mutex);
+ return ERR_PTR(-ENODEV);
+ }
dev_dbg(&bridge->dev, "get\n");
return bridge;
-
-err_ll_mod:
- mutex_unlock(&bridge->mutex);
-err_dev:
- put_device(dev);
- return ERR_PTR(ret);
}
/**
@@ -98,13 +91,18 @@ err_dev:
struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
struct fpga_image_info *info)
{
- struct device *dev;
+ struct fpga_bridge *bridge;
+ struct device *bridge_dev;
- dev = class_find_device_by_of_node(&fpga_bridge_class, np);
- if (!dev)
+ bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np);
+ if (!bridge_dev)
return ERR_PTR(-ENODEV);
- return __fpga_bridge_get(dev, info);
+ bridge = __fpga_bridge_get(bridge_dev, info);
+ if (IS_ERR(bridge))
+ put_device(bridge_dev);
+
+ return bridge;
}
EXPORT_SYMBOL_GPL(of_fpga_bridge_get);
@@ -125,6 +123,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info)
{
+ struct fpga_bridge *bridge;
struct device *bridge_dev;
bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
@@ -132,7 +131,11 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev,
if (!bridge_dev)
return ERR_PTR(-ENODEV);
- return __fpga_bridge_get(bridge_dev, info);
+ bridge = __fpga_bridge_get(bridge_dev, info);
+ if (IS_ERR(bridge))
+ put_device(bridge_dev);
+
+ return bridge;
}
EXPORT_SYMBOL_GPL(fpga_bridge_get);
@@ -146,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge)
dev_dbg(&bridge->dev, "put\n");
bridge->info = NULL;
- module_put(bridge->dev.parent->driver->owner);
+ module_put(bridge->br_ops_owner);
mutex_unlock(&bridge->mutex);
put_device(&bridge->dev);
}
@@ -316,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = {
ATTRIBUTE_GROUPS(fpga_bridge);
/**
- * fpga_bridge_register - create and register an FPGA Bridge device
+ * __fpga_bridge_register - create and register an FPGA Bridge device
* @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
+ * @owner: owner module containing the br_ops
*
* Return: struct fpga_bridge pointer or ERR_PTR()
*/
struct fpga_bridge *
-fpga_bridge_register(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv)
+__fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv, struct module *owner)
{
struct fpga_bridge *bridge;
int id, ret;
@@ -357,6 +361,7 @@ fpga_bridge_register(struct device *parent, const char *name,
bridge->name = name;
bridge->br_ops = br_ops;
+ bridge->br_ops_owner = owner;
bridge->priv = priv;
bridge->dev.groups = br_ops->groups;
@@ -386,7 +391,7 @@ error_kfree:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_bridge_register);
+EXPORT_SYMBOL_GPL(__fpga_bridge_register);
/**
* fpga_bridge_unregister - unregister an FPGA bridge
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index 06651389c..0f4035b08 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = {
};
ATTRIBUTE_GROUPS(fpga_mgr);
-static struct fpga_manager *__fpga_mgr_get(struct device *dev)
+static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev)
{
struct fpga_manager *mgr;
- mgr = to_fpga_manager(dev);
+ mgr = to_fpga_manager(mgr_dev);
- if (!try_module_get(dev->parent->driver->owner))
- goto err_dev;
+ if (!try_module_get(mgr->mops_owner))
+ mgr = ERR_PTR(-ENODEV);
return mgr;
-
-err_dev:
- put_device(dev);
- return ERR_PTR(-ENODEV);
}
static int fpga_mgr_dev_match(struct device *dev, const void *data)
@@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
*/
struct fpga_manager *fpga_mgr_get(struct device *dev)
{
- struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev,
- fpga_mgr_dev_match);
+ struct fpga_manager *mgr;
+ struct device *mgr_dev;
+
+ mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match);
if (!mgr_dev)
return ERR_PTR(-ENODEV);
- return __fpga_mgr_get(mgr_dev);
+ mgr = __fpga_mgr_get(mgr_dev);
+ if (IS_ERR(mgr))
+ put_device(mgr_dev);
+
+ return mgr;
}
EXPORT_SYMBOL_GPL(fpga_mgr_get);
@@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get);
*/
struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
{
- struct device *dev;
+ struct fpga_manager *mgr;
+ struct device *mgr_dev;
- dev = class_find_device_by_of_node(&fpga_mgr_class, node);
- if (!dev)
+ mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node);
+ if (!mgr_dev)
return ERR_PTR(-ENODEV);
- return __fpga_mgr_get(dev);
+ mgr = __fpga_mgr_get(mgr_dev);
+ if (IS_ERR(mgr))
+ put_device(mgr_dev);
+
+ return mgr;
}
EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
@@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get);
*/
void fpga_mgr_put(struct fpga_manager *mgr)
{
- module_put(mgr->dev.parent->driver->owner);
+ module_put(mgr->mops_owner);
put_device(&mgr->dev);
}
EXPORT_SYMBOL_GPL(fpga_mgr_put);
@@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
/**
- * fpga_mgr_register_full - create and register an FPGA Manager device
+ * __fpga_mgr_register_full - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
+ * @owner: owner module containing the ops
*
* The caller of this function is responsible for calling fpga_mgr_unregister().
* Using devm_fpga_mgr_register_full() instead is recommended.
@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
* Return: pointer to struct fpga_manager pointer or ERR_PTR()
*/
struct fpga_manager *
-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner)
{
const struct fpga_manager_ops *mops = info->mops;
struct fpga_manager *mgr;
@@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
mutex_init(&mgr->ref_mutex);
+ mgr->mops_owner = owner;
+
mgr->name = info->name;
mgr->mops = info->mops;
mgr->priv = info->priv;
@@ -841,14 +852,15 @@ error_kfree:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
+EXPORT_SYMBOL_GPL(__fpga_mgr_register_full);
/**
- * fpga_mgr_register - create and register an FPGA Manager device
+ * __fpga_mgr_register - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
+ * @owner: owner module containing the ops
*
* The caller of this function is responsible for calling fpga_mgr_unregister().
* Using devm_fpga_mgr_register() instead is recommended. This simple
@@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
* Return: pointer to struct fpga_manager pointer or ERR_PTR()
*/
struct fpga_manager *
-fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv)
+__fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv, struct module *owner)
{
struct fpga_manager_info info = { 0 };
@@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name,
info.mops = mops;
info.priv = priv;
- return fpga_mgr_register_full(parent, &info);
+ return __fpga_mgr_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_register);
+EXPORT_SYMBOL_GPL(__fpga_mgr_register);
/**
* fpga_mgr_unregister - unregister an FPGA manager
@@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
}
/**
- * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
+ * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register()
* @parent: fpga manager device from pdev
* @info: parameters for fpga manager
+ * @owner: owner module containing the ops
*
* Return: fpga manager pointer on success, negative error code otherwise.
*
@@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
* function will be called automatically when the managing device is detached.
*/
struct fpga_manager *
-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
+__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner)
{
struct fpga_mgr_devres *dr;
struct fpga_manager *mgr;
@@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
if (!dr)
return ERR_PTR(-ENOMEM);
- mgr = fpga_mgr_register_full(parent, info);
+ mgr = __fpga_mgr_register_full(parent, info, owner);
if (IS_ERR(mgr)) {
devres_free(dr);
return mgr;
@@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf
return mgr;
}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full);
/**
- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
+ * @owner: owner module containing the ops
*
* Return: fpga manager pointer on success, negative error code otherwise.
*
@@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
* device is detached.
*/
struct fpga_manager *
-devm_fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv)
+__devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv,
+ struct module *owner)
{
struct fpga_manager_info info = { 0 };
@@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name,
info.mops = mops;
info.priv = priv;
- return devm_fpga_mgr_register_full(parent, &info);
+ return __devm_fpga_mgr_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
+EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register);
static void fpga_mgr_dev_release(struct device *dev)
{
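The manager side follows the same ownership pattern: drivers keep calling fpga_mgr_register() / devm_fpga_mgr_register() unchanged at the call site, with the public names expected to become THIS_MODULE-forwarding macros for the new double-underscore functions. A hedged usage sketch, where my_fpga_probe() and my_fpga_ops are hypothetical names used only for illustration:

	static int my_fpga_probe(struct platform_device *pdev)
	{
		struct fpga_manager *mgr;

		/* the macro is assumed to forward THIS_MODULE to __devm_fpga_mgr_register() */
		mgr = devm_fpga_mgr_register(&pdev->dev, "my-fpga-manager",
					     &my_fpga_ops, NULL);
		return PTR_ERR_OR_ZERO(mgr);
	}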
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index b364a9294..753cd1425 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -53,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region)
}
get_device(dev);
- if (!try_module_get(dev->parent->driver->owner)) {
+ if (!try_module_get(region->ops_owner)) {
put_device(dev);
mutex_unlock(&region->mutex);
return ERR_PTR(-ENODEV);
@@ -75,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region)
dev_dbg(dev, "put\n");
- module_put(dev->parent->driver->owner);
+ module_put(region->ops_owner);
put_device(dev);
mutex_unlock(&region->mutex);
}
@@ -181,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = {
ATTRIBUTE_GROUPS(fpga_region);
/**
- * fpga_region_register_full - create and register an FPGA Region device
+ * __fpga_region_register_full - create and register an FPGA Region device
* @parent: device parent
* @info: parameters for FPGA Region
+ * @owner: module containing the get_bridges function
*
* Return: struct fpga_region or ERR_PTR()
*/
struct fpga_region *
-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
+__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
+ struct module *owner)
{
struct fpga_region *region;
int id, ret = 0;
@@ -213,6 +215,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
region->compat_id = info->compat_id;
region->priv = info->priv;
region->get_bridges = info->get_bridges;
+ region->ops_owner = owner;
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
@@ -241,13 +244,14 @@ err_free:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_region_register_full);
+EXPORT_SYMBOL_GPL(__fpga_region_register_full);
/**
- * fpga_region_register - create and register an FPGA Region device
+ * __fpga_region_register - create and register an FPGA Region device
* @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
+ * @owner: module containing the get_bridges function
*
* This simple version of the register function should be sufficient for most users.
* The fpga_region_register_full() function is available for users that need to
@@ -256,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full);
* Return: struct fpga_region or ERR_PTR()
*/
struct fpga_region *
-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *), struct module *owner)
{
struct fpga_region_info info = { 0 };
info.mgr = mgr;
info.get_bridges = get_bridges;
- return fpga_region_register_full(parent, &info);
+ return __fpga_region_register_full(parent, &info, owner);
}
-EXPORT_SYMBOL_GPL(fpga_region_register);
+EXPORT_SYMBOL_GPL(__fpga_region_register);
/**
* fpga_region_unregister - unregister an FPGA region
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b50d0b470..cbfcfefdb 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -1549,7 +1549,7 @@ config GPIO_TPS68470
are "output only" GPIOs.
config GPIO_TQMX86
- tristate "TQ-Systems QTMX86 GPIO"
+ tristate "TQ-Systems TQMx86 GPIO"
depends on MFD_TQMX86 || COMPILE_TEST
depends on HAS_IOPORT_MAP
select GPIOLIB_IRQCHIP
diff --git a/drivers/gpio/gpio-npcm-sgpio.c b/drivers/gpio/gpio-npcm-sgpio.c
index d31788b43..260570614 100644
--- a/drivers/gpio/gpio-npcm-sgpio.c
+++ b/drivers/gpio/gpio-npcm-sgpio.c
@@ -434,7 +434,7 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *ic = irq_desc_get_chip(desc);
struct npcm_sgpio *gpio = gpiochip_get_data(gc);
- unsigned int i, j, girq;
+ unsigned int i, j;
unsigned long reg;
chained_irq_enter(ic, desc);
@@ -443,11 +443,9 @@ static void npcm_sgpio_irq_handler(struct irq_desc *desc)
const struct npcm_sgpio_bank *bank = &npcm_sgpio_banks[i];
reg = ioread8(bank_reg(gpio, bank, EVENT_STS));
- for_each_set_bit(j, &reg, 8) {
- girq = irq_find_mapping(gc->irq.domain,
- i * 8 + gpio->nout_sgpio + j);
- generic_handle_domain_irq(gc->irq.domain, girq);
- }
+ for_each_set_bit(j, &reg, 8)
+ generic_handle_domain_irq(gc->irq.domain,
+ i * 8 + gpio->nout_sgpio + j);
}
chained_irq_exit(ic, desc);
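The npcm-sgpio change hinges on an irqdomain API detail: generic_handle_domain_irq() takes a hardware IRQ number and performs the domain lookup itself, whereas irq_find_mapping() translates a hwirq into a Linux IRQ. The removed lines combined the two, feeding a Linux IRQ back in where a hwirq is expected. For reference, the generic irqdomain pattern (not a quote from this driver):

	virq = irq_find_mapping(domain, hwirq);   /* hwirq -> Linux IRQ */
	generic_handle_irq(virq);                 /* takes a Linux IRQ */

	generic_handle_domain_irq(domain, hwirq); /* takes the hwirq directly */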
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 3a28c1f27..f2e7e8754 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -6,6 +6,7 @@
* Vadim V.Vlasov <vvlasov@dev.rtsoft.ru>
*/
+#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
@@ -28,16 +29,25 @@
#define TQMX86_GPIIC 3 /* GPI Interrupt Configuration Register */
#define TQMX86_GPIIS 4 /* GPI Interrupt Status Register */
+#define TQMX86_GPII_NONE 0
#define TQMX86_GPII_FALLING BIT(0)
#define TQMX86_GPII_RISING BIT(1)
+/* Stored in irq_type as a trigger type, but not actually valid as a register
+ * value, so the name doesn't use "GPII"
+ */
+#define TQMX86_INT_BOTH (BIT(0) | BIT(1))
#define TQMX86_GPII_MASK (BIT(0) | BIT(1))
#define TQMX86_GPII_BITS 2
+/* Stored in irq_type with GPII bits */
+#define TQMX86_INT_UNMASKED BIT(2)
struct tqmx86_gpio_data {
struct gpio_chip chip;
void __iomem *io_base;
int irq;
+ /* Lock must be held for accessing output and irq_type fields */
raw_spinlock_t spinlock;
+ DECLARE_BITMAP(output, TQMX86_NGPIO);
u8 irq_type[TQMX86_NGPI];
};
@@ -64,15 +74,10 @@ static void tqmx86_gpio_set(struct gpio_chip *chip, unsigned int offset,
{
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
unsigned long flags;
- u8 val;
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- val = tqmx86_gpio_read(gpio, TQMX86_GPIOD);
- if (value)
- val |= BIT(offset);
- else
- val &= ~BIT(offset);
- tqmx86_gpio_write(gpio, val, TQMX86_GPIOD);
+ __assign_bit(offset, gpio->output, value);
+ tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -107,21 +112,38 @@ static int tqmx86_gpio_get_direction(struct gpio_chip *chip,
return GPIO_LINE_DIRECTION_OUT;
}
+static void tqmx86_gpio_irq_config(struct tqmx86_gpio_data *gpio, int offset)
+ __must_hold(&gpio->spinlock)
+{
+ u8 type = TQMX86_GPII_NONE, gpiic;
+
+ if (gpio->irq_type[offset] & TQMX86_INT_UNMASKED) {
+ type = gpio->irq_type[offset] & TQMX86_GPII_MASK;
+
+ if (type == TQMX86_INT_BOTH)
+ type = tqmx86_gpio_get(&gpio->chip, offset + TQMX86_NGPO)
+ ? TQMX86_GPII_FALLING
+ : TQMX86_GPII_RISING;
+ }
+
+ gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
+ gpiic &= ~(TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS));
+ gpiic |= type << (offset * TQMX86_GPII_BITS);
+ tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+}
+
static void tqmx86_gpio_irq_mask(struct irq_data *data)
{
unsigned int offset = (data->hwirq - TQMX86_NGPO);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(data));
}
@@ -131,16 +153,12 @@ static void tqmx86_gpio_irq_unmask(struct irq_data *data)
struct tqmx86_gpio_data *gpio = gpiochip_get_data(
irq_data_get_irq_chip_data(data));
unsigned long flags;
- u8 gpiic, mask;
-
- mask = TQMX86_GPII_MASK << (offset * TQMX86_GPII_BITS);
gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(data));
+
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~mask;
- gpiic |= gpio->irq_type[offset] << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] |= TQMX86_INT_UNMASKED;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
}
@@ -151,7 +169,7 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
unsigned int offset = (data->hwirq - TQMX86_NGPO);
unsigned int edge_type = type & IRQF_TRIGGER_MASK;
unsigned long flags;
- u8 new_type, gpiic;
+ u8 new_type;
switch (edge_type) {
case IRQ_TYPE_EDGE_RISING:
@@ -161,19 +179,16 @@ static int tqmx86_gpio_irq_set_type(struct irq_data *data, unsigned int type)
new_type = TQMX86_GPII_FALLING;
break;
case IRQ_TYPE_EDGE_BOTH:
- new_type = TQMX86_GPII_FALLING | TQMX86_GPII_RISING;
+ new_type = TQMX86_INT_BOTH;
break;
default:
return -EINVAL; /* not supported */
}
- gpio->irq_type[offset] = new_type;
-
raw_spin_lock_irqsave(&gpio->spinlock, flags);
- gpiic = tqmx86_gpio_read(gpio, TQMX86_GPIIC);
- gpiic &= ~((TQMX86_GPII_MASK) << (offset * TQMX86_GPII_BITS));
- gpiic |= new_type << (offset * TQMX86_GPII_BITS);
- tqmx86_gpio_write(gpio, gpiic, TQMX86_GPIIC);
+ gpio->irq_type[offset] &= ~TQMX86_GPII_MASK;
+ gpio->irq_type[offset] |= new_type;
+ tqmx86_gpio_irq_config(gpio, offset);
raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
return 0;
@@ -184,8 +199,8 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct tqmx86_gpio_data *gpio = gpiochip_get_data(chip);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned long irq_bits;
- int i = 0;
+ unsigned long irq_bits, flags;
+ int i;
u8 irq_status;
chained_irq_enter(irq_chip, desc);
@@ -194,6 +209,34 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
tqmx86_gpio_write(gpio, irq_status, TQMX86_GPIIS);
irq_bits = irq_status;
+
+ raw_spin_lock_irqsave(&gpio->spinlock, flags);
+ for_each_set_bit(i, &irq_bits, TQMX86_NGPI) {
+ /*
+ * Edge-both triggers are implemented by flipping the edge
+ * trigger after each interrupt, as the controller only supports
+ * either rising or falling edge triggers, but not both.
+ *
+ * Internally, the TQMx86 GPIO controller has separate status
+ * registers for rising and falling edge interrupts. GPIIC
+ * configures which bits from which register are visible in the
+ * interrupt status register GPIIS and defines what triggers the
+ * parent IRQ line. Writing to GPIIS always clears both rising
+ * and falling interrupt flags internally, regardless of the
+ * currently configured trigger.
+ *
+ * In consequence, we can cleanly implement the edge-both
+ * trigger in software by first clearing the interrupt and then
+ * setting the new trigger based on the current GPIO input in
+ * tqmx86_gpio_irq_config() - even if an edge arrives between
+ * reading the input and setting the trigger, we will have a new
+ * interrupt pending.
+ */
+ if ((gpio->irq_type[i] & TQMX86_GPII_MASK) == TQMX86_INT_BOTH)
+ tqmx86_gpio_irq_config(gpio, i);
+ }
+ raw_spin_unlock_irqrestore(&gpio->spinlock, flags);
+
for_each_set_bit(i, &irq_bits, TQMX86_NGPI)
generic_handle_domain_irq(gpio->chip.irq.domain,
i + TQMX86_NGPO);
@@ -277,6 +320,13 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
tqmx86_gpio_write(gpio, (u8)~TQMX86_DIR_INPUT_MASK, TQMX86_GPIODD);
+ /*
+ * Reading the previous output state is not possible with TQMx86 hardware.
+ * Initialize all outputs to 0 to have a defined state that matches the
+ * shadow register.
+ */
+ tqmx86_gpio_write(gpio, 0, TQMX86_GPIOD);
+
chip = &gpio->chip;
chip->label = "gpio-tqmx86";
chip->owner = THIS_MODULE;
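For the shadow-output bitmap introduced above, the helpers behave as follows: __assign_bit() (from <linux/bitops.h>) is the non-atomic set/clear variant, safe here because the spinlock is held, and bitmap_get_value8(gpio->output, 0) (from <linux/bitmap.h>) returns the eight bits starting at bit 0, i.e. the byte written back to TQMX86_GPIOD. Condensed, the pattern is:

	__assign_bit(offset, gpio->output, value);                        /* update shadow */
	tqmx86_gpio_write(gpio, bitmap_get_value8(gpio->output, 0), TQMX86_GPIOD);

Since the hardware cannot read the output register back, the shadow bitmap is the only source of truth, which is also why the probe path writes 0 to GPIOD so register and shadow start out in sync.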
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 7f140df40..c1e190d3e 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -128,7 +128,24 @@ static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, const void *data)
{
- return device_match_acpi_handle(&gc->gpiodev->dev, data);
+ /* First check the actual GPIO device */
+ if (device_match_acpi_handle(&gc->gpiodev->dev, data))
+ return true;
+
+ /*
+ * When the ACPI device is artificially split to the banks of GPIOs,
+ * where each of them is represented by a separate GPIO device,
+ * the firmware node of the physical device may not be shared among
+ * the banks as they may require different values for the same property,
+ * e.g., number of GPIOs in a certain bank. In such a case the ACPI handle
+ * of a GPIO device is NULL and cannot be used. Hence we have to check
+ * the parent device to be sure that there is no match before bailing
+ * out.
+ */
+ if (gc->parent)
+ return device_match_acpi_handle(gc->parent, data);
+
+ return false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index e4d4e55c0..0535b0798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1188,7 +1188,8 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
int ret;
ctx->sync = &mem->sync;
- drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&ctx->exec) {
ctx->n_vms = 0;
list_for_each_entry(entry, &mem->attachments, list) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 6857c586d..c8c23dbb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -211,6 +211,7 @@ union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
struct atom_integrated_system_info_v2_1 v21;
+ struct atom_integrated_system_info_v2_3 v23;
};
union umc_info {
@@ -359,6 +360,20 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
+ case 3:
+ mem_channel_number = igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 7753a2e64..941d6e379 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -5809,13 +5809,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
*speed = PCI_SPEED_UNKNOWN;
*width = PCIE_LNK_WIDTH_UNKNOWN;
- while ((parent = pci_upstream_bridge(parent))) {
- /* skip upstream/downstream switches internal to dGPU*/
- if (parent->vendor == PCI_VENDOR_ID_ATI)
- continue;
- *speed = pcie_get_speed_cap(parent);
- *width = pcie_get_width_cap(parent);
- break;
+ if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU*/
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+ } else {
+ /* use the current speeds rather than max if switching is not supported */
+ pcie_bandwidth_available(adev->pdev, NULL, speed, width);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index be4629cda..08b9dfb65 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
unsigned int ndw;
- signed long r;
+ int r;
uint32_t seq;
- if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
- !down_read_trylock(&adev->reset_domain->sem)) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
if (adev->gmc.flush_tlb_needs_extra_type_2)
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
2, all_hub,
@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
flush_type, all_hub,
inst);
- return 0;
- }
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
- /* 2 dwords flush + 8 dwords fence */
- ndw = kiq->pmf->invalidate_tlbs_size + 8;
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
- if (adev->gmc.flush_tlb_needs_extra_type_0)
- ndw += kiq->pmf->invalidate_tlbs_size;
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ amdgpu_ring_alloc(ring, ndw);
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
- spin_lock(&adev->gfx.kiq[inst].ring_lock);
- amdgpu_ring_alloc(ring, ndw);
- if (adev->gmc.flush_tlb_needs_extra_type_2)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
- if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
- kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
- r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
- if (r) {
- amdgpu_ring_undo(ring);
+ amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- goto error_unlock_reset;
- }
-
- amdgpu_ring_commit(ring);
- spin_unlock(&adev->gfx.kiq[inst].ring_lock);
- r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
- if (r < 1) {
- dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
- r = -ETIME;
- goto error_unlock_reset;
+ if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ }
}
- r = 0;
error_unlock_reset:
up_read(&adev->reset_domain->sem);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index b53c8fd4e..d89d6829f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -431,16 +431,16 @@ out:
static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[15];
int r;
- chip_name = "gc_9_4_3";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 43775cb67..8f2317667 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2021,6 +2021,9 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: SDMA trap\n");
instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0)
+ return instance;
+
switch (entry->ring_id) {
case 0:
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 719d6d365..ff01610fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 0, 3):
- if ((adev->pdev->device == 0x7460 &&
- adev->pdev->revision == 0x00) ||
- (adev->pdev->device == 0x7461 &&
- adev->pdev->revision == 0x00))
- /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
- gfx_target_version = 110005;
- else
- /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
- gfx_target_version = 110001;
+ /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+ gfx_target_version = 110001;
f2g = &gfx_v11_kfd2kgd;
break;
case IP_VERSION(11, 5, 0):
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d6e71aa80..f866a02f4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -9149,9 +9149,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
trace_amdgpu_dm_atomic_commit_tail_begin(state);
- if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
- dc_allow_idle_optimizations(dm->dc, false);
-
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_dp_mst_atomic_wait_for_dependencies(state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c27063305..2c36f3d00 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -363,7 +363,7 @@ void dm_helpers_dp_mst_send_payload_allocation(
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
- ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
+ ret = drm_dp_add_payload_part2(mst_mgr, new_payload);
if (ret) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index cb31a699c..1a269099f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
&connector->base,
dev->mode_config.tile_property,
0);
+ connector->colorspace_property = master->base.colorspace_property;
+ if (connector->colorspace_property)
+ drm_connector_attach_colorspace_property(connector);
drm_connector_set_path_property(connector, pathprop);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6083b1dcf..a72e849ec 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1340,16 +1340,27 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_
* Powering up the hardware requires notifying PMFW and DMCUB.
* Clearing the driver idle allow requires a DMCUB command.
* DMCUB commands requires the DMCUB to be powered up and restored.
- *
- * Exit out early to prevent an infinite loop of DMCUB commands
- * triggering exit low power - use software state to track this.
*/
- dc_dmub_srv->idle_allowed = allow_idle;
- if (!allow_idle)
+ if (!allow_idle) {
dc_dmub_srv_exit_low_power_state(dc);
- else
+ /*
+ * Idle is considered fully exited only after the sequence above
+ * fully completes. If we have a race of two threads exiting
+ * at the same time then it's safe to perform the sequence
+ * twice as long as we're not re-entering.
+ *
+ * Infinite command submission is avoided by using the
+ * dm_execute_dmub_cmd submission instead of the "wake" helpers.
+ */
+ dc_dmub_srv->idle_allowed = false;
+ } else {
+ /*
+ * Consider idle as notified prior to the actual submission to
+ * prevent multiple entries.
+ */
+ dc_dmub_srv->idle_allowed = true;
+
dc_dmub_srv_notify_idle(dc, allow_idle);
+ }
}
bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index b7e57aa27..b0d192c6e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx,
i += increment) {
if (j == hw_points - 1)
break;
+ if (i >= TRANSFER_FUNC_POINTS) {
+ DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n",
+ i, TRANSFER_FUNC_POINTS);
+ return false;
+ }
rgb_resulted[j].red = output_tf->tf_pts.red[i];
rgb_resulted[j].green = output_tf->tf_pts.green[i];
rgb_resulted[j].blue = output_tf->tf_pts.blue[i];
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 9067ca78f..eb6c6ba64 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -999,8 +999,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->plane_res.dpp)
update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false;
- if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) &&
- pipe_ctx->plane_res.mpcc_inst >= 0)
+ if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false;
if (pipe_ctx->stream_res.dsc)
@@ -1374,3 +1373,75 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
set_static_screen_control(pipe_ctx[i]->stream_res.tg,
triggers, params->num_frames);
}
+
+static bool should_avoid_empty_tu(struct pipe_ctx *pipe_ctx)
+{
+ /* Calculate average pixel count per TU, return false if under ~2.00 to
+ * avoid empty TUs. This is only required for DPIA tunneling as empty TUs
+ * are legal to generate for native DP links. Assume TU size 64 as there
+ * is currently no scenario where it's reprogrammed from HW default.
+ * MTPs have no such limitation, so this does not affect MST use cases.
+ */
+ unsigned int pix_clk_mhz;
+ unsigned int symclk_mhz;
+ unsigned int avg_pix_per_tu_x1000;
+ unsigned int tu_size_bytes = 64;
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+ struct dc_link_settings *link_settings = &pipe_ctx->link_config.dp_link_settings;
+ const struct dc *dc = pipe_ctx->stream->link->dc;
+
+ if (pipe_ctx->stream->link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ return false;
+
+ // Not necessary for MST configurations
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ return false;
+
+ pix_clk_mhz = timing->pix_clk_100hz / 10000;
+
+ // If this is true, can't block due to dynamic ODM
+ if (pix_clk_mhz > dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz)
+ return false;
+
+ switch (link_settings->link_rate) {
+ case LINK_RATE_LOW:
+ symclk_mhz = 162;
+ break;
+ case LINK_RATE_HIGH:
+ symclk_mhz = 270;
+ break;
+ case LINK_RATE_HIGH2:
+ symclk_mhz = 540;
+ break;
+ case LINK_RATE_HIGH3:
+ symclk_mhz = 810;
+ break;
+ default:
+ // We shouldn't be tunneling any other rates, something is wrong
+ ASSERT(0);
+ return false;
+ }
+
+ avg_pix_per_tu_x1000 = (1000 * pix_clk_mhz * tu_size_bytes)
+ / (symclk_mhz * link_settings->lane_count);
+
+ // Add small empirically-decided margin to account for potential jitter
+ return (avg_pix_per_tu_x1000 < 2020);
+}
+
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
+ return false;
+
+ if (should_avoid_empty_tu(pipe_ctx))
+ return false;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) &&
+ dc->debug.enable_dp_dig_pixel_rate_div_policy)
+ return true;
+
+ return false;
+}
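For illustration, plugging representative numbers into the formula above, avg_pix_per_tu_x1000 = 1000 * pix_clk_mhz * 64 / (symclk_mhz * lane_count): a 720p60 stream (pix_clk_mhz = 74) tunneled over a 4-lane HBR3 DPIA link (symclk_mhz = 810) gives 1000 * 74 * 64 / (810 * 4) = 1461, i.e. about 1.46 pixels per TU. That is below the 2020 cutoff, so should_avoid_empty_tu() returns true and the DIG pixel-rate divider policy is skipped for that stream. The same link carrying 1080p60 (pix_clk_mhz = 148) gives roughly 2923, about 2.92 pixels per TU, and the policy remains eligible.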
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index c354efa6c..91f5d1136 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -93,4 +93,6 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx,
int num_pipes, const struct dc_static_screen_params *params);
+bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index a93073055..6c8da59b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -158,7 +158,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = {
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
.calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
- .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .is_dp_dig_pixel_rate_div_policy = dcn35_is_dp_dig_pixel_rate_div_policy,
.dsc_pg_control = dcn35_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.enable_plane = dcn35_enable_plane,
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index af3eebb4c..1acb2d2c5 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1657,6 +1657,49 @@ struct atom_integrated_system_info_v2_2
uint32_t reserved4[189];
};
+struct uma_carveout_option {
+ char optionName[29]; //max length of string is 28chars + '\0'. Current design is for "minimum", "Medium", "High". This makes entire struct size 64bits
+ uint8_t memoryCarvedGb; //memory carved out with setting
+ uint8_t memoryRemainingGb; //memory remaining on system
+ union {
+ struct _flags {
+ uint8_t Auto : 1;
+ uint8_t Custom : 1;
+ uint8_t Reserved : 6;
+ } flags;
+ uint8_t all8;
+ } uma_carveout_option_flags;
+};
+
+struct atom_integrated_system_info_v2_3 {
+ struct atom_common_table_header table_header;
+ uint32_t vbios_misc; // enum of atom_system_vbiosmisc_def
+ uint32_t gpucapinfo; // enum of atom_system_gpucapinf_def
+ uint32_t system_config;
+ uint32_t cpucapinfo;
+ uint16_t gpuclk_ss_percentage; // unit of 0.001%, 1000 mean 1%
+ uint16_t gpuclk_ss_type;
+ uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
+ uint8_t umachannelnumber; // number of memory channels
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1; // dp_ss_control
+ uint8_t gpu_package_id;
+ struct edp_info_table edp1_info;
+ struct edp_info_table edp2_info;
+ uint32_t reserved2[8];
+ struct atom_external_display_connection_info extdispconninfo;
+ uint8_t UMACarveoutVersion;
+ uint8_t UMACarveoutIndexMax;
+ uint8_t UMACarveoutTypeDefault;
+ uint8_t UMACarveoutIndexDefault;
+ uint8_t UMACarveoutType; //Auto or Custom
+ uint8_t UMACarveoutIndex;
+ struct uma_carveout_option UMASizeControlOption[20];
+ uint8_t reserved3[110];
+};
+
// system_config
enum atom_system_vbiosmisc_def{
INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT = 0x01,
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 5cb4725c7..c8586cb7d 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -164,6 +164,8 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 4abfcd327..2fb6c9cb0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -226,15 +226,17 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && adev->in_s4) {
- /* Adds a GFX reset as workaround just before sending the
- * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
- * an invalid state.
- */
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
- SMU_RESET_MODE_2, NULL);
- if (ret)
- return ret;
+ if (!en && !adev->in_s0ix) {
+ if (adev->in_s4) {
+ /* Adds a GFX reset as workaround just before sending the
+ * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+ }
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index f3e744172..f4e76b46c 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -259,7 +259,7 @@ komeda_component_get_avail_scaler(struct komeda_component *c,
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
- if (!pipe_st)
+ if (IS_ERR_OR_NULL(pipe_st))
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 626709bec..2577f0cef 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -72,7 +72,10 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
__drm_atomic_helper_connector_destroy_state(connector->state);
kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, &mw_state->base);
+ connector->state = NULL;
+
+ if (mw_state)
+ __drm_atomic_helper_connector_reset(connector, &mw_state->base);
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 9d96d28d6..59e9ad349 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2066,10 +2066,8 @@ static int anx7625_setup_dsi_device(struct anx7625_data *ctx)
};
host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "fail to find dsi host.\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
@@ -2471,15 +2469,22 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
mutex_unlock(&ctx->aux_lock);
}
+static void
+anx7625_audio_update_connector_status(struct anx7625_data *ctx,
+ enum drm_connector_status status);
+
static enum drm_connector_status
anx7625_bridge_detect(struct drm_bridge *bridge)
{
struct anx7625_data *ctx = bridge_to_anx7625(bridge);
struct device *dev = ctx->dev;
+ enum drm_connector_status status;
DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
- return anx7625_sink_detect(ctx);
+ status = anx7625_sink_detect(ctx);
+ anx7625_audio_update_connector_status(ctx, status);
+ return status;
}
static const struct drm_edid *anx7625_bridge_edid_read(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index e226acc5c..8a91ef0ae 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2059,6 +2059,9 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
+ if (!mhdp_state->current_mode)
+ return;
+
drm_mode_set_name(mhdp_state->current_mode);
dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 82d23e4df..ff3284b6b 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -563,10 +563,8 @@ static int chipone_dsi_host_attach(struct chipone *icn)
host = of_find_mipi_dsi_host_by_node(host_node);
of_node_put(host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 5965e8027..8dd89efa8 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -8,8 +8,8 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
- select DRM_IMX8MP_HDMI_PVI
- select PHY_FSL_SAMSUNG_HDMI_PHY
+ imply DRM_IMX8MP_HDMI_PVI
+ imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 4b2ae27f0..1a9defa15 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -494,10 +494,8 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
};
host = of_find_mipi_dsi_host_by_node(lt->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a9c7e2b07..b99fe87ec 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -761,10 +761,8 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(lt9611->dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(lt9611->dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index f4f593ad8..ab702471f 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -266,10 +266,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
int ret;
host = of_find_mipi_dsi_host_by_node(dsi_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return ERR_PTR(-EPROBE_DEFER);
- }
+ if (!host)
+ return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n"));
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 7f41525f7..3d6e8f096 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -358,9 +358,12 @@ EXPORT_SYMBOL(drm_panel_bridge_set_orientation);
static void devm_drm_panel_bridge_release(struct device *dev, void *res)
{
- struct drm_bridge **bridge = res;
+ struct drm_bridge *bridge = *(struct drm_bridge **)res;
- drm_panel_bridge_remove(*bridge);
+ if (!bridge)
+ return;
+
+ drm_bridge_remove(bridge);
}
/**
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 90a89d70d..c73767063 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -454,10 +454,6 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
dev_dbg(tc->dev, "bus_formats %04x bpc %d\n",
connector->display_info.bus_formats[0],
tc->bpc);
- /*
- * Default hardware register settings of tc358775 configured
- * with MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA jeida-24 format
- */
if (connector->display_info.bus_formats[0] ==
MEDIA_BUS_FMT_RGB888_1X7X4_SPWG) {
/* VESA-24 */
@@ -468,14 +464,15 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
- } else { /* MEDIA_BUS_FMT_RGB666_1X7X3_SPWG - JEIDA-18 */
- d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
- d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R4, LVI_L0, LVI_R5, LVI_G0));
- d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_L0, LVI_L0));
- d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
- d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_L0, LVI_L0, LVI_B1, LVI_B2));
- d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
- d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_L0));
+ } else {
+ /* JEIDA-18 and JEIDA-24 */
+ d2l_write(tc->i2c, LV_MX0003, LV_MX(LVI_R2, LVI_R3, LVI_R4, LVI_R5));
+ d2l_write(tc->i2c, LV_MX0407, LV_MX(LVI_R6, LVI_R1, LVI_R7, LVI_G2));
+ d2l_write(tc->i2c, LV_MX0811, LV_MX(LVI_G3, LVI_G4, LVI_G0, LVI_G1));
+ d2l_write(tc->i2c, LV_MX1215, LV_MX(LVI_G5, LVI_G6, LVI_G7, LVI_B2));
+ d2l_write(tc->i2c, LV_MX1619, LV_MX(LVI_B0, LVI_B1, LVI_B3, LVI_B4));
+ d2l_write(tc->i2c, LV_MX2023, LV_MX(LVI_B5, LVI_B6, LVI_B7, LVI_L0));
+ d2l_write(tc->i2c, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R0));
}
d2l_write(tc->i2c, VFUEN, VFUEN_EN);
@@ -610,10 +607,8 @@ static int tc_attach_host(struct tc_data *tc)
};
host = of_find_mipi_dsi_host_by_node(tc->host_node);
- if (!host) {
- dev_err(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
if (IS_ERR(dsi)) {
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index ca3348109..6b559e071 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -319,12 +319,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
.channel = 0,
.node = NULL,
};
+ int ret;
host = of_find_mipi_dsi_host_by_node(dlpc->host_node);
- if (!host) {
- DRM_DEV_ERROR(dev, "failed to find dsi host\n");
- return -EPROBE_DEFER;
- }
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "failed to find dsi host\n");
dlpc->dsi = mipi_dsi_device_register_full(host, &info);
if (IS_ERR(dlpc->dsi)) {
@@ -336,7 +335,11 @@ static int dlpc_host_attach(struct dlpc *dlpc)
dlpc->dsi->format = MIPI_DSI_FMT_RGB565;
dlpc->dsi->lanes = dlpc->dsi_lanes;
- return devm_mipi_dsi_attach(dev, dlpc->dsi);
+ ret = devm_mipi_dsi_attach(dev, dlpc->dsi);
+ if (ret)
+ DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+
+ return ret;
}
static int dlpc3433_probe(struct i2c_client *client)
@@ -367,10 +370,8 @@ static int dlpc3433_probe(struct i2c_client *client)
drm_bridge_add(&dlpc->bridge);
ret = dlpc_host_attach(dlpc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach dsi host\n");
+ if (ret)
goto err_remove_bridge;
- }
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 4814b7b6d..57a7ed13f 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -478,7 +478,6 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
dev_err(ctx->dev, "failed to lock PLL, ret=%i\n", ret);
/* On failure, disable PLL again and exit. */
regmap_write(ctx->regmap, REG_RC_PLL_EN, 0x00);
- regulator_disable(ctx->vcc);
return;
}
diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml
index 0857773e5..8bc63912f 100644
--- a/drivers/gpu/drm/ci/test.yml
+++ b/drivers/gpu/drm/ci/test.yml
@@ -252,11 +252,11 @@ i915:cml:
i915:tgl:
extends:
- .i915
- parallel: 8
+ parallel: 5
variables:
- DEVICE_TYPE: asus-cx9400-volteer
+ DEVICE_TYPE: acer-cp514-2h-1130g7-volteer
GPU_VERSION: tgl
- RUNNER_TAG: mesa-ci-x86-64-lava-asus-cx9400-volteer
+ RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
.amdgpu:
extends:
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 03d528209..95fd18f24 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3421,7 +3421,6 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
/**
* drm_dp_add_payload_part2() - Execute payload update part 2
* @mgr: Manager to use.
- * @state: The global atomic state
* @payload: The payload to update
*
* If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
@@ -3430,14 +3429,13 @@ EXPORT_SYMBOL(drm_dp_remove_payload_part2);
* Returns: 0 on success, negative error code on failure.
*/
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload)
{
int ret = 0;
/* Skip failed payloads */
if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
- drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+ drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
payload->port->connector->name);
return -EIO;
}
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 521a71c61..17ed94885 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -687,11 +687,17 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
*/
list_for_each_entry_from(next, &encoder->bridge_chain,
chain_node) {
- if (next->pre_enable_prev_first) {
+ if (!next->pre_enable_prev_first) {
next = list_prev_entry(next, chain_node);
limit = next;
break;
}
+
+ if (list_is_last(&next->chain_node,
+ &encoder->bridge_chain)) {
+ limit = next;
+ break;
+ }
}
/* Call these bridges in reverse order */
@@ -774,7 +780,7 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
/* Found first bridge that does NOT
* request prev to be enabled first
*/
- limit = list_prev_entry(next, chain_node);
+ limit = next;
break;
}
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 923c44231..9064cdeb1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -7324,7 +7324,7 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
static bool displayid_is_tiled_block(const struct displayid_iter *iter,
const struct displayid_block *block)
{
- return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ return (displayid_version(iter) < DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_TILED_DISPLAY) ||
(displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index d647d8976..b4659cd62 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -113,7 +113,6 @@ static int drm_fbdev_generic_helper_fb_probe(struct drm_fb_helper *fb_helper,
/* screen */
info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
info->screen_buffer = screen_buffer;
- info->fix.smem_start = page_to_phys(vmalloc_to_page(info->screen_buffer));
info->fix.smem_len = screen_size;
/* deferred I/O */
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e435f986c..1ff0678be 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -610,6 +610,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
return ret;
}
+ if (is_cow_mapping(vma->vm_flags))
+ return -EINVAL;
+
dma_resv_lock(shmem->base.resv, NULL);
ret = drm_gem_shmem_get_pages(shmem);
dma_resv_unlock(shmem->base.resv);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index ef6e41652..9874ff6d4 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -654,7 +654,7 @@ EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable)
{
/* Note: Needs updating for non-default PPS or algorithm */
u8 tx[2] = { enable << 0, 0 };
@@ -679,8 +679,8 @@ EXPORT_SYMBOL(mipi_dsi_compression_mode);
*
* Return: 0 on success or a negative error code on failure.
*/
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps)
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps)
{
struct mipi_dsi_msg msg = {
.channel = dsi->channel,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index f5bbba9ad..b1e9a7021 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -309,6 +309,7 @@ static int vidi_get_modes(struct drm_connector *connector)
struct vidi_context *ctx = ctx_from_connector(connector);
struct edid *edid;
int edid_len;
+ int count;
/*
* the edid data comes from user side and it would be set
@@ -328,7 +329,11 @@ static int vidi_get_modes(struct drm_connector *connector)
drm_connector_update_edid_property(connector, edid);
- return drm_add_edid_modes(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+
+ return count;
}
static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index b1d02dec3..6385202cd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return 0;
+ goto no_edid;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return 0;
+ goto no_edid;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
@@ -906,6 +906,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
+
+no_edid:
+ return drm_add_modes_noedid(connector, 640, 480);
}
static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index ed81e1466..40e7d8626 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
static void i915_audio_component_init(struct drm_i915_private *i915)
{
u32 aud_freq, aud_freq_init;
- int ret;
-
- ret = component_add_typed(i915->drm.dev,
- &i915_audio_component_bind_ops,
- I915_COMPONENT_AUDIO);
- if (ret < 0) {
- drm_err(&i915->drm,
- "failed to add audio component (%d)\n", ret);
- /* continue with reduced functionality */
- return;
- }
if (DISPLAY_VER(i915) >= 9) {
aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)
/* init with current cdclk */
intel_audio_cdclk_change_post(i915);
+}
+
+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+ int ret;
+
+ ret = component_add_typed(i915->drm.dev,
+ &i915_audio_component_bind_ops,
+ I915_COMPONENT_AUDIO);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "failed to add audio component (%d)\n", ret);
+ /* continue with reduced functionality */
+ return;
+ }
i915->display.audio.component_registered = true;
}
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
i915_audio_component_init(i915);
}
+void intel_audio_register(struct drm_i915_private *i915)
+{
+ if (!i915->display.audio.lpe.platdev)
+ i915_audio_component_register(i915);
+}
+
/**
* intel_audio_deinit() - deinitialize the audio driver
* @i915: the i915 drm device private data
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 9327954b8..576c061d7 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 87dd07e0d..6da5e85ab 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -542,6 +542,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)
intel_display_driver_enable_user_access(i915);
+ intel_audio_register(i915);
+
intel_display_debugfs_register(i915);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index e583515f9..950f86fb1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -431,6 +431,10 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ /* eDP MSO is not compatible with joiner */
+ if (intel_dp->mso_link_count)
+ return false;
+
return DISPLAY_VER(dev_priv) >= 12 ||
(DISPLAY_VER(dev_priv) == 11 &&
encoder->port != PORT_A);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index b651c990a..8264ff7fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -1160,7 +1160,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
if (first_mst_stream)
intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
- drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
+ drm_dp_add_payload_part2(&intel_dp->mst_mgr,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
if (DISPLAY_VER(dev_priv) >= 12)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3560a062d..5d7446a48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+ /* TODO: make DPT shrinkable when it has no bound vmas */
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+ !obj->is_dpt;
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index d650beb8e..20b9b04ec 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
i915_request_put(rq);
}
+ /* Lazy irq enabling after HW submission */
if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
intel_breadcrumbs_arm_irq(b);
+
+ /* And confirm that we still want irqs enabled before we yield */
+ if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+ intel_breadcrumbs_disarm_irq(b);
}
struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
return;
/* Kick the work once more to drain the signalers, and disarm the irq */
- irq_work_sync(&b->irq_work);
- while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
- local_irq_disable();
- signal_irq_work(&b->irq_work);
- local_irq_enable();
- cond_resched();
- }
+ irq_work_queue(&b->irq_work);
}
void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* the request as it may have completed and raised the interrupt as
* we were attaching it into the lists.
*/
- if (!b->irq_armed || __i915_request_is_complete(rq))
+ if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
irq_work_queue(&b->irq_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7a6dc371c..bc6209df0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -919,6 +919,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
if (IS_DG2(gt->i915)) {
u8 first_ccs = __ffs(CCS_MASK(gt));
+ /*
+ * Store the number of active cslices before
+ * changing the CCS engine configuration
+ */
+ gt->ccs.cslices = CCS_MASK(gt);
+
/* Mask off all the CCS engine */
info->engine_mask &= ~GENMASK(CCS3, CCS0);
/* Put back in the first CCS engine */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
index 99b71bb7d..3c62a44e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)
/* Build the value for the fixed CCS load balancing */
for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
- if (CCS_MASK(gt) & BIT(cslice))
+ if (gt->ccs.cslices & BIT(cslice))
/*
* If available, assign the cslice
* to the first available engine...
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index def7dd0eb..cfdd2ad5e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -207,6 +207,14 @@ struct intel_gt {
[MAX_ENGINE_INSTANCE + 1];
enum intel_submission_method submission_method;
+ struct {
+ /*
+ * Mask of the non fused CCS slices
+ * to be used for the load balancing
+ */
+ intel_engine_mask_t cslices;
+ } ccs;
+
/*
* Default address space (either GGTT or ppGTT depending on arch).
*
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
index 58012edd4..4f4f53c42 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
@@ -29,9 +29,9 @@
*/
#define GUC_KLV_LEN_MIN 1u
-#define GUC_KLV_0_KEY (0xffff << 16)
-#define GUC_KLV_0_LEN (0xffff << 0)
-#define GUC_KLV_n_VALUE (0xffffffff << 0)
+#define GUC_KLV_0_KEY (0xffffu << 16)
+#define GUC_KLV_0_LEN (0xffffu << 0)
+#define GUC_KLV_n_VALUE (0xffffffffu << 0)
/**
* DOC: GuC Self Config KLVs
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index b758fd110..c0662a022 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -793,7 +793,7 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!IS_DGFX(i915))
return;
- hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
+ hwmon = kzalloc(sizeof(*hwmon), GFP_KERNEL);
if (!hwmon)
return;
@@ -819,14 +819,12 @@ void i915_hwmon_register(struct drm_i915_private *i915)
hwm_get_preregistration_info(i915);
/* hwmon_dev points to device hwmon<i> */
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat->name,
- ddat,
- &hwm_chip_info,
- hwm_groups);
- if (IS_ERR(hwmon_dev)) {
- i915->hwmon = NULL;
- return;
- }
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat->name,
+ ddat,
+ &hwm_chip_info,
+ hwm_groups);
+ if (IS_ERR(hwmon_dev))
+ goto err;
ddat->hwmon_dev = hwmon_dev;
@@ -839,16 +837,36 @@ void i915_hwmon_register(struct drm_i915_private *i915)
if (!hwm_gt_is_visible(ddat_gt, hwmon_energy, hwmon_energy_input, 0))
continue;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, ddat_gt->name,
- ddat_gt,
- &hwm_gt_chip_info,
- NULL);
+ hwmon_dev = hwmon_device_register_with_info(dev, ddat_gt->name,
+ ddat_gt,
+ &hwm_gt_chip_info,
+ NULL);
if (!IS_ERR(hwmon_dev))
ddat_gt->hwmon_dev = hwmon_dev;
}
+ return;
+err:
+ i915_hwmon_unregister(i915);
}
void i915_hwmon_unregister(struct drm_i915_private *i915)
{
- fetch_and_zero(&i915->hwmon);
+ struct i915_hwmon *hwmon = i915->hwmon;
+ struct intel_gt *gt;
+ int i;
+
+ if (!hwmon)
+ return;
+
+ for_each_gt(gt, i915, i)
+ if (hwmon->ddat_gt[i].hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat_gt[i].hwmon_dev);
+
+ if (hwmon->ddat.hwmon_dev)
+ hwmon_device_unregister(hwmon->ddat.hwmon_dev);
+
+ mutex_destroy(&hwmon->hwmon_lock);
+
+ kfree(i915->hwmon);
+ i915->hwmon = NULL;
}
diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c
index b7fef3c79..4f99b4af8 100644
--- a/drivers/gpu/drm/imagination/pvr_vm_mips.c
+++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c
@@ -46,7 +46,7 @@ pvr_vm_mips_init(struct pvr_device *pvr_dev)
if (!mips_data)
return -ENOMEM;
- for (page_nr = 0; page_nr < ARRAY_SIZE(mips_data->pt_pages); page_nr++) {
+ for (page_nr = 0; page_nr < PVR_MIPS_PT_PAGE_COUNT; page_nr++) {
mips_data->pt_pages[page_nr] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!mips_data->pt_pages[page_nr]) {
err = -ENOMEM;
@@ -102,7 +102,7 @@ pvr_vm_mips_fini(struct pvr_device *pvr_dev)
int page_nr;
vunmap(mips_data->pt);
- for (page_nr = ARRAY_SIZE(mips_data->pt_pages) - 1; page_nr >= 0; page_nr--) {
+ for (page_nr = PVR_MIPS_PT_PAGE_COUNT - 1; page_nr >= 0; page_nr--) {
dma_unmap_page(from_pvr_device(pvr_dev)->dev,
mips_data->pt_dma_addr[page_nr], PAGE_SIZE, DMA_TO_DEVICE);
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index fbc43f243..6d000504e 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -43,6 +43,18 @@ void lima_bcast_suspend(struct lima_ip *ip)
}
+int lima_bcast_mask_irq(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, 0);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, 0);
+ return 0;
+}
+
+int lima_bcast_reset(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
int i;
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index 465ee587b..cd08841e4 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -13,4 +13,7 @@ void lima_bcast_fini(struct lima_ip *ip);
void lima_bcast_enable(struct lima_device *dev, int num_pp);
+int lima_bcast_mask_irq(struct lima_ip *ip);
+int lima_bcast_reset(struct lima_ip *ip);
+
#endif
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index 6b354e2fb..e15295071 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -233,6 +233,13 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_gp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+}
+
static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
@@ -365,6 +372,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
pipe->task_recover = lima_gp_task_recover;
+ pipe->task_mask_irq = lima_gp_task_mask_irq;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index d0d2db0ef..a4a2ffe65 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -429,6 +429,9 @@ static void lima_pp_task_error(struct lima_sched_pipe *pipe)
lima_pp_hard_reset(ip);
}
+
+ if (pipe->bcast_processor)
+ lima_bcast_reset(pipe->bcast_processor);
}
static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
@@ -437,6 +440,20 @@ static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static void lima_pp_task_mask_irq(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ if (pipe->bcast_processor)
+ lima_bcast_mask_irq(pipe->bcast_processor);
+}
+
static struct kmem_cache *lima_pp_task_slab;
static int lima_pp_task_slab_refcnt;
@@ -468,6 +485,7 @@ int lima_pp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_pp_task_fini;
pipe->task_error = lima_pp_task_error;
pipe->task_mmu_error = lima_pp_task_mmu_error;
+ pipe->task_mask_irq = lima_pp_task_mask_irq;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 00b19adfc..bbf3f8fea 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -422,12 +422,21 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
*/
for (i = 0; i < pipe->num_processor; i++)
synchronize_irq(pipe->processor[i]->irq);
+ if (pipe->bcast_processor)
+ synchronize_irq(pipe->bcast_processor->irq);
if (dma_fence_is_signaled(task->fence)) {
DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip));
return DRM_GPU_SCHED_STAT_NOMINAL;
}
+ /*
+ * The task might still finish while this timeout handler runs.
+ * To prevent a race condition on its completion, mask all irqs
+ * on the running core until the next hard reset completes.
+ */
+ pipe->task_mask_irq(pipe);
+
if (!pipe->error)
DRM_ERROR("%s job timeout\n", lima_ip_name(ip));
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 6bd4f3b70..85b23ba90 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -80,6 +80,7 @@ struct lima_sched_pipe {
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
int (*task_recover)(struct lima_sched_pipe *pipe);
+ void (*task_mask_irq)(struct lima_sched_pipe *pipe);
struct work_struct recover_work;
};
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 0ba721026..536366956 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2104,7 +2104,7 @@ static ssize_t mtk_dp_aux_transfer(struct drm_dp_aux *mtk_aux,
if (mtk_dp->bridge.type != DRM_MODE_CONNECTOR_eDP &&
!mtk_dp->train_info.cable_plugged_in) {
- ret = -EAGAIN;
+ ret = -EIO;
goto err;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a04499c4f..29207b275 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -1009,10 +1009,10 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
- mtk_crtc->ddp_comp = devm_kmalloc_array(dev,
- mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
- sizeof(*mtk_crtc->ddp_comp),
- GFP_KERNEL);
+ mtk_crtc->ddp_comp = devm_kcalloc(dev,
+ mtk_crtc->ddp_comp_nr + (conn_routes ? 1 : 0),
+ sizeof(*mtk_crtc->ddp_comp),
+ GFP_KERNEL);
if (!mtk_crtc->ddp_comp)
return -ENOMEM;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 4f2e3feab..1bf229615 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -38,6 +38,9 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
size = round_up(size, PAGE_SIZE);
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
index a6bc1bdb3..a10cff3ca 100644
--- a/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
+++ b/drivers/gpu/drm/meson/meson_dw_mipi_dsi.c
@@ -95,6 +95,7 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ clk_disable_unprepare(mipi_dsi->px_clk);
ret = clk_set_rate(mipi_dsi->px_clk, mipi_dsi->mode->clock * 1000);
if (ret) {
@@ -103,6 +104,12 @@ static int dw_mipi_dsi_phy_init(void *priv_data)
return ret;
}
+ ret = clk_prepare_enable(mipi_dsi->px_clk);
+ if (ret) {
+ dev_err(mipi_dsi->dev, "Failed to enable DSI Pixel clock (ret %d)\n", ret);
+ return ret;
+ }
+
switch (mipi_dsi->dsi_device->format) {
case MIPI_DSI_FMT_RGB888:
dpi_data_format = DPI_COLOR_24BIT;
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 2a82119eb..2a942dc6a 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
FREQ_1000_1001(params[i].pixel_freq));
DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
i, params[i].phy_freq,
- FREQ_1000_1001(params[i].phy_freq/10)*10);
+ FREQ_1000_1001(params[i].phy_freq/1000)*1000);
/* Match strict frequency */
if (phy_freq == params[i].phy_freq &&
vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
if ((phy_freq == params[freq].phy_freq ||
- phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
(vclk_freq == params[freq].vclk_freq ||
vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
if (vclk_freq != params[freq].vclk_freq)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index cf0b1de1c..7b72327df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -284,7 +284,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_start));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_start));
@@ -330,7 +330,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_MARKER, 1);
OUT_RING(ring, 0x00e); /* IB1LIST end */
- get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
+ get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
rbmemptr_stats(ring, index, cpcycles_end));
get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
rbmemptr_stats(ring, index, alwayson_end));
@@ -3062,7 +3062,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
ret = a6xx_set_supported_hw(&pdev->dev, config->info);
if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
+ a6xx_llc_slices_destroy(a6xx_gpu);
+ kfree(a6xx_gpu);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index fc1d5736d..489be1c0c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -448,9 +448,6 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
- if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- return;
-
ctl = phys_enc->hw_ctl;
ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index a06f69d0b..2e50049f2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -545,6 +545,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
u32 intf_active = 0;
+ u32 dsc_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
@@ -560,6 +561,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -567,17 +569,18 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->wb)
wb_active |= BIT(cfg->wb - WB_0);
+ if (cfg->dsc)
+ dsc_active |= cfg->dsc;
+
DPU_REG_WRITE(c, CTL_TOP, mode_sel);
DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
+ DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
if (cfg->merge_3d)
DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
BIT(cfg->merge_3d - MERGE_3D_0));
- if (cfg->dsc)
- DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
-
if (cfg->cdm)
DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 6a0a74832..b85881aab 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -223,9 +223,11 @@ static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int
VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
- if (!irq_entry->cb)
+ if (!irq_entry->cb) {
DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+ return;
+ }
atomic_inc(&irq_entry->count);
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index adbd5a367..707489776 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -38,6 +38,7 @@ struct dp_aux_private {
bool no_send_stop;
bool initted;
bool is_edp;
+ bool enable_xfers;
u32 offset;
u32 segment;
@@ -305,6 +306,17 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
/*
+ * If we're using DP and an external display isn't connected then the
+ * transfer won't succeed. Return right away. If we don't do this we
+ * can end up with long timeouts if someone tries to access the DP AUX
+ * character device when no DP device is connected.
+ */
+ if (!aux->is_edp && !aux->enable_xfers) {
+ ret = -ENXIO;
+ goto exit;
+ }
+
+ /*
* For eDP it's important to give a reasonably long wait here for HPD
* to be asserted. This is because the panel driver may have _just_
* turned on the panel and then tried to do an AUX transfer. The panel
@@ -313,7 +325,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
* avoid ever doing the extra long wait for DP.
*/
if (aux->is_edp) {
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog,
+ 500000);
if (ret) {
DRM_DEBUG_DP("Panel not ready for aux transactions\n");
goto exit;
@@ -436,6 +449,14 @@ irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux)
return IRQ_HANDLED;
}
+void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled)
+{
+ struct dp_aux_private *aux;
+
+ aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ aux->enable_xfers = enabled;
+}
+
void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
{
struct dp_aux_private *aux;
@@ -513,7 +534,7 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
pm_runtime_get_sync(aux->dev);
- ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog);
+ ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
pm_runtime_put_sync(aux->dev);
return ret;
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
index f47d591c1..4f65e892a 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.h
+++ b/drivers/gpu/drm/msm/dp/dp_aux.h
@@ -12,6 +12,7 @@
int dp_aux_register(struct drm_dp_aux *dp_aux);
void dp_aux_unregister(struct drm_dp_aux *dp_aux);
irqreturn_t dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_enable_xfers(struct drm_dp_aux *dp_aux, bool enabled);
void dp_aux_init(struct drm_dp_aux *dp_aux);
void dp_aux_deinit(struct drm_dp_aux *dp_aux);
void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index 3e7c84cde..628c8181d 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -263,17 +263,18 @@ void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
}
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog)
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+ unsigned long wait_us)
{
u32 state;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
- /* poll for hpd connected status every 2ms and timeout after 500ms */
+ /* poll for hpd connected status every 2ms and timeout after wait_us */
return readl_poll_timeout(catalog->io.aux.base +
REG_DP_DP_HPD_INT_STATUS,
state, state & DP_DP_HPD_STATE_STATUS_CONNECTED,
- 2000, 500000);
+ min(wait_us, 2000), wait_us);
}
static void dump_regs(void __iomem *base, int len)
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 75ec29012..72a858106 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -87,7 +87,8 @@ int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
-int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_wait_for_hpd_connect_state(struct dp_catalog *dp_catalog,
+ unsigned long wait_us);
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
/* DP Controller APIs */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index c4dda1fae..112c7e54f 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1052,14 +1052,14 @@ static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
if (ret)
return ret;
- if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+ if (voltage_swing_level >= DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. voltage swing level reached %d\n",
voltage_swing_level);
max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
}
- if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+ if (pre_emphasis_level >= DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(ctrl->drm_dev,
"max. pre-emphasis level reached %d\n",
pre_emphasis_level);
@@ -1150,7 +1150,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
}
if (ctrl->link->phy_params.v_level >=
- DP_TRAIN_VOLTAGE_SWING_MAX) {
+ DP_TRAIN_LEVEL_MAX) {
DRM_ERROR_RATELIMITED("max v_level reached\n");
return -EAGAIN;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index ffbfde922..36a0ef1cd 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -555,6 +555,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
int ret;
struct platform_device *pdev = dp->dp_display.pdev;
+ dp_aux_enable_xfers(dp->aux, true);
+
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
@@ -620,6 +622,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
u32 state;
struct platform_device *pdev = dp->dp_display.pdev;
+ dp_aux_enable_xfers(dp->aux, false);
+
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 49dfac1fd..ea911d924 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -1109,6 +1109,7 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
{
int i;
+ u8 max_p_level;
int v_max = 0, p_max = 0;
struct dp_link_private *link;
@@ -1140,30 +1141,29 @@ int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
* Adjust the voltage swing and pre-emphasis level combination to within
* the allowable range.
*/
- if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+ if (dp_link->phy_params.v_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested vSwingLevel=%d, change to %d\n",
dp_link->phy_params.v_level,
- DP_TRAIN_VOLTAGE_SWING_MAX);
- dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+ DP_TRAIN_LEVEL_MAX);
+ dp_link->phy_params.v_level = DP_TRAIN_LEVEL_MAX;
}
- if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+ if (dp_link->phy_params.p_level > DP_TRAIN_LEVEL_MAX) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
- DP_TRAIN_PRE_EMPHASIS_MAX);
- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+ DP_TRAIN_LEVEL_MAX);
+ dp_link->phy_params.p_level = DP_TRAIN_LEVEL_MAX;
}
- if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
- && (dp_link->phy_params.v_level ==
- DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+ max_p_level = DP_TRAIN_LEVEL_MAX - dp_link->phy_params.v_level;
+ if (dp_link->phy_params.p_level > max_p_level) {
drm_dbg_dp(link->drm_dev,
"Requested preEmphasisLevel=%d, change to %d\n",
dp_link->phy_params.p_level,
- DP_TRAIN_PRE_EMPHASIS_LVL_1);
- dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+ max_p_level);
+ dp_link->phy_params.p_level = max_p_level;
}
drm_dbg_dp(link->drm_dev, "adjusted: v_level=%d, p_level=%d\n",
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index 83da170bc..42aed9c90 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -19,19 +19,7 @@ struct dp_link_info {
unsigned long capabilities;
};
-enum dp_link_voltage_level {
- DP_TRAIN_VOLTAGE_SWING_LVL_0 = 0,
- DP_TRAIN_VOLTAGE_SWING_LVL_1 = 1,
- DP_TRAIN_VOLTAGE_SWING_LVL_2 = 2,
- DP_TRAIN_VOLTAGE_SWING_MAX = DP_TRAIN_VOLTAGE_SWING_LVL_2,
-};
-
-enum dp_link_preemaphasis_level {
- DP_TRAIN_PRE_EMPHASIS_LVL_0 = 0,
- DP_TRAIN_PRE_EMPHASIS_LVL_1 = 1,
- DP_TRAIN_PRE_EMPHASIS_LVL_2 = 2,
- DP_TRAIN_PRE_EMPHASIS_MAX = DP_TRAIN_PRE_EMPHASIS_LVL_2,
-};
+#define DP_TRAIN_LEVEL_MAX 3
struct dp_link_test_video {
u32 test_video_pattern;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9d86a6aca..c80be74cf 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -356,8 +356,8 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%lu",
- msm_host->mode->clock, msm_host->byte_clk_rate);
+ DBG("Set clk rates: pclk=%lu, byteclk=%lu",
+ msm_host->pixel_clk_rate, msm_host->byte_clk_rate);
ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
msm_host->byte_clk_rate);
@@ -430,9 +430,9 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
- msm_host->mode->clock, msm_host->byte_clk_rate,
- msm_host->esc_clk_rate, msm_host->src_clk_rate);
+ DBG("Set clk rates: pclk=%lu, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
+ msm_host->pixel_clk_rate, msm_host->byte_clk_rate,
+ msm_host->esc_clk_rate, msm_host->src_clk_rate);
ret = clk_set_rate(msm_host->byte_clk, msm_host->byte_clk_rate);
if (ret) {
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index ea10bf815..0f895b8a9 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -343,6 +343,9 @@ static int __maybe_unused lcdif_suspend(struct device *dev)
if (ret)
return ret;
+ if (pm_runtime_suspended(dev))
+ return 0;
+
return lcdif_rpm_suspend(dev);
}
@@ -350,7 +353,8 @@ static int __maybe_unused lcdif_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- lcdif_rpm_resume(dev);
+ if (!pm_runtime_suspended(dev))
+ lcdif_rpm_resume(dev);
return drm_mode_config_helper_resume(drm);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 13705c5f1..4b7497a87 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -68,7 +68,7 @@ nv04_display_fini(struct drm_device *dev, bool runtime, bool suspend)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
if (!suspend)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0c3d88ad0..674dc567e 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -915,7 +915,7 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
msto->disabled = false;
drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
} else if (msto->enabled) {
- drm_dp_add_payload_part2(mgr, state, new_payload);
+ drm_dp_add_payload_part2(mgr, new_payload);
msto->enabled = false;
}
}
@@ -2680,7 +2680,7 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
nv50_mstm_fini(nouveau_encoder(encoder));
}
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 80f74ee0f..47e53e17b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -272,6 +272,9 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = (u64)ttm_resource_manager_usage(vram_mgr);
break;
}
+ case NOUVEAU_GETPARAM_HAS_VMA_TILEMODE:
+ getparam->value = 1;
+ break;
default:
NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index db8cbf615..186add400 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
- if (!nouveau_cli_uvmm(cli) || internal) {
- /* for BO noVM allocs, don't assign kinds */
- if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
- nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
- } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- nvbo->kind = (tile_flags & 0x00007f00) >> 8;
- nvbo->comp = (tile_flags & 0x00030000) >> 16;
- if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
- kfree(nvbo);
- return ERR_PTR(-EINVAL);
- }
- } else {
- nvbo->zeta = (tile_flags & 0x00000007);
+ if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+ nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+ } else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+ nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+ nvbo->comp = (tile_flags & 0x00030000) >> 16;
+ if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+ kfree(nvbo);
+ return ERR_PTR(-EINVAL);
}
- nvbo->mode = tile_mode;
+ } else {
+ nvbo->zeta = (tile_flags & 0x00000007);
+ }
+ nvbo->mode = tile_mode;
+ if (!nouveau_cli_uvmm(cli) || internal) {
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
}
nvbo->page = vmm->page[pi].shift;
} else {
- /* reject other tile flags when in VM mode. */
- if (tile_mode)
- return ERR_PTR(-EINVAL);
- if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
- return ERR_PTR(-EINVAL);
-
/* Determine the desirable target GPU page size for the buffer. */
for (i = 0; i < vmm->page_nr; i++) {
/* Because we cannot currently allow VMM maps to fail
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f28f9a857..60c322442 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -450,6 +450,9 @@ nouveau_display_hpd_resume(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ if (drm->headless)
+ return;
+
spin_lock_irq(&drm->hpd_lock);
drm->hpd_pending = ~0;
spin_unlock_irq(&drm->hpd_lock);
@@ -635,7 +638,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
}
drm_connector_list_iter_end(&conn_iter);
- if (!runtime)
+ if (!runtime && !drm->headless)
cancel_work_sync(&drm->hpd_work);
drm_kms_helper_poll_disable(dev);
@@ -729,6 +732,7 @@ nouveau_display_create(struct drm_device *dev)
/* no display hw */
if (ret == -ENODEV) {
ret = 0;
+ drm->headless = true;
goto disp_create_err;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index e239c6bf4..25fca98a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -276,6 +276,7 @@ struct nouveau_drm {
/* modesetting */
struct nvbios vbios;
struct nouveau_display *display;
+ bool headless;
struct work_struct hpd_work;
spinlock_t hpd_lock;
u32 hpd_pending;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
index 6a0a4d3b8..027867c2a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
@@ -1080,7 +1080,7 @@ r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
ret = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, &ctrl, sizeof(*ctrl));
if (ret) {
nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
- return PTR_ERR(ctrl);
+ return ret;
}
memcpy(data, ctrl->data, size);
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index b715301ec..6c49270cb 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,7 +4,7 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS
select DRM_KMS_HELPER
- select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
+ select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
default n
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 6b08b137a..523be3468 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work)
omap_gem_roll(bo, fbi->var.yoffset * npages);
}
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area)
+
static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
struct fb_info *fbi)
{
@@ -78,11 +82,9 @@ fallback:
static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
- struct drm_fb_helper *helper = info->par;
- struct drm_framebuffer *fb = helper->fb;
- struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0);
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma);
+ return fb_deferred_io_mmap(info, vma);
}
static void omap_fbdev_fb_destroy(struct fb_info *info)
@@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
DBG();
+ fb_deferred_io_cleanup(info);
drm_fb_helper_fini(helper);
omap_gem_unpin(bo);
@@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
kfree(fbdev);
}
+/*
+ * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap()
+ * because we use write-combine.
+ */
static const struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev),
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
.fb_setcmap = drm_fb_helper_setcmap,
.fb_blank = drm_fb_helper_blank,
.fb_pan_display = omap_fbdev_pan_display,
- __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev),
.fb_ioctl = drm_fb_helper_ioctl,
.fb_mmap = omap_fbdev_fb_mmap,
.fb_destroy = omap_fbdev_fb_destroy,
@@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = bo->size;
+ /* deferred I/O */
+ helper->fbdefio.delay = HZ / 20;
+ helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ fbi->fbdefio = &helper->fbdefio;
+ ret = fb_deferred_io_init(fbi);
+ if (ret)
+ goto fail;
+
/* if we have DMM, then we can use it for scrolling by just
* shuffling pages around in DMM rather than doing sw blit.
*/
@@ -238,8 +254,20 @@ fail:
return ret;
}
+static int omap_fbdev_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs omap_fb_helper_funcs = {
.fb_probe = omap_fbdev_create,
+ .fb_dirty = omap_fbdev_dirty,
};
static struct drm_fb_helper *get_fb(struct fb_info *fbi)
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index d58f90bc4..745f3e48f 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1865,6 +1865,13 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
+static const struct panel_delay delay_200_500_e50_p2e200 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .prepare_to_enable = 200,
+};
+
static const struct panel_delay delay_200_500_e80 = {
.hpd_absent = 200,
.unprepare = 500,
@@ -2034,7 +2041,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d51, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('H', 'K', 'C', 0x2d5b, &delay_200_500_e200, "Unknown"),
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 9d87cc1a3..1a2620570 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -295,8 +295,6 @@ static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
- mipi_dsi_dcs_write_seq(dsi, 0x11);
- mipi_dsi_dcs_write_seq(dsi, 0x29);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
@@ -326,7 +324,8 @@ static const struct drm_display_mode ltk050h3148w_mode = {
static const struct ltk050h3146w_desc ltk050h3148w_data = {
.mode = &ltk050h3148w_mode,
.init = ltk050h3148w_init_sequence,
- .mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_VIDEO_BURST,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 648ce9201..028fdac29 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -556,10 +556,8 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
}
dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
of_node_put(dsi_r);
- if (!dsi_r_host) {
- dev_err(dev, "Cannot get secondary DSI host\n");
- return -EPROBE_DEFER;
- }
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n");
nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info);
if (!nt->dsi[1]) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 76c2a8f67..9c336c715 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -109,19 +109,17 @@ static int atana33xc20_resume(struct device *dev)
if (hpd_asserted < 0)
ret = hpd_asserted;
- if (ret)
+ if (ret) {
dev_warn(dev, "Error waiting for HPD GPIO: %d\n", ret);
-
- return ret;
- }
-
- if (p->aux->wait_hpd_asserted) {
+ goto error;
+ }
+ } else if (p->aux->wait_hpd_asserted) {
ret = p->aux->wait_hpd_asserted(p->aux, HPD_MAX_US);
- if (ret)
+ if (ret) {
dev_warn(dev, "Controller error waiting for HPD: %d\n", ret);
-
- return ret;
+ goto error;
+ }
}
/*
@@ -133,6 +131,12 @@ static int atana33xc20_resume(struct device *dev)
* right times.
*/
return 0;
+
+error:
+ drm_dp_dpcd_set_powered(p->aux, false);
+ regulator_disable(p->supply);
+
+ return ret;
}
static int atana33xc20_disable(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 20e3df1c5..e8fe5a694 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2591,6 +2591,9 @@ static const struct panel_desc innolux_g121x1_l03 = {
.unprepare = 200,
.disable = 400,
},
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing innolux_g156hce_l01_timings = {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 88e80fe98..28bfc48a9 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
.clock = 6000,
.hdisplay = 240,
- .hsync_start = 240 + 28,
- .hsync_end = 240 + 28 + 10,
- .htotal = 240 + 28 + 10 + 10,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
.vdisplay = 280,
- .vsync_start = 280 + 8,
- .vsync_end = 280 + 8 + 4,
- .vtotal = 280 + 8 + 4 + 4,
- .width_mm = 43,
- .height_mm = 37,
+ .vsync_start = 280 + 48,
+ .vsync_end = 280 + 48 + 4,
+ .vtotal = 280 + 48 + 4 + 4,
+ .width_mm = 37,
+ .height_mm = 43,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -643,7 +643,9 @@ static int st7789v_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
- of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ ret = of_drm_get_panel_orientation(spi->dev.of_node, &ctx->orientation);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret, "Failed to get orientation\n");
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 21d27e623..b11f7c5bb 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1619,6 +1619,8 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
+ if (table[i].usVoltageIndex >= SUMO_MAX_NUMBER_VOLTAGES)
+ continue;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index fdd768bbd..62ebbdb16 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -706,6 +706,8 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
const struct drm_format_info *info;
u16 hor_scl_mode, ver_scl_mode;
u16 hscl_filter_mode, vscl_filter_mode;
+ uint16_t cbcr_src_w = src_w;
+ uint16_t cbcr_src_h = src_h;
u8 gt2 = 0;
u8 gt4 = 0;
u32 val;
@@ -763,27 +765,27 @@ static void vop2_setup_scale(struct vop2 *vop2, const struct vop2_win *win,
vop2_win_write(win, VOP2_WIN_YRGB_VSCL_FILTER_MODE, vscl_filter_mode);
if (info->is_yuv) {
- src_w /= info->hsub;
- src_h /= info->vsub;
+ cbcr_src_w /= info->hsub;
+ cbcr_src_h /= info->vsub;
gt4 = 0;
gt2 = 0;
- if (src_h >= (4 * dst_h)) {
+ if (cbcr_src_h >= (4 * dst_h)) {
gt4 = 1;
- src_h >>= 2;
- } else if (src_h >= (2 * dst_h)) {
+ cbcr_src_h >>= 2;
+ } else if (cbcr_src_h >= (2 * dst_h)) {
gt2 = 1;
- src_h >>= 1;
+ cbcr_src_h >>= 1;
}
- hor_scl_mode = scl_get_scl_mode(src_w, dst_w);
- ver_scl_mode = scl_get_scl_mode(src_h, dst_h);
+ hor_scl_mode = scl_get_scl_mode(cbcr_src_w, dst_w);
+ ver_scl_mode = scl_get_scl_mode(cbcr_src_h, dst_h);
- val = vop2_scale_factor(src_w, dst_w);
+ val = vop2_scale_factor(cbcr_src_w, dst_w);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_X, val);
- val = vop2_scale_factor(src_h, dst_h);
+ val = vop2_scale_factor(cbcr_src_h, dst_h);
vop2_win_write(win, VOP2_WIN_SCALE_CBCR_Y, val);
vop2_win_write(win, VOP2_WIN_VSD_CBCR_GT4, gt4);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index d8751ea20..5f8d51b29 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2740,6 +2740,8 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
index = 1;
addr = of_get_address(dev->of_node, index, NULL, NULL);
+ if (!addr)
+ return -EINVAL;
vc4_hdmi->audio.dma_data.addr = be32_to_cpup(addr) + mai_data->offset;
vc4_hdmi->audio.dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 58fb40c93..bea576434 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -956,13 +956,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_read(dev_priv,
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
- /*
- * Workaround for low memory 2D VMs to compensate for the
- * allocation taken by fbdev
- */
- if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 3;
-
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->max_primary_mem =
vmw_read(dev_priv, SVGA_REG_MAX_PRIMARY_MEM);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b019a1a17..c1430e554 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1066,9 +1066,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
unsigned width, unsigned height, unsigned pitch,
unsigned bpp, unsigned depth);
-bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 84ae4e10a..11755d143 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -216,7 +216,7 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
new_image = vmw_du_cursor_plane_acquire_image(new_vps);
changed = false;
- if (old_image && new_image)
+ if (old_image && new_image && old_image != new_image)
changed = memcmp(old_image, new_image, size) != 0;
return changed;
@@ -2157,13 +2157,12 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
+static
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
- uint32_t pitch,
- uint32_t height)
+ u64 pitch,
+ u64 height)
{
- return ((u64) pitch * (u64) height) < (u64)
- ((dev_priv->active_display_unit == vmw_du_screen_target) ?
- dev_priv->max_primary_mem : dev_priv->vram_size);
+ return (pitch * height) < (u64)dev_priv->vram_size;
}
/**
@@ -2859,25 +2858,18 @@ out_unref:
enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ enum drm_mode_status ret;
struct drm_device *dev = connector->dev;
struct vmw_private *dev_priv = vmw_priv(dev);
- u32 max_width = dev_priv->texture_max_width;
- u32 max_height = dev_priv->texture_max_height;
u32 assumed_cpp = 4;
if (dev_priv->assume_16bpp)
assumed_cpp = 2;
- if (dev_priv->active_display_unit == vmw_du_screen_target) {
- max_width = min(dev_priv->stdu_max_width, max_width);
- max_height = min(dev_priv->stdu_max_height, max_height);
- }
-
- if (max_width < mode->hdisplay)
- return MODE_BAD_HVALUE;
-
- if (max_height < mode->vdisplay)
- return MODE_BAD_VVALUE;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
if (!vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_cpp,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 3c8414a13..dbc44ecbd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -41,7 +41,14 @@
#define vmw_connector_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.connector)
-
+/*
+ * Some renderers such as llvmpipe will align the width and height of their
+ * buffers to match their tile size. We need to keep this in mind when exposing
+ * modes to userspace so that this possible over-allocation will not exceed
+ * graphics memory. 64x64 pixels seems to be a reasonable upper bound for the
+ * tile size of current renderers.
+ */
+#define GPU_TILE_SIZE 64
enum stdu_content_type {
SAME_AS_DISPLAY = 0,
@@ -830,7 +837,41 @@ static void vmw_stdu_connector_destroy(struct drm_connector *connector)
vmw_stdu_destroy(vmw_connector_to_stdu(connector));
}
+static enum drm_mode_status
+vmw_stdu_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ enum drm_mode_status ret;
+ struct drm_device *dev = connector->dev;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ u64 assumed_cpp = dev_priv->assume_16bpp ? 2 : 4;
+ /* Align width and height to account for GPU tile over-alignment */
+ u64 required_mem = ALIGN(mode->hdisplay, GPU_TILE_SIZE) *
+ ALIGN(mode->vdisplay, GPU_TILE_SIZE) *
+ assumed_cpp;
+ required_mem = ALIGN(required_mem, PAGE_SIZE);
+
+ ret = drm_mode_validate_size(mode, dev_priv->stdu_max_width,
+ dev_priv->stdu_max_height);
+ if (ret != MODE_OK)
+ return ret;
+ ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
+ if (ret != MODE_OK)
+ return ret;
+
+ if (required_mem > dev_priv->max_primary_mem)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_pages * PAGE_SIZE)
+ return MODE_MEM;
+
+ if (required_mem > dev_priv->max_mob_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
@@ -846,7 +887,7 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid
};
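A minimal standalone sketch (not part of the patch) of the arithmetic the new vmw_stdu_connector_mode_valid() above performs, assuming a hypothetical 1920x1080 mode, 4 bytes per pixel, 64-pixel tiles and 4 KiB pages; ALIGN_UP stands in for the kernel's ALIGN() macro.

#include <stdio.h>

#define GPU_TILE_SIZE	64
#define PAGE_SZ		4096ULL
#define ALIGN_UP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	unsigned long long w = ALIGN_UP(1920ULL, GPU_TILE_SIZE);	/* already a tile multiple: 1920 */
	unsigned long long h = ALIGN_UP(1080ULL, GPU_TILE_SIZE);	/* rounded up one tile: 1088 */
	unsigned long long required_mem = ALIGN_UP(w * h * 4, PAGE_SZ);

	/* 1920 * 1088 * 4 = 8355840 bytes, already a multiple of 4096 */
	printf("required_mem = %llu bytes\n", required_mem);
	return 0;
}

The mode is then rejected with MODE_MEM if this figure exceeds any of max_primary_mem, max_mob_pages * PAGE_SIZE or max_mob_size, mirroring the three checks in the hunk above.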
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 7c124475c..a35e0781b 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -96,7 +96,8 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
{
u64 addr = xe_sa_bo_gpu_addr(bb->bo);
- xe_gt_assert(q->gt, !(q->vm && q->vm->flags & XE_VM_FLAG_MIGRATION));
+ xe_gt_assert(q->gt, !xe_sched_job_is_migration(q));
+ xe_gt_assert(q->gt, q->width == 1);
return __xe_bb_create_job(q, bb, &addr);
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index d32ff3857..b3b37ed83 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -389,8 +389,14 @@ mask_err:
return err;
}
-/*
- * Initialize MMIO resources that don't require any knowledge about tile count.
+/**
+ * xe_device_probe_early - Device early probe
+ * @xe: xe device instance
+ *
+ * Initialize MMIO resources that don't require any
+ * knowledge about tile count. Also initialize pcode.
+ *
+ * Return: 0 on success, error code on failure
*/
int xe_device_probe_early(struct xe_device *xe)
{
@@ -404,6 +410,10 @@ int xe_device_probe_early(struct xe_device *xe)
if (err)
return err;
+ err = xe_pcode_probe_early(xe);
+ if (err)
+ return err;
+
return 0;
}
@@ -482,11 +492,8 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
- for_each_gt(gt, xe, id) {
- err = xe_pcode_probe(gt);
- if (err)
- return err;
- }
+ for_each_gt(gt, xe, id)
+ xe_pcode_init(gt);
err = xe_display_init_noirq(xe);
if (err)
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index 9fcae65b6..e7a39ad7a 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -126,6 +126,13 @@ static const struct attribute *gt_idle_attrs[] = {
static void gt_idle_sysfs_fini(struct drm_device *drm, void *arg)
{
struct kobject *kobj = arg;
+ struct xe_gt *gt = kobj_to_gt(kobj->parent);
+
+ if (gt_to_xe(gt)->info.skip_guc_pc) {
+ XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FW_GT));
+ xe_gt_idle_disable_c6(gt);
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -184,7 +191,7 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
void xe_gt_idle_disable_c6(struct xe_gt *gt)
{
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
xe_mmio_write32(gt, PG_ENABLE, 0);
xe_mmio_write32(gt, RC_CONTROL, 0);
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index 0d2a2dd13..a38f59b43 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -643,8 +643,6 @@ int xe_guc_enable_communication(struct xe_guc *guc)
struct xe_device *xe = guc_to_xe(guc);
int err;
- guc_enable_irq(guc);
-
if (IS_SRIOV_VF(xe) && xe_device_has_memirq(xe)) {
struct xe_gt *gt = guc_to_gt(guc);
struct xe_tile *tile = gt_to_tile(gt);
@@ -652,6 +650,8 @@ int xe_guc_enable_communication(struct xe_guc *guc)
err = xe_memirq_init_guc(&tile->sriov.vf.memirq, guc);
if (err)
return err;
+ } else {
+ guc_enable_irq(guc);
}
xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 2839d6856..dd9d65f92 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -381,8 +381,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
struct xe_device *xe = gt_to_xe(gt);
u32 freq;
- xe_device_mem_access_get(gt_to_xe(gt));
-
/* When in RC6, actual frequency reported will be 0. */
if (GRAPHICS_VERx100(xe) >= 1270) {
freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
@@ -394,8 +392,6 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc)
freq = decode_freq(freq);
- xe_device_mem_access_put(gt_to_xe(gt));
-
return freq;
}
@@ -412,14 +408,13 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(gt_to_xe(gt));
/*
* GuC SLPC plays with cur freq request when GuCRC is enabled
* Block RC6 for a more reliable read.
*/
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
*freq = xe_mmio_read32(gt, RPNSWREQ);
@@ -427,9 +422,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq)
*freq = decode_freq(*freq);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(gt_to_xe(gt));
- return ret;
+ return 0;
}
/**
@@ -451,12 +444,7 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
*/
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
- struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
-
- xe_device_mem_access_get(xe);
pc_update_rp_values(pc);
- xe_device_mem_access_put(xe);
return pc->rpe_freq;
}
@@ -485,7 +473,6 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq)
struct xe_gt *gt = pc_to_gt(pc);
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -511,7 +498,6 @@ fw:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -528,7 +514,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -544,8 +529,6 @@ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
-
return ret;
}
@@ -561,7 +544,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -577,7 +559,6 @@ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -594,7 +575,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
mutex_lock(&pc->freq_lock);
if (!pc->freq_ready) {
/* Might be in the middle of a gt reset */
@@ -610,7 +590,6 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
out:
mutex_unlock(&pc->freq_lock);
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -623,8 +602,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg, gt_c_state;
- xe_device_mem_access_get(gt_to_xe(gt));
-
if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
reg = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
gt_c_state = REG_FIELD_GET(MTL_CC_MASK, reg);
@@ -633,8 +610,6 @@ enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc)
gt_c_state = REG_FIELD_GET(RCN_MASK, reg);
}
- xe_device_mem_access_put(gt_to_xe(gt));
-
switch (gt_c_state) {
case GT_C6:
return GT_IDLE_C6;
@@ -654,9 +629,7 @@ u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, GT_GFX_RC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -670,9 +643,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc)
struct xe_gt *gt = pc_to_gt(pc);
u64 reg;
- xe_device_mem_access_get(gt_to_xe(gt));
reg = xe_mmio_read32(gt, MTL_MEDIA_MC6);
- xe_device_mem_access_put(gt_to_xe(gt));
return reg;
}
@@ -801,23 +772,19 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (xe->info.skip_guc_pc)
return 0;
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = pc_action_setup_gucrc(pc, XE_GUCRC_HOST_CONTROL);
if (ret)
- goto out;
+ return ret;
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out;
+ return ret;
xe_gt_idle_disable_c6(gt);
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
static void pc_init_pcode_freq(struct xe_guc_pc *pc)
@@ -870,11 +837,9 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
xe_gt_assert(gt, xe_device_uc_enabled(xe));
- xe_device_mem_access_get(pc_to_xe(pc));
-
ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (ret)
- goto out_fail_force_wake;
+ return ret;
if (xe->info.skip_guc_pc) {
if (xe->info.platform != XE_PVC)
@@ -914,8 +879,6 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
-out_fail_force_wake:
- xe_device_mem_access_put(pc_to_xe(pc));
return ret;
}
@@ -928,12 +891,9 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
struct xe_device *xe = pc_to_xe(pc);
int ret;
- xe_device_mem_access_get(pc_to_xe(pc));
-
if (xe->info.skip_guc_pc) {
xe_gt_idle_disable_c6(pc_to_gt(pc));
- ret = 0;
- goto out;
+ return 0;
}
mutex_lock(&pc->freq_lock);
@@ -942,16 +902,14 @@ int xe_guc_pc_stop(struct xe_guc_pc *pc)
ret = pc_action_shutdown(pc);
if (ret)
- goto out;
+ return ret;
if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_NOT_RUNNING)) {
drm_err(&pc_to_xe(pc)->drm, "GuC PC Shutdown failed\n");
- ret = -EIO;
+ return -EIO;
}
-out:
- xe_device_mem_access_put(pc_to_xe(pc));
- return ret;
+ return 0;
}
/**
@@ -962,14 +920,6 @@ out:
static void xe_guc_pc_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_pc *pc = arg;
- struct xe_device *xe = pc_to_xe(pc);
-
- if (xe->info.skip_guc_pc) {
- xe_device_mem_access_get(xe);
- xe_gt_idle_disable_c6(pc_to_gt(pc));
- xe_device_mem_access_put(xe);
- return;
- }
xe_force_wake_get(gt_to_fw(pc_to_gt(pc)), XE_FORCEWAKE_ALL);
XE_WARN_ON(xe_guc_pc_gucrc_disable(pc));
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index e2a4c3b5e..3757e0dc5 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1257,6 +1257,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
return 0;
err_entity:
+ mutex_unlock(&guc->submission_state.lock);
xe_sched_entity_fini(&ge->entity);
err_sched:
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2ba4fb951..aca519f5b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -33,7 +33,6 @@
#include "xe_sync.h"
#include "xe_trace.h"
#include "xe_vm.h"
-#include "xe_wa.h"
/**
* struct xe_migrate - migrate context.
@@ -299,10 +298,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
/*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
* Including the reserved copy engine is required to avoid deadlocks due to
* migrate jobs servicing the faults gets stuck behind the job that faulted.
*/
@@ -316,8 +311,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
if (hwe->class != XE_ENGINE_CLASS_COPY)
continue;
- if (!XE_WA(gt, 16017236439) ||
- xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+ if (xe_gt_is_usm_hwe(gt, hwe))
logical_mask |= BIT(hwe->logical_instance);
}
@@ -368,6 +362,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
if (!hwe || !logical_mask)
return ERR_PTR(-EINVAL);
+ /*
+ * XXX: Currently only reserving 1 (likely slow) BCS instance on
+ * PVC, may want to revisit if performance is needed.
+ */
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index b324dc2a5..81f4ae2ea 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
+#include "xe_device.h"
#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@@ -43,8 +44,6 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- lockdep_assert_held(&gt->pcode.lock);
-
err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
@@ -55,17 +54,15 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
- unsigned int timeout_ms, bool return_data,
- bool atomic)
+static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
{
int err;
if (gt_to_xe(gt)->info.skip_pcode)
return 0;
- lockdep_assert_held(&gt->pcode.lock);
-
if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
@@ -87,6 +84,18 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
return pcode_mailbox_status(gt);
}
+static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+ unsigned int timeout_ms, bool return_data,
+ bool atomic)
+{
+ if (gt_to_xe(gt)->info.skip_pcode)
+ return 0;
+
+ lockdep_assert_held(&gt->pcode.lock);
+
+ return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+}
+
int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
{
int err;
@@ -109,15 +118,19 @@ int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
return err;
}
-static int xe_pcode_try_request(struct xe_gt *gt, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status, bool atomic, int timeout_us)
+static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
for (slept = 0; slept < timeout_us; slept += wait) {
- *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
- atomic);
+ if (locked)
+ *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
+ else
+ *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
@@ -158,8 +171,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- false, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@@ -177,8 +190,8 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
"PCODE timeout, retrying with preemption disabled\n");
drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
preempt_disable();
- ret = xe_pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
- true, timeout_base_ms * 1000);
+ ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ true, 50 * 1000, true);
preempt_enable();
out:
@@ -238,59 +251,71 @@ unlock:
}
/**
- * xe_pcode_init - Ensure PCODE is initialized
- * @gt: gt instance
+ * xe_pcode_ready - Ensure PCODE is initialized
+ * @xe: xe instance
+ * @locked: true if lock held, false otherwise
*
- * This function ensures that PCODE is properly initialized. To be called during
- * probe and resume paths.
+ * The PCODE init mailbox is polled only on the root GT of the root tile,
+ * since the root tile reports that initialization is complete only
+ * after all tiles have finished initializing.
+ * Called without locks during early probe, and with locks held in the
+ * resume path.
*
- * It returns 0 on success, and -error number on failure.
+ * Returns 0 on success, and -error number on failure.
*/
-int xe_pcode_init(struct xe_gt *gt)
+int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (xe->info.skip_pcode)
return 0;
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!IS_DGFX(xe))
return 0;
- mutex_lock(&gt->pcode.lock);
- ret = xe_pcode_try_request(gt, DGFX_PCODE_STATUS, request,
- DGFX_INIT_STATUS_COMPLETE,
- DGFX_INIT_STATUS_COMPLETE,
- &status, false, timeout_us);
- mutex_unlock(&gt->pcode.lock);
+ if (locked)
+ mutex_lock(&gt->pcode.lock);
+
+ ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ DGFX_INIT_STATUS_COMPLETE,
+ DGFX_INIT_STATUS_COMPLETE,
+ &status, false, timeout_us, locked);
+
+ if (locked)
+ mutex_unlock(&gt->pcode.lock);
if (ret)
- drm_err(&gt_to_xe(gt)->drm,
+ drm_err(&xe->drm,
"PCODE initialization timedout after: 3 min\n");
return ret;
}
/**
- * xe_pcode_probe - Prepare xe_pcode and also ensure PCODE is initialized.
+ * xe_pcode_init - initialize components of PCODE
* @gt: gt instance
*
- * This function initializes the xe_pcode component, and when needed, it ensures
- * that PCODE has properly performed its initialization and it is really ready
- * to go. To be called once only during probe.
- *
- * It returns 0 on success, and -error number on failure.
+ * This function initializes the xe_pcode component.
+ * To be called once only during probe.
*/
-int xe_pcode_probe(struct xe_gt *gt)
+void xe_pcode_init(struct xe_gt *gt)
{
drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+}
- if (gt_to_xe(gt)->info.skip_pcode)
- return 0;
-
- if (!IS_DGFX(gt_to_xe(gt)))
- return 0;
-
- return xe_pcode_init(gt);
+/**
+ * xe_pcode_probe_early - initializes PCODE
+ * @xe: xe instance
+ *
+ * This function checks the initialization status of PCODE.
+ * To be called once only during early probe without locks.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int xe_pcode_probe_early(struct xe_device *xe)
+{
+ return xe_pcode_ready(xe, false);
}
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 08cb1d047..3f54c6d2a 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -8,9 +8,11 @@
#include <linux/types.h>
struct xe_gt;
+struct xe_device;
-int xe_pcode_probe(struct xe_gt *gt);
-int xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_gt *gt);
+int xe_pcode_probe_early(struct xe_device *xe);
+int xe_pcode_ready(struct xe_device *xe, bool locked);
int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
u32 max_gt_freq);
int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 53b3b0b01..944cf4d76 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -54,13 +54,15 @@ int xe_pm_suspend(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Suspending device\n");
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
/* FIXME: Super racey... */
err = xe_bo_evict_all(xe);
if (err)
- return err;
+ goto err;
xe_display_pm_suspend(xe);
@@ -68,7 +70,7 @@ int xe_pm_suspend(struct xe_device *xe)
err = xe_gt_suspend(gt);
if (err) {
xe_display_pm_resume(xe);
- return err;
+ goto err;
}
}
@@ -76,7 +78,11 @@ int xe_pm_suspend(struct xe_device *xe)
xe_display_pm_suspend_late(xe);
+ drm_dbg(&xe->drm, "Device suspended\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+ return err;
}
/**
@@ -92,14 +98,14 @@ int xe_pm_resume(struct xe_device *xe)
u8 id;
int err;
+ drm_dbg(&xe->drm, "Resuming device\n");
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- return err;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ return err;
xe_display_pm_resume_early(xe);
@@ -109,7 +115,7 @@ int xe_pm_resume(struct xe_device *xe)
*/
err = xe_bo_restore_kernel(xe);
if (err)
- return err;
+ goto err;
xe_irq_resume(xe);
@@ -120,9 +126,13 @@ int xe_pm_resume(struct xe_device *xe)
err = xe_bo_restore_user(xe);
if (err)
- return err;
+ goto err;
+ drm_dbg(&xe->drm, "Device resumed\n");
return 0;
+err:
+ drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+ return err;
}
static bool xe_pm_pci_d3cold_capable(struct xe_device *xe)
@@ -310,11 +320,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe->d3cold.power_lost = xe_guc_in_reset(&gt->uc.guc);
if (xe->d3cold.allowed && xe->d3cold.power_lost) {
- for_each_gt(gt, xe, id) {
- err = xe_pcode_init(gt);
- if (err)
- goto out;
- }
+ err = xe_pcode_ready(xe, true);
+ if (err)
+ goto out;
/*
* This only restores pinned memory which is the memory
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 5b2b37b59..820c8d73c 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -79,6 +79,16 @@ static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
return i;
}
+static int emit_flush_dw(u32 *dw, int i)
+{
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_IMM_DW;
+ dw[i++] = 0;
+ dw[i++] = 0;
+ dw[i++] = 0;
+
+ return i;
+}
+
static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
u32 *dw, int i)
{
@@ -233,10 +243,12 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
@@ -292,10 +304,12 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
- if (job->user_fence.used)
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
job->user_fence.value,
dw, i);
+ }
i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 88eb33acd..face8d6b2 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -256,12 +256,12 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev)
if (ret)
goto err_dp;
+ drm_bridge_add(dpsub->bridge);
+
if (dpsub->dma_enabled) {
ret = zynqmp_dpsub_drm_init(dpsub);
if (ret)
goto err_disp;
- } else {
- drm_bridge_add(dpsub->bridge);
}
dev_info(&pdev->dev, "ZynqMP DisplayPort Subsystem driver probed");
@@ -288,9 +288,8 @@ static void zynqmp_dpsub_remove(struct platform_device *pdev)
if (dpsub->drm)
zynqmp_dpsub_drm_cleanup(dpsub);
- else
- drm_bridge_remove(dpsub->bridge);
+ drm_bridge_remove(dpsub->bridge);
zynqmp_disp_remove(dpsub);
zynqmp_dp_remove(dpsub);
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index fd58a86b0..d022bfb5e 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -693,6 +693,7 @@ static void gb_interface_release(struct device *dev)
trace_gb_interface_release(intf);
+ cancel_work_sync(&intf->mode_switch_work);
kfree(intf);
}
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 5b24d5f63..c2f9fc1f2 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -227,6 +227,11 @@ static void amd_sfh_resume(struct amd_mp2_dev *mp2)
struct amd_mp2_sensor_info info;
int i, status;
+ if (!cl_data->is_any_sensor_enabled) {
+ amd_sfh_clear_intr(mp2);
+ return;
+ }
+
for (i = 0; i < cl_data->num_hid_devices; i++) {
if (cl_data->sensor_sts[i] == SENSOR_DISABLED) {
info.sensor_idx = cl_data->sensor_idx[i];
@@ -252,6 +257,11 @@ static void amd_sfh_suspend(struct amd_mp2_dev *mp2)
struct amdtp_cl_data *cl_data = mp2->cl_data;
int i, status;
+ if (!cl_data->is_any_sensor_enabled) {
+ amd_sfh_clear_intr(mp2);
+ return;
+ }
+
for (i = 0; i < cl_data->num_hid_devices; i++) {
if (cl_data->sensor_idx[i] != HPD_IDX &&
cl_data->sensor_sts[i] == SENSOR_ENABLED) {
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 78cdfb8b9..88cbb2fe6 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -335,36 +335,20 @@ static int asus_raw_event(struct hid_device *hdev,
if (drvdata->quirks & QUIRK_MEDION_E1239T)
return asus_e1239t_event(drvdata, data, size);
- if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) {
+ /*
+ * Skip these report IDs: the device emits a continuous stream associated
+ * with the AURA mode it is in, which looks like an 'echo'.
+ */
+ if (report->id == FEATURE_KBD_LED_REPORT_ID1 || report->id == FEATURE_KBD_LED_REPORT_ID2)
+ return -1;
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
/*
- * Skip these report ID, the device emits a continuous stream associated
- * with the AURA mode it is in which looks like an 'echo'.
+ * G713 and G733 send these codes on some keypresses; depending on
+ * the key pressed, they can trigger a shutdown event if not caught.
*/
- if (report->id == FEATURE_KBD_LED_REPORT_ID1 ||
- report->id == FEATURE_KBD_LED_REPORT_ID2) {
+ if (data[0] == 0x02 && data[1] == 0x30) {
return -1;
- /* Additional report filtering */
- } else if (report->id == FEATURE_KBD_REPORT_ID) {
- /*
- * G14 and G15 send these codes on some keypresses with no
- * discernable reason for doing so. We'll filter them out to avoid
- * unmapped warning messages later.
- */
- if (data[1] == 0xea || data[1] == 0xec || data[1] == 0x02 ||
- data[1] == 0x8a || data[1] == 0x9e) {
- return -1;
- }
- }
- if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) {
- /*
- * G713 and G733 send these codes on some keypresses, depending on
- * the key pressed it can trigger a shutdown event if not caught.
- */
- if(data[0] == 0x02 && data[1] == 0x30) {
- return -1;
- }
}
-
}
if (drvdata->quirks & QUIRK_ROG_CLAYMORE_II_KEYBOARD) {
@@ -1250,6 +1234,19 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[205] = 0x01;
}
+ /* match many more n-key devices */
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD && *rsize > 15) {
+ for (int i = 0; i < *rsize - 15; i++) {
+ /* the offset to the count from the 0x5a report part is always 14 */
+ if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
+ rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-Key report descriptor\n");
+ rdesc[i + 15] = 0x01;
+ break;
+ }
+ }
+ }
+
return rdesc;
}
@@ -1319,4 +1316,4 @@ static struct hid_driver asus_driver = {
};
module_hid_driver(asus_driver);
-MODULE_LICENSE("GPL"); \ No newline at end of file
+MODULE_LICENSE("GPL");
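A self-contained sketch of the descriptor scan added in asus_report_fixup() above, run against a made-up report-descriptor fragment (the byte values are illustrative only): the loop looks for a 0x85 0x5a report-ID item and, fourteen bytes later, a 0x95 0x05 report count, which it clamps to 1.

#include <stdio.h>

int main(void)
{
	/* made-up fragment: report ID 0x5a with a report count of 5 at offset +15 */
	unsigned char rdesc[] = {
		0x85, 0x5a, 0x75, 0x08, 0x15, 0x00, 0x26, 0xff,
		0x00, 0x09, 0x02, 0x81, 0x00, 0x00, 0x95, 0x05,
	};
	unsigned int rsize = sizeof(rdesc);

	for (unsigned int i = 0; i + 15 < rsize; i++) {
		/* the offset to the count from the 0x5a report part is always 14 */
		if (rdesc[i] == 0x85 && rdesc[i + 1] == 0x5a &&
		    rdesc[i + 14] == 0x95 && rdesc[i + 15] == 0x05) {
			rdesc[i + 15] = 0x01;	/* clamp the report count to 1 */
			break;
		}
	}

	printf("patched count = 0x%02x\n", rdesc[15]);
	return 0;
}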
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index de7a477d6..751a73ae7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1448,7 +1448,6 @@ static void implement(const struct hid_device *hid, u8 *report,
hid_warn(hid,
"%s() called with too large value %d (n: %d)! (%s)\n",
__func__, value, n, current->comm);
- WARN_ON(1);
value &= m;
}
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8376fb5e2..68b0f39de 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -823,6 +823,7 @@
#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
#define USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD 0xb309
+#define USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD 0xbb00
#define USB_DEVICE_ID_LOGITECH_C007 0xc007
#define USB_DEVICE_ID_LOGITECH_C077 0xc077
#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 3c3c497b6..37958edec 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1284,8 +1284,10 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
*/
msleep(50);
- if (retval)
+ if (retval) {
+ kfree(dj_report);
return retval;
+ }
}
/*
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 04a014cd2..56fc78841 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -2081,6 +2081,12 @@ static const struct hid_device_id mt_devices[] = {
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X12_TAB) },
+ /* Logitech devices */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
+ USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_CASA_TOUCHPAD) },
+
/* MosArt panels */
{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
MT_USB_DEVICE(USB_VENDOR_ID_ASUS,
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index 58b15750d..ff9078ad1 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -283,7 +283,9 @@ static struct input_dev *shield_haptics_create(
return haptics;
input_set_capability(haptics, EV_FF, FF_RUMBLE);
- input_ff_create_memless(haptics, NULL, play_effect);
+ ret = input_ff_create_memless(haptics, NULL, play_effect);
+ if (ret)
+ goto err;
ret = input_register_device(haptics);
if (ret)
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 5b91fb106..091e37933 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -31,6 +31,7 @@ struct i2c_hid_of_elan {
struct regulator *vcc33;
struct regulator *vccio;
struct gpio_desc *reset_gpio;
+ bool no_reset_on_power_off;
const struct elan_i2c_hid_chip_data *chip_data;
};
@@ -40,17 +41,17 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
container_of(ops, struct i2c_hid_of_elan, ops);
int ret;
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->vcc33) {
ret = regulator_enable(ihid_elan->vcc33);
if (ret)
- return ret;
+ goto err_deassert_reset;
}
ret = regulator_enable(ihid_elan->vccio);
- if (ret) {
- regulator_disable(ihid_elan->vcc33);
- return ret;
- }
+ if (ret)
+ goto err_disable_vcc33;
if (ihid_elan->chip_data->post_power_delay_ms)
msleep(ihid_elan->chip_data->post_power_delay_ms);
@@ -60,6 +61,15 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
return 0;
+
+err_disable_vcc33:
+ if (ihid_elan->vcc33)
+ regulator_disable(ihid_elan->vcc33);
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
@@ -67,7 +77,14 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
struct i2c_hid_of_elan *ihid_elan =
container_of(ops, struct i2c_hid_of_elan, ops);
- gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ /*
+ * Do not assert reset when the hardware allows for it to remain
+ * deasserted regardless of the state of the (shared) power supply to
+ * avoid wasting power when the supply is left on.
+ */
+ if (!ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+
if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
@@ -79,6 +96,7 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
+ int ret;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -93,21 +111,38 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
if (IS_ERR(ihid_elan->reset_gpio))
return PTR_ERR(ihid_elan->reset_gpio);
+ ihid_elan->no_reset_on_power_off = of_property_read_bool(client->dev.of_node,
+ "no-reset-on-power-off");
+
ihid_elan->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ihid_elan->vccio))
- return PTR_ERR(ihid_elan->vccio);
+ if (IS_ERR(ihid_elan->vccio)) {
+ ret = PTR_ERR(ihid_elan->vccio);
+ goto err_deassert_reset;
+ }
ihid_elan->chip_data = device_get_match_data(&client->dev);
if (ihid_elan->chip_data->main_supply_name) {
ihid_elan->vcc33 = devm_regulator_get(&client->dev,
ihid_elan->chip_data->main_supply_name);
- if (IS_ERR(ihid_elan->vcc33))
- return PTR_ERR(ihid_elan->vcc33);
+ if (IS_ERR(ihid_elan->vcc33)) {
+ ret = PTR_ERR(ihid_elan->vcc33);
+ goto err_deassert_reset;
+ }
}
- return i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ret = i2c_hid_core_probe(client, &ihid_elan->ops,
+ ihid_elan->chip_data->hid_descriptor_address, 0);
+ if (ret)
+ goto err_deassert_reset;
+
+ return 0;
+
+err_deassert_reset:
+ if (ihid_elan->no_reset_on_power_off)
+ gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
+
+ return ret;
}
static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 56bd4f02f..4b8232360 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -173,6 +173,11 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* request and enable interrupt */
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_err(dev, "ISH: Failed to allocate IRQ vectors\n");
+ return ret;
+ }
+
if (!pdev->msi_enabled && !pdev->msix_enabled)
irq_flag = IRQF_SHARED;
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index 6500ca548..ca2dff158 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -429,7 +429,7 @@ static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
};
static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
- { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
+ { 0x724, 0x0, 0x0, 0x0, 0x0, 1000, "Board Power" },
};
static const struct hwmon_channel_info * const n6000bmc_hinfo[] = {
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 001799bc2..b8548105c 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -876,9 +876,11 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret) {
- if (!val)
+ if (!val) {
+ fwnode_handle_put(child);
return dev_err_probe(&st->client->dev, -EINVAL,
"shunt resistor value cannot be zero\n");
+ }
st->r_sense_uohm[addr] = val;
}
}
diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
index 1f96e9496..439dd3dba 100644
--- a/drivers/hwmon/shtc1.c
+++ b/drivers/hwmon/shtc1.c
@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
if (np) {
data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
- data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
+ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
} else {
if (client->dev.platform_data)
data->setup = *(struct shtc1_platform_data *)dev->platform_data;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index c2ca4a02d..a0bdfabdd 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -1240,6 +1240,8 @@ static void etm4_init_arch_data(void *info)
drvdata->nr_event = FIELD_GET(TRCIDR0_NUMEVENT_MASK, etmidr0);
/* QSUPP, bits[16:15] Q element support field */
drvdata->q_support = FIELD_GET(TRCIDR0_QSUPP_MASK, etmidr0);
+ if (drvdata->q_support)
+ drvdata->q_filt = !!(etmidr0 & TRCIDR0_QFILT);
/* TSSIZE, bits[28:24] Global timestamp size field */
drvdata->ts_size = FIELD_GET(TRCIDR0_TSSIZE_MASK, etmidr0);
@@ -1732,16 +1734,14 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcccctlr = etm4x_read32(csa, TRCCCCTLR);
state->trcbbctlr = etm4x_read32(csa, TRCBBCTLR);
state->trctraceidr = etm4x_read32(csa, TRCTRACEIDR);
- state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
+ if (drvdata->q_filt)
+ state->trcqctlr = etm4x_read32(csa, TRCQCTLR);
state->trcvictlr = etm4x_read32(csa, TRCVICTLR);
state->trcviiectlr = etm4x_read32(csa, TRCVIIECTLR);
state->trcvissctlr = etm4x_read32(csa, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
state->trcvipcssctlr = etm4x_read32(csa, TRCVIPCSSCTLR);
- state->trcvdctlr = etm4x_read32(csa, TRCVDCTLR);
- state->trcvdsacctlr = etm4x_read32(csa, TRCVDSACCTLR);
- state->trcvdarcctlr = etm4x_read32(csa, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
@@ -1758,7 +1758,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trccntvr[i] = etm4x_read32(csa, TRCCNTVRn(i));
}
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /* Resource selector pair 0 is reserved */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
state->trcrsctlr[i] = etm4x_read32(csa, TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -1843,8 +1844,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
int i;
struct etmv4_save_state *state = drvdata->save_state;
- struct csdev_access tmp_csa = CSDEV_ACCESS_IOMEM(drvdata->base);
- struct csdev_access *csa = &tmp_csa;
+ struct csdev_access *csa = &drvdata->csdev->access;
+
+ if (WARN_ON(!drvdata->csdev))
+ return;
etm4_cs_unlock(drvdata, csa);
etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
@@ -1863,16 +1866,14 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcccctlr, TRCCCCTLR);
etm4x_relaxed_write32(csa, state->trcbbctlr, TRCBBCTLR);
etm4x_relaxed_write32(csa, state->trctraceidr, TRCTRACEIDR);
- etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
+ if (drvdata->q_filt)
+ etm4x_relaxed_write32(csa, state->trcqctlr, TRCQCTLR);
etm4x_relaxed_write32(csa, state->trcvictlr, TRCVICTLR);
etm4x_relaxed_write32(csa, state->trcviiectlr, TRCVIIECTLR);
etm4x_relaxed_write32(csa, state->trcvissctlr, TRCVISSCTLR);
if (drvdata->nr_pe_cmp)
etm4x_relaxed_write32(csa, state->trcvipcssctlr, TRCVIPCSSCTLR);
- etm4x_relaxed_write32(csa, state->trcvdctlr, TRCVDCTLR);
- etm4x_relaxed_write32(csa, state->trcvdsacctlr, TRCVDSACCTLR);
- etm4x_relaxed_write32(csa, state->trcvdarcctlr, TRCVDARCCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
@@ -1889,7 +1890,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trccntvr[i], TRCCNTVRn(i));
}
- for (i = 0; i < drvdata->nr_resource * 2; i++)
+ /* Resource selector pair 0 is reserved */
+ for (i = 2; i < drvdata->nr_resource * 2; i++)
etm4x_relaxed_write32(csa, state->trcrsctlr[i], TRCRSCTLRn(i));
for (i = 0; i < drvdata->nr_ss_cmp; i++) {
@@ -2213,6 +2215,9 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
ret = etm4_probe(&pdev->dev);
pm_runtime_put(&pdev->dev);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 9ea678bc2..9e9165f62 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -43,9 +43,6 @@
#define TRCVIIECTLR 0x084
#define TRCVISSCTLR 0x088
#define TRCVIPCSSCTLR 0x08C
-#define TRCVDCTLR 0x0A0
-#define TRCVDSACCTLR 0x0A4
-#define TRCVDARCCTLR 0x0A8
/* Derived resources registers */
#define TRCSEQEVRn(n) (0x100 + (n * 4)) /* n = 0-2 */
#define TRCSEQRSTEVR 0x118
@@ -90,9 +87,6 @@
/* Address Comparator registers n = 0-15 */
#define TRCACVRn(n) (0x400 + (n * 8))
#define TRCACATRn(n) (0x480 + (n * 8))
-/* Data Value Comparator Value registers, n = 0-7 */
-#define TRCDVCVRn(n) (0x500 + (n * 16))
-#define TRCDVCMRn(n) (0x580 + (n * 16))
/* ContextID/Virtual ContextID comparators, n = 0-7 */
#define TRCCIDCVRn(n) (0x600 + (n * 8))
#define TRCVMIDCVRn(n) (0x640 + (n * 8))
@@ -141,6 +135,7 @@
#define TRCIDR0_TRCCCI BIT(7)
#define TRCIDR0_RETSTACK BIT(9)
#define TRCIDR0_NUMEVENT_MASK GENMASK(11, 10)
+#define TRCIDR0_QFILT BIT(14)
#define TRCIDR0_QSUPP_MASK GENMASK(16, 15)
#define TRCIDR0_TSSIZE_MASK GENMASK(28, 24)
@@ -272,9 +267,6 @@
/* List of registers accessible via System instructions */
#define ETM4x_ONLY_SYSREG_LIST(op, val) \
CASE_##op((val), TRCPROCSELR) \
- CASE_##op((val), TRCVDCTLR) \
- CASE_##op((val), TRCVDSACCTLR) \
- CASE_##op((val), TRCVDARCCTLR) \
CASE_##op((val), TRCOSLAR)
#define ETM_COMMON_SYSREG_LIST(op, val) \
@@ -422,22 +414,6 @@
CASE_##op((val), TRCACATRn(13)) \
CASE_##op((val), TRCACATRn(14)) \
CASE_##op((val), TRCACATRn(15)) \
- CASE_##op((val), TRCDVCVRn(0)) \
- CASE_##op((val), TRCDVCVRn(1)) \
- CASE_##op((val), TRCDVCVRn(2)) \
- CASE_##op((val), TRCDVCVRn(3)) \
- CASE_##op((val), TRCDVCVRn(4)) \
- CASE_##op((val), TRCDVCVRn(5)) \
- CASE_##op((val), TRCDVCVRn(6)) \
- CASE_##op((val), TRCDVCVRn(7)) \
- CASE_##op((val), TRCDVCMRn(0)) \
- CASE_##op((val), TRCDVCMRn(1)) \
- CASE_##op((val), TRCDVCMRn(2)) \
- CASE_##op((val), TRCDVCMRn(3)) \
- CASE_##op((val), TRCDVCMRn(4)) \
- CASE_##op((val), TRCDVCMRn(5)) \
- CASE_##op((val), TRCDVCMRn(6)) \
- CASE_##op((val), TRCDVCMRn(7)) \
CASE_##op((val), TRCCIDCVRn(0)) \
CASE_##op((val), TRCCIDCVRn(1)) \
CASE_##op((val), TRCCIDCVRn(2)) \
@@ -907,9 +883,6 @@ struct etmv4_save_state {
u32 trcviiectlr;
u32 trcvissctlr;
u32 trcvipcssctlr;
- u32 trcvdctlr;
- u32 trcvdsacctlr;
- u32 trcvdarcctlr;
u32 trcseqevr[ETM_MAX_SEQ_STATES];
u32 trcseqrstevr;
@@ -982,6 +955,7 @@ struct etmv4_save_state {
* @os_unlock: True if access to management registers is allowed.
* @instrp0: Tracing of load and store instructions
* as P0 elements is supported.
+ * @q_filt: Q element filtering support, if Q elements are supported.
* @trcbb: Indicates if the trace unit supports branch broadcast tracing.
* @trccond: If the trace unit supports conditional
* instruction tracing.
@@ -1044,6 +1018,7 @@ struct etmv4_drvdata {
bool boot_enable;
bool os_unlock;
bool instrp0;
+ bool q_filt;
bool trcbb;
bool trccond;
bool retstack;
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 147d338c1..8dad239ab 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -290,6 +290,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Meteor Lake-S CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xae24),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Meteor Lake-S */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7f26),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Raptor Lake-S */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7a26),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -300,6 +310,26 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Granite Rapids */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0963),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Granite Rapids SOC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3256),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Sapphire Rapids SOC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3456),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Lunar Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 534fbefc7..20895d391 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -868,8 +868,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return -ENOMEM;
stm->major = register_chrdev(0, stm_data->name, &stm_fops);
- if (stm->major < 0)
- goto err_free;
+ if (stm->major < 0) {
+ err = stm->major;
+ vfree(stm);
+ return err;
+ }
device_initialize(&stm->dev);
stm->dev.devt = MKDEV(stm->major, 0);
@@ -913,10 +916,8 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
err_device:
unregister_chrdev(stm->major, stm_data->name);
- /* matches device_initialize() above */
+ /* calls stm_device_release() */
put_device(&stm->dev);
-err_free:
- vfree(stm);
return err;
}
diff --git a/drivers/i2c/busses/i2c-at91-slave.c b/drivers/i2c/busses/i2c-at91-slave.c
index d6eeea516..131a67d9d 100644
--- a/drivers/i2c/busses/i2c-at91-slave.c
+++ b/drivers/i2c/busses/i2c-at91-slave.c
@@ -106,8 +106,7 @@ static int at91_unreg_slave(struct i2c_client *slave)
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
- return I2C_FUNC_SLAVE | I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
- | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
+ return I2C_FUNC_SLAVE;
}
static const struct i2c_algorithm at91_twi_algorithm_slave = {
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 4bb7d6756..2fce3e84b 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -633,6 +633,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
if (hold_clear) {
ctrl_reg &= ~CDNS_I2C_CR_HOLD;
+ ctrl_reg &= ~CDNS_I2C_CR_CLR_FIFO;
/*
* In case of Xilinx Zynq SOC, clear the HOLD bit before transfer size
* register reaches '0'. This is an IP bug which causes transfer size
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index 2e079cf20..78e2c47e3 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -220,7 +220,7 @@ static const struct i2c_algorithm i2c_dw_algo = {
void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
{
- dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
+ dev->functionality = I2C_FUNC_SLAVE;
dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 6d72e4e12..36e8f6196 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -99,6 +99,7 @@ struct lpi2c_imx_struct {
__u8 *rx_buf;
__u8 *tx_buf;
struct completion complete;
+ unsigned long rate_per;
unsigned int msglen;
unsigned int delivered;
unsigned int block_data;
@@ -212,9 +213,7 @@ static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
lpi2c_imx_set_mode(lpi2c_imx);
- clk_rate = clk_get_rate(lpi2c_imx->clks[0].clk);
- if (!clk_rate)
- return -EINVAL;
+ clk_rate = lpi2c_imx->rate_per;
if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
filt = 0;
@@ -611,6 +610,20 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+ * Lock the parent clock rate to avoid having to query the parent
+ * clock rate upon each transfer
+ */
+ ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't lock I2C peripheral clock rate\n");
+
+ lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
+ if (!lpi2c_imx->rate_per)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "can't get I2C peripheral clock rate\n");
+
pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index e106af83c..350ccfbe8 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -442,8 +442,8 @@ static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
/* Init the device */
- oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_EN);
+ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index bbea521b0..a73f5bb9a 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -550,17 +550,13 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
device_property_read_u32(&pdev->dev, "socionext,pclk-rate",
&i2c->pclkrate);
- i2c->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (PTR_ERR(i2c->pclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (!IS_ERR_OR_NULL(i2c->pclk)) {
- dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
-
- ret = clk_prepare_enable(i2c->pclk);
- if (ret)
- return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
- i2c->pclkrate = clk_get_rate(i2c->pclk);
- }
+ i2c->pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
+ if (IS_ERR(i2c->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ "failed to get and enable clock\n");
+
+ dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
+ i2c->pclkrate = clk_get_rate(i2c->pclk);
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
@@ -615,8 +611,6 @@ static void synquacer_i2c_remove(struct platform_device *pdev)
struct synquacer_i2c *i2c = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c->adapter);
- if (!IS_ERR(i2c->pclk))
- clk_disable_unprepare(i2c->pclk);
};
static const struct of_device_id synquacer_i2c_dt_ids[] __maybe_unused = {
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index d6037a328..14ae0cfc3 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -445,6 +445,11 @@ static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
}
+static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev)
+{
+ return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev));
+}
+
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
void *arg)
{
@@ -471,11 +476,17 @@ static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
break;
client = i2c_acpi_find_client_by_adev(adev);
- if (!client)
- break;
+ if (client) {
+ i2c_unregister_device(client);
+ put_device(&client->dev);
+ }
+
+ adapter = i2c_acpi_find_adapter_by_adev(adev);
+ if (adapter) {
+ acpi_unbind_one(&adapter->dev);
+ put_device(&adapter->dev);
+ }
- i2c_unregister_device(client);
- put_device(&client->dev);
break;
}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 5ee4db689..bb299ce02 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -415,6 +415,19 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
int ret;
mutex_lock(&master->lock);
+ /*
+ * IBIWON may be set before SVC_I3C_MCTRL_REQUEST_AUTO_IBI, causing
+ * readl_relaxed_poll_timeout() to return immediately. Consequently,
+ * ibitype will be 0 since it was last updated only after the 8th SCL
+ * cycle, leading to missed client IBI handlers.
+ *
+ * A typical scenario is when IBIWON occurs and bus arbitration is lost
+ * at svc_i3c_master_priv_xfers().
+ *
+ * Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
+ */
+ writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
/* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
SVC_I3C_MCTRL_IBIRESP_AUTO,
@@ -429,9 +442,6 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
goto reenable_ibis;
}
- /* Clear the interrupt status */
- writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
-
status = readl(master->regs + SVC_I3C_MSTATUS);
ibitype = SVC_I3C_MSTATUS_IBITYPE(status);
ibiaddr = SVC_I3C_MSTATUS_IBIADDR(status);
@@ -1080,7 +1090,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
* and yield the above events handler.
*/
if (SVC_I3C_MSTATUS_IBIWON(reg)) {
- ret = -ENXIO;
+ ret = -EAGAIN;
*actual_len = 0;
goto emit_stop;
}
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 7475ec2a5..13e1bba45 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -225,11 +225,11 @@ static void __ad9467_get_scale(struct ad9467_state *st, int index,
}
static const struct iio_chan_spec ad9434_channels[] = {
- AD9467_CHAN(0, 0, 12, 'S'),
+ AD9467_CHAN(0, 0, 12, 's'),
};
static const struct iio_chan_spec ad9467_channels[] = {
- AD9467_CHAN(0, 0, 16, 'S'),
+ AD9467_CHAN(0, 0, 16, 's'),
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index 4156639b3..e3b215882 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -175,6 +175,7 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
+ struct clk *clk;
int ret;
st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
@@ -195,6 +196,10 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (!expected_ver)
return -ENODEV;
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
/*
* Force disable the core. Up to the frontend to enable us. And we can
* still read/write registers...
@@ -207,9 +212,9 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (*expected_ver > ver) {
+ if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
dev_err(&pdev->dev,
- "IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
+ "Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
ADI_AXI_PCORE_VER_MINOR(*expected_ver),
ADI_AXI_PCORE_VER_PATCH(*expected_ver),
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index e0c2742da..8a0c35742 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -787,6 +787,15 @@ static int pac1934_read_raw(struct iio_dev *indio_dev,
s64 curr_energy;
int ret, channel = chan->channel - 1;
+ /*
+ * For AVG channels the index is between 5 and 8.
+ * To calculate the real index of PAC1934_CH_VOLTAGE_AVERAGE and,
+ * respectively, PAC1934_CH_CURRENT, we need to remove the added
+ * offset (PAC1934_MAX_NUM_CHANNELS).
+ */
+ if (channel >= PAC1934_MAX_NUM_CHANNELS)
+ channel = channel - PAC1934_MAX_NUM_CHANNELS;
+
ret = pac1934_retrieve_data(info, PAC1934_MIN_UPDATE_WAIT_TIME_US);
if (ret < 0)
return ret;
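A small sketch of the channel-index arithmetic described in the comment above, assuming PAC1934_MAX_NUM_CHANNELS is 4 (the constant's value is not visible in this hunk, so treat it as an assumption): the averaged channels sit one PAC1934_MAX_NUM_CHANNELS offset above the instantaneous ones, so removing the offset recovers the real hardware channel.

#include <stdio.h>

#define PAC1934_MAX_NUM_CHANNELS 4	/* assumed value, not taken from this hunk */

static int pac1934_real_channel(int iio_channel)
{
	int channel = iio_channel - 1;			/* the driver numbers channels from 1 */

	if (channel >= PAC1934_MAX_NUM_CHANNELS)	/* AVG channels carry an added offset */
		channel -= PAC1934_MAX_NUM_CHANNELS;

	return channel;
}

int main(void)
{
	/* AVG channel 6 and instantaneous channel 2 both resolve to hardware channel 1 */
	printf("%d %d\n", pac1934_real_channel(6), pac1934_real_channel(2));
	return 0;
}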
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index b5d3c9cea..283c20757 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2234,6 +2234,7 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
if (vin[0] != val || vin[1] >= adc_info->max_channels) {
dev_err(&indio_dev->dev, "Invalid channel in%d-in%d\n",
vin[0], vin[1]);
+ ret = -EINVAL;
goto err;
}
} else if (ret != -EINVAL) {
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index 3b0f9598a..adceb1b7a 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -60,11 +60,15 @@ EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
uint32_t period, bool fifo)
{
+ uint32_t mult;
+
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
- ts->new_mult = period / ts->chip.clock_period;
+ mult = period / ts->chip.clock_period;
+ if (mult != ts->mult)
+ ts->new_mult = mult;
return 0;
}
@@ -101,6 +105,9 @@ static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
{
+ const int64_t period_min = ts->min_period * ts->mult;
+ const int64_t period_max = ts->max_period * ts->mult;
+ int64_t add_max, sub_max;
int64_t delta, jitter;
int64_t adjust;
@@ -108,11 +115,13 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
delta = ts->it.lo - ts->timestamp;
/* adjust timestamp while respecting jitter */
+ add_max = period_max - (int64_t)ts->period;
+ sub_max = period_min - (int64_t)ts->period;
jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter);
if (delta > jitter)
- adjust = jitter;
+ adjust = add_max;
else if (delta < -jitter)
- adjust = -jitter;
+ adjust = sub_max;
else
adjust = 0;
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
index 076bc9ecf..4763402db 100644
--- a/drivers/iio/dac/ad5592r-base.c
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -415,7 +415,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
s64 tmp = *val * (3767897513LL / 25LL);
*val = div_s64_rem(tmp, 1000000000LL, val2);
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT_PLUS_NANO;
}
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 5d42ab9b1..67d74a1a1 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1391,7 +1391,7 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels,
ARRAY_SIZE(data->buffer.channels));
if (ret)
- return IRQ_NONE;
+ goto out;
} else {
for_each_set_bit(bit, indio_dev->active_scan_mask,
BMI323_CHAN_MAX) {
@@ -1400,13 +1400,14 @@ static irqreturn_t bmi323_trigger_handler(int irq, void *p)
&data->buffer.channels[index++],
BMI323_BYTES_PER_SAMPLE);
if (ret)
- return IRQ_NONE;
+ goto out;
}
}
iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
+out:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index f67bd5a39..6d9cb010f 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -129,10 +129,6 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 3df0a715e..fc80b4a97 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -129,10 +129,6 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
/* update data FIFO write */
inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
- if (ret)
- goto out_unlock;
-
- ret = inv_icm42600_buffer_update_watermark(st);
out_unlock:
mutex_unlock(&st->lock);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 4302093b9..8684ba246 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1654,8 +1654,10 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
return NULL;
indio_dev = &iio_dev_opaque->indio_dev;
- indio_dev->priv = (char *)iio_dev_opaque +
- ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN);
+
+ if (sizeof_priv)
+ indio_dev->priv = (char *)iio_dev_opaque +
+ ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
indio_dev->dev.parent = parent;
indio_dev->dev.type = &iio_device_type;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 62e9e93d9..0199d055e 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1394,12 +1394,12 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2)
/*
* Temperature is returned in Celsius degrees in fractional
- * form down 2^16. We rescale by x1000 to return milli Celsius
- * to respect IIO ABI.
+ * form down 2^16. We rescale by x1000 to return millidegrees
+ * Celsius to respect IIO ABI.
*/
- *val = raw_temp * 1000;
- *val2 = 16;
- return IIO_VAL_FRACTIONAL_LOG2;
+ raw_temp = sign_extend32(raw_temp, 23);
+ *val = ((s64)raw_temp * 1000) / (1 << 16);
+ return IIO_VAL_INT;
}
static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2)
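The temperature fix above does two things: it sign-extends the 24-bit raw sample (bit 23 is the sign bit) and rescales the 2^16 fractional value to integer millidegrees. A plain C sketch of both steps; the kernel uses the sign_extend32() helper, so the open-coded version here is only illustrative:

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32_sketch(uint32_t value, int sign_bit)
{
        int shift = 31 - sign_bit;

        return (int32_t)(value << shift) >> shift;
}

int main(void)
{
        uint32_t raw = 0xFFFE00;                       /* a negative 24-bit sample */
        int32_t temp = sign_extend32_sketch(raw, 23);  /* -> -512 */
        long long milli = ((long long)temp * 1000) / (1 << 16);

        printf("raw=0x%06X -> %d -> %lld milli-degC\n",
               (unsigned int)raw, temp, milli);
        return 0;
}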
diff --git a/drivers/iio/pressure/dps310.c b/drivers/iio/pressure/dps310.c
index 1ff091b2f..d0a516d56 100644
--- a/drivers/iio/pressure/dps310.c
+++ b/drivers/iio/pressure/dps310.c
@@ -730,7 +730,7 @@ static int dps310_read_pressure(struct dps310_data *data, int *val, int *val2,
}
}
-static int dps310_calculate_temp(struct dps310_data *data)
+static int dps310_calculate_temp(struct dps310_data *data, int *val)
{
s64 c0;
s64 t;
@@ -746,7 +746,9 @@ static int dps310_calculate_temp(struct dps310_data *data)
t = c0 + ((s64)data->temp_raw * (s64)data->c1);
/* Convert to milliCelsius and scale the temperature */
- return (int)div_s64(t * 1000LL, kt);
+ *val = (int)div_s64(t * 1000LL, kt);
+
+ return 0;
}
static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
@@ -768,11 +770,10 @@ static int dps310_read_temp(struct dps310_data *data, int *val, int *val2,
if (rc)
return rc;
- rc = dps310_calculate_temp(data);
- if (rc < 0)
+ rc = dps310_calculate_temp(data, val);
+ if (rc)
return rc;
- *val = rc;
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
index 468458042..7a3eef5d5 100644
--- a/drivers/iio/temperature/mcp9600.c
+++ b/drivers/iio/temperature/mcp9600.c
@@ -52,7 +52,8 @@ static int mcp9600_read(struct mcp9600_data *data,
if (ret < 0)
return ret;
- *val = ret;
+
+ *val = sign_extend32(ret, 15);
return 0;
}
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index 1f5c962c1..f7f88498b 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -947,9 +947,9 @@ static int mlx90635_probe(struct i2c_client *client)
"failed to allocate regmap\n");
regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee);
- if (IS_ERR(regmap))
- return dev_err_probe(&client->dev, PTR_ERR(regmap),
- "failed to allocate regmap\n");
+ if (IS_ERR(regmap_ee))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap_ee),
+ "failed to allocate EEPROM regmap\n");
mlx90635 = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f25329579..be0743dac 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst,
static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
{
- struct rtable *rt;
- struct rt6_info *rt6;
-
- if (family == AF_INET) {
- rt = container_of(dst, struct rtable, dst);
- return rt->rt_uses_gateway;
- }
+ if (family == AF_INET)
+ return dst_rtable(dst)->rt_uses_gateway;
- rt6 = container_of(dst, struct rt6_info, dst);
- return rt6->rt6i_flags & RTF_GATEWAY;
+ return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY;
}
static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
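dst_rtable() and dst_rt6_info() replace the open-coded container_of() casts; they are presumably thin wrappers around the same pattern. A generic, illustrative sketch of that pattern (not the kernel's actual helper definitions):

#include <stddef.h>
#include <stdio.h>

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dst_entry { int placeholder; };
struct rtable    { struct dst_entry dst; int rt_uses_gateway; };

static struct rtable *dst_rtable_sketch(struct dst_entry *dst)
{
        return container_of_sketch(dst, struct rtable, dst);
}

int main(void)
{
        struct rtable rt = { .rt_uses_gateway = 1 };

        printf("uses gateway: %d\n", dst_rtable_sketch(&rt.dst)->rt_uses_gateway);
        return 0;
}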
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1e2cd7c87..64ace0b96 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -715,8 +715,10 @@ cma_validate_port(struct ib_device *device, u32 port,
rcu_read_lock();
ndev = rcu_dereference(sgid_attr->ndev);
if (!net_eq(dev_net(ndev), dev_addr->net) ||
- ndev->ifindex != bound_if_index)
+ ndev->ifindex != bound_if_index) {
+ rdma_put_gid_attr(sgid_attr);
sgid_attr = ERR_PTR(-ENODEV);
+ }
rcu_read_unlock();
goto out;
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9dca451ed..6974922e5 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -107,8 +107,6 @@ struct bnxt_re_gsi_context {
struct bnxt_re_sqp_entries *sqp_tbl;
};
-#define BNXT_RE_MIN_MSIX 2
-#define BNXT_RE_MAX_MSIX 9
#define BNXT_RE_AEQ_IDX 0
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
@@ -168,7 +166,7 @@ struct bnxt_re_dev {
struct bnxt_qplib_rcfw rcfw;
/* NQ */
- struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ struct bnxt_qplib_nq nq[BNXT_MAX_ROCE_MSIX];
/* Device Resources */
struct bnxt_qplib_dev_attr dev_attr;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 439d0c7c5..04258676d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
hwq_attr.stride = sizeof(struct sq_sge);
hwq_attr.depth = bnxt_qplib_get_depth(sq);
hwq_attr.aux_stride = psn_sz;
- hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
/* Update msn tbl size */
if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 7250d0643..68e22f368 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -149,7 +149,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return ret;
}
- ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
if (ret) {
ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
goto err_put;
@@ -163,7 +163,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
return 0;
err_xa:
- xa_erase(&cq_table->array, hr_cq->cqn);
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
err_put:
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
@@ -182,7 +182,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn);
- xa_erase(&cq_table->array, hr_cq->cqn);
+ xa_erase_irq(&cq_table->array, hr_cq->cqn);
/* Waiting interrupt process procedure carried out */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -476,13 +476,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
struct ib_event event;
struct ib_cq *ibcq;
- hr_cq = xa_load(&hr_dev->cq_table.array,
- cqn & (hr_dev->caps.num_cqs - 1));
- if (!hr_cq) {
- dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
- return;
- }
-
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -491,7 +484,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
return;
}
- refcount_inc(&hr_cq->refcount);
+ xa_lock(&hr_dev->cq_table.array);
+ hr_cq = xa_load(&hr_dev->cq_table.array,
+ cqn & (hr_dev->caps.num_cqs - 1));
+ if (hr_cq)
+ refcount_inc(&hr_cq->refcount);
+ xa_unlock(&hr_dev->cq_table.array);
+ if (!hr_cq) {
+ dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+ return;
+ }
ibcq = &hr_cq->ib_cq;
if (ibcq->event_handler) {
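The reordered event handler now takes the CQ reference while still holding the XArray lock, so the CQ cannot be freed between the lookup and the refcount_inc(). The switch to xa_store_irq()/xa_erase_irq() in the same file serves the same end, keeping the table lock consistent with the interrupt-context lookups. A userspace sketch of the "look up and take a reference under the table lock" pattern, with illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj { atomic_int refcount; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[16];

static struct obj *lookup_get(unsigned int idx)
{
        struct obj *o;

        pthread_mutex_lock(&table_lock);
        o = table[idx & 15];
        if (o)
                atomic_fetch_add(&o->refcount, 1);  /* reference taken before unlock */
        pthread_mutex_unlock(&table_lock);

        return o;  /* may be NULL: the caller reports the bogus event and bails out */
}

int main(void)
{
        static struct obj o = { .refcount = 1 };

        table[5] = &o;
        return lookup_get(5) ? 0 : 1;
}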
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index c3cbd0a49..0b47c6d68 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -100,6 +100,9 @@
#define CQ_BANKID_SHIFT 2
#define CQ_BANKID_MASK GENMASK(1, 0)
+#define HNS_ROCE_MAX_CQ_COUNT 0xFFFF
+#define HNS_ROCE_MAX_CQ_PERIOD 0xFFFF
+
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index a4b3f1916..658c522be 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -281,7 +281,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
return hem;
fail:
- hns_roce_free_hem(hr_dev, hem);
+ kfree(hem);
return NULL;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 6fb51db96..9c415b254 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -57,16 +57,16 @@ enum {
};
#define check_whether_bt_num_3(type, hop_num) \
- (type < HEM_TYPE_MTT && hop_num == 2)
+ ((type) < HEM_TYPE_MTT && (hop_num) == 2)
#define check_whether_bt_num_2(type, hop_num) \
- ((type < HEM_TYPE_MTT && hop_num == 1) || \
- (type >= HEM_TYPE_MTT && hop_num == 2))
+ (((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 2))
#define check_whether_bt_num_1(type, hop_num) \
- ((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
- (type >= HEM_TYPE_MTT && hop_num == 1) || \
- (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+ (((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+ ((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
struct hns_roce_hem {
void *buf;
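The added parentheses around the macro arguments above guard against operator-precedence surprises when callers pass compound expressions. A small standalone example of the failure mode the change prevents (hypothetical macros, same idea):

#include <stdio.h>

#define BAD_CHECK(type, hop_num)  (type < 2 && hop_num == 1)
#define GOOD_CHECK(type, hop_num) ((type) < 2 && (hop_num) == 1)

int main(void)
{
        int t = 1, x = 2;

        /* passing the expression "x | 1": == binds tighter than |, so the
         * unparenthesized macro evaluates x | (1 == 1) instead of (x | 1) == 1 */
        printf("bad=%d good=%d\n", BAD_CHECK(t, x | 1), GOOD_CHECK(t, x | 1));
        return 0;
}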
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index ba7ae792d..8800464c9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2105,7 +2105,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->gmv_bt_num *
(HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
- caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+ caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
caps->gmv_entry_sz);
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -3711,8 +3711,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
wc->status == IB_WC_WR_FLUSH_ERR))
return;
- ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
- print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+ ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
+ cqe_status);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
cq->cqe_size, false);
wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
@@ -5802,7 +5803,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
dev_info(hr_dev->dev,
"cq_period(%u) reached the upper limit, adjusted to 65.\n",
cq_period);
- cq_period = HNS_ROCE_MAX_CQ_PERIOD;
+ cq_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
}
cq_period *= HNS_ROCE_CLOCK_ADJUST;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index df04bc8ed..dfed6b4dd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -1334,7 +1334,7 @@ struct fmea_ram_ecc {
/* only for RNR timeout issue of HIP08 */
#define HNS_ROCE_CLOCK_ADJUST 1000
-#define HNS_ROCE_MAX_CQ_PERIOD 65
+#define HNS_ROCE_MAX_CQ_PERIOD_HIP08 65
#define HNS_ROCE_MAX_EQ_PERIOD 65
#define HNS_ROCE_RNR_TIMER_10NS 1
#define HNS_ROCE_1US_CFG 999
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 1dc60c2b2..d20225836 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,9 +37,11 @@
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
+#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
+#include "hns_roce_hw_v2.h"
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
const u8 *addr)
@@ -192,6 +194,12 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+ props->max_ah = INT_MAX;
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD;
+ props->cq_caps.max_cq_moderation_count = HNS_ROCE_MAX_CQ_COUNT;
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ props->cq_caps.max_cq_moderation_period = HNS_ROCE_MAX_CQ_PERIOD_HIP08;
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
props->max_srq = hr_dev->caps.num_srqs;
props->max_srq_wr = hr_dev->caps.max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 9e05b57a2..80c050d7d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -441,18 +441,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_mtr *mtr = &mr->pbl_mtr;
- int ret = 0;
+ int ret, sg_num = 0;
mr->npages = 0;
mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
sizeof(dma_addr_t), GFP_KERNEL);
if (!mr->page_list)
- return ret;
+ return sg_num;
- ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
- if (ret < 1) {
+ sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (sg_num < 1) {
ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
- mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
goto err_page_list;
}
@@ -463,17 +463,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
if (ret) {
ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
- ret = 0;
+ sg_num = 0;
} else {
mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
- ret = mr->npages;
}
err_page_list:
kvfree(mr->page_list);
mr->page_list = NULL;
- return ret;
+ return sg_num;
}
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 4abae9477..8f48c6723 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return ret;
}
- ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
if (ret) {
ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
goto err_put;
@@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
return 0;
err_xa:
- xa_erase(&srq_table->xa, srq->srqn);
+ xa_erase_irq(&srq_table->xa, srq->srqn);
err_put:
hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
@@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
ret, srq->srqn);
- xa_erase(&srq_table->xa, srq->srqn);
+ xa_erase_irq(&srq_table->xa, srq->srqn);
if (refcount_dec_and_test(&srq->refcount))
complete(&srq->free);
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d..89fcc09de 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->cqe = attr->cqe;
- cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(cq->umem)) {
- err = PTR_ERR(cq->umem);
- ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
- err);
- return err;
- }
-
- err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
if (err) {
- ibdev_dbg(ibdev,
- "Failed to create dma region for create cq, %d\n",
- err);
- goto err_release_umem;
+ ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+ return err;
}
- ibdev_dbg(ibdev,
- "create_dma_region ret %d gdma_region 0x%llx\n",
- err, cq->gdma_region);
-
- /*
- * The CQ ID is not known at this time. The ID is generated at create_qp
- */
- cq->id = INVALID_QUEUE_ID;
-
return 0;
-
-err_release_umem:
- ib_umem_release(cq->umem);
- return err;
}
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
struct ib_device *ibdev = ibcq->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
- int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
gc = mdev_to_gc(mdev);
- err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
- if (err) {
- ibdev_dbg(ibdev,
- "Failed to destroy dma region, %d\n", err);
- return err;
+ if (cq->queue.id != INVALID_QUEUE_ID) {
+ kfree(gc->cq_table[cq->queue.id]);
+ gc->cq_table[cq->queue.id] = NULL;
}
- if (cq->id != INVALID_QUEUE_ID) {
- kfree(gc->cq_table[cq->id]);
- gc->cq_table[cq->id] = NULL;
- }
-
- ib_umem_release(cq->umem);
+ mana_ib_destroy_queue(mdev, &cq->queue);
return 0;
}
@@ -113,8 +81,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue *gdma_cq;
+ if (cq->queue.id >= gc->max_num_cqs)
+ return -EINVAL;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
+ WARN_ON(gc->cq_table[cq->queue.id]);
gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
if (!gdma_cq)
return -ENOMEM;
@@ -122,7 +92,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
gdma_cq->cq.context = cq;
gdma_cq->type = GDMA_CQ;
gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq->id = cq->queue.id;
+ gc->cq_table[cq->queue.id] = gdma_cq;
return 0;
}
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee..4524c6b80 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,49 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
}
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue)
+{
+ struct ib_umem *umem;
+ int err;
+
+ queue->umem = NULL;
+ queue->id = INVALID_QUEUE_ID;
+ queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+ umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+ return err;
+ }
+
+ err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+ if (err) {
+ ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+ goto free_umem;
+ }
+ queue->umem = umem;
+
+ ibdev_dbg(&mdev->ib_dev,
+ "create_dma_region ret %d gdma_region 0x%llx\n",
+ err, queue->gdma_region);
+
+ return 0;
+free_umem:
+ ib_umem_release(umem);
+ return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+ /* Ignore return code as there is not much we can do about it.
+ * The error message is printed inside.
+ */
+ mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+ ib_umem_release(queue->umem);
+}
+
static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc,
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eeb..6acb5c281 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
u32 max_inline_data_size;
};
+struct mana_ib_queue {
+ struct ib_umem *umem;
+ u64 gdma_region;
+ u64 id;
+};
+
struct mana_ib_dev {
struct ib_device ib_dev;
struct gdma_dev *gdma_dev;
@@ -82,10 +88,8 @@ struct mana_ib_mr {
struct mana_ib_cq {
struct ib_cq ibcq;
- struct ib_umem *umem;
+ struct mana_ib_queue queue;
int cqe;
- u64 gdma_region;
- u64 id;
u32 comp_vector;
};
@@ -169,6 +173,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+ struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index b70b13484..13a49d8fd 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -112,6 +112,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
"start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
start, iova, length, access_flags);
+ access_flags &= ~IB_ACCESS_OPTIONAL;
if (access_flags & ~VALID_MR_FLAGS)
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745..d7485ee6a 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
wq_spec.gdma_region = wq->gdma_region;
wq_spec.queue_size = wq->wq_buf_size;
- cq_spec.gdma_region = cq->gdma_region;
+ cq_spec.gdma_region = cq->queue.gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* The GDMA regions are now owned by the WQ object */
wq->gdma_region = GDMA_INVALID_DMA_REGION;
- cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
wq->id = wq_spec.queue_index;
- cq->id = cq_spec.queue_index;
+ cq->queue.id = cq_spec.queue_index;
ibdev_dbg(&mdev->ib_dev,
"ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
- ret, wq->rx_object, wq->id, cq->id);
+ ret, wq->rx_object, wq->id, cq->queue.id);
- resp.entries[i].cqid = cq->id;
+ resp.entries[i].cqid = cq->queue.id;
resp.entries[i].wqid = wq->id;
mana_ind_table[i] = wq->rx_object;
@@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
if (ret)
goto fail;
- gdma_cq_allocated[i] = gc->cq_table[cq->id];
+ gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
}
resp.num_entries = i;
@@ -264,7 +264,7 @@ fail:
wq = container_of(ibwq, struct mana_ib_wq, ibwq);
cq = container_of(ibcq, struct mana_ib_cq, ibcq);
- gc->cq_table[cq->id] = NULL;
+ gc->cq_table[cq->queue.id] = NULL;
kfree(gdma_cq_allocated[i]);
mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
wq_spec.gdma_region = qp->sq_gdma_region;
wq_spec.queue_size = ucmd.sq_buf_size;
- cq_spec.gdma_region = send_cq->gdma_region;
+ cq_spec.gdma_region = send_cq->queue.gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* The GDMA regions are now owned by the WQ object */
qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
- send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+ send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
qp->sq_id = wq_spec.queue_index;
- send_cq->id = cq_spec.queue_index;
+ send_cq->queue.id = cq_spec.queue_index;
/* Create CQ table entry */
err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
- qp->tx_object, qp->sq_id, send_cq->id);
+ qp->tx_object, qp->sq_id, send_cq->queue.id);
resp.sqid = qp->sq_id;
- resp.cqid = send_cq->id;
+ resp.cqid = send_cq->queue.id;
resp.tx_vp_offset = pd->tx_vp_offset;
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err_release_gdma_cq:
kfree(gdma_cq);
- gc->cq_table[send_cq->id] = NULL;
+ gc->cq_table[send_cq->queue.id] = NULL;
err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2b557e64..9fb8a5442 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3760,10 +3760,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
spin_lock_init(&dev->dm.lock);
dev->dm.dev = mdev;
return 0;
-err:
- mlx5r_macsec_dealloc_gids(dev);
err_mp:
mlx5_ib_cleanup_multiport_master(dev);
+err:
+ mlx5r_macsec_dealloc_gids(dev);
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 96ffbbaf0..5a22be14d 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/io.h>
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include <linux/jiffies.h>
@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
__be32 mmio_wqe[16] = {};
unsigned long flags;
unsigned int idx;
- int i;
if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
return -EIO;
@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
* we hit doorbell
*/
wmb();
- for (i = 0; i < 8; i++)
- mlx5_write64(&mmio_wqe[i * 2],
- bf->bfreg->map + bf->offset + i * 8);
- io_stop_wc();
+ __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
+ sizeof(mmio_wqe) / 8);
bf->offset ^= bf->buf_size;
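The doorbell blast above is a single 64-byte buffer (sixteen 32-bit words), and __iowrite64_copy() takes its count in 64-bit units, which is why the count argument is sizeof(mmio_wqe) / 8. A trivial userspace check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mmio_wqe[16];

        printf("bytes=%zu quadwords=%zu\n",
               sizeof(mmio_wqe), sizeof(mmio_wqe) / 8);
        return 0;
}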
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a8de35c07..f255a12e2 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
- /* User Mkey must hold either a rb_key or a cache_ent. */
+ /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
struct mlx5r_cache_rb_key rb_key;
struct mlx5_cache_ent *cache_ent;
+ u8 cacheable : 1;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a8ee2ca1f..d3c1f6379 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -246,6 +246,7 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
MLX5_SET(mkc, mkc, access_mode_4_2,
(ent->rb_key.access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats);
MLX5_SET(mkc, mkc, translations_octword_size,
get_mkc_octo_size(ent->rb_key.access_mode,
@@ -641,10 +642,8 @@ static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
new = &((*new)->rb_left);
if (cmp < 0)
new = &((*new)->rb_right);
- if (cmp == 0) {
- mutex_unlock(&cache->rb_lock);
+ if (cmp == 0)
return -EEXIST;
- }
}
/* Add new node and rebalance tree. */
@@ -719,6 +718,8 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
}
mr->mmkey.cache_ent = ent;
mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.rb_key = ent->rb_key;
+ mr->mmkey.cacheable = true;
init_waitqueue_head(&mr->mmkey.wait);
return mr;
}
@@ -1158,6 +1159,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
if (IS_ERR(mr))
return mr;
mr->mmkey.rb_key = rb_key;
+ mr->mmkey.cacheable = true;
return mr;
}
@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
unsigned int diffs = current_access_flags ^ target_access_flags;
if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+ IB_ACCESS_REMOTE_ATOMIC))
return false;
return mlx5r_umr_can_reconfig(dev, current_access_flags,
target_access_flags);
@@ -1835,6 +1838,23 @@ end:
return ret;
}
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+ if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+ return 0;
+
+ if (ent) {
+ spin_lock_irq(&ent->mkeys_queue.lock);
+ ent->in_use--;
+ mr->mmkey.cache_ent = NULL;
+ spin_unlock_irq(&ent->mkeys_queue.lock);
+ }
+ return destroy_mkey(dev, mr);
+}
+
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
- if (mlx5r_umr_revoke_mr(mr) ||
- cache_ent_find_and_store(dev, mr))
- mr->mmkey.cache_ent = NULL;
-
- if (!mr->mmkey.cache_ent) {
- rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
- if (rc)
- return rc;
- }
+ rc = mlx5_revoke_mr(mr);
+ if (rc)
+ return rc;
if (mr->umem) {
bool is_odp = is_odp_mr(mr);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index a056ea835..84be0c3d5 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -199,17 +199,20 @@ int mlx5_ib_create_srq(struct ib_srq *ib_srq,
int err;
struct mlx5_srq_attr in = {};
__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ __u32 max_sge_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
if (init_attr->srq_type != IB_SRQT_BASIC &&
init_attr->srq_type != IB_SRQT_XRC &&
init_attr->srq_type != IB_SRQT_TM)
return -EOPNOTSUPP;
- /* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= max_srq_wqes) {
- mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
- init_attr->attr.max_wr,
- max_srq_wqes);
+ /* Sanity check SRQ and sge size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes ||
+ init_attr->attr.max_sge > max_sge_sz) {
+ mlx5_ib_dbg(dev, "max_wr %d,wr_cap %d,max_sge %d, sge_cap:%d\n",
+ init_attr->attr.max_wr, max_srq_wqes,
+ init_attr->attr.max_sge, max_sge_sz);
return -EINVAL;
}
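The new bound divides the device's maximum receive WQE size by the size of one scatter/gather descriptor to get the largest SGE count a single SRQ WQE can carry. Illustrative arithmetic with example numbers (the real caps come from MLX5_CAP_GEN(); the 16-byte descriptor size is an assumption here):

#include <stdio.h>

int main(void)
{
        unsigned int max_wqe_sz_rq = 512;  /* example device capability, in bytes */
        unsigned int data_seg_sz   = 16;   /* assumed sizeof(struct mlx5_wqe_data_seg) */

        printf("max_sge = %u\n", max_wqe_sz_rq / data_seg_sz);
        return 0;
}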
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index b78b8c085..c997b7cbf 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -131,12 +131,12 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
int must_sched;
- skb_queue_tail(&qp->resp_pkts, skb);
-
- must_sched = skb_queue_len(&qp->resp_pkts) > 1;
+ must_sched = skb_queue_len(&qp->resp_pkts) > 0;
if (must_sched != 0)
rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);
+ skb_queue_tail(&qp->resp_pkts, skb);
+
if (must_sched)
rxe_sched_task(&qp->comp.task);
else
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index cd5966615..e5827064a 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -366,18 +366,10 @@ static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else {
- rxe_dbg_qp(pkt->qp, "Unknown layer 3 protocol: %d\n",
- skb->protocol);
- atomic_dec(&pkt->qp->skb_out);
- rxe_put(pkt->qp);
- kfree_skb(skb);
- return -EINVAL;
- }
if (unlikely(net_xmit_eval(err))) {
rxe_dbg_qp(pkt->qp, "error sending packet: %d\n", err);
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 963382f62..fa2b87c74 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -354,6 +354,19 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
* receive buffer later. For rmda operations additional
* length checks are performed in check_rkey.
*/
+ if ((qp_type(qp) == IB_QPT_GSI) || (qp_type(qp) == IB_QPT_UD)) {
+ unsigned int payload = payload_size(pkt);
+ unsigned int recv_buffer_len = 0;
+ int i;
+
+ for (i = 0; i < qp->resp.wqe->dma.num_sge; i++)
+ recv_buffer_len += qp->resp.wqe->dma.sge[i].length;
+ if (payload + 40 > recv_buffer_len) {
+ rxe_dbg_qp(qp, "The receive buffer is too small for this UD packet.\n");
+ return RESPST_ERR_LENGTH;
+ }
+ }
+
if (pkt->mask & RXE_PAYLOAD_MASK && ((qp_type(qp) == IB_QPT_RC) ||
(qp_type(qp) == IB_QPT_UC))) {
unsigned int mtu = qp->mtu;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 614581989..a7e951066 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -812,7 +812,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
int i;
for (i = 0; i < ibwr->num_sge; i++, sge++) {
- memcpy(p, ib_virt_dma_to_page(sge->addr), sge->length);
+ memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
p += sge->length;
}
}
@@ -888,6 +888,7 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
{
int err = 0;
unsigned long flags;
+ int good = 0;
spin_lock_irqsave(&qp->sq.sq_lock, flags);
while (ibwr) {
@@ -895,12 +896,15 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
if (err) {
*bad_wr = ibwr;
break;
+ } else {
+ good++;
}
ibwr = ibwr->next;
}
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
- if (!err)
+ /* kickoff processing of any posted wqes */
+ if (good)
rxe_sched_task(&qp->req.task);
spin_lock_irqsave(&qp->state_lock, flags);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 4bd161e86..562df2b3e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -184,8 +184,12 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = ipoib_priv(pdev);
- snprintf(intf_name, sizeof(intf_name), "%s.%04x",
- ppriv->dev->name, pkey);
+ /* If you increase IFNAMSIZ, update snprintf below
+ * to allow longer names.
+ */
+ BUILD_BUG_ON(IFNAMSIZ != 16);
+ snprintf(intf_name, sizeof(intf_name), "%.10s.%04x", ppriv->dev->name,
+ pkey);
ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (IS_ERR(ndev)) {
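The "%.10s.%04x" format is what makes the name provably fit in IFNAMSIZ (pinned to 16 by the BUILD_BUG_ON above): at most 10 parent-name bytes, a dot, four hex digits and the terminating NUL add up to exactly 16. A quick userspace check:

#include <stdio.h>

int main(void)
{
        char intf_name[16];
        int n = snprintf(intf_name, sizeof(intf_name), "%.10s.%04x",
                         "ib0123456789longname", 0x8001);

        printf("\"%s\" (%d chars, truncated: %s)\n",
               intf_name, n, n >= (int)sizeof(intf_name) ? "yes" : "no");
        return 0;
}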
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 711485437..fd4997ba2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1378,19 +1378,19 @@ static int input_print_modalias_bits(char *buf, int size,
char name, const unsigned long *bm,
unsigned int min_bit, unsigned int max_bit)
{
- int len = 0, i;
+ int bit = min_bit;
+ int len = 0;
len += snprintf(buf, max(size, 0), "%c", name);
- for (i = min_bit; i < max_bit; i++)
- if (bm[BIT_WORD(i)] & BIT_MASK(i))
- len += snprintf(buf + len, max(size - len, 0), "%X,", i);
+ for_each_set_bit_from(bit, bm, max_bit)
+ len += snprintf(buf + len, max(size - len, 0), "%X,", bit);
return len;
}
-static int input_print_modalias(char *buf, int size, const struct input_dev *id,
- int add_cr)
+static int input_print_modalias_parts(char *buf, int size, int full_len,
+ const struct input_dev *id)
{
- int len;
+ int len, klen, remainder, space;
len = snprintf(buf, max(size, 0),
"input:b%04Xv%04Xp%04Xe%04X-",
@@ -1399,8 +1399,48 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
len += input_print_modalias_bits(buf + len, size - len,
'e', id->evbit, 0, EV_MAX);
- len += input_print_modalias_bits(buf + len, size - len,
+
+ /*
+ * Calculate the remaining space in the buffer making sure we
+ * have place for the terminating 0.
+ */
+ space = max(size - (len + 1), 0);
+
+ klen = input_print_modalias_bits(buf + len, size - len,
'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
+ len += klen;
+
+ /*
+ * If we have more data than we can fit in the buffer, check
+ * if we can trim key data to fit in the rest. We will indicate
+ * that key data is incomplete by adding "+" sign at the end, like
+ * this: "k1,2,3,45,+,".
+ *
+ * Note that the shortest key info (if present) is "k+," so we
+ * can only try to trim if key data is longer than that.
+ */
+ if (full_len && size < full_len + 1 && klen > 3) {
+ remainder = full_len - len;
+ /*
+ * We can only trim if we have space for the remainder
+ * and also for at least "k+," which is 3 more characters.
+ */
+ if (remainder <= space - 3) {
+ /*
+ * We are guaranteed to have 'k' in the buffer, so
+ * we need at least 3 additional bytes for storing
+ * "+," in addition to the remainder.
+ */
+ for (int i = size - 1 - remainder - 3; i >= 0; i--) {
+ if (buf[i] == 'k' || buf[i] == ',') {
+ strcpy(buf + i + 1, "+,");
+ len = i + 3; /* Not counting '\0' */
+ break;
+ }
+ }
+ }
+ }
+
len += input_print_modalias_bits(buf + len, size - len,
'r', id->relbit, 0, REL_MAX);
len += input_print_modalias_bits(buf + len, size - len,
@@ -1416,12 +1456,25 @@ static int input_print_modalias(char *buf, int size, const struct input_dev *id,
len += input_print_modalias_bits(buf + len, size - len,
'w', id->swbit, 0, SW_MAX);
- if (add_cr)
- len += snprintf(buf + len, max(size - len, 0), "\n");
-
return len;
}
+static int input_print_modalias(char *buf, int size, const struct input_dev *id)
+{
+ int full_len;
+
+ /*
+ * Printing is done in 2 passes: first one figures out total length
+ * needed for the modalias string, second one will try to trim key
+ * data in case when buffer is too small for the entire modalias.
+ * If the buffer is too small regardless, it will fill as much as it
+ * can (without trimming key data) into the buffer and leave it to
+ * the caller to figure out what to do with the result.
+ */
+ full_len = input_print_modalias_parts(NULL, 0, 0, id);
+ return input_print_modalias_parts(buf, size, full_len, id);
+}
+
static ssize_t input_dev_show_modalias(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1429,7 +1482,9 @@ static ssize_t input_dev_show_modalias(struct device *dev,
struct input_dev *id = to_input_dev(dev);
ssize_t len;
- len = input_print_modalias(buf, PAGE_SIZE, id, 1);
+ len = input_print_modalias(buf, PAGE_SIZE, id);
+ if (len < PAGE_SIZE - 2)
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return min_t(int, len, PAGE_SIZE);
}
@@ -1641,6 +1696,23 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
return 0;
}
+/*
+ * This is a pretty gross hack. When building uevent data the driver core
+ * may try adding more environment variables to kobj_uevent_env without
+ * telling us, so we have no idea how much of the buffer we can use to
+ * avoid overflows/-ENOMEM elsewhere. To work around this let's artificially
+ * reduce amount of memory we will use for the modalias environment variable.
+ *
+ * The potential additions are:
+ *
+ * SEQNUM=18446744073709551615 - (%llu - 28 bytes)
+ * HOME=/ (6 bytes)
+ * PATH=/sbin:/bin:/usr/sbin:/usr/bin (34 bytes)
+ *
+ * 68 bytes total. Allow extra buffer - 96 bytes
+ */
+#define UEVENT_ENV_EXTRA_LEN 96
+
static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
const struct input_dev *dev)
{
@@ -1650,9 +1722,11 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
return -ENOMEM;
len = input_print_modalias(&env->buf[env->buflen - 1],
- sizeof(env->buf) - env->buflen,
- dev, 0);
- if (len >= (sizeof(env->buf) - env->buflen))
+ (int)sizeof(env->buf) - env->buflen -
+ UEVENT_ENV_EXTRA_LEN,
+ dev);
+ if (len >= ((int)sizeof(env->buf) - env->buflen -
+ UEVENT_ENV_EXTRA_LEN))
return -ENOMEM;
env->buflen += len;
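The restructured modalias code takes two passes: a first call with a NULL buffer measures the full string, and the second pass uses that length to decide whether and where key data must be trimmed. The same measure-then-emit idiom in a standalone userspace sketch (illustrative payload, not the real modalias format):

#include <stdio.h>
#include <stdlib.h>

static int print_parts(char *buf, int size, const char *payload)
{
        /* snprintf() reports the length it would have written, even with size 0 */
        return snprintf(buf, size > 0 ? (size_t)size : 0, "input:%s", payload);
}

int main(void)
{
        const char *payload = "b0003v045Ep0001e0-k1,2,3,";
        int full_len = print_parts(NULL, 0, payload);   /* pass 1: measure */
        char *buf = malloc(full_len + 1);

        if (!buf)
                return 1;
        print_parts(buf, full_len + 1, payload);        /* pass 2: emit */
        printf("%d bytes: %s\n", full_len, buf);
        free(buf);
        return 0;
}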
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6e8cc28de..80d16c92a 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -42,8 +42,8 @@ struct ims_pcu_backlight {
#define IMS_PCU_PART_NUMBER_LEN 15
#define IMS_PCU_SERIAL_NUMBER_LEN 8
#define IMS_PCU_DOM_LEN 8
-#define IMS_PCU_FW_VERSION_LEN (9 + 1)
-#define IMS_PCU_BL_VERSION_LEN (9 + 1)
+#define IMS_PCU_FW_VERSION_LEN 16
+#define IMS_PCU_BL_VERSION_LEN 16
#define IMS_PCU_BL_RESET_REASON_LEN (2 + 1)
#define IMS_PCU_PCU_B_DEVICE_ID 5
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 5c288fe7a..79f478d3a 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -13,7 +13,8 @@
#define VIB_MAX_LEVEL_mV (3100)
#define VIB_MIN_LEVEL_mV (1200)
-#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV)
+#define VIB_PER_STEP_mV (100)
+#define VIB_MAX_LEVELS (VIB_MAX_LEVEL_mV - VIB_MIN_LEVEL_mV + VIB_PER_STEP_mV)
#define MAX_FF_SPEED 0xff
@@ -117,10 +118,10 @@ static void pm8xxx_work_handler(struct work_struct *work)
vib->active = true;
vib->level = ((VIB_MAX_LEVELS * vib->speed) / MAX_FF_SPEED) +
VIB_MIN_LEVEL_mV;
- vib->level /= 100;
+ vib->level /= VIB_PER_STEP_mV;
} else {
vib->active = false;
- vib->level = VIB_MIN_LEVEL_mV / 100;
+ vib->level = VIB_MIN_LEVEL_mV / VIB_PER_STEP_mV;
}
pm8xxx_vib_set(vib, vib->active);
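The macro change counts the 100 mV steps inclusively: between 1200 mV and 3100 mV there are (3100 - 1200) / 100 + 1 = 20 distinct levels, so VIB_MAX_LEVELS now accounts for both boundaries of the range. The arithmetic, spelled out:

#include <stdio.h>

int main(void)
{
        const int min_mv = 1200, max_mv = 3100, step_mv = 100;

        printf("inclusive levels = %d\n", (max_mv - min_mv) / step_mv + 1);
        return 0;
}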
diff --git a/drivers/input/mouse/cyapa.c b/drivers/input/mouse/cyapa.c
index 5979deabe..256f757a1 100644
--- a/drivers/input/mouse/cyapa.c
+++ b/drivers/input/mouse/cyapa.c
@@ -1347,10 +1347,16 @@ static int cyapa_suspend(struct device *dev)
u8 power_mode;
int error;
- error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ error = mutex_lock_interruptible(&cyapa->input->mutex);
if (error)
return error;
+ error = mutex_lock_interruptible(&cyapa->state_sync_lock);
+ if (error) {
+ mutex_unlock(&cyapa->input->mutex);
+ return error;
+ }
+
/*
* Runtime PM is enable only when device is in operational mode and
* users in use, so need check it before disable it to
@@ -1385,6 +1391,8 @@ static int cyapa_suspend(struct device *dev)
cyapa->irq_wake = (enable_irq_wake(client->irq) == 0);
mutex_unlock(&cyapa->state_sync_lock);
+ mutex_unlock(&cyapa->input->mutex);
+
return 0;
}
@@ -1394,6 +1402,7 @@ static int cyapa_resume(struct device *dev)
struct cyapa *cyapa = i2c_get_clientdata(client);
int error;
+ mutex_lock(&cyapa->input->mutex);
mutex_lock(&cyapa->state_sync_lock);
if (device_may_wakeup(dev) && cyapa->irq_wake) {
@@ -1412,6 +1421,7 @@ static int cyapa_resume(struct device *dev)
enable_irq(client->irq);
mutex_unlock(&cyapa->state_sync_lock);
+ mutex_unlock(&cyapa->input->mutex);
return 0;
}
diff --git a/drivers/input/serio/ioc3kbd.c b/drivers/input/serio/ioc3kbd.c
index 50552dc7b..676b0bda3 100644
--- a/drivers/input/serio/ioc3kbd.c
+++ b/drivers/input/serio/ioc3kbd.c
@@ -200,9 +200,16 @@ static void ioc3kbd_remove(struct platform_device *pdev)
serio_unregister_port(d->aux);
}
+static const struct platform_device_id ioc3kbd_id_table[] = {
+ { "ioc3-kbd", },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, ioc3kbd_id_table);
+
static struct platform_driver ioc3kbd_driver = {
.probe = ioc3kbd_probe,
.remove_new = ioc3kbd_remove,
+ .id_table = ioc3kbd_id_table,
.driver = {
.name = "ioc3-kbd",
},
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 96735800b..ba4cc0868 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -164,7 +164,7 @@ static struct qcom_icc_node mas_snoc_bimc = {
.name = "mas_snoc_bimc",
.buswidth = 16,
.qos.ap_owned = true,
- .qos.qos_port = 2,
+ .qos.qos_port = 6,
.qos.qos_mode = NOC_QOS_MODE_BYPASS,
.mas_rpm_id = 164,
.slv_rpm_id = -1,
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index ac6754a85..e740dc54c 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1678,8 +1678,17 @@ static void __init free_pci_segments(void)
}
}
+static void __init free_sysfs(struct amd_iommu *iommu)
+{
+ if (iommu->iommu.dev) {
+ iommu_device_unregister(&iommu->iommu);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
+}
+
static void __init free_iommu_one(struct amd_iommu *iommu)
{
+ free_sysfs(iommu);
free_cwwb_sem(iommu);
free_command_buffer(iommu);
free_event_buffer(iommu);
@@ -2097,6 +2106,8 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_max_glx_val = glxval;
else
amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+
+ iommu_enable_gt(iommu);
}
if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
@@ -2773,7 +2784,6 @@ static void early_enable_iommu(struct amd_iommu *iommu)
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
iommu_set_exclusion_range(iommu);
- iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
@@ -2830,7 +2840,6 @@ static void early_enable_iommus(void)
iommu_disable_irtcachedis(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
- iommu_enable_gt(iommu);
iommu_enable_ga(iommu);
iommu_enable_xt(iommu);
iommu_enable_irtcachedis(iommu);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 41f93c3ab..3afec8714 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3402,7 +3402,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
- devm_add_action(dev, arm_smmu_free_msis, dev);
+ devm_add_action_or_reset(dev, arm_smmu_free_msis, dev);
}
static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index a7ecd9030..e4a03588a 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -221,12 +221,11 @@ int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
-static int dmar_map_gfx = 1;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
static int iommu_skip_te_disable;
+static int disable_igfx_iommu;
-#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
@@ -265,7 +264,7 @@ static int __init intel_iommu_setup(char *str)
no_platform_optin = 1;
pr_info("IOMMU disabled\n");
} else if (!strncmp(str, "igfx_off", 8)) {
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
pr_info("Disable GFX device mapping\n");
} else if (!strncmp(str, "forcedac", 8)) {
pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
@@ -2402,9 +2401,6 @@ static int device_def_domain_type(struct device *dev)
if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
return IOMMU_DOMAIN_IDENTITY;
-
- if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
- return IOMMU_DOMAIN_IDENTITY;
}
return 0;
@@ -2705,9 +2701,6 @@ static int __init init_dmars(void)
iommu_set_root_entry(iommu);
}
- if (!dmar_map_gfx)
- iommu_identity_mapping |= IDENTMAP_GFX;
-
check_tylersburg_isoch();
ret = si_domain_init(hw_pass_through);
@@ -2798,7 +2791,7 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx)
+ if (disable_igfx_iommu)
drhd->ignored = 1;
}
}
@@ -4875,7 +4868,7 @@ static void quirk_iommu_igfx(struct pci_dev *dev)
return;
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
- dmar_map_gfx = 0;
+ disable_igfx_iommu = 1;
}
/* G4x/GM45 integrated gfx dmar support is totally busted. */
@@ -4956,8 +4949,8 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
pci_info(dev, "BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
- dmar_map_gfx = 0;
- } else if (dmar_map_gfx) {
+ disable_igfx_iommu = 1;
+ } else if (!disable_igfx_iommu) {
/* we have to ensure the gfx device is idle before we flush */
pci_info(dev, "Disabling batched IOTLB flush on Ironlake\n");
iommu_set_dma_strict();
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index a95a483de..659a77f7b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3317,15 +3317,26 @@ EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
static int __iommu_set_group_pasid(struct iommu_domain *domain,
struct iommu_group *group, ioasid_t pasid)
{
- struct group_device *device;
- int ret = 0;
+ struct group_device *device, *last_gdev;
+ int ret;
for_each_group_device(group, device) {
ret = domain->ops->set_dev_pasid(domain, device->dev, pasid);
if (ret)
- break;
+ goto err_revert;
}
+ return 0;
+
+err_revert:
+ last_gdev = device;
+ for_each_group_device(group, device) {
+ const struct iommu_ops *ops = dev_iommu_ops(device->dev);
+
+ if (device == last_gdev)
+ break;
+ ops->remove_dev_pasid(device->dev, pasid);
+ }
return ret;
}
@@ -3383,10 +3394,8 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
}
ret = __iommu_set_group_pasid(domain, group, pasid);
- if (ret) {
- __iommu_remove_group_pasid(group, pasid);
+ if (ret)
xa_erase(&group->pasid_array, pasid);
- }
out_unlock:
mutex_unlock(&group->mutex);
return ret;
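__iommu_set_group_pasid() now unwinds on partial failure: it remembers the device whose set_dev_pasid() call failed, then walks the group again removing the PASID from every device that came before it. The shape of that rollback as a standalone sketch (illustrative names; the real code iterates group devices and calls ops->remove_dev_pasid()):

#include <stdio.h>

#define N 4

static int set_one(int i)    { return i == 2 ? -1 : 0; }  /* element 2 fails */
static void unset_one(int i) { printf("undo %d\n", i); }

static int set_all(void)
{
        int i, failed = -1;

        for (i = 0; i < N; i++) {
                if (set_one(i)) {
                        failed = i;
                        break;
                }
        }
        if (failed < 0)
                return 0;

        for (i = 0; i < N && i != failed; i++)  /* stop at the element that failed */
                unset_one(i);

        return -1;
}

int main(void)
{
        set_all();
        return 0;
}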
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 9c8b1349e..a1430ab60 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -165,7 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
return 0;
err_sgi:
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
alpine_msix_free_sgi(priv, sgi, nr_irqs);
return err;
}
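The cleanup count changes from "i - 1" to "i" because when the loop fails while setting up entry i, entries 0 through i - 1 are the ones that exist, i.e. exactly i of them; the old count left the last successfully allocated interrupt behind. The same correction appears below for the Loongson PCH MSI driver. The off-by-one, illustrated:

#include <stdio.h>

int main(void)
{
        int i, failed_at = 3;  /* setting up entry 3 fails */

        for (i = 0; i < 8; i++)
                if (i == failed_at)
                        break;

        printf("allocated=%d, must free %d entries (not %d)\n", i, i, i - 1);
        return 0;
}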
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5f7d3db3a..6dbac6ec7 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1846,28 +1846,22 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
if (!info->map)
return -EINVAL;
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
-
if (!its_dev->event_map.vm) {
struct its_vlpi_map *maps;
maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
GFP_ATOMIC);
- if (!maps) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!maps)
+ return -ENOMEM;
its_dev->event_map.vm = info->map->vm;
its_dev->event_map.vlpi_maps = maps;
} else if (its_dev->event_map.vm != info->map->vm) {
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* Get our private copy of the mapping information */
@@ -1899,46 +1893,32 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
its_dev->event_map.nr_vlpis++;
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
struct its_vlpi_map *map;
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
map = get_vlpi_map(d);
- if (!its_dev->event_map.vm || !map) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !map)
+ return -EINVAL;
/* Copy our mapping information to the incoming request */
*info->map = *map;
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_unmap(struct irq_data *d)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
- int ret = 0;
-
- raw_spin_lock(&its_dev->event_map.vlpi_lock);
- if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
- ret = -EINVAL;
- goto out;
- }
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
/* Drop the virtual mapping */
its_send_discard(its_dev, event);
@@ -1962,9 +1942,7 @@ static int its_vlpi_unmap(struct irq_data *d)
kfree(its_dev->event_map.vlpi_maps);
}
-out:
- raw_spin_unlock(&its_dev->event_map.vlpi_lock);
- return ret;
+ return 0;
}
static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
@@ -1992,6 +1970,8 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
if (!is_v4(its_dev->its))
return -EINVAL;
+ guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock);
+
/* Unmap request? */
if (!info)
return its_vlpi_unmap(d);
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 6e1e1f011..dd4d69917 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -136,7 +136,7 @@ static int pch_msi_middle_domain_alloc(struct irq_domain *domain,
err_hwirq:
pch_msi_free_hwirq(priv, hwirq, nr_irqs);
- irq_domain_free_irqs_parent(domain, virq, i - 1);
+ irq_domain_free_irqs_parent(domain, virq, i);
return err;
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 9e71c4428..4f3a12383 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -253,8 +253,9 @@ IRQCHIP_DECLARE(andes, "andestech,cpu-intc", riscv_intc_init);
static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
{
- struct fwnode_handle *fn;
struct acpi_madt_rintc *rintc;
+ struct fwnode_handle *fn;
+ int rc;
rintc = (struct acpi_madt_rintc *)header;
@@ -273,7 +274,11 @@ static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
return -ENOMEM;
}
- return riscv_intc_init_common(fn, &riscv_intc_chip);
+ rc = riscv_intc_init_common(fn, &riscv_intc_chip);
+ if (rc)
+ irq_domain_free_fwnode(fn);
+
+ return rc;
}
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index f3d4cb9e3..d246b7230 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -85,7 +85,7 @@ struct plic_handler {
struct plic_priv *priv;
};
static int plic_parent_irq __ro_after_init;
-static bool plic_cpuhp_setup_done __ro_after_init;
+static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
@@ -490,10 +490,8 @@ static int plic_probe(struct platform_device *pdev)
unsigned long plic_quirks = 0;
struct plic_handler *handler;
u32 nr_irqs, parent_hwirq;
- struct irq_domain *domain;
struct plic_priv *priv;
irq_hw_number_t hwirq;
- bool cpuhp_setup;
if (is_of_node(dev->fwnode)) {
const struct of_device_id *id;
@@ -552,14 +550,6 @@ static int plic_probe(struct platform_device *pdev)
continue;
}
- /* Find parent domain and register chained handler */
- domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
- if (!plic_parent_irq && domain) {
- plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
- if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
- }
-
/*
* When running in M-mode we need to ignore the S-mode handler.
* Here we assume it always comes later, but that might be a
@@ -600,25 +590,35 @@ done:
goto fail_cleanup_contexts;
/*
- * We can have multiple PLIC instances so setup cpuhp state
+ * We can have multiple PLIC instances so setup global state
* and register syscore operations only once after context
* handlers of all online CPUs are initialized.
*/
- if (!plic_cpuhp_setup_done) {
- cpuhp_setup = true;
+ if (!plic_global_setup_done) {
+ struct irq_domain *domain;
+ bool global_setup = true;
+
for_each_online_cpu(cpu) {
handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present) {
- cpuhp_setup = false;
+ global_setup = false;
break;
}
}
- if (cpuhp_setup) {
+
+ if (global_setup) {
+ /* Find parent domain and register chained handler */
+ domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
+ if (domain)
+ plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
+ if (plic_parent_irq)
+ irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
register_syscore_ops(&plic_irq_syscore_ops);
- plic_cpuhp_setup_done = true;
+ plic_global_setup_done = true;
}
}
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 24fcff682..ba1be15cf 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -552,12 +552,6 @@ int led_classdev_register_ext(struct device *parent,
led_init_core(led_cdev);
#ifdef CONFIG_LEDS_TRIGGERS
- /*
- * If no default trigger was given and hw_control_trigger is set,
- * make it the default trigger.
- */
- if (!led_cdev->default_trigger && led_cdev->hw_control_trigger)
- led_cdev->default_trigger = led_cdev->hw_control_trigger;
led_trigger_set_default(led_cdev);
#endif
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 4e3936a39..e1b414b40 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -53,7 +53,13 @@ static int led_pwm_set(struct led_classdev *led_cdev,
duty = led_dat->pwmstate.period - duty;
led_dat->pwmstate.duty_cycle = duty;
- led_dat->pwmstate.enabled = true;
+ /*
+ * Disabling a PWM doesn't guarantee that it emits the inactive level.
+ * So keep it on. The PWM should only be disabled for suspend, because
+ * otherwise it refuses to suspend. The possible downside is that the
+ * LED might stay (or even go) on.
+ */
+ led_dat->pwmstate.enabled = !(led_cdev->flags & LED_SUSPENDED);
return pwm_apply_might_sleep(led_dat->pwm, &led_dat->pwmstate);
}
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index db9270da5..b6ddf1d47 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -140,24 +140,19 @@ static int macii_probe(void)
/* Initialize the driver */
static int macii_init(void)
{
- unsigned long flags;
int err;
- local_irq_save(flags);
-
err = macii_init_via();
if (err)
- goto out;
+ return err;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
if (err)
- goto out;
+ return err;
macii_state = idle;
-out:
- local_irq_restore(flags);
- return err;
+ return 0;
}
/* initialize the hardware */
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index ead2200f3..033aff11f 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -465,7 +465,7 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
struct cmdq_task *task, *tmp;
unsigned long flags;
- WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
+ WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev) < 0);
spin_lock_irqsave(&thread->chan->lock, flags);
if (list_empty(&thread->task_busy_list))
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 2bba4d6aa..463eb13bd 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -54,7 +54,7 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k;
if (b->ops->is_extents)
@@ -67,7 +67,7 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
const char *err;
for_each_key(b, k, &iter) {
@@ -879,7 +879,7 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
@@ -895,9 +895,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);
- m = bch_btree_iter_init(b, &iter, preceding_key_p);
+ m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
- if (b->ops->insert_fixup(b, k, &iter, replace_key))
+ if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
return status;
status = BTREE_INSERT_STATUS_INSERT;
@@ -1100,33 +1100,33 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
btree_iter_cmp));
}
-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search,
- struct bset_tree *start)
+static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search,
+ struct bset_tree *start)
{
struct bkey *ret = NULL;
- iter->size = ARRAY_SIZE(iter->data);
- iter->used = 0;
+ iter->iter.size = ARRAY_SIZE(iter->stack_data);
+ iter->iter.used = 0;
#ifdef CONFIG_BCACHE_DEBUG
- iter->b = b;
+ iter->iter.b = b;
#endif
for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
- bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
+ bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
}
return ret;
}
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
struct bkey *search)
{
- return __bch_btree_iter_init(b, iter, search, b->set);
+ return __bch_btree_iter_stack_init(b, iter, search, b->set);
}
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
@@ -1293,10 +1293,10 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
int oldsize = bch_count_data(b);
- __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
+ __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
if (start) {
unsigned int i;
@@ -1307,7 +1307,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}
- __btree_sort(b, &iter, start, order, false, state);
+ __btree_sort(b, &iter.iter, start, order, false, state);
EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
@@ -1323,11 +1323,11 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(b, &iter, NULL);
+ bch_btree_iter_stack_init(b, &iter, NULL);
- btree_mergesort(b, new->set->data, &iter, false, true);
+ btree_mergesort(b, new->set->data, &iter.iter, false, true);
bch_time_stats_update(&state->time, start_time);
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index d795c8424..011f6062c 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -321,7 +321,14 @@ struct btree_iter {
#endif
struct btree_iter_set {
struct bkey *k, *end;
- } data[MAX_BSETS];
+ } data[];
+};
+
+/* Fixed-size btree_iter that can be allocated on the stack */
+
+struct btree_iter_stack {
+ struct btree_iter iter;
+ struct btree_iter_set stack_data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
@@ -333,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
-struct bkey *bch_btree_iter_init(struct btree_keys *b,
- struct btree_iter *iter,
- struct bkey *search);
+struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
+ struct btree_iter_stack *iter,
+ struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);
@@ -350,13 +357,14 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}
-#define for_each_key_filter(b, k, iter, filter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+#define for_each_key_filter(b, k, stack_iter, filter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
+ filter));)
-#define for_each_key(b, k, iter) \
- for (bch_btree_iter_init((b), (iter), NULL); \
- ((k) = bch_btree_iter_next(iter));)
+#define for_each_key(b, k, stack_iter) \
+ for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
+ ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
/* Sorting */
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 196cdacce..d011a7154 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1309,7 +1309,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bset_tree *t;
gc->nodes++;
@@ -1570,7 +1570,7 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
@@ -1611,17 +1611,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
- bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
+ bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);
while (1) {
- k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1911,7 +1912,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);
@@ -1919,10 +1920,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);
if (b->level) {
- bch_btree_iter_init(&b->keys, &iter, NULL);
+ bch_btree_iter_stack_init(&b->keys, &iter, NULL);
do {
- k = bch_btree_iter_next_filter(&iter, &b->keys,
+ k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);
@@ -1950,7 +1951,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
@@ -1959,8 +1960,8 @@ static int bch_btree_check_thread(void *arg)
ret = 0;
/* root node keys are checked before thread created */
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -1978,7 +1979,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -2051,7 +2052,7 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct btree_check_state check_state;
/* check and mark root node keys */
@@ -2547,11 +2548,11 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
if (b->level) {
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);
@@ -2580,11 +2581,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
- bch_btree_iter_init(&b->keys, &iter, from);
+ bch_btree_iter_stack_init(&b->keys, &iter, from);
- while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
+ while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+ bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 330bcd9ea..b0819be04 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1914,8 +1914,9 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);
- iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
- sizeof(struct btree_iter_set);
+ iter_size = sizeof(struct btree_iter) +
+ ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+ sizeof(struct btree_iter_set);
c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
if (!c->devices)
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 6956beb55..826b14cae 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -660,7 +660,7 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
goto lock_root;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8827a6f13..792e070cc 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -908,15 +908,15 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;
k = p = NULL;
prev_idx = 0;
- bch_btree_iter_init(&c->root->keys, &iter, NULL);
- k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
+ bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
+ k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);
p = k;
@@ -930,7 +930,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;
while (skip_nr) {
- k = bch_btree_iter_next_filter(&iter,
+ k = bch_btree_iter_next_filter(&iter.iter,
&c->root->keys,
bch_ptr_bad);
if (k)
@@ -979,7 +979,7 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
- struct btree_iter iter;
+ struct btree_iter_stack iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 5eabdb06c..2ac43d1f1 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -154,8 +154,10 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- if (dc->kdelayd_wq)
+ if (dc->kdelayd_wq) {
+ timer_shutdown_sync(&dc->delay_timer);
destroy_workqueue(dc->kdelayd_wq);
+ }
if (dc->read.dev)
dm_put_device(ti, dc->read.dev);
@@ -240,19 +242,18 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ret = delay_class_ctr(ti, &dc->flush, argv);
if (ret)
goto bad;
- max_delay = max(max_delay, dc->write.delay);
- max_delay = max(max_delay, dc->flush.delay);
goto out;
}
ret = delay_class_ctr(ti, &dc->write, argv + 3);
if (ret)
goto bad;
+ max_delay = max(max_delay, dc->write.delay);
+
if (argc == 6) {
ret = delay_class_ctr(ti, &dc->flush, argv + 3);
if (ret)
goto bad;
- max_delay = max(max_delay, dc->flush.delay);
goto out;
}
@@ -267,8 +268,7 @@ out:
* In case of small requested delays, use kthread instead of
* timers and workqueue to achieve better latency.
*/
- dc->worker = kthread_create(&flush_worker_fn, dc,
- "dm-delay-flush-worker");
+ dc->worker = kthread_run(&flush_worker_fn, dc, "dm-delay-flush-worker");
if (IS_ERR(dc->worker)) {
ret = PTR_ERR(dc->worker);
dc->worker = NULL;
@@ -335,7 +335,7 @@ static void delay_presuspend(struct dm_target *ti)
mutex_unlock(&delayed_bios_lock);
if (!delay_is_fast(dc))
- del_timer_sync(&dc->delay_timer);
+ timer_delete(&dc->delay_timer);
flush_delayed_bios(dc, true);
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7f3dc8ee6..417fddebe 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3492,6 +3492,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
limits->dma_alignment = limits->logical_block_size - 1;
+ limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
}
limits->max_integrity_segments = USHRT_MAX;
}
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 059afc24c..0a2d37eb3 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1424,7 +1424,7 @@ __acquires(bitmap->lock)
sector_t chunk = offset >> bitmap->chunkshift;
unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
- sector_t csize;
+ sector_t csize = ((sector_t)1) << bitmap->chunkshift;
int err;
if (page >= bitmap->pages) {
@@ -1433,6 +1433,7 @@ __acquires(bitmap->lock)
* End-of-device while looking for a whole page or
* user set a huge number to sysfs bitmap_set_bits.
*/
+ *blocks = csize - (offset & (csize - 1));
return NULL;
}
err = md_bitmap_checkpage(bitmap, page, create, 0);
@@ -1441,8 +1442,7 @@ __acquires(bitmap->lock)
bitmap->bp[page].map == NULL)
csize = ((sector_t)1) << (bitmap->chunkshift +
PAGE_COUNTER_SHIFT);
- else
- csize = ((sector_t)1) << bitmap->chunkshift;
+
*blocks = csize - (offset & (csize - 1));
if (err < 0)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d874abfc1..2bd1ce9b3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -36,7 +36,6 @@
*/
#include <linux/blkdev.h>
-#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
@@ -6734,6 +6733,9 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
unsigned int offset;
+ if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ break;
+
released = release_stripe_list(conf, conf->temp_inactive_list);
if (released)
clear_bit(R5_DID_ALLOC, &conf->cache_state);
@@ -6770,18 +6772,7 @@ static void raid5d(struct md_thread *thread)
spin_unlock_irq(&conf->device_lock);
md_check_recovery(mddev);
spin_lock_irq(&conf->device_lock);
-
- /*
- * Waiting on MD_SB_CHANGE_PENDING below may deadlock
- * seeing md_check_recovery() is needed to clear
- * the flag when using mdmon.
- */
- continue;
}
-
- wait_event_lock_irq(mddev->sb_wait,
- !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
- conf->device_lock);
}
pr_debug("%d stripes handled\n", handled);
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 559a172eb..da0983499 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -490,6 +490,15 @@ int cec_thread_func(void *_adap)
goto unlock;
}
+ if (adap->transmit_in_progress &&
+ adap->transmit_in_progress_aborted) {
+ if (adap->transmitting)
+ cec_data_cancel(adap->transmitting,
+ CEC_TX_STATUS_ABORTED, 0);
+ adap->transmit_in_progress = false;
+ adap->transmit_in_progress_aborted = false;
+ goto unlock;
+ }
if (adap->transmit_in_progress && timeout) {
/*
* If we timeout, then log that. Normally this does
@@ -771,6 +780,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
{
struct cec_data *data;
bool is_raw = msg_is_raw(msg);
+ int err;
if (adap->devnode.unregistered)
return -ENODEV;
@@ -935,11 +945,13 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
* Release the lock and wait, retake the lock afterwards.
*/
mutex_unlock(&adap->lock);
- wait_for_completion_killable(&data->c);
- if (!data->completed)
- cancel_delayed_work_sync(&data->work);
+ err = wait_for_completion_killable(&data->c);
+ cancel_delayed_work_sync(&data->work);
mutex_lock(&adap->lock);
+ if (err)
+ adap->transmit_in_progress_aborted = true;
+
/* Cancel the transmit if it was interrupted */
if (!data->completed) {
if (data->msg.tx_status & CEC_TX_STATUS_OK)
@@ -1575,9 +1587,12 @@ unconfigure:
*/
static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
{
- if (WARN_ON(adap->is_configuring || adap->is_configured))
+ if (WARN_ON(adap->is_claiming_log_addrs ||
+ adap->is_configuring || adap->is_configured))
return;
+ adap->is_claiming_log_addrs = true;
+
init_completion(&adap->config_completion);
/* Ready to kick off the thread */
@@ -1592,6 +1607,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
wait_for_completion(&adap->config_completion);
mutex_lock(&adap->lock);
}
+ adap->is_claiming_log_addrs = false;
}
/*
diff --git a/drivers/media/cec/core/cec-api.c b/drivers/media/cec/core/cec-api.c
index 67dc79ef1..3ef915344 100644
--- a/drivers/media/cec/core/cec-api.c
+++ b/drivers/media/cec/core/cec-api.c
@@ -178,7 +178,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
CEC_LOG_ADDRS_FL_CDC_ONLY;
mutex_lock(&adap->lock);
- if (!adap->is_configuring &&
+ if (!adap->is_claiming_log_addrs && !adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
!cec_is_busy(adap, fh)) {
err = __cec_s_log_addrs(adap, &log_addrs, block);
@@ -664,6 +664,8 @@ static int cec_release(struct inode *inode, struct file *filp)
list_del_init(&data->xfer_list);
}
mutex_unlock(&adap->lock);
+
+ mutex_lock(&fh->lock);
while (!list_empty(&fh->msgs)) {
struct cec_msg_entry *entry =
list_first_entry(&fh->msgs, struct cec_msg_entry, list);
@@ -681,6 +683,7 @@ static int cec_release(struct inode *inode, struct file *filp)
kfree(entry);
}
}
+ mutex_unlock(&fh->lock);
kfree(fh);
cec_put_device(devnode);
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 263887592..231b45632 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2176,6 +2176,11 @@ static int lgdt3306a_probe(struct i2c_client *client)
struct dvb_frontend *fe;
int ret;
+ if (!client->dev.platform_data) {
+ dev_err(&client->dev, "platform data is mandatory\n");
+ return -EINVAL;
+ }
+
config = kmemdup(client->dev.platform_data,
sizeof(struct lgdt3306a_config), GFP_KERNEL);
if (config == NULL) {
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 4ebbcf05c..91e9c3783 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -1381,57 +1381,57 @@ static int config_ts(struct mxl *state, enum MXL_HYDRA_DEMOD_ID_E demod_id,
u32 nco_count_min = 0;
u32 clk_type = 0;
- struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_sync_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 8, 1}, {0x90700010, 9, 1},
{0x90700010, 10, 1}, {0x90700010, 11, 1},
{0x90700010, 12, 1}, {0x90700010, 13, 1},
{0x90700010, 14, 1}, {0x90700010, 15, 1} };
- struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_clock_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 16, 1}, {0x90700010, 17, 1},
{0x90700010, 18, 1}, {0x90700010, 19, 1},
{0x90700010, 20, 1}, {0x90700010, 21, 1},
{0x90700010, 22, 1}, {0x90700010, 23, 1} };
- struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_valid_polarity[MXL_HYDRA_DEMOD_MAX] = {
{0x90700014, 0, 1}, {0x90700014, 1, 1},
{0x90700014, 2, 1}, {0x90700014, 3, 1},
{0x90700014, 4, 1}, {0x90700014, 5, 1},
{0x90700014, 6, 1}, {0x90700014, 7, 1} };
- struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_ts_clock_phase[MXL_HYDRA_DEMOD_MAX] = {
{0x90700018, 0, 3}, {0x90700018, 4, 3},
{0x90700018, 8, 3}, {0x90700018, 12, 3},
{0x90700018, 16, 3}, {0x90700018, 20, 3},
{0x90700018, 24, 3}, {0x90700018, 28, 3} };
- struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_lsb_first[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 16, 1}, {0x9070000C, 17, 1},
{0x9070000C, 18, 1}, {0x9070000C, 19, 1},
{0x9070000C, 20, 1}, {0x9070000C, 21, 1},
{0x9070000C, 22, 1}, {0x9070000C, 23, 1} };
- struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_sync_byte[MXL_HYDRA_DEMOD_MAX] = {
{0x90700010, 0, 1}, {0x90700010, 1, 1},
{0x90700010, 2, 1}, {0x90700010, 3, 1},
{0x90700010, 4, 1}, {0x90700010, 5, 1},
{0x90700010, 6, 1}, {0x90700010, 7, 1} };
- struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_enable_output[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 0, 1}, {0x9070000C, 1, 1},
{0x9070000C, 2, 1}, {0x9070000C, 3, 1},
{0x9070000C, 4, 1}, {0x9070000C, 5, 1},
{0x9070000C, 6, 1}, {0x9070000C, 7, 1} };
- struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_err_replace_sync[MXL_HYDRA_DEMOD_MAX] = {
{0x9070000C, 24, 1}, {0x9070000C, 25, 1},
{0x9070000C, 26, 1}, {0x9070000C, 27, 1},
{0x9070000C, 28, 1}, {0x9070000C, 29, 1},
{0x9070000C, 30, 1}, {0x9070000C, 31, 1} };
- struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_err_replace_valid[MXL_HYDRA_DEMOD_MAX] = {
{0x90700014, 8, 1}, {0x90700014, 9, 1},
{0x90700014, 10, 1}, {0x90700014, 11, 1},
{0x90700014, 12, 1}, {0x90700014, 13, 1},
{0x90700014, 14, 1}, {0x90700014, 15, 1} };
- struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_continuous_clock[MXL_HYDRA_DEMOD_MAX] = {
{0x907001D4, 0, 1}, {0x907001D4, 1, 1},
{0x907001D4, 2, 1}, {0x907001D4, 3, 1},
{0x907001D4, 4, 1}, {0x907001D4, 5, 1},
{0x907001D4, 6, 1}, {0x907001D4, 7, 1} };
- struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
+ static const struct MXL_REG_FIELD_T xpt_nco_clock_rate[MXL_HYDRA_DEMOD_MAX] = {
{0x90700044, 16, 80}, {0x90700044, 16, 81},
{0x90700044, 16, 82}, {0x90700044, 16, 83},
{0x90700044, 16, 84}, {0x90700044, 16, 85},
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index f548b1bb7..e932d25ca 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -1475,7 +1475,7 @@ err_mutex:
return ret;
}
-static void __exit et8ek8_remove(struct i2c_client *client)
+static void et8ek8_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev);
@@ -1517,7 +1517,7 @@ static struct i2c_driver et8ek8_i2c_driver = {
.of_match_table = et8ek8_of_table,
},
.probe = et8ek8_probe,
- .remove = __exit_p(et8ek8_remove),
+ .remove = et8ek8_remove,
.id_table = et8ek8_id_table,
};
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 39d321e2b..4577a8977 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1116,25 +1116,24 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
sensor->pixel_rate = sensor->link_freq[0] * 2;
do_div(sensor->pixel_rate, 10);
- /* Verify bus cfg */
- if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) {
- ret = dev_err_probe(dev, -EINVAL,
- "only a 1-lane CSI2 config is supported");
- goto out_free_bus_cfg;
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_warn(dev, "Consider passing 'link-frequencies' in DT\n");
+ goto skip_link_freq_validation;
}
for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
if (bus_cfg.link_frequencies[i] == sensor->link_freq[0])
break;
- if (bus_cfg.nr_of_link_frequencies == 0 ||
- bus_cfg.nr_of_link_frequencies == i) {
+ if (bus_cfg.nr_of_link_frequencies == i) {
ret = dev_err_probe(dev, -EINVAL,
"supported link freq %lld not found\n",
sensor->link_freq[0]);
goto out_free_bus_cfg;
}
+skip_link_freq_validation:
+ ret = 0;
out_free_bus_cfg:
v4l2_fwnode_endpoint_free(&bus_cfg);
return ret;
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 552935ccb..57906df7b 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -768,14 +768,15 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
cur_mode = ov2740->cur_mode;
size = ARRAY_SIZE(link_freq_menu_items);
- ov2740->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
- V4L2_CID_LINK_FREQ,
- size - 1, 0,
- link_freq_menu_items);
+ ov2740->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2740_ctrl_ops,
+ V4L2_CID_LINK_FREQ, size - 1,
+ ov2740->supported_modes->link_freq_index,
+ link_freq_menu_items);
if (ov2740->link_freq)
ov2740->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- pixel_rate = to_pixel_rate(OV2740_LINK_FREQ_360MHZ_INDEX);
+ pixel_rate = to_pixel_rate(ov2740->supported_modes->link_freq_index);
ov2740->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2740_ctrl_ops,
V4L2_CID_PIXEL_RATE, 0,
pixel_rate, 1, pixel_rate);
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 7f67825c8..318e267e7 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -245,15 +245,14 @@ int __must_check media_devnode_register(struct media_device *mdev,
kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
/* Part 3: Add the media and char device */
+ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
ret = cdev_device_add(&devnode->cdev, &devnode->dev);
if (ret < 0) {
+ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
pr_err("%s: cdev_device_add failed\n", __func__);
goto cdev_add_error;
}
- /* Part 4: Activate this minor. The char device can now be used. */
- set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
-
return 0;
cdev_add_error:
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 0e28b9a79..96dd0f6cc 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -619,6 +619,12 @@ static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
link = list_entry(entry->links, typeof(*link), list);
last_link = media_pipeline_walk_pop(walk);
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (not data-link)\n");
+ return 0;
+ }
+
dev_dbg(walk->mdev->dev,
"media pipeline: exploring link '%s':%u -> '%s':%u\n",
link->source->entity->name, link->source->index,
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index e994db4f4..61750cc98 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -15,6 +15,8 @@
#include <media/ipu-bridge.h>
#include <media/v4l2-fwnode.h>
+#define ADEV_DEV(adev) ACPI_PTR(&((adev)->dev))
+
/*
* 92335fcf-3203-4472-af93-7b4453ac29da
*
@@ -87,6 +89,7 @@ static const char * const ipu_vcm_types[] = {
"lc898212axb",
};
+#if IS_ENABLED(CONFIG_ACPI)
/*
* Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
* instead of device and driver match to probe IVSC device.
@@ -100,13 +103,13 @@ static const struct acpi_device_id ivsc_acpi_ids[] = {
static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
{
- acpi_handle handle = acpi_device_handle(adev);
- struct acpi_device *consumer, *ivsc_adev;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
+ struct acpi_device *consumer, *ivsc_adev;
+ acpi_handle handle = acpi_device_handle(adev);
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if exist */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
@@ -118,6 +121,12 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
return NULL;
}
+#else
+static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+{
+ return NULL;
+}
+#endif
static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
@@ -163,7 +172,7 @@ static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
if (!csi_dev) {
acpi_dev_put(adev);
- dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
+ dev_err(ADEV_DEV(adev), "Failed to find MEI CSI dev\n");
return -ENODEV;
}
@@ -182,24 +191,25 @@ static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+ status = acpi_evaluate_object(ACPI_PTR(adev->handle),
+ id, NULL, &buffer);
if (ACPI_FAILURE(status))
return -ENODEV;
obj = buffer.pointer;
if (!obj) {
- dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+ dev_err(ADEV_DEV(adev), "Couldn't locate ACPI buffer\n");
return -ENODEV;
}
if (obj->type != ACPI_TYPE_BUFFER) {
- dev_err(&adev->dev, "Not an ACPI buffer\n");
+ dev_err(ADEV_DEV(adev), "Not an ACPI buffer\n");
ret = -ENODEV;
goto out_free_buff;
}
if (obj->buffer.length > size) {
- dev_err(&adev->dev, "Given buffer is too small\n");
+ dev_err(ADEV_DEV(adev), "Given buffer is too small\n");
ret = -EINVAL;
goto out_free_buff;
}
@@ -220,7 +230,7 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
case IPU_SENSOR_ROTATION_INVERTED:
return 180;
default:
- dev_warn(&adev->dev,
+ dev_warn(ADEV_DEV(adev),
"Unknown rotation %d. Assume 0 degree rotation\n",
ssdb->degree);
return 0;
@@ -230,12 +240,14 @@ static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
{
enum v4l2_fwnode_orientation orientation;
- struct acpi_pld_info *pld;
- acpi_status status;
+ struct acpi_pld_info *pld = NULL;
+ acpi_status status = AE_ERROR;
+#if IS_ENABLED(CONFIG_ACPI)
status = acpi_get_physical_device_location(adev->handle, &pld);
+#endif
if (ACPI_FAILURE(status)) {
- dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
+ dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
@@ -253,7 +265,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
default:
- dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
+ dev_warn(ADEV_DEV(adev), "Unknown _PLD panel val %d\n",
+ pld->panel);
orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
break;
}
@@ -272,12 +285,12 @@ int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
return ret;
if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
- dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
+ dev_warn(ADEV_DEV(adev), "Unknown VCM type %d\n", ssdb.vcmtype);
ssdb.vcmtype = 0;
}
if (ssdb.lanes > IPU_MAX_LANES) {
- dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
+ dev_err(ADEV_DEV(adev), "Number of lanes in SSDB is invalid\n");
return -EINVAL;
}
@@ -465,8 +478,14 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
sensor->ipu_properties);
if (sensor->csi_dev) {
+ const char *device_hid = "";
+
+#if IS_ENABLED(CONFIG_ACPI)
+ device_hid = acpi_device_hid(sensor->ivsc_adev);
+#endif
+
snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
- acpi_device_hid(sensor->ivsc_adev), sensor->link);
+ device_hid, sensor->link);
nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
sensor->ivsc_properties);
@@ -631,11 +650,15 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
{
struct fwnode_handle *fwnode, *primary;
struct ipu_sensor *sensor;
- struct acpi_device *adev;
+ struct acpi_device *adev = NULL;
int ret;
+#if IS_ENABLED(CONFIG_ACPI)
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
+#else
+ while (true) {
+#endif
+ if (!ACPI_PTR(adev->status.enabled))
continue;
if (bridge->n_sensors >= IPU_MAX_PORTS) {
@@ -671,7 +694,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
goto err_free_swnodes;
}
- sensor->adev = acpi_dev_get(adev);
+ sensor->adev = ACPI_PTR(acpi_dev_get(adev));
primary = acpi_fwnode_handle(adev);
primary->secondary = fwnode;
@@ -727,11 +750,16 @@ static int ipu_bridge_ivsc_is_ready(void)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+#if IS_ENABLED(CONFIG_ACPI)
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];
for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
- if (!sensor_adev->status.enabled)
+#else
+ while (true) {
+ sensor_adev = NULL;
+#endif
+ if (!ACPI_PTR(sensor_adev->status.enabled))
continue;
adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index c42adc5a4..00090e7f5 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -1752,11 +1752,6 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
- /* Register notifier for subdevices we care */
- r = cio2_parse_firmware(cio2);
- if (r)
- goto fail_clean_notifier;
-
r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
CIO2_NAME, cio2);
if (r) {
@@ -1764,6 +1759,11 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
goto fail_clean_notifier;
}
+ /* Register notifier for subdevices we care */
+ r = cio2_parse_firmware(cio2);
+ if (r)
+ goto fail_clean_notifier;
+
pm_runtime_put_noidle(dev);
pm_runtime_allow(dev);
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
index 9bcf10a77..97c833a8d 100644
--- a/drivers/media/pci/mgb4/mgb4_core.c
+++ b/drivers/media/pci/mgb4/mgb4_core.c
@@ -642,9 +642,6 @@ static void mgb4_remove(struct pci_dev *pdev)
struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
int i;
-#ifdef CONFIG_DEBUG_FS
- debugfs_remove_recursive(mgbdev->debugfs);
-#endif
#if IS_REACHABLE(CONFIG_HWMON)
hwmon_device_unregister(mgbdev->hwmon_dev);
#endif
@@ -659,6 +656,10 @@ static void mgb4_remove(struct pci_dev *pdev)
if (mgbdev->vin[i])
mgb4_vin_free(mgbdev->vin[i]);
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mgbdev->debugfs);
+#endif
+
device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
free_spi(mgbdev);
free_i2c(mgbdev);
diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c
index 7481f553f..24ec576dc 100644
--- a/drivers/media/pci/ngene/ngene-core.c
+++ b/drivers/media/pci/ngene/ngene-core.c
@@ -1488,7 +1488,9 @@ static int init_channel(struct ngene_channel *chan)
}
if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
- dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
+ ret = dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
+ if (ret != 0)
+ goto err;
set_transfer(chan, 1);
chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
set_transfer(&chan->dev->channel[2], 1);
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 2d7b0508c..6f7d27a48 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -239,10 +239,6 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG);
- ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
- if (ret)
- goto err_disable_pclk;
-
/* Enable DPHY clk and data lanes. */
if (csi2rx->dphy) {
reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
@@ -252,6 +248,13 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
}
writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+
+ ret = csi2rx_configure_ext_dphy(csi2rx);
+ if (ret) {
+ dev_err(csi2rx->dev,
+ "Failed to configure external DPHY: %d\n", ret);
+ goto err_disable_pclk;
+ }
}
/*
@@ -291,14 +294,9 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
reset_control_deassert(csi2rx->sys_rst);
- if (csi2rx->dphy) {
- ret = csi2rx_configure_ext_dphy(csi2rx);
- if (ret) {
- dev_err(csi2rx->dev,
- "Failed to configure external DPHY: %d\n", ret);
- goto err_disable_sysclk;
- }
- }
+ ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true);
+ if (ret)
+ goto err_disable_sysclk;
clk_disable_unprepare(csi2rx->p_clk);
@@ -312,6 +310,10 @@ err_disable_pixclk:
clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
}
+ if (csi2rx->dphy) {
+ writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+ phy_power_off(csi2rx->dphy);
+ }
err_disable_pclk:
clk_disable_unprepare(csi2rx->p_clk);
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index 6bbe55de6..ff23b225d 100644
--- a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -79,6 +79,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
}
fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
index a22b7dfc6..1a2b14a3e 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
@@ -58,13 +58,15 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
return 0;
}
-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
+int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
{
int ret;
ret = pm_runtime_resume_and_get(pm->dev);
if (ret)
dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
+
+ return ret;
}
void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
index 157ea08ba..2e28f25e3 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
@@ -10,7 +10,7 @@
#include "mtk_vcodec_enc_drv.h"
int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
-void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
+int mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
index c402a686f..e83747b8d 100644
--- a/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
@@ -64,7 +64,9 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
ctx->dev->curr_ctx = ctx;
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
- mtk_vcodec_enc_pw_on(&ctx->dev->pm);
+ ret = mtk_vcodec_enc_pw_on(&ctx->dev->pm);
+ if (ret)
+ goto venc_if_encode_pw_on_err;
mtk_vcodec_enc_clock_on(&ctx->dev->pm);
ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
bs_buf, result);
@@ -75,6 +77,7 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
ctx->dev->curr_ctx = NULL;
spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
+venc_if_encode_pw_on_err:
mtk_venc_unlock(ctx);
return ret;
}
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index 792336dad..997a66318 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -59,7 +59,7 @@ enum rvin_isp_id {
#define RVIN_REMOTES_MAX \
(((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
- RVIN_CSI_MAX : RVIN_ISP_MAX)
+ (unsigned int)RVIN_CSI_MAX : (unsigned int)RVIN_ISP_MAX)
/**
* enum rvin_dma_state - DMA states
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
index 47a8c0fb7..99c401e65 100644
--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select REGMAP_MMIO
+ select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
help
Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
index 6da83d0cf..22442fce7 100644
--- a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
+++ b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
@@ -786,15 +786,14 @@ static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
dev_warn(csi->dev,
"Failed to drain DMA. Next frame might be bogus\n");
+ spin_lock_irqsave(&dma->lock, flags);
ret = ti_csi2rx_start_dma(csi, buf);
if (ret) {
- dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
- spin_lock_irqsave(&dma->lock, flags);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
dma->state = TI_CSI2RX_DMA_IDLE;
spin_unlock_irqrestore(&dma->lock, flags);
+ dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
} else {
- spin_lock_irqsave(&dma->lock, flags);
list_add_tail(&buf->list, &dma->submitted);
spin_unlock_irqrestore(&dma->lock, flags);
}
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index f1c5c0a6a..e3e6aa87f 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -62,7 +62,7 @@ struct shark_device {
#ifdef SHARK_USE_LEDS
struct work_struct led_work;
struct led_classdev leds[NO_LEDS];
- char led_names[NO_LEDS][32];
+ char led_names[NO_LEDS][64];
atomic_t brightness[NO_LEDS];
unsigned long brightness_new;
#endif
diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
index 790787f0e..bcb24d896 100644
--- a/drivers/media/usb/b2c2/flexcop-usb.c
+++ b/drivers/media/usb/b2c2/flexcop-usb.c
@@ -515,7 +515,7 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
alt = fc_usb->uintf->cur_altsetting;
- if (alt->desc.bNumEndpoints < 1)
+ if (alt->desc.bNumEndpoints < 2)
return -ENODEV;
if (!usb_endpoint_is_isoc_in(&alt->endpoint[0].desc))
return -ENODEV;
diff --git a/drivers/media/usb/stk1160/stk1160-video.c b/drivers/media/usb/stk1160/stk1160-video.c
index 366f0e4a5..e79c45db6 100644
--- a/drivers/media/usb/stk1160/stk1160-video.c
+++ b/drivers/media/usb/stk1160/stk1160-video.c
@@ -99,7 +99,7 @@ void stk1160_buffer_done(struct stk1160 *dev)
static inline
void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
{
- int linesdone, lineoff, lencopy;
+ int linesdone, lineoff, lencopy, offset;
int bytesperline = dev->width * 2;
struct stk1160_buffer *buf = dev->isoc_ctl.buf;
u8 *dst = buf->mem;
@@ -139,8 +139,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
* Check if we have enough space left in the buffer.
* In that case, we force loop exit after copy.
*/
- if (lencopy > buf->bytesused - buf->length) {
- lencopy = buf->bytesused - buf->length;
+ offset = dst - (u8 *)buf->mem;
+ if (offset > buf->length) {
+ dev_warn_ratelimited(dev->dev, "out of bounds offset\n");
+ return;
+ }
+ if (lencopy > buf->length - offset) {
+ lencopy = buf->length - offset;
remain = lencopy;
}
@@ -182,8 +187,13 @@ void stk1160_copy_video(struct stk1160 *dev, u8 *src, int len)
* Check if we have enough space left in the buffer.
* In that case, we force loop exit after copy.
*/
- if (lencopy > buf->bytesused - buf->length) {
- lencopy = buf->bytesused - buf->length;
+ offset = dst - (u8 *)buf->mem;
+ if (offset > buf->length) {
+ dev_warn_ratelimited(dev->dev, "offset out of bounds\n");
+ return;
+ }
+ if (lencopy > buf->length - offset) {
+ lencopy = buf->length - offset;
remain = lencopy;
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index bbd90123a..91a41aa3c 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/quirks.h>
#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
@@ -2232,6 +2233,9 @@ static int uvc_probe(struct usb_interface *intf,
goto error;
}
+ if (dev->quirks & UVC_QUIRK_NO_RESET_RESUME)
+ udev->quirks &= ~USB_QUIRK_RESET_RESUME;
+
uvc_dbg(dev, PROBE, "UVC device initialized\n");
usb_enable_autosuspend(udev);
return 0;
@@ -2574,6 +2578,33 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_RESTORE_CTRLS_ON_INIT) },
+ /* Logitech Rally Bar Huddle */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x087c,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ /* Logitech Rally Bar */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x089b,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
+ /* Logitech Rally Bar Mini */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x08d3,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_NO_RESET_RESUME) },
/* Chicony CNF7129 (Asus EEE 100HE) */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6fb0a78b1..88218693f 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -73,6 +73,7 @@
#define UVC_QUIRK_FORCE_Y8 0x00000800
#define UVC_QUIRK_FORCE_BPP 0x00001000
#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
+#define UVC_QUIRK_NO_RESET_RESUME 0x00004000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 3ec323bd5..4bb073587 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -563,6 +563,7 @@ void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
{
INIT_LIST_HEAD(&notifier->waiting_list);
INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);
@@ -572,6 +573,7 @@ void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
{
INIT_LIST_HEAD(&notifier->waiting_list);
INIT_LIST_HEAD(&notifier->done_list);
+ INIT_LIST_HEAD(&notifier->notifier_entry);
notifier->sd = sd;
}
EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
@@ -618,16 +620,10 @@ err_unlock:
int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
- int ret;
-
if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
return -EINVAL;
- ret = __v4l2_async_nf_register(notifier);
- if (ret)
- notifier->v4l2_dev = NULL;
-
- return ret;
+ return __v4l2_async_nf_register(notifier);
}
EXPORT_SYMBOL(v4l2_async_nf_register);
@@ -639,7 +635,7 @@ __v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
v4l2_async_nf_unbind_all_subdevs(notifier);
- list_del(&notifier->notifier_entry);
+ list_del_init(&notifier->notifier_entry);
}
void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index d13954bd3..bae73b8c5 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -1036,8 +1036,10 @@ int __video_register_device(struct video_device *vdev,
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);
+ mutex_lock(&videodev_lock);
ret = device_register(&vdev->dev);
if (ret < 0) {
+ mutex_unlock(&videodev_lock);
pr_err("%s: device_register failed\n", __func__);
goto cleanup;
}
@@ -1057,6 +1059,7 @@ int __video_register_device(struct video_device *vdev,
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
+ mutex_unlock(&videodev_lock);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 4c6198c48..19d20871a 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -412,15 +412,6 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
if (WARN_ON(!!sd->enabled_streams == !!enable))
return 0;
-#if IS_REACHABLE(CONFIG_LEDS_CLASS)
- if (!IS_ERR_OR_NULL(sd->privacy_led)) {
- if (enable)
- led_set_brightness(sd->privacy_led,
- sd->privacy_led->max_brightness);
- else
- led_set_brightness(sd->privacy_led, 0);
- }
-#endif
ret = sd->ops->video->s_stream(sd, enable);
if (!enable && ret < 0) {
@@ -428,9 +419,20 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
ret = 0;
}
- if (!ret)
+ if (!ret) {
sd->enabled_streams = enable ? BIT(0) : 0;
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ if (enable)
+ led_set_brightness(sd->privacy_led,
+ sd->privacy_led->max_brightness);
+ else
+ led_set_brightness(sd->privacy_led, 0);
+ }
+#endif
+ }
+
return ret;
}
@@ -732,6 +734,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
rval = v4l2_subdev_call(
@@ -756,6 +759,7 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
memset(&sel, 0, sizeof(sel));
sel.which = crop->which;
sel.pad = crop->pad;
+ sel.stream = crop->stream;
sel.target = V4L2_SEL_TGT_CROP;
sel.r = crop->rect;
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 95ef971b5..b28701138 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -19,7 +19,7 @@ KASAN_SANITIZE_rodata.o := n
KCSAN_SANITIZE_rodata.o := n
KCOV_INSTRUMENT_rodata.o := n
OBJECT_FILES_NON_STANDARD_rodata.o := y
-CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS) $(CC_FLAGS_CFI)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index b93404d65..5b861dbff 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -61,7 +61,7 @@ static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
return fdesc;
}
-static noinline void execute_location(void *dst, bool write)
+static noinline __nocfi void execute_location(void *dst, bool write)
{
void (*func)(void);
func_desc_t fdesc;
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
index 32af2b14f..34c9be437 100644
--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
@@ -69,8 +69,10 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
aux_bus->aux_device_wrapper[1] = kzalloc(sizeof(*aux_bus->aux_device_wrapper[1]),
GFP_KERNEL);
- if (!aux_bus->aux_device_wrapper[1])
- return -ENOMEM;
+ if (!aux_bus->aux_device_wrapper[1]) {
+ retval = -ENOMEM;
+ goto err_aux_dev_add_0;
+ }
retval = ida_alloc(&gp_client_ida, GFP_KERNEL);
if (retval < 0)
@@ -111,6 +113,7 @@ static int gp_aux_bus_probe(struct pci_dev *pdev, const struct pci_device_id *id
err_aux_dev_add_1:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[1]->aux_dev);
+ goto err_aux_dev_add_0;
err_aux_dev_init_1:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[1]->aux_dev.id);
@@ -120,6 +123,7 @@ err_ida_alloc_1:
err_aux_dev_add_0:
auxiliary_device_uninit(&aux_bus->aux_device_wrapper[0]->aux_dev);
+ goto err_ret;
err_aux_dev_init_0:
ida_free(&gp_client_ida, aux_bus->aux_device_wrapper[0]->aux_dev.id);
@@ -127,6 +131,7 @@ err_aux_dev_init_0:
err_ida_alloc_0:
kfree(aux_bus->aux_device_wrapper[0]);
+err_ret:
return retval;
}
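
The hunks above rework the probe's unwind ladder so each failure path releases exactly what the earlier steps set up. For reference, a minimal sketch of the same goto-unwind idiom, with hypothetical names (do_setup() is a stand-in for any step that can fail after allocation), not the driver's actual code:

	#include <linux/slab.h>

	static int example_init(void)
	{
		void *a, *b;
		int ret;

		a = kzalloc(16, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kzalloc(16, GFP_KERNEL);
		if (!b) {
			ret = -ENOMEM;
			goto err_free_a;	/* unwind only what already succeeded */
		}

		ret = do_setup(a, b);		/* hypothetical helper */
		if (ret)
			goto err_free_b;

		return 0;

	err_free_b:				/* labels run in reverse order of setup */
		kfree(b);
	err_free_a:
		kfree(a);
		return ret;
	}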
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7f59dd38c..6589635f8 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -385,8 +385,10 @@ static int mei_me_pci_resume(struct device *device)
}
err = mei_restart(dev);
- if (err)
+ if (err) {
+ free_irq(pdev->irq, dev);
return err;
+ }
/* Start timer if stopped in suspend */
schedule_delayed_work(&dev->timer_work, HZ);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b543e6b9f..1ec65d874 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -399,41 +399,32 @@ static void mei_vsc_remove(struct platform_device *pdev)
static int mei_vsc_suspend(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
+ struct mei_device *mei_dev;
+ int ret = 0;
- mei_stop(mei_dev);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
- mei_disable_interrupts(mei_dev);
+ mutex_lock(&mei_dev->device_lock);
- vsc_tp_free_irq(hw->tp);
+ if (!mei_write_is_idle(mei_dev))
+ ret = -EAGAIN;
- return 0;
+ mutex_unlock(&mei_dev->device_lock);
+
+ return ret;
}
static int mei_vsc_resume(struct device *dev)
{
- struct mei_device *mei_dev = dev_get_drvdata(dev);
- struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- int ret;
-
- ret = vsc_tp_request_irq(hw->tp);
- if (ret)
- return ret;
-
- ret = mei_restart(mei_dev);
- if (ret)
- goto err_free;
+ struct mei_device *mei_dev;
- /* start timer if stopped in suspend */
- schedule_delayed_work(&mei_dev->timer_work, HZ);
+ mei_dev = dev_get_drvdata(dev);
+ if (!mei_dev)
+ return -ENODEV;
return 0;
-
-err_free:
- vsc_tp_free_irq(hw->tp);
-
- return ret;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume);
diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c
index ffa4ccd96..596a9d695 100644
--- a/drivers/misc/mei/vsc-fw-loader.c
+++ b/drivers/misc/mei/vsc-fw-loader.c
@@ -252,7 +252,7 @@ static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader,
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER };
union acpi_object obj = {
- .type = ACPI_TYPE_INTEGER,
+ .integer.type = ACPI_TYPE_INTEGER,
.integer.value = 1,
};
struct acpi_object_list arg_list = {
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
index 5d7ac0762..9a41ab653 100644
--- a/drivers/misc/vmw_vmci/vmci_event.c
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -9,6 +9,7 @@
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
@@ -86,9 +87,12 @@ static void event_deliver(struct vmci_event_msg *event_msg)
{
struct vmci_subscription *cur;
struct list_head *subscriber_list;
+ u32 sanitized_event, max_vmci_event;
rcu_read_lock();
- subscriber_list = &subscriber_array[event_msg->event_data.event];
+ max_vmci_event = ARRAY_SIZE(subscriber_array);
+ sanitized_event = array_index_nospec(event_msg->event_data.event, max_vmci_event);
+ subscriber_list = &subscriber_array[sanitized_event];
list_for_each_entry_rcu(cur, subscriber_list, node) {
cur->callback(cur->id, &event_msg->event_data,
cur->callback_data);
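
The change above clamps the guest-controllable event index before it selects a subscriber list. As a reference only, the usual pattern around array_index_nospec() from <linux/nospec.h> is a bounds check followed by a speculation-safe clamp; a minimal sketch with made-up names, not the VMCI code:

	#include <linux/kernel.h>
	#include <linux/nospec.h>

	static const int lut[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	static int lut_lookup(unsigned int idx)
	{
		if (idx >= ARRAY_SIZE(lut))
			return -EINVAL;

		/* Clamp idx so a mispredicted bounds check above cannot be
		 * used to speculatively read outside the table.
		 */
		idx = array_index_nospec(idx, ARRAY_SIZE(lut));

		return lut[idx];
	}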
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 4f8d962bb..1300ccab3 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -625,7 +625,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
if (!vmci_dev) {
dev_err(&pdev->dev,
"Can't allocate memory for VMCI device\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_unmap_mmio_base;
}
vmci_dev->dev = &pdev->dev;
@@ -642,7 +643,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
if (!vmci_dev->tx_buffer) {
dev_err(&pdev->dev,
"Can't allocate memory for datagram tx buffer\n");
- return -ENOMEM;
+ error = -ENOMEM;
+ goto err_unmap_mmio_base;
}
vmci_dev->data_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,
@@ -893,6 +895,10 @@ err_free_notification_bitmap:
err_free_data_buffers:
vmci_free_dg_buffers(vmci_dev);
+err_unmap_mmio_base:
+ if (mmio_base != NULL)
+ pci_iounmap(pdev, mmio_base);
+
/* The rest are managed resources and will be freed by PCI core */
return error;
}
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 39f45c2b6..8791656e9 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -221,6 +221,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
}
EXPORT_SYMBOL(mmc_gpiod_request_cd);
+/**
+ * mmc_gpiod_set_cd_config - set config for card-detection GPIO
+ * @host: mmc host
+ * @config: Generic pinconf config (from pinconf_to_config_packed())
+ *
+ * This can be used by mmc host drivers to fixup a card-detection GPIO's config
+ * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor
+ * through mmc_gpiod_request_cd().
+ *
+ * Returns:
+ * 0 on success, or a negative errno value on error.
+ */
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ return gpiod_set_config(ctx->cd_gpio, config);
+}
+EXPORT_SYMBOL(mmc_gpiod_set_cd_config);
+
bool mmc_can_gpio_cd(struct mmc_host *host)
{
struct mmc_gpio *ctx = host->slot.handler_priv;
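
A sketch of how a host driver could combine the new helper with mmc_gpiod_request_cd(); the function name, "cd" con_id and the 20 kOhm pull-up value are illustrative only (the sdhci-acpi hunk further down does essentially this for the Asus T100TA quirk):

	#include <linux/mmc/host.h>
	#include <linux/mmc/slot-gpio.h>
	#include <linux/pinctrl/pinconf-generic.h>

	static int example_host_setup_cd(struct mmc_host *mmc)
	{
		int err;

		err = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
		if (err)
			return err;

		/* Ask gpiolib/pinctrl for a ~20k pull-up on the card-detect pin */
		return mmc_gpiod_set_cd_config(mmc,
				PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
	}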
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 8bd938919..d7427894e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1337,7 +1337,7 @@ ioremap_fail:
return ret;
}
-static void __exit davinci_mmcsd_remove(struct platform_device *pdev)
+static void davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
@@ -1392,7 +1392,7 @@ static struct platform_driver davinci_mmcsd_driver = {
.of_match_table = davinci_mmc_dt_ids,
},
.probe = davinci_mmcsd_probe,
- .remove_new = __exit_p(davinci_mmcsd_remove),
+ .remove_new = davinci_mmcsd_remove,
.id_table = davinci_mmc_devtype,
};
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index acf5fc3ad..eb8f427f9 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/pinctrl/pinconf-generic.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
@@ -80,6 +81,8 @@ struct sdhci_acpi_host {
enum {
DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0),
DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1),
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2),
+ DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3),
};
static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
@@ -719,9 +722,30 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+/* Please keep this list sorted alphabetically */
static const struct dmi_system_id sdhci_acpi_quirks[] = {
{
/*
+ * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+ * reports the card being write-protected even though microSD
+ * cards do not have a write-protect switch at all.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
+ {
+ /* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP,
+ },
+ {
+ /*
* The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
* the SHC1 ACPI device, this bug causes it to reprogram the
* wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
@@ -736,15 +760,23 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
{
/*
- * The Acer Aspire Switch 10 (SW5-012) microSD slot always
- * reports the card being write-protected even though microSD
- * cards do not have a write-protect switch at all.
+ * Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version), which
+ * has broken WP reporting and an inverted CD signal.
+ * Note this has more or less the same BIOS as the Lenovo Yoga
+ * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike
+ * the 830 / 1050 models which share the same mainboard this
+ * model has a different mainboard and the inverted CD and
+ * broken WP are unique to this board.
*/
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
- DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Full match so as to NOT match the 830/1050 BIOS */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
},
- .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT |
+ DMI_QUIRK_SD_CD_ACTIVE_HIGH),
},
{
/*
@@ -757,6 +789,17 @@ static const struct dmi_system_id sdhci_acpi_quirks[] = {
},
.driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
},
+ {
+ /*
+ * The Toshiba WT10-A's microSD slot always reports the card being
+ * write-protected.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"),
+ },
+ .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+ },
{} /* Terminating entry */
};
@@ -866,12 +909,18 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) {
bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL);
+ if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH)
+ host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0);
if (err) {
if (err == -EPROBE_DEFER)
goto err_free;
dev_warn(dev, "failed to setup card detect gpio\n");
c->use_runtime_pm = false;
+ } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) {
+ mmc_gpiod_set_cd_config(host->mmc,
+ PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000));
}
if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c79f73459..746f4cf7a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3439,12 +3439,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
host->data->error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
- } else if ((intmask & SDHCI_INT_DATA_CRC) &&
+ } else if ((intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
!= MMC_BUS_TEST_R) {
host->data->error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
+ if (intmask & SDHCI_INT_TUNING_ERROR) {
+ u16 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ ctrl2 &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
+ }
} else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
intmask);
@@ -3979,7 +3985,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
} else
*cmd_error = 0;
- if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
+ if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC | SDHCI_INT_TUNING_ERROR)) {
*data_error = -EILSEQ;
if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
sdhci_err_stats_inc(host, DAT_CRC);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a20864fc0..957c7a917 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -158,6 +158,7 @@
#define SDHCI_INT_BUS_POWER 0x00800000
#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
+#define SDHCI_INT_TUNING_ERROR 0x04000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
@@ -169,7 +170,7 @@
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
- SDHCI_INT_BLK_GAP)
+ SDHCI_INT_BLK_GAP | SDHCI_INT_TUNING_ERROR)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
#define SDHCI_CQE_INT_ERR_MASK ( \
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index d659c5942..562034af6 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -143,16 +143,24 @@ struct sdhci_am654_data {
struct regmap *base;
int otap_del_sel[ARRAY_SIZE(td)];
int itap_del_sel[ARRAY_SIZE(td)];
+ u32 itap_del_ena[ARRAY_SIZE(td)];
int clkbuf_sel;
int trm_icp;
int drv_strength;
int strb_sel;
u32 flags;
u32 quirks;
+ bool dll_enable;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
};
+struct window {
+ u8 start;
+ u8 end;
+ u8 length;
+};
+
struct sdhci_am654_driver_data {
const struct sdhci_pltfm_data *pdata;
u32 flags;
@@ -232,11 +240,13 @@ static void sdhci_am654_setup_dll(struct sdhci_host *host, unsigned int clock)
}
static void sdhci_am654_write_itapdly(struct sdhci_am654_data *sdhci_am654,
- u32 itapdly)
+ u32 itapdly, u32 enable)
{
/* Set ITAPCHGWIN before writing to ITAPDLY */
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
+ enable << ITAPDLYENA_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYSEL_MASK,
itapdly << ITAPDLYSEL_SHIFT);
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
@@ -253,8 +263,8 @@ static void sdhci_am654_setup_delay_chain(struct sdhci_am654_data *sdhci_am654,
mask = SELDLYTXCLK_MASK | SELDLYRXCLK_MASK;
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, mask, val);
- sdhci_am654_write_itapdly(sdhci_am654,
- sdhci_am654->itap_del_sel[timing]);
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
}
static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
@@ -263,7 +273,6 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
- u32 otap_del_ena;
u32 mask, val;
regmap_update_bits(sdhci_am654->base, PHY_CTRL1, ENDLL_MASK, 0);
@@ -272,10 +281,9 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
/* Setup DLL Output TAP delay */
otap_del_sel = sdhci_am654->otap_del_sel[timing];
- otap_del_ena = (timing > MMC_TIMING_UHS_SDR25) ? 1 : 0;
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
- val = (otap_del_ena << OTAPDLYENA_SHIFT) |
+ val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
/* Write to STRBSEL for HS400 speed mode */
@@ -290,10 +298,21 @@ static void sdhci_am654_set_clock(struct sdhci_host *host, unsigned int clock)
regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
- if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ)
+ if (timing > MMC_TIMING_UHS_SDR25 && clock >= CLOCK_TOO_SLOW_HZ) {
sdhci_am654_setup_dll(host, clock);
- else
+ sdhci_am654->dll_enable = true;
+
+ if (timing == MMC_TIMING_MMC_HS400) {
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+ sdhci_am654->itap_del_sel[timing] = sdhci_am654->itap_del_sel[timing - 1];
+ }
+
+ sdhci_am654_write_itapdly(sdhci_am654, sdhci_am654->itap_del_sel[timing],
+ sdhci_am654->itap_del_ena[timing]);
+ } else {
sdhci_am654_setup_delay_chain(sdhci_am654, timing);
+ sdhci_am654->dll_enable = false;
+ }
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -306,6 +325,8 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
unsigned char timing = host->mmc->ios.timing;
u32 otap_del_sel;
+ u32 itap_del_ena;
+ u32 itap_del_sel;
u32 mask, val;
/* Setup DLL Output TAP delay */
@@ -314,8 +335,19 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
mask = OTAPDLYENA_MASK | OTAPDLYSEL_MASK;
val = (0x1 << OTAPDLYENA_SHIFT) |
(otap_del_sel << OTAPDLYSEL_SHIFT);
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ /* Setup Input TAP delay */
+ itap_del_ena = sdhci_am654->itap_del_ena[timing];
+ itap_del_sel = sdhci_am654->itap_del_sel[timing];
+
+ mask |= ITAPDLYENA_MASK | ITAPDLYSEL_MASK;
+ val |= (itap_del_ena << ITAPDLYENA_SHIFT) |
+ (itap_del_sel << ITAPDLYSEL_SHIFT);
+
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK,
+ 1 << ITAPCHGWIN_SHIFT);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, mask, val);
+ regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPCHGWIN_MASK, 0);
regmap_update_bits(sdhci_am654->base, PHY_CTRL5, CLKBUFSEL_MASK,
sdhci_am654->clkbuf_sel);
@@ -408,40 +440,105 @@ static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask)
return 0;
}
-#define ITAP_MAX 32
+#define ITAPDLY_LENGTH 32
+#define ITAPDLY_LAST_INDEX (ITAPDLY_LENGTH - 1)
+
+static u32 sdhci_am654_calculate_itap(struct sdhci_host *host, struct window
+ *fail_window, u8 num_fails, bool circular_buffer)
+{
+ u8 itap = 0, start_fail = 0, end_fail = 0, pass_length = 0;
+ u8 first_fail_start = 0, last_fail_end = 0;
+ struct device *dev = mmc_dev(host->mmc);
+ struct window pass_window = {0, 0, 0};
+ int prev_fail_end = -1;
+ u8 i;
+
+ if (!num_fails)
+ return ITAPDLY_LAST_INDEX >> 1;
+
+ if (fail_window->length == ITAPDLY_LENGTH) {
+ dev_err(dev, "No passing ITAPDLY, return 0\n");
+ return 0;
+ }
+
+ first_fail_start = fail_window->start;
+ last_fail_end = fail_window[num_fails - 1].end;
+
+ for (i = 0; i < num_fails; i++) {
+ start_fail = fail_window[i].start;
+ end_fail = fail_window[i].end;
+ pass_length = start_fail - (prev_fail_end + 1);
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = prev_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+ prev_fail_end = end_fail;
+ }
+
+ if (!circular_buffer)
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end;
+ else
+ pass_length = ITAPDLY_LAST_INDEX - last_fail_end + first_fail_start;
+
+ if (pass_length > pass_window.length) {
+ pass_window.start = last_fail_end + 1;
+ pass_window.length = pass_length;
+ }
+
+ if (!circular_buffer)
+ itap = pass_window.start + (pass_window.length >> 1);
+ else
+ itap = (pass_window.start + (pass_window.length >> 1)) % ITAPDLY_LENGTH;
+
+ return (itap > ITAPDLY_LAST_INDEX) ? ITAPDLY_LAST_INDEX >> 1 : itap;
+}
+
static int sdhci_am654_platform_execute_tuning(struct sdhci_host *host,
u32 opcode)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int cur_val, prev_val = 1, fail_len = 0, pass_window = 0, pass_len;
- u32 itap;
+ unsigned char timing = host->mmc->ios.timing;
+ struct window fail_window[ITAPDLY_LENGTH];
+ u8 curr_pass, itap;
+ u8 fail_index = 0;
+ u8 prev_pass = 1;
+
+ memset(fail_window, 0, sizeof(fail_window));
/* Enable ITAPDLY */
- regmap_update_bits(sdhci_am654->base, PHY_CTRL4, ITAPDLYENA_MASK,
- 1 << ITAPDLYENA_SHIFT);
+ sdhci_am654->itap_del_ena[timing] = 0x1;
+
+ for (itap = 0; itap < ITAPDLY_LENGTH; itap++) {
+ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
- for (itap = 0; itap < ITAP_MAX; itap++) {
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+ curr_pass = !mmc_send_tuning(host->mmc, opcode, NULL);
- cur_val = !mmc_send_tuning(host->mmc, opcode, NULL);
- if (cur_val && !prev_val)
- pass_window = itap;
+ if (!curr_pass && prev_pass)
+ fail_window[fail_index].start = itap;
- if (!cur_val)
- fail_len++;
+ if (!curr_pass) {
+ fail_window[fail_index].end = itap;
+ fail_window[fail_index].length++;
+ }
+
+ if (curr_pass && !prev_pass)
+ fail_index++;
- prev_val = cur_val;
+ prev_pass = curr_pass;
}
- /*
- * Having determined the length of the failing window and start of
- * the passing window calculate the length of the passing window and
- * set the final value halfway through it considering the range as a
- * circular buffer
- */
- pass_len = ITAP_MAX - fail_len;
- itap = (pass_window + (pass_len >> 1)) % ITAP_MAX;
- sdhci_am654_write_itapdly(sdhci_am654, itap);
+
+ if (fail_window[fail_index].length != 0)
+ fail_index++;
+
+ itap = sdhci_am654_calculate_itap(host, fail_window, fail_index,
+ sdhci_am654->dll_enable);
+
+ sdhci_am654_write_itapdly(sdhci_am654, itap, sdhci_am654->itap_del_ena[timing]);
+
+ /* Save ITAPDLY */
+ sdhci_am654->itap_del_sel[timing] = itap;
return 0;
}
@@ -590,9 +687,12 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
host->mmc->caps2 &= ~td[i].capability;
}
- if (td[i].itap_binding)
- device_property_read_u32(dev, td[i].itap_binding,
- &sdhci_am654->itap_del_sel[i]);
+ if (td[i].itap_binding) {
+ ret = device_property_read_u32(dev, td[i].itap_binding,
+ &sdhci_am654->itap_del_sel[i]);
+ if (!ret)
+ sdhci_am654->itap_del_ena[i] = 0x1;
+ }
}
return 0;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 0de87bc63..6d5c75541 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -956,8 +956,10 @@ static int mtd_otp_nvmem_add(struct mtd_info *mtd)
if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
size = mtd_otp_size(mtd, true);
- if (size < 0)
- return size;
+ if (size < 0) {
+ err = size;
+ goto err;
+ }
if (size > 0) {
nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index a74e64e0c..c02e50608 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -401,7 +401,7 @@ static int hynix_nand_rr_init(struct nand_chip *chip)
if (ret)
pr_warn("failed to initialize read-retry infrastructure");
- return 0;
+ return ret;
}
static void hynix_nand_extract_oobsize(struct nand_chip *chip,
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7cab36f94..db55ffdb5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -48,7 +48,9 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_NET_DSA) += dsa/
+ifdef CONFIG_NET_DSA
+obj-y += dsa/
+endif
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c5ed0a7c..bceda85f0 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -6477,16 +6477,16 @@ static int __init bonding_init(void)
if (res)
goto out;
+ bond_create_debugfs();
+
res = register_pernet_subsys(&bond_net_ops);
if (res)
- goto out;
+ goto err_net_ops;
res = bond_netlink_init();
if (res)
goto err_link;
- bond_create_debugfs();
-
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -6501,10 +6501,11 @@ static int __init bonding_init(void)
out:
return res;
err:
- bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
+err_net_ops:
+ bond_destroy_debugfs();
goto out;
}
@@ -6513,11 +6514,11 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
- bond_destroy_debugfs();
-
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
+ bond_destroy_debugfs();
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 2b510f150..2a5861a88 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -3050,7 +3050,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
- interface = PHY_INTERFACE_MODE_RGMII;
+ interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 0918bd6fa..5a202edfe 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3146,6 +3146,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
+ int err;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
@@ -3154,17 +3155,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* mid-byte, causing the first EEPROM read after the reset
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
+ * For this reason, switch families with EEPROM support
+ * generally wait for EEPROM loads to complete as their pre-
+ * and post-reset handlers.
*/
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_pre) {
+ err = chip->info->ops->hardware_reset_pre(chip);
+ if (err)
+ dev_err(chip->dev, "pre-reset error: %d\n", err);
+ }
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_post) {
+ err = chip->info->ops->hardware_reset_post(chip);
+ if (err)
+ dev_err(chip->dev, "post-reset error: %d\n", err);
+ }
}
}
@@ -4394,6 +4404,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4584,6 +4596,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4684,6 +4698,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4778,6 +4794,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4836,6 +4854,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4892,6 +4912,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4951,6 +4973,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5004,6 +5028,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
+ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -5051,6 +5077,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5110,6 +5138,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -5156,6 +5186,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -5206,6 +5238,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5361,6 +5395,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5423,6 +5459,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5485,6 +5523,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5550,6 +5590,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 85eb29338..c34caf981 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -487,6 +487,12 @@ struct mv88e6xxx_ops {
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ /* Additional handlers to run before and after hard reset, to make sure
+ * that the switch and EEPROM are in a good state.
+ */
+ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
+ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
+
/* Switch Software Reset */
int (*reset)(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 49444a72f..9820cd596 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
+{
+ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
+ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
+}
+
+/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
+static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err < 0) {
+ dev_err(chip->dev, "Error reading status");
+ return err;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, and so MDIO read returns
+ * 0xffff. Differentiate between that, and waiting for
+ * the EEPROM to be done by bit 0 being set.
+ */
+ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* As the EEInt (EEPROM done) flag clears on read of the status register, this
+ * function must be called directly after a hard reset or EEPROM ReLoad request,
+ * or the done condition may have been missed
+ */
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ int ret;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+ return -ETIMEDOUT;
+}
+
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* Pre-reset, we don't know the state of the switch - when
+ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
+ * the switch is actually busy reading the EEPROM, or because
+ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
+ * status register read already.
+ *
+ * To account for the latter case, trigger another EEPROM reload for
+ * another chance at seeing the done flag.
+ */
+ ret = mv88e6250_g1_eeprom_reload(chip);
+ if (ret)
+ return ret;
+
+ return mv88e6xxx_g1_wait_eeprom_done(chip);
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1095261f5..3dbb7a1b8 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff..43ac68052 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e10ae94cf..5ccb1a3a1 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -185,7 +185,12 @@
#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+/* LED trigger event for each group */
#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
#define RTL8366RB_LED_OFF 0x0
#define RTL8366RB_LED_DUP_COL 0x1
#define RTL8366RB_LED_LINK_ACT 0x2
@@ -202,6 +207,11 @@
#define RTL8366RB_LED_LINK_TX 0xd
#define RTL8366RB_LED_MASTER 0xe
#define RTL8366RB_LED_FORCE 0xf
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LED_FORCE. Otherwise, they are ignored.
+ */
#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
#define RTL8366RB_LED_1_OFFSET 6
#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
@@ -1002,27 +1012,19 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (priv->leds_disabled) {
/* Turn everything off */
regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
RTL8366RB_INTERRUPT_CONTROL_REG,
RTL8366RB_P4_RGMII_LED,
0);
- val = RTL8366RB_LED_OFF;
- } else {
- /* TODO: make this configurable per LED */
- val = RTL8366RB_LED_FORCE;
- }
- for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_CTRL_REG,
- 0xf << (i * 4),
- val << (i * 4));
- if (ret)
- return ret;
+
+ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
+ val = RTL8366RB_LED_OFF << RTL8366RB_LED_CTRL_OFFSET(i);
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ RTL8366RB_LED_CTRL_MASK(i),
+ val);
+ if (ret)
+ return ret;
+ }
}
ret = rtl8366_reset_vlan(priv);
@@ -1167,52 +1169,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void rb8366rb_set_port_led(struct realtek_priv *priv,
- int port, bool enable)
-{
- u16 val = enable ? 0x3f : 0;
- int ret;
-
- if (priv->leds_disabled)
- return;
-
- switch (port) {
- case 0:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F, val);
- break;
- case 1:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F << RTL8366RB_LED_1_OFFSET,
- val << RTL8366RB_LED_1_OFFSET);
- break;
- case 2:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F, val);
- break;
- case 3:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F << RTL8366RB_LED_3_OFFSET,
- val << RTL8366RB_LED_3_OFFSET);
- break;
- case 4:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- enable ? RTL8366RB_P4_RGMII_LED : 0);
- break;
- default:
- dev_err(priv->dev, "no LED for port %d\n", port);
- return;
- }
- if (ret)
- dev_err(priv->dev, "error updating LED on port %d\n", port);
-}
-
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1226,7 +1182,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
if (ret)
return ret;
- rb8366rb_set_port_led(priv, port, true);
return 0;
}
@@ -1241,8 +1196,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
BIT(port));
if (ret)
return;
-
- rb8366rb_set_port_led(priv, port, false);
}
static int
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index d2e876805..a9c170243 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -290,16 +290,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
* rtl83xx_remove() - Cleanup a realtek switch driver
* @priv: realtek_priv pointer
*
- * If a method is provided, this function asserts the hard reset of the switch
- * in order to avoid leaking traffic when the driver is gone.
+ * Placeholder for common cleanup procedures.
*
- * Context: Might sleep if priv->gdev->chip->can_sleep.
+ * Context: Any
* Return: nothing
*/
void rtl83xx_remove(struct realtek_priv *priv)
{
- /* leave the device reset asserted */
- rtl83xx_reset_assert(priv);
}
EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 2d8a66ea8..713a59537 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 933e619b3..4c6e07aa4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
- u16 *first_cdesc_idx)
+static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *num_descs)
{
+ u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
- u16 count = 0, head_masked;
u32 last = 0;
do {
+ u32 status;
+
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
+ status = READ_ONCE(cdesc->status);
ena_com_cq_inc_head(io_cq);
+ if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
+ struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+
+ netdev_err(dev->net_device,
+ "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
+ count, io_cq->qid, cdesc->req_id);
+ return -EFAULT;
+ }
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
- count += io_cq->cur_rx_pkt_cdesc_count;
head_masked = io_cq->head & (io_cq->q_depth - 1);
+ *num_descs = count;
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
@@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
- io_cq->cur_rx_pkt_cdesc_count += count;
- count = 0;
+ io_cq->cur_rx_pkt_cdesc_count = count;
+ *num_descs = 0;
}
- return count;
+ return 0;
}
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
@@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
+ int rc;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (unlikely(rc != 0))
+ return -EFAULT;
+
if (nb_hw_desc == 0) {
ena_rx_ctx->descs = nb_hw_desc;
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index be5acfa41..8db05f754 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1347,6 +1347,8 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else if (rc == -EFAULT) {
+ ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 2c3d6a77e..a2efebafd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
+ ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2c2ee79c4..0fab62a56 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -730,9 +730,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -754,6 +751,8 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags))
+ atomic_inc(&bp->ptp_cfg->tx_avail);
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index dd849e715..c46abcccf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1419,6 +1419,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc..d2fd2d04e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192eba..22898d3d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index aa6c0dfb6..e26b4ed33 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d266a8729..54798df8e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19..d1fbadbf8 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1107,10 +1107,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1122,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1415,15 +1420,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packages until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1735,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
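The gemini change serializes the read-modify-write of GLOBAL_INTERRUPT_ENABLE_0_REG (and the status-register write in the Rx path) under geth->irq_lock, which the hard IRQ handler also takes. A generic sketch of that pattern, assuming kernel context; the register offset and names are placeholders:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/io.h>

/* Illustrative only: update a shared interrupt-enable register atomically
 * with respect to the interrupt handler, which takes the same lock.
 */
static void example_irq_mask_update(void __iomem *base, spinlock_t *lock,
				    u32 reg_off, u32 mask, bool enable)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	val = readl(base + reg_off);
	val = enable ? (val | mask) : (val & ~mask);
	writel(val, base + reg_off);
	spin_unlock_irqrestore(lock, flags);
}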
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 9f07f4947..5c45f4223 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8bd213da8..881ece735 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3674,29 +3674,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
-
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
-}
-#endif
-
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -4003,9 +3980,6 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_eth_ioctl = phy_do_ioctl_running,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
@@ -4156,6 +4130,14 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
@@ -4550,6 +4532,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4613,6 +4596,7 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 181d9bfbe..e32f6724f 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,14 +104,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
struct timespec64 ts;
u64 ns;
- if (fep->pps_enable == enable)
- return 0;
-
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
- fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
-
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->pps_enable == enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+ }
+
if (enable) {
/* clear capture or output compare interrupt status if have.
*/
@@ -532,6 +531,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
ret = fec_ptp_enable_pps(fep, on);
return ret;
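fec_ptp_enable_pps() previously tested fep->pps_enable before taking tmreg_lock, so two concurrent callers could both observe the old value; the fix moves the check and the early return under the lock, while the per-request channel/reload fields are now set by the caller. The general shape, as a minimal sketch with an illustrative structure:

#include <linux/spinlock.h>

struct pps_state {
	spinlock_t lock;	/* protects 'enabled' and the hardware it mirrors */
	bool enabled;
};

/* Illustrative only: a test-and-set of a state flag must happen under the
 * same lock that protects the hardware programming it guards.
 */
static int pps_set_enabled(struct pps_state *st, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	if (st->enabled == enable) {
		spin_unlock_irqrestore(&st->lock, flags);
		return 0;		/* nothing to do */
	}
	st->enabled = enable;
	/* ... program the timer registers while still holding the lock ... */
	spin_unlock_irqrestore(&st->lock, flags);
	return 0;
}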
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308..d165a999d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -581,11 +581,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@@ -884,7 +886,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -927,7 +929,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3..917a79a47 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 19668a8d2..c9258b1b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -3539,6 +3539,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
@@ -5111,6 +5114,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
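hns3_alloc_ring_buffers() can iterate over thousands of buffer descriptors, so the change calls cond_resched() every HNS3_RESCHED_BD_NUM (1024) iterations, and once per ring in hns3_init_all_ring(), to avoid soft lockups on large configurations. A minimal sketch of the pattern, assuming kernel (preemptible, process) context:

#include <linux/sched.h>

#define EXAMPLE_RESCHED_INTERVAL 1024	/* illustrative, mirrors HNS3_RESCHED_BD_NUM */

/* Illustrative only: yield the CPU periodically inside a long loop so
 * other tasks and the watchdog get to run.
 */
static int example_fill_buffers(int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/* ... allocate and attach buffer i ... */

		if (!(i % EXAMPLE_RESCHED_INTERVAL))
			cond_resched();
	}
	return 0;
}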
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c..d36c4ed16 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ce60332d8..990d7bd39 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3042,9 +3042,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3068,8 +3066,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -11245,6 +11250,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11253,7 +11264,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 365c03d1c..8e40f26aa 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -412,7 +412,6 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -749,6 +748,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
* @ring: Rx ring to use
*
@@ -760,10 +778,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
-
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ return ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -788,12 +803,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
- ring->xsk_pool = NULL;
- return;
- }
-
- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
}
/**
@@ -922,9 +932,17 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
+ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
+};
+
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
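With the per-VSI af_xdp_zc_qps bitmap removed, the new ice_get_xp_from_qid() shown above derives a queue's zero-copy state solely from xsk_get_pool_from_qid(), treating a pool with a bound device (pool->dev) as ZC-enabled; ice_xsk_pool() and ice_tx_xsk_pool() both reduce to this one helper. A hedged sketch of a caller, assuming ice.h context; the function itself is illustrative, not a drop-in from the driver:

/* Illustrative only: the ring's pool is refreshed from the same queue-id
 * lookup, so there is a single source of truth for zero-copy state.
 */
static void example_refresh_rx_pool(struct ice_vsi *vsi, u16 qid)
{
	vsi->rx_rings[qid]->xsk_pool = ice_get_xp_from_qid(vsi, qid);
}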
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a545a7917..9d23a436d 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -860,6 +860,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d9f6cc71d..e7d28432b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3135,6 +3135,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index fc91c4d41..6e7d58243 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1329,6 +1329,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
for (i = 0; i < count; i++) {
bool last = false;
+ int try_cnt = 0;
int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -1336,8 +1337,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
if (indicate_last)
last = ice_is_last_download_buffer(bh, i, count);
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
+ while (1) {
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
+ last, &offset, &info,
+ NULL);
+ if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
+ break;
+
+ try_cnt++;
+
+ if (try_cnt == 5)
+ break;
+
+ msleep(20);
+ }
+
+ if (try_cnt)
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n",
+ try_cnt);
/* Save AQ status from download package */
if (status) {
@@ -1424,14 +1443,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
goto exit;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
-
state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
+ if (state || !count)
goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
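The download loop above retries ice_aq_download_pkg() up to five times, 20 ms apart, but only while the admin queue reports ICE_AQ_RC_ENOSEC or ICE_AQ_RC_EBADSIG; any other status breaks out immediately. The bounded-retry shape in isolation, as a standalone sketch with placeholder callbacks:

#include <stdbool.h>

/* Illustrative only: retry an operation a bounded number of times, but
 * only for the specific transient errors worth retrying.
 */
static int bounded_retry(int (*op)(void *), void *arg,
			 bool (*is_transient)(int), int max_tries)
{
	int status, try_cnt = 0;

	while (1) {
		status = op(arg);
		if (!is_transient(status))
			break;
		if (++try_cnt == max_tries)
			break;
		/* sleep briefly between attempts, e.g. msleep(20) in the kernel */
	}
	return status;
}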
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 78b833b3e..62c8205fc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3593,7 +3593,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
bool locked = false;
- u32 curr_combined;
int ret = 0;
/* do not support changing channels in Safe Mode */
@@ -3615,22 +3614,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
- curr_combined = ice_get_combined_cnt(vsi);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->rx_count == vsi->num_rxq - curr_combined)
- ch->rx_count = 0;
- if (ch->tx_count == vsi->num_txq - curr_combined)
- ch->tx_count = 0;
- if (ch->combined_count == curr_combined)
- ch->combined_count = 0;
-
- if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
- netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 558422120..acf732ce0 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
- if (!vsi->af_xdp_zc_qps)
- goto err_zc_qps;
-
return 0;
-err_zc_qps:
- devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -328,8 +322,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@@ -2331,22 +2323,23 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+ ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
@@ -2493,7 +2486,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
- ice_destroy_xdp_rings(vsi);
+ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 33a164fa3..61eef3259 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -803,6 +803,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -2670,17 +2673,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2693,8 +2751,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2713,49 +2770,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
@@ -2805,22 +2828,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@@ -2861,7 +2883,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -2972,7 +2994,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@@ -2983,7 +3006,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */
@@ -5431,7 +5454,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5511,6 +5534,11 @@ static int __maybe_unused ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
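ice_map_xdp_rings() spreads num_xdp_txq rings over the VSI's interrupt vectors with the same remaining-work split used by ice_vsi_map_rings_to_vectors(): each vector v gets DIV_ROUND_UP(remaining, vectors_left) rings. A standalone, userspace-runnable sketch of that distribution; names and the printf output are illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative only: distribute 'nrings' rings across 'nvec' vectors so
 * the per-vector counts differ by at most one.
 */
static void map_rings(int nrings, int nvec)
{
	int rem = nrings;

	for (int v = 0; v < nvec; v++) {
		int per_v = DIV_ROUND_UP(rem, nvec - v);
		int base = nrings - rem;

		printf("vector %d: rings %d..%d\n", v, base, base + per_v - 1);
		rem -= per_v;
	}
}

int main(void)
{
	map_rings(10, 4);	/* splits as 3, 3, 2, 2 */
	return 0;
}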
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index d4e05d2cb..8510a02af 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -375,11 +375,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
*
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
*/
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ u32 sr_copy;
+
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+ break;
+ case ICE_INACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+ break;
+ }
+
+ return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
}
/**
@@ -441,8 +455,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -455,11 +468,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
+
+ /* The Preserved Fields Area contains a sequence of Type-Length-Value
+ * structures which define its contents. The PFA length includes all
+ * of the TLVs, plus the initial length word itself, *and* one final
+ * word at the end after all of the TLVs.
+ */
+ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
+
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@@ -483,10 +508,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
}
/* Module does not exist */
return -ENOENT;
@@ -1011,6 +1039,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ int status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+ &banks->active_css_hdr_len);
+ if (status)
+ return status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+ &banks->inactive_css_hdr_len);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -1056,6 +1150,12 @@ int ice_init_nvm(struct ice_hw *hw)
return status;
}
+ status = ice_determine_css_hdr_len(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+ return status;
+ }
+
status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
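ice_get_pfa_module_tlv() now bounds the TLV scan with check_add_overflow() so a corrupt PFA length or TLV length cannot wrap the 16-bit cursor and loop past the area. The same guard can be shown in a few lines of standalone C using the compiler builtin that check_add_overflow() maps to; the offsets used in main() are made up for the example:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: advance a 16-bit TLV cursor, rejecting any step that
 * would overflow instead of silently wrapping around.
 */
static bool next_tlv_offset(uint16_t cur, uint16_t tlv_len, uint16_t limit,
			    uint16_t *next)
{
	uint16_t tmp;

	/* skip the two type/length words, then the payload */
	if (__builtin_add_overflow(cur, (uint16_t)2, &tmp) ||
	    __builtin_add_overflow(tmp, tlv_len, &tmp))
		return false;		/* corrupt length: would wrap u16 */

	if (tmp >= limit)
		return false;		/* ran past the end of the area */

	*next = tmp;
	return true;
}

int main(void)
{
	uint16_t next;

	printf("%d\n", next_tlv_offset(0xfff0, 0x0020, 0xffff, &next));	/* 0: wraps */
	printf("%d\n", next_tlv_offset(0x0100, 0x0004, 0x0200, &next));	/* 1: ok */
	return 0;
}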
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b4ea935e8..1472385eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1825,7 +1825,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -2759,7 +2760,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9ff92dba5..0aacd0d05 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -481,6 +481,8 @@ struct ice_bank_info {
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
+ u32 active_css_hdr_len; /* Active CSS header length */
+ u32 inactive_css_hdr_len; /* Inactive CSS header length */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
@@ -1084,17 +1086,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 2e9ad27cb..6e8f2aab6 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return -EINVAL;
err = ice_fltr_add_vlan(vsi, vlan);
- if (err && err != -EEXIST) {
+ if (!err)
+ vsi->num_vlan++;
+ else if (err == -EEXIST)
+ err = 0;
+ else
dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
vlan->vid, vsi->vsi_num, err);
- return err;
- }
- vsi->num_vlan++;
- return 0;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1857220d2..86a865788 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
- clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
- set_bit(qid, vsi->af_xdp_zc_qps);
-
return 0;
}
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
struct ice_rx_ring *rx_ring;
- unsigned long q;
+ uint i;
+
+ ice_for_each_rxq(vsi, i) {
+ rx_ring = vsi->rx_rings[i];
+ if (!rx_ring->xsk_pool)
+ continue;
- for_each_set_bit(q, vsi->af_xdp_zc_qps,
- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
- rx_ring = vsi->rx_rings[q];
if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1..1885ba618 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -222,14 +222,19 @@ static int idpf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct idpf_vport_config *vport_config;
- u16 combined, num_txq, num_rxq;
unsigned int num_req_tx_q;
unsigned int num_req_rx_q;
struct idpf_vport *vport;
+ u16 num_txq, num_rxq;
struct device *dev;
int err = 0;
u16 idx;
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -239,20 +244,6 @@ static int idpf_set_channels(struct net_device *netdev,
num_txq = vport_config->user_config.num_req_tx_qs;
num_rxq = vport_config->user_config.num_req_rx_qs;
- combined = min(num_txq, num_rxq);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->combined_count == combined)
- ch->combined_count = 0;
- if (ch->combined_count && ch->rx_count == num_rxq - combined)
- ch->rx_count = 0;
- if (ch->combined_count && ch->tx_count == num_txq - combined)
- ch->tx_count = 0;
-
num_req_tx_q = ch->combined_count + ch->tx_count;
num_req_rx_q = ch->combined_count + ch->rx_count;
@@ -376,7 +367,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_tx_count);
if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count)
+ new_rx_count == vport->rxq_desc_count &&
+ kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5d3532c27..ae8a48c48 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1394,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
}
idpf_rx_init_buf_tail(vport);
+ idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f5bc4a278..7fc77ed9d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3747,9 +3747,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -4180,7 +4180,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
-	idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
@@ -4194,17 +4193,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493fa..85a146689 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -988,6 +988,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 1a64f1ca6..e699412d2 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
struct igc_hw *hw = &adapter->hw;
u32 eeer;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
+
if (hw->dev_spec._base.eee_enable)
mii_eee_cap1_mod_linkmode_t(edata->advertised,
adapter->eee_advert);
- *edata = adapter->eee;
-
eeer = rd32(IGC_EEER);
/* EEE status on negotiated link */
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 4d975d620..58bc96021 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
#include <net/ipv6.h>
@@ -4876,6 +4877,9 @@ void igc_up(struct igc_adapter *adapter)
/* start the watchdog. */
hw->mac.get_link_status = true;
schedule_work(&adapter->watchdog_task);
+
+ adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
+ MDIO_EEE_2_5GT;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ed440dd0c..84fb6b8de 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3676,9 +3676,7 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
-#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
-#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3688,7 +3686,6 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 2decb0710..a5f644934 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1722,59 +1722,9 @@ static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return -EINVAL;
}
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* change mode enforcement rules to hybrid */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x0400;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* manually control the config */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20002240;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* move the AN base page values */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x1;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* set the AN37 over CB mode */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20000000;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* restart AN manually */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 23adf53c2..cebc79a71 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4013,7 +4013,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
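build_skb() assumes the buffer came from the page allocator with skb_shared_info tailroom described by frag_size; when the driver's buffers are kmalloc()ed, frag_size is 0 and slab_build_skb(), introduced specifically for slab-backed buffers, must be used instead. The selection reduces to a one-line helper; a minimal sketch assuming kernel context:

#include <linux/skbuff.h>

/* Illustrative only: pick the right skb constructor for the buffer's
 * origin; frag_size == 0 conventionally means a kmalloc'ed buffer.
 */
static struct sk_buff *example_build_skb(void *data, unsigned int frag_size)
{
	return frag_size ? build_skb(data, frag_size)
			 : slab_build_skb(data);
}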
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e8b73b9d7..97722ce8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
* Lower priority ones out of avaialble free entries are always
* chosen when 'high vs low' question arises.
+ *
+ * For a VF base MCAM match rule is set by its PF. And all the
+ * further MCAM rules installed by VF on its own are
+ * concatenated with the base rule set by its PF. Hence PF entries
+ * should be at lower priority compared to VF entries. Otherwise
+ * base rule is hit always and rules installed by VF will be of
+ * no use. Hence if the request is from PF then allocate low
+ * priority entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+ /* Ensure PF requests are always at bottom and if PF requests
+ * for higher/lower priority entry wrt reference entry then
+ * honour that criteria and start search for entries from bottom
+ * and not in mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768c..64a97a0a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2..aa01110f0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 4e1130496..05956bf03 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -112,6 +112,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -123,3 +124,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f828d3273..04a49b9b5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -1171,8 +1171,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
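__vlan_hwaccel_push_inside() frees the skb and returns NULL when it cannot make room for the VLAN tag, so the caller must never keep using the old pointer; the otx2 fix above returns true (packet consumed) in that case rather than handing a freed skb to the TSO path. The contract in brief, as an illustrative helper assuming kernel context:

#include <linux/if_vlan.h>

/* Illustrative only: __vlan_hwaccel_push_inside() consumes the skb on
 * failure, so always replace the local pointer and bail out on NULL.
 */
static bool example_push_vlan(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (skb_vlan_tag_present(skb)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (!skb)
			return false;	/* original skb already freed */
	}
	*pskb = skb;
	return true;
}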
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1723e9912..6cddb4da8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -1407,7 +1407,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1554,6 +1557,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
dwrr_del_node = true;
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index caa13b9ce..41d9b0684 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -110,16 +110,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
- .rx_ptr = 0x6100,
- .rx_cnt_cfg = 0x6104,
- .pcrx_ptr = 0x6108,
- .glo_cfg = 0x6204,
- .rst_idx = 0x6208,
- .delay_irq = 0x620c,
- .irq_status = 0x6220,
- .irq_mask = 0x6228,
- .adma_rx_dbg0 = 0x6238,
- .int_grp = 0x6250,
+ .rx_ptr = 0x4100,
+ .rx_cnt_cfg = 0x4104,
+ .pcrx_ptr = 0x4108,
+ .glo_cfg = 0x4204,
+ .rst_idx = 0x4208,
+ .delay_irq = 0x420c,
+ .irq_status = 0x4220,
+ .irq_mask = 0x4228,
+ .adma_rx_dbg0 = 0x4238,
+ .int_grp = 0x4250,
},
.qdma = {
.qtx_cfg = 0x4400,
@@ -1107,7 +1107,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1131,51 +1131,57 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_QDMA_RING_SIZE;
+ int cnt = soc->tx.fq_dma_size;
dma_addr_t dma_addr;
- int i;
+ int i, j, len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
+
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
- dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ if (unlikely(!eth->scratch_head[j]))
+ return -ENOMEM;
- for (i = 0; i < cnt; i++) {
- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- struct mtk_tx_dma_v2 *txd;
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = addr;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
- txd->txd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
- txd->txd5 = 0;
- txd->txd6 = 0;
- txd->txd7 = 0;
- txd->txd8 = 0;
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+ txd->txd2 = eth->phy_scratch_ring +
+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
}
@@ -1416,7 +1422,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1457,7 +1463,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1470,7 +1476,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1513,7 +1519,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1522,7 +1528,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1547,7 +1553,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1654,7 +1660,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1822,7 +1828,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -1841,7 +1847,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1879,7 +1885,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1890,7 +1896,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2021,14 +2027,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet come from. values start at 1 */
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
@@ -2140,7 +2146,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2156,7 +2162,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2280,7 +2286,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2331,7 +2337,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2421,7 +2427,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2437,10 +2443,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2449,7 +2455,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
- ring_size = MTK_DMA_SIZE;
+ ring_size = soc->tx.dma_size;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + ring_size * sz;
- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
} else {
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
@@ -2572,14 +2578,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size, tx_ring_size;
int i;
@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
tx_ring_size = MTK_QDMA_RING_SIZE;
else
- tx_ring_size = MTK_DMA_SIZE;
+ tx_ring_size = soc->tx.dma_size;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
- rx_dma_size = MTK_DMA_SIZE;
+ rx_dma_size = soc->rx.dma_size;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -2634,15 +2641,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}
if (!ring->dma)
@@ -2653,7 +2660,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2690,7 +2697,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2744,7 +2751,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2761,7 +2768,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3124,7 +3131,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
- kfree(eth->scratch_head);
+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+ kfree(eth->scratch_head[i]);
+ eth->scratch_head[i] = NULL;
+ }
}
static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -3174,7 +3184,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3200,9 +3210,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3220,10 +3230,10 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3387,7 +3397,7 @@ static int mtk_open(struct net_device *dev)
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -3471,7 +3481,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -3893,7 +3903,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3947,9 +3957,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -5039,11 +5049,18 @@ static const struct mtk_soc_data mt2701_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5059,11 +5076,18 @@ static const struct mtk_soc_data mt7621_data = {
.offload_version = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5081,11 +5105,18 @@ static const struct mtk_soc_data mt7622_data = {
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5102,11 +5133,18 @@ static const struct mtk_soc_data mt7623_data = {
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5121,11 +5159,18 @@ static const struct mtk_soc_data mt7629_data = {
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5143,13 +5188,20 @@ static const struct mtk_soc_data mt7981_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5165,13 +5217,20 @@ static const struct mtk_soc_data mt7986_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5187,13 +5246,20 @@ static const struct mtk_soc_data mt7988_data = {
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5204,13 +5270,19 @@ static const struct mtk_soc_data rt5350_data = {
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 9ae3b8a71..a25c33b9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
-#define MTK_DMA_SIZE 512
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -327,8 +329,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +350,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1153,10 +1155,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1174,13 +1175,20 @@ struct mtk_soc_data {
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ u32 dma_size;
+ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
@@ -1261,7 +1269,7 @@ struct mtk_eth {
struct napi_struct rx_napi;
void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4957412ff..20768ef2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work)
bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
struct mlx5_core_dev *dev;
- unsigned long cb_timeout;
- struct semaphore *sem;
+ unsigned long timeout;
unsigned long flags;
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
+
dev = container_of(cmd, struct mlx5_core_dev, cmd);
- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
- complete(&ent->handling);
- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
- down(sem);
if (!ent->page_queue) {
+ if (down_timeout(&cmd->vars.sem, timeout)) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
+ mlx5_command_str(ent->op), ent->op);
+ if (ent->callback) {
+ ent->callback(-EBUSY, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ cmd_ent_put(ent);
+ } else {
+ ent->ret = -EBUSY;
+ complete(&ent->done);
+ }
+ complete(&ent->slotted);
+ return;
+ }
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
@@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work)
ent->ret = -EAGAIN;
complete(&ent->done);
}
- up(sem);
+ up(&cmd->vars.sem);
return;
}
} else {
+ down(&cmd->vars.pages_sem);
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
@@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
+ complete(&ent->slotted);
+
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;
- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
@@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
ent->ret = -ECANCELED;
goto out_err;
}
+
+ wait_for_completion(&ent->slotted);
+
if (cmd->mode == CMD_MODE_POLLING || ent->polling)
wait_for_completion(&ent->done);
else if (!wait_for_completion_timeout(&ent->done, timeout))
@@ -1157,6 +1176,9 @@ out_err:
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
+ } else if (err == -EBUSY) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout while waiting for command semaphore.\n",
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->polling = force_polling;
init_completion(&ent->handling);
+ init_completion(&ent->slotted);
if (!callback)
init_completion(&ent->done);
@@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
- if (err == -ETIMEDOUT || err == -ECANCELED)
+ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
goto out_free;
ds = ent->ts2 - ent->ts1;
@@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
dev = container_of(cmd, struct mlx5_core_dev, cmd);
eqe = data;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return NOTIFY_DONE;
+
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
return NOTIFY_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 06592b9f0..9240cfe25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+ * and xsk->chunk_size is limited to 65535 bytes.
+ */
+ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index caa34b9c1..33e32584b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,8 +102,14 @@ static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ struct udphdr *udphdr;
- udp_hdr(skb)->len = htons(payload_len);
+ if (skb->encapsulation)
+ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+ else
+ udphdr = udp_hdr(skb);
+
+ udphdr->len = htons(payload_len);
}
struct mlx5e_accel_tx_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 41a2543a5..e51b03d4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -750,8 +750,7 @@ err_fs:
err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 820646148..359050f0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
if (!x || !x->xso.offload_handle)
goto out_disable;
- if (xo->inner_ipproto) {
- /* Cannot support tunnel packet over IPsec tunnel mode
- * because we cannot offload three IP header csum
- */
- if (x->props.mode == XFRM_MODE_TUNNEL)
- goto out_disable;
-
- /* Only support UDP or TCP L4 checksum */
- if (xo->inner_ipproto != IPPROTO_UDP &&
- xo->inner_ipproto != IPPROTO_TCP)
- goto out_disable;
- }
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto &&
+ xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
return features;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 319930c04..981a3e058 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3790,7 +3790,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -4738,7 +4738,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4764,7 +4764,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -6058,7 +6057,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int _mlx5e_suspend(struct auxiliary_device *adev)
+static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6067,7 +6066,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5_core_dev *pos;
int i;
- if (!netif_device_present(netdev)) {
+ if (!pre_netdev_reg && !netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5_sd_for_each_dev(i, mdev, pos)
mlx5e_destroy_mdev_resources(pos);
@@ -6090,7 +6089,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
if (actual_adev)
- err = _mlx5e_suspend(actual_adev);
+ err = _mlx5e_suspend(actual_adev, false);
mlx5_sd_cleanup(mdev);
return err;
@@ -6157,7 +6156,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
return 0;
err_resume:
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, true);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6197,7 +6196,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, false);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e21a3b412..0964b16ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1b9bc32ef..c5ea1d1d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6d..ef5567487 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -833,7 +833,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
@@ -925,7 +925,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
static inline int
-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 844d3e3a6..e8caf12f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -2502,6 +2502,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
}
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+
+ return 0;
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -2526,13 +2536,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- goto err_reps;
- }
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_reps;
+ }
return 0;
@@ -3277,7 +3285,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -3290,13 +3298,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- mlx5_esw_offloads_rep_load(esw, rep->vport);
+ __esw_offloads_load_rep(esw, rep, REP_IB);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index e7faf7e73..6c7f2471f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ return -EACCES;
+ }
cond_resched();
} while (!time_after(jiffies, end));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index ad38e3182..a6329ca2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -248,6 +248,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ goto unlock;
+ }
msleep(20);
} while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+ return -EACCES;
+ }
msleep(100);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 69d482f7c..58a452d20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -720,6 +720,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev;
u8 mode;
#endif
+ bool roce_support;
int i;
for (i = 0; i < ldev->ports; i++)
@@ -746,6 +747,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
+ roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+ for (i = 1; i < ldev->ports; i++)
+ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+ return false;
+
return true;
}
@@ -814,7 +820,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb)
for (i = 0; i < ldev->ports; i++)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -913,8 +919,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++)
- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ for (i = 1; i < ldev->ports; i++) {
+ if (mlx5_get_roce_state(ldev->pf[i].dev))
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ }
} else if (shared_fdb) {
int i;
@@ -922,7 +930,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
}
@@ -933,7 +941,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 82889f305..571ea26ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
@@ -113,7 +113,7 @@ err_rescan_drivers:
err_add_devices:
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 101b3bb90..e12bc4cd8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- while (i--)
- while (j--)
+ do {
+ while (j--) {
+ idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ } while (i--);
goto destroy_fg;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index 6b774e0c2..d0b595ba6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
+ if (pci_channel_offline(dev->pdev)) {
+ ret = -EACCES;
+ goto pci_unlock;
+ }
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index dd5d186dc..f6deb5a3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
- /* Feature is currently implemented for PFs only */
- if (!mlx5_core_is_pf(dev))
- return false;
-
/* Honor the SW implementation limit */
if (host_buses > MLX5_SD_MAX_GROUP_SZ)
return false;
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
bool sdm;
int err;
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ /* Block on embedded CPU PFs */
+ if (mlx5_core_is_ecpf(dev))
+ return 0;
+
if (!MLX5_CAP_MCAM_REG(dev, mpir))
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 331ce47f5..459a836a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
+ else
+ mlx5_stop_health_poll(dev, boot);
+
return err;
}
@@ -1680,6 +1683,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
+ devl_register(devlink);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err) {
@@ -1693,27 +1698,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
goto query_hca_caps_err;
}
- devl_lock(devlink);
- devl_register(devlink);
-
err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto params_reg_err;
+ goto query_hca_caps_err;
}
devl_unlock(devlink);
return 0;
-params_reg_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
query_hca_caps_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ devl_unregister(devlink);
+ devl_unlock(devlink);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7ebe71280..b2986175d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
+ /* Peer devlink logic expects to work on unregistered devlink instance. */
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
err = mlx5_init_one_light(mdev);
else
@@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
- err = mlx5_core_peer_devlink_set(sf_dev, devlink);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
- goto peer_devlink_set_err;
- }
-
return 0;
-peer_devlink_set_err:
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
- else
- mlx5_uninit_one(sf_dev->mdev);
init_one_err:
+peer_devlink_set_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index af50ff9e5..ce49c9514 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -539,7 +539,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
if (!dst || dst->error)
goto out;
- rt6 = container_of(dst, struct rt6_info, dst);
+ rt6 = dst_rt6_info(dst);
dev = dst->dev;
*saddrp = fl6.saddr;
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 8a6ae171e..def932035 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1148,8 +1148,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
if (netdev->phydev)
phy_ethtool_get_wol(netdev->phydev, wol);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
+
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1164,7 +1168,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ /* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1185,10 +1221,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 75a988c0b..ecde3582e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -3111,6 +3111,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3568,7 +3579,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3580,10 +3591,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3679,7 +3689,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3703,6 +3713,7 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
@@ -3721,6 +3732,30 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
/* open netdev when netdev is at running state while resume.
* For instance, it is true when system wakesup after pm-suspend
* However, it is false when system wakes up after suspend GUI menu
@@ -3729,9 +3764,6 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e..3b2585a38 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -61,6 +61,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +228,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +315,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2635ef895..6695ed661 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -474,14 +474,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev,
cfg->source != HWTSTAMP_SOURCE_PHYLIB)
return -EOPNOTSUPP;
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
err = lan966x_ptp_setup_traps(port, cfg);
if (err)
return err;
if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
- if (!port->lan966x->ptp)
- return -EOPNOTSUPP;
-
err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
if (err) {
lan966x_ptp_del_traps(port);
@@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
-
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1257,6 +1257,8 @@ cleanup_ports:
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2729a2c5a..ca814fe8a 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -848,7 +848,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (!wait_for_completion_timeout(&ctx->comp_event,
- (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
+ (msecs_to_jiffies(hwc->hwc_timeout)))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
err = -ETIMEDOUT;
goto out;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f0c6cdc3..0cd819bc4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -304,10 +304,8 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 5dba6d2d6..2427610f4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
goto out_xdp_abort;
}
+ buf_info->page = NULL;
stats->xdp_tx++;
/* the Tx completion will free the buffers */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893..8159b4c31 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1206,7 +1206,6 @@ out:
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
- char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
@@ -1215,11 +1214,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
- cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
+ 0, 0, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ hwfn->abs_pf_id);
- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e90..ad06da0fd 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "INTR : %lx\n",
+ qca->intr);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88..8f7ce6b51 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!test_bit(SPI_INTR, &qca->intr) &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->intr,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->intr);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->intr);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352..8f4808695 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long intr;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
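The qca_spi changes above replace the intr_req/intr_svc counter pair with a single bit in an unsigned long, so the hard-IRQ producer and the SPI kthread consumer synchronize through atomic bitops instead of comparing two plain counters. A condensed sketch of the resulting pattern, using the fields the driver now has:

	/* IRQ context: note that an interrupt is pending and kick the thread. */
	set_bit(SPI_INTR, &qca->intr);
	if (qca->spi_thread)
		wake_up_process(qca->spi_thread);

	/* Thread context: consume the pending event at most once. */
	if (test_and_clear_bit(SPI_INTR, &qca->intr))
		start_spi_intr_handling(qca, &intr_cause);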
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0fc5fe564..7df2017c9 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4335,11 +4335,11 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd_first, *txd_last;
bool stop_queue, door_bell;
+ unsigned int frags;
u32 opts[2];
if (unlikely(!rtl_tx_slots_avail(tp))) {
@@ -4362,6 +4362,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
+ frags = skb_shinfo(skb)->nr_frags;
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4655,10 +4656,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- if (napi_schedule_prep(&tp->napi)) {
- rtl_irq_disable(tp);
- __napi_schedule(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 46eee747c..45ef5ac07 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -156,8 +156,8 @@ static inline void mcf_outsw(void *a, unsigned char *p, int l)
writew(*wp++, a);
}
-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_inw(a, r) ioread16be((a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fd..65d7370b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -93,6 +93,7 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
};
@@ -276,6 +277,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -845,6 +847,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dddcaa922..64b21c83e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -261,6 +261,8 @@ struct stmmac_priv {
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
+ /* Protect est parameters */
+ struct mutex est_lock;
struct dma_features dma_cap;
struct stmmac_counters mmc;
int hw_cap_support;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757d..5ef52ef26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e04830a3a..0c5aab6dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -70,11 +70,11 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
/* If EST is enabled, disable it before adjusting the PTP time. */
if (priv->plat->est && priv->plat->est->enable) {
est_rst = true;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,7 +87,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
ktime_t current_time_ns, basetime;
u64 cycle_time;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time.tv_nsec = priv->plat->est->btr_reserve[0];
@@ -104,7 +104,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->plat->est->enable = true;
ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index cce007199..7d240a2b5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
@@ -1004,17 +1005,19 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (!plat->est)
return -ENOMEM;
- mutex_init(&priv->plat->est->lock);
+ mutex_init(&priv->est_lock);
} else {
+ mutex_lock(&priv->est_lock);
memset(plat->est, 0, sizeof(*plat->est));
+ mutex_unlock(&priv->est_lock);
}
size = qopt->num_entries;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -1045,7 +1048,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
@@ -1068,7 +1071,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
tc_taprio_map_maxsdu_txq(priv, qopt);
if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
return -EOPNOTSUPP;
}
@@ -1079,7 +1082,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -1096,7 +1099,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
disable:
if (priv->plat->est) {
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv, priv->plat->est,
priv->plat->clk_ptp_rate);
@@ -1105,7 +1108,7 @@ disable:
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
}
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
priv->plat->fpe_cfg->enable = false;
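In the tc_setup_cbs() hunk above, the port transmit rate is no longer looked up from the negotiated link speed but derived from the CBS parameters themselves: per IEEE 802.1Q credit-based shaping, sendSlope = idleSlope - portTransmitRate, so idleSlope - sendSlope recovers the port rate regardless of what the link currently reports. A worked example with made-up numbers:

	/* Reserve 20 Mbit/s on a 1 Gb/s port (all rates in kbit/s):
	 *   idleslope = 20000, sendslope = 20000 - 1000000 = -980000
	 *   port_transmit_rate_kbps = 20000 - (-980000)    = 1000000
	 *   div_s64(1000000, 1000) = 1000 -> SPEED_1000 -> ptr = 8
	 *   idle_slope register value = 20000 * 1024 * 8 / 1000000 = 163
	 */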
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9bd1df830..d3a2fbb14 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gem_poll_controller(struct net_device *dev)
-{
- struct gem *gp = netdev_priv(dev);
-
- disable_irq(gp->pdev->irq);
- gem_interrupt(gp->pdev->irq, dev);
- enable_irq(gp->pdev->irq);
-}
-#endif
-
static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gem_poll_controller,
-#endif
};
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 6df53ab17..902a27177 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -360,7 +360,7 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index b69af69a1..61ebac7ba 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -2152,7 +2152,12 @@ static int prueth_probe(struct platform_device *pdev)
prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII0 PHY, error -%d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
}
@@ -2164,7 +2169,12 @@ static int prueth_probe(struct platform_device *pdev)
}
prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII1 PHY, error %d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 945c13d1a..c09a6f744 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1958,6 +1958,8 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
+ bitmap_zero(wx->state, WX_STATE_NBITS);
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6fae161cb..07ba3a270 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -2690,15 +2690,63 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
wx->rss_enabled = false;
}
- if (changed &
- (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX))
+ netdev->features = features;
+
+ if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ wx->do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
- return 1;
+ return 0;
}
EXPORT_SYMBOL(wx_set_features);
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct wx *wx = netdev_priv(netdev);
+
+ if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
+ if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_INSERTION_FEATURES) {
+ if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
+ (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_INSERTION_FEATURES;
+ features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_FILTERING_FEATURES) {
+ if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
+ (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(wx_fix_features);
+
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
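wx_fix_features() above hooks ndo_fix_features, which the core invokes before ndo_set_features whenever the requested feature set changes, letting the driver keep the 802.1Q/802.1ad stripping, insertion and filtering bits paired instead of applying half of a request. Roughly the caller-side ordering (a sketch, not the exact core code):

	netdev_features_t wanted = netdev->wanted_features;
	int err;

	wanted = wx_fix_features(netdev, wanted);	/* keep CTAG/STAG bits paired */
	err = wx_set_features(netdev, wanted);		/* now returns 0 on success */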
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index ec909e876..c41b29ea8 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -30,6 +30,8 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1fdeb464d..5aaf7b1fa 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -982,8 +982,13 @@ struct wx_hw_stats {
u64 qmprc;
};
+enum wx_state {
+ WX_STATE_RESETTING,
+ WX_STATE_NBITS, /* must be last */
+};
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ DECLARE_BITMAP(state, WX_STATE_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1071,6 +1076,8 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+
+ void (*do_reset)(struct net_device *netdev);
};
#define WX_INTR_ALL (~0ULL)
@@ -1131,4 +1138,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
return container_of(config, struct wx, phylink_config);
}
+static inline int wx_set_state_reset(struct wx *wx)
+{
+ u8 timeout = 50;
+
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
#endif /* _WX_TYPE_H_ */
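wx_set_state_reset() above serializes reconfiguration against a concurrent reset by polling the WX_STATE_RESETTING bit for roughly 50-100 ms; a caller that acquires the bit must clear it when done, as the ngbe/txgbe set_ringparam paths below do. A condensed usage sketch:

	int err = wx_set_state_reset(wx);

	if (err)
		return err;		/* another reset is still in flight */

	/* ... bring the device down, resize the rings, bring it back up ... */

	clear_bit(WX_STATE_RESETTING, wx->state);
	return 0;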
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 786a652ae..46a5a3e95 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
ngbe_down(wx);
@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx_configure(wx);
ngbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int ngbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index fdd6b4f70..e894e01d0 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -499,6 +499,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index db675512c..31fde3fa7 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -19,7 +19,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -31,6 +31,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -39,14 +43,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
txgbe_down(wx);
@@ -55,7 +61,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
txgbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int txgbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index bd4624d14..8c7a74981 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -269,6 +269,8 @@ static int txgbe_sw_init(struct wx *wx)
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->do_reset = txgbe_do_reset;
+
return 0;
}
@@ -421,6 +423,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+static void txgbe_reinit_locked(struct wx *wx)
+{
+ int err = 0;
+
+ netif_trans_update(wx->netdev);
+
+ err = wx_set_state_reset(wx);
+ if (err) {
+ wx_err(wx, "wait device reset timeout\n");
+ return;
+ }
+
+ txgbe_down(wx);
+ txgbe_up(wx);
+
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ txgbe_reinit_locked(wx);
+ else
+ txgbe_reset(wx);
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -428,6 +458,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 1b4ff50d5..f434a7865 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -134,6 +134,7 @@ extern char txgbe_driver_name[];
void txgbe_down(struct wx *wx);
void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6c2835086..76053d2c1 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -811,6 +811,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@@ -822,7 +823,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -903,7 +904,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
@@ -919,6 +920,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@@ -929,7 +931,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -991,7 +993,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2d5b021b4..fef4eff77 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- err = ip_local_out(net, skb->sk, skb);
+ err = ip_local_out(net, NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ err = ip6_local_out(dev_net(dev), NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 8330bc0bc..d40580903 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -292,7 +292,8 @@ static int nsim_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nsim->peer);
- iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+ READ_ONCE(dev->ifindex);
rcu_read_unlock();
return iflink;
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index a4d2e76a8..16789cd44 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
skb_scrub_packet(skb, xnet);
skb->priority = 0;
nf_skip_egress(skb, true);
+ skb_reset_mac_header(skb);
}
static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+ eth_skb_pkt_type(skb, peer);
skb->dev = peer;
entry = rcu_dereference(nk->active);
if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
switch (ret) {
case NETKIT_NEXT:
case NETKIT_PASS:
- skb->protocol = eth_type_trans(skb, skb->dev);
+ eth_skb_pull_mac(skb);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
/* Nothing to do, we receive whatever gets pushed to us! */
}
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+ struct netkit *nk = netkit_priv(dev);
+
+ if (nk->mode != NETKIT_L2)
+ return -EOPNOTSUPP;
+
+ return eth_mac_addr(dev, sa);
+}
+
static void netkit_set_headroom(struct net_device *dev, int headroom)
{
struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_start_xmit = netkit_xmit,
.ndo_set_rx_mode = netkit_set_multicast,
.ndo_set_rx_headroom = netkit_set_headroom,
+ .ndo_set_mac_address = netkit_set_macaddr,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
if (!attr)
return 0;
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Setting Ethernet address is not supported");
- return -EOPNOTSUPP;
+ if (nla_len(attr) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(attr)))
+ return -EADDRNOTAVAIL;
+ return 0;
}
static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
strscpy(ifname, "nk%d", IFNAMSIZ);
ifname_assign_type = NET_NAME_ENUM;
}
+ if (mode != NETKIT_L2 &&
+ (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+ return -EOPNOTSUPP;
net = rtnl_link_get_net(src_net, tbp);
if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
netif_inherit_tso_max(peer, dev);
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
if (err < 0)
goto err_configure_peer;
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 326c9770a..c706429b2 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -17,6 +17,11 @@
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
+#define DP83TG720S_LPS_CFG3 0x18c
+/* Power modes are documented as bit fields but used as values */
+/* Power Mode 0 is Normal mode */
+#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
@@ -31,11 +36,20 @@
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
+ int ret;
+
/* Autoneg is not supported and this PHY supports only one speed.
* We need to care only about master/slave configuration if it was
* changed by user.
*/
- return genphy_c45_pma_baset1_setup_master_slave(phydev);
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ /* Re-read role configuration to make changes visible even if
+ * the link is in administrative down state.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static int dp83tg720_read_status(struct phy_device *phydev)
@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
return ret;
/* After HW reset we need to restore master/slave configuration.
+ * genphy_c45_pma_baset1_read_master_slave() call will be done
+ * by the dp83tg720_config_aneg() function.
*/
ret = dp83tg720_config_aneg(phydev);
if (ret)
@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
*/
usleep_range(1000, 2000);
- if (phy_interface_is_rgmii(phydev))
- return dp83tg720_config_rgmii_delay(phydev);
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = dp83tg720_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* In case the PHY is bootstrapped in managed mode, we need to
+ * wake it.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
+ DP83TG720S_LPS_CFG3_PWR_MODE_0);
+ if (ret)
+ return ret;
- return 0;
+ /* Make role configuration visible for ethtool on init and after
+ * reset.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static struct phy_driver dp83tg720_driver[] = {
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ddb50a0e2..4b22bb639 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -785,6 +785,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
+ /* Chip can be powered down by the bootstrap code. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (ret & BMCR_PDOWN) {
+ ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 2000);
+ }
+
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
@@ -1858,7 +1869,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
{0x1c, 0x20, 0xeeee},
};
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
{
int err;
int i;
@@ -1886,16 +1897,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ err = genphy_restart_aneg(phydev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ err = ksz9477_phy_errata(phydev);
+ if (err)
+ return err;
+ }
+
/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
phydev->eee_broken_modes = -1;
- err = genphy_restart_aneg(phydev);
- if (err)
- return err;
-
return kszphy_config_init(phydev);
}
@@ -2004,6 +2029,71 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+static int ksz9477_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* No need to initialize registers if not powered down. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ ret = ksz9477_phy_errata(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* This function can be called twice when the Ethernet device is on. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+ usleep_range(1000, 2000);
+
+ /* Re-program the value after chip is reset. */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -3516,7 +3606,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
err = phy_read(phydev, LAN8814_INTS);
- if (err)
+ if (err < 0)
return err;
/* Enable / disable interrupts. It is OK to enable PTP interrupt
@@ -3532,6 +3622,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
return err;
err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ /* Getting a positive value doesn't mean that it is an error, it
+ * just indicates what was the status. Therefore make sure to
+ * clear the value and say that there is no error.
+ */
+ err = 0;
}
return err;
@@ -4676,7 +4774,8 @@ static int lan8841_suspend(struct phy_device *phydev)
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+ if (ptp_priv->ptp_clock)
+ ptp_cancel_worker_sync(ptp_priv->ptp_clock);
return genphy_suspend(phydev);
}
@@ -4813,10 +4912,11 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -4970,7 +5070,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = ksz9477_resume,
.get_features = ksz9477_get_features,
} };
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index b2d36a3a9..e5f8ac4b4 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -107,6 +107,7 @@ struct gpy_priv {
u8 fw_major;
u8 fw_minor;
+ u32 wolopts;
/* It takes 3 seconds to fully switch out of loopback mode before
* it can safely re-enter loopback mode. Record the time when
@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
}
#endif
+static int gpy_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
{
struct gpy_priv *priv = phydev->priv;
@@ -262,16 +272,8 @@ out:
static int gpy_config_init(struct phy_device *phydev)
{
- int ret;
-
- /* Mask all interrupts */
- ret = phy_write(phydev, PHY_IMASK, 0);
- if (ret)
- return ret;
-
- /* Clear all pending interrupts */
- ret = phy_read(phydev, PHY_ISTAT);
- return ret < 0 ? ret : 0;
+ /* Nothing to configure. Configuration Requirement Placeholder */
+ return 0;
}
static int gpy21x_config_init(struct phy_device *phydev)
@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
static int gpy_config_intr(struct phy_device *phydev)
{
+ struct gpy_priv *priv = phydev->priv;
u16 mask = 0;
+ int ret;
+
+ ret = gpy_ack_interrupt(phydev);
+ if (ret)
+ return ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
mask = PHY_IMASK_MASK;
+ if (priv->wolopts & WAKE_MAGIC)
+ mask |= PHY_IMASK_WOL;
+
+ if (priv->wolopts & WAKE_PHY)
+ mask |= PHY_IMASK_LSTC;
+
return phy_write(phydev, PHY_IMASK, mask);
}
@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct net_device *attach_dev = phydev->attached_dev;
+ struct gpy_priv *priv = phydev->priv;
int ret;
if (wol->wolopts & WAKE_MAGIC) {
@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
ret = phy_read(phydev, PHY_ISTAT);
if (ret < 0)
return ret;
+
+ priv->wolopts |= WAKE_MAGIC;
} else {
/* Disable magic packet matching */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
WOL_EN);
if (ret < 0)
return ret;
+
+ /* Disable the WOL interrupt */
+ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ priv->wolopts &= ~WAKE_MAGIC;
}
if (wol->wolopts & WAKE_PHY) {
@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
phy_trigger_machine(phydev);
+ priv->wolopts |= WAKE_PHY;
return 0;
}
+ priv->wolopts &= ~WAKE_PHY;
/* Disable the link state change interrupt */
return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
}
@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
static void gpy_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int ret;
+ struct gpy_priv *priv = phydev->priv;
wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
- if (ret & WOL_EN)
- wol->wolopts |= WAKE_MAGIC;
-
- ret = phy_read(phydev, PHY_IMASK);
- if (ret & PHY_IMASK_LSTC)
- wol->wolopts |= WAKE_PHY;
+ wol->wolopts = priv->wolopts;
}
static int gpy_loopback(struct phy_device *phydev, bool enable)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index f75c9eb39..52b71c7e7 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_10gt(struct sfp *sfp)
+static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
{
- sfp_fixup_10gbaset_30m(sfp);
sfp_fixup_rollball(sfp);
- /* The RollBall fixup is not enough for FS modules, the AQR chip inside
+ /* The RollBall fixup is not enough for FS modules, the PHY chip inside
* them does not return 0xffff for PHY ID registers in all MMDs while
* initializing. They need a 4 second wait before accessing the PHY.
*/
sfp->module_t_wait = msecs_to_jiffies(4000);
}
+static void sfp_fixup_fs_10gt(struct sfp *sfp)
+{
+ sfp_fixup_10gbaset_30m(sfp);
+ sfp_fixup_fs_2_5gt(sfp);
+}
+
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
{
/* Ignore the TX_FAULT and LOS signals on this module.
@@ -472,6 +477,10 @@ static const struct sfp_quirk sfp_quirks[] = {
// Rollball protocol to talk to the PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ // Fiberstore SFP-2.5G-T uses Rollball protocol to talk to the PHY and
+ // needs 4 sec wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
// NRZ in their EEPROM
SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
@@ -488,9 +497,6 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
sfp_fixup_ignore_tx_fault),
- // FS 2.5G Base-T
- SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
-
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
// 2500MBd NRZ in their EEPROM
SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
@@ -502,6 +508,9 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
+ SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
+
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
@@ -2418,8 +2427,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
- if (sfp->sm_mod_state > SFP_MOD_PROBE)
- sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 7b8afa589..284375f66 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
continue;
}
- /* Clone SKB */
- new_skb = skb_clone(skb, GFP_ATOMIC);
+ new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!new_skb)
goto err;
- new_skb->len = pkt_len;
+ skb_put(new_skb, pkt_len);
+ memcpy(new_skb->data, skb->data, pkt_len);
skb_pull(new_skb, AQ_RX_HW_PAD);
- skb_set_tail_pointer(new_skb, new_skb->len);
- new_skb->truesize = SKB_TRUESIZE(new_skb->len);
if (aqc111_data->rx_checksum)
aqc111_rx_checksum(new_skb, pkt_desc);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 56ede5fa0..94d8b647f 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -174,7 +174,6 @@ struct ax88179_data {
u32 wol_supported;
u32 wolopts;
u8 disconnecting;
- u8 initialized;
};
struct ax88179_int_data {
@@ -1676,12 +1675,21 @@ static int ax88179_reset(struct usbnet *dev)
static int ax88179_net_reset(struct usbnet *dev)
{
- struct ax88179_data *ax179_data = dev->driver_priv;
+ u16 tmp16;
- if (ax179_data->initialized)
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
+ 2, &tmp16);
+ if (tmp16) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
+ tmp16 |= AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+ } else {
ax88179_reset(dev);
- else
- ax179_data->initialized = 1;
+ }
return 0;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 97afd7335..01a3b2417 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
- short lpa, bmcr;
+ short lpa = 0;
+ short bmcr = 0;
u32 supported;
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2fa46baa5..8e82184be 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
static int smsc95xx_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
- u32 read_buf, write_buf, burst_cap;
+ u32 read_buf, burst_cap;
int ret = 0, timeout;
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
+ if (ret < 0)
+ return ret;
/* Configure GPIO pins as LED outputs */
- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
- LED_GPIO_CFG_FDX_LED;
- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
+ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+ LED_GPIO_CFG_FDX_LED;
+ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
if (ret < 0)
return ret;
@@ -1810,9 +1813,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
- skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
+ u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
+
+ skb->csum = (__force __wsum)get_unaligned(csum_ptr);
skb->ip_summed = CHECKSUM_COMPLETE;
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - 2); /* remove csum */
}
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
@@ -1870,25 +1875,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(skb);
skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
if (unlikely(!ax_skb)) {
netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
- ax_skb->len = size;
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, packet, size);
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(ax_skb);
skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
- ax_skb->truesize = size + sizeof(struct sk_buff);
usbnet_skb_return(dev, ax_skb);
}
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 3164451e1..0a662e42e 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -421,19 +421,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 3);
skb->len = len;
skb_set_tail_pointer(skb, len);
- skb->truesize = len + sizeof(struct sk_buff);
return 2;
}
- /* skb_clone is used for address align */
- sr_skb = skb_clone(skb, GFP_ATOMIC);
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!sr_skb)
return 0;
- sr_skb->len = len;
- sr_skb->data = skb->data + 3;
- skb_set_tail_pointer(sr_skb, len);
- sr_skb->truesize = len + sizeof(struct sk_buff);
+ skb_put(sr_skb, len);
+ memcpy(sr_skb->data, skb->data + 3, len);
usbnet_skb_return(dev, sr_skb);
skb_pull(skb, len + SR_RX_OVERHEAD);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 115c3c541..290bec292 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1225,6 +1225,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ goto err_xdp;
+
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1542,6 +1546,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (unlikely(hdr->hdr.gso_type))
return NULL;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return NULL;
+
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
* with headroom may add hole in truesize, which
* make their length exceed PAGE_SIZE. So we disabled the
@@ -1808,6 +1816,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
struct net_device *dev = vi->dev;
struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
+ u8 flags;
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
@@ -1816,6 +1825,15 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
return;
}
+ /* 1. Save the flags early, as the XDP program might overwrite them.
+ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
+ * stay valid after XDP processing.
+ * 2. XDP doesn't work with partially checksummed packets (refer to
+ * virtnet_xdp_set()), so packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
+ */
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
+
if (vi->mergeable_rx_bufs)
skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
stats);
@@ -1831,7 +1849,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
@@ -3589,10 +3607,10 @@ static void virtnet_rx_dim_work(struct work_struct *work)
if (err)
pr_debug("%s: Failed to send dim parameters on rxq%d\n",
dev->name, qnum);
- dim->state = DIM_START_MEASURE;
}
}
+ dim->state = DIM_START_MEASURE;
rtnl_unlock();
}
@@ -4698,8 +4716,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
- dev->features |= NETIF_F_RXCSUM;
+
+ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
+ * need to calculate checksums for partially checksummed packets,
+ * as they're considered valid by the upper layer.
+ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
+ * receives fully checksummed packets. The device may assist in
+ * validating these packets' checksums, so the driver won't have to.
+ */
+ dev->features |= NETIF_F_RXCSUM;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_GRO_HW;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 057886479..beebe09eb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
- rq->data_ring.desc_size = 0;
}
+ rq->data_ring.desc_size = 0;
}
}
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bb95ce43c..c3af9ad5e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -653,7 +653,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
skb->dev = dev;
rcu_read_lock();
- nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev)
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = (struct rtable *)dst;
+ struct rtable *rt = dst_rtable(dst);
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 3a9148fb1..b2d054f59 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
struct vxlan_fdb *f;
u32 ifindex = 0;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(src_mac))
+ return true;
+
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1615,10 +1619,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
- /* Ignore packets from invalid src-address */
- if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
- return false;
-
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -2528,7 +2528,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (!info) {
- u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6,
dst_port, ifindex, vni,
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 815f8f599..5a55db349 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf,
struct ar5523 *ar;
int error = -ENOMEM;
+ static const u8 bulk_ep_addr[] = {
+ AR5523_CMD_TX_PIPE | USB_DIR_OUT,
+ AR5523_DATA_TX_PIPE | USB_DIR_OUT,
+ AR5523_CMD_RX_PIPE | USB_DIR_IN,
+ AR5523_DATA_RX_PIPE | USB_DIR_IN,
+ 0};
+
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
+ dev_err(&dev->dev,
+ "Could not find all expected endpoints\n");
+ error = -ENODEV;
+ goto out;
+ }
+
/*
* Load firmware if the device requires it. This will return
* -ENXIO on success and we'll get called back after the usb
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index f02a308a9..34654f710 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -171,8 +171,10 @@ struct ath_common {
unsigned int clockrate;
spinlock_t cc_lock;
- struct ath_cycle_counters cc_ani;
- struct ath_cycle_counters cc_survey;
+ struct_group(cc,
+ struct ath_cycle_counters cc_ani;
+ struct ath_cycle_counters cc_survey;
+ );
struct ath_regulatory regulatory;
struct ath_regulatory reg_world_copy;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index e6ea884ca..4f385f4a8 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -45,6 +45,7 @@ config ATH10K_SNOC
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
select QCOM_SCM
select QCOM_QMI_HELPERS
help
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 9ce6f49ab..fa5e2e651 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -720,6 +720,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.max_spatial_stream = 4,
.fw = {
.dir = WCN3990_HW_1_0_FW_DIR,
+ .board = WCN3990_HW_1_0_BOARD_DATA_FILE,
+ .board_size = WCN3990_BOARD_DATA_SZ,
+ .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.rx_desc_ops = &wcn3990_rx_desc_ops,
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 394bf3c32..0f6de862c 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
out:
mutex_unlock(&ar->conf_mutex);
- return count;
+ return ret ?: count;
}
static const struct file_operations fops_peer_debug_trigger = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 93c073091..9aa2d821b 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -133,6 +133,7 @@ enum qca9377_chip_id_rev {
/* WCN3990 1.0 definitions */
#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0"
+#define WCN3990_HW_1_0_BOARD_DATA_FILE "board.bin"
#define ATH10K_FW_FILE_BASE "firmware"
#define ATH10K_FW_API_MAX 6
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index ec556bb88..ba37e6c7c 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -491,4 +491,7 @@ struct host_interest {
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0
+#define WCN3990_BOARD_DATA_SZ 26328
+#define WCN3990_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2e9661f4b..80d255aaf 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- unsigned long time_left;
+ unsigned long time_left, i;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left)
- return -ETIMEDOUT;
+ if (!time_left) {
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even though the buffer
+ * was completed. A PCIe sniffer shows that this is
+ * because the corresponding CE ring doesn't fire it.
+ * Work around this by polling the CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(ar, i, 1);
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c78bce19b..5d07585e5 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -595,7 +595,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = true,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 3,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9f4bf41a3..790277f54 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
enable_ps = arvif->ps;
- if (!arvif->is_started) {
- /* mac80211 can update vif powersave state while disconnected.
- * Firmware doesn't behave nicely and consumes more power than
- * necessary if PS is disabled on a non-started vdev. Hence
- * force-enable PS for non-running vdevs.
- */
- psmode = WMI_STA_PS_MODE_ENABLED;
- } else if (enable_ps) {
+ if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -7896,8 +7889,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct cur_regulatory_info *reg_info;
- enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7908,17 +7899,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
ctx->def.chan->band == NL80211_BAND_6GHZ &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- reg_info = &ab->reg_info_store[ar->pdev_idx];
- power_type = vif->bss_conf.power_type;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
- if (power_type == IEEE80211_REG_UNSET_AP) {
- ret = -EINVAL;
- goto out;
- }
-
- ath11k_reg_handle_chan_list(ab, reg_info, power_type);
arvif->chanctx = *ctx;
ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
}
@@ -9532,6 +9512,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ enum ieee80211_ap_reg_power power_type;
+ struct cur_regulatory_info *reg_info;
struct ath11k_peer *peer;
int ret = 0;
@@ -9611,6 +9593,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
+
+ if (!ret &&
+ ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->chanctx.def.chan &&
+ arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+ reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_warn(ar->ab, "invalid power type %d\n",
+ power_type);
+ ret = -EINVAL;
+ } else {
+ ret = ath11k_reg_handle_chan_list(ar->ab,
+ reg_info,
+ power_type);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to handle chan list with power type %d\n",
+ power_type);
+ }
+ }
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 391b6fb2b..bff4598de 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -1132,7 +1132,6 @@ static void ath12k_core_reset(struct work_struct *work)
ATH12K_RECOVER_START_TIMEOUT_HZ);
ath12k_hif_power_down(ab);
- ath12k_qmi_free_resource(ab);
ath12k_hif_power_up(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 52a5fb8b0..82ef4d4da 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -6286,14 +6286,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
enum nl80211_band band,
enum nl80211_iftype type)
{
- struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = NULL;
enum wmi_phy_mode down_mode;
+ int n = ar->mac.sbands[band].n_iftype_data;
+ int i;
+ struct ieee80211_sband_iftype_data *data;
if (mode < MODE_11BE_EHT20)
return mode;
- eht_cap = &ar->mac.iftype[band][type].eht_cap;
- if (eht_cap->has_eht)
+ data = ar->mac.iftype[band];
+ for (i = 0; i < n; i++) {
+ if (data[i].types_mask & BIT(type)) {
+ eht_cap = &data[i].eht_cap;
+ break;
+ }
+ }
+
+ if (eht_cap && eht_cap->has_eht)
return mode;
switch (mode) {
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 92845ffff..e8eb99638 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -2325,8 +2325,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
+
dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].v.addr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].v.addr = NULL;
@@ -2352,6 +2353,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
chunk->v.addr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
@@ -2370,6 +2385,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
chunk->type, chunk->size);
return -ENOMEM;
}
+
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2666,6 +2685,19 @@ out:
return ret;
}
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -2675,10 +2707,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
size_t m3_len;
int ret;
- if (m3_mem->vaddr)
- /* m3 firmware buffer is already available in the DMA buffer */
- return 0;
-
if (ab->fw.m3_data && ab->fw.m3_len > 0) {
/* firmware-N.bin had a m3 firmware file so use that */
m3_data = ab->fw.m3_data;
@@ -2700,6 +2728,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
m3_len = fw->size;
}
+ /* In recovery/resume cases, the M3 buffer is not freed; try to reuse it */
+ if (m3_mem->vaddr) {
+ if (m3_mem->size >= m3_len)
+ goto skip_m3_alloc;
+
+ /* Old buffer is too small, free and reallocate */
+ ath12k_qmi_m3_free(ab);
+ }
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
m3_len, &m3_mem->paddr,
GFP_KERNEL);
@@ -2710,6 +2747,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
goto out;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, m3_data, m3_len);
m3_mem->size = m3_len;
@@ -2721,19 +2759,6 @@ out:
return ret;
}
-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
-{
- struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
-
- if (!m3_mem->vaddr)
- return;
-
- dma_free_coherent(ab->dev, m3_mem->size,
- m3_mem->vaddr, m3_mem->paddr);
- m3_mem->vaddr = NULL;
- m3_mem->size = 0;
-}
-
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -3178,6 +3203,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_fw_ready_cb,
},
+
+ /* end of list */
+ {},
};
static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 6ee33c985..f34263d4b 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
union {
void __iomem *ioaddr;
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9d69a1769..34de3d16e 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -1900,7 +1900,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a2943aaec..01173aac3 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
if (power_mode != ATH9K_PM_AWAKE) {
spin_lock(&common->cc_lock);
ath_hw_cycle_counters_update(common);
- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
- memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ memset(&common->cc, 0, sizeof(common->cc));
spin_unlock(&common->cc_lock);
}
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e902ca80e..0226c31a6 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
* carl9170_tx_fill_rateinfo() has filled the rate information
* before we get to this point.
*/
- memset_after(&txinfo->status, 0, rates);
+ memset(&txinfo->pad, 0, sizeof(txinfo->pad));
+ memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c4edf8355..a3e03580c 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
ar->usb_ep_cmd_is_bulk = true;
}
+ /* Verify that all expected endpoints are present */
+ if (ar->usb_ep_cmd_is_bulk) {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ } else {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ }
+
+ if (err) {
+ carl9170_free(ar);
+ return err;
+ }
+
usb_set_intfdata(intf, ar);
SET_IEEE80211_DEV(ar->hw, &intf->dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index d7fb88bb6..06698a714 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1675,6 +1675,15 @@ struct brcmf_random_seed_footer {
#define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de
#define BRCMF_RANDOM_SEED_LENGTH 0x100
+static noinline_for_stack void
+brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
+{
+ u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
+
+ get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
+ memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
+}
+
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
const struct firmware *fw, void *nvram,
u32 nvram_len)
@@ -1717,7 +1726,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
.length = cpu_to_le32(rand_len),
.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
};
- void *randbuf;
/* Some Apple chips/firmwares expect a buffer of random
* data to be present before NVRAM
@@ -1729,10 +1737,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
sizeof(footer));
address -= rand_len;
- randbuf = kzalloc(rand_len, GFP_KERNEL);
- get_random_bytes(randbuf, rand_len);
- memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
- kfree(randbuf);
+ brcmf_pcie_provide_random_bytes(devinfo, address);
}
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 0bf38243f..ce18ef9d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 {
__le32 timer_period;
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will change to
+ * default BIOS values. Relevant if setMode is IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * @flags: reduce power flags.
+ * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA.
+ * Not in use.
+ */
+struct iwl_dev_tx_power_cmd_v8 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+ __le32 tpc_vlp_backoff_level;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
@@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 {
* @v4: version 4 part of the command
* @v5: version 5 part of the command
* @v6: version 6 part of the command
+ * @v7: version 7 part of the command
+ * @v8: version 8 part of the command
*/
struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_common common;
@@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
struct iwl_dev_tx_power_cmd_v7 v7;
+ struct iwl_dev_tx_power_cmd_v8 v8;
};
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 1b7254569..6c27ef2f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1821,8 +1821,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_dbg_tlv_free(drv->trans);
#endif
+ iwl_dbg_tlv_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 535edb51d..5416e4118 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -259,7 +259,6 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
lockdep_assert_held(&mvm->mutex);
@@ -267,37 +266,22 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
return;
/* Done already */
- if (mvmvif->bt_coex_esr_disabled == !enable)
+ if ((mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX) == !enable)
return;
- mvmvif->bt_coex_esr_disabled = !enable;
-
- /* Nothing to do */
- if (mvmvif->esr_active == enable)
- return;
-
- if (enable) {
- /* Try to re-enable eSR*/
- iwl_mvm_mld_select_links(mvm, vif, false);
- return;
- }
-
- /*
- * Find the primary link, as we want to switch to it and drop the
- * secondary one.
- */
- link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
- WARN_ON(link_id < 0);
+ if (enable)
+ mvmvif->esr_disable_reason &= ~IWL_MVM_ESR_DISABLE_COEX;
+ else
+ mvmvif->esr_disable_reason |= IWL_MVM_ESR_DISABLE_COEX;
- ieee80211_set_active_links_async(vif,
- vif->active_links & BIT(link_id));
+ iwl_mvm_recalc_esr(mvm, vif);
}
/*
* This function receives the LB link id and checks if eSR should be
* enabled or disabled (due to BT coex)
*/
-bool
+static bool
iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int link_id, int primary_link)
@@ -338,7 +322,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
if (!link_rssi)
wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- else if (!mvmvif->bt_coex_esr_disabled)
+ else if (!(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX))
/* RSSI needs to get really low to disable eSR... */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
@@ -354,9 +338,9 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
{
unsigned long usable_links = ieee80211_vif_usable_links(vif);
int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
@@ -418,7 +402,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
return;
}
- iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e1c2b7fc9..855267ea6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
- __le32 *dump_data = mfu_dump_notif->data;
- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
- int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
-
- for (i = 0; i < n_words; i++)
- IWL_DEBUG_INFO(mvm,
- "MFUART assert dump, dword %u: 0x%08x\n",
- le16_to_cpu(mfu_dump_notif->index_num) *
- n_words + i,
- le32_to_cpu(dump_data[i]));
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -894,13 +884,15 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 7) {
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
+ if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
} else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8f4b063d6..5f6b16d3f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1399,7 +1399,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 7)
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
+ else if (cmd_ver == 7)
len = sizeof(cmd.v7);
else if (cmd_ver == 6)
len = sizeof(cmd.v6);
@@ -1564,6 +1566,17 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1574,6 +1587,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* the first link always points to the default one */
@@ -1651,15 +1666,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
- vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
-
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -1697,6 +1707,8 @@ out:
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
/*
* Flush the ROC worker which will flush the OFFCHANNEL queue.
@@ -1705,6 +1717,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
*/
flush_work(&mvm->roc_done_wk);
}
+
+ cancel_delayed_work_sync(&mvmvif->csa_work);
}
/* This function is doing the common part of removing the interface for
@@ -3792,6 +3806,24 @@ out:
return callbacks->update_sta(mvm, vif, sta);
}
+static void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ u8 link_id;
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ return;
+
+ if (link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ)
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
+ }
+}
+
static int
iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
@@ -3819,6 +3851,9 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+ /* Calculate eSR mode due to BT coex */
+ iwl_mvm_bt_coex_update_vif_esr(mvm, vif);
+
/* when client is authorized (AP station marked as such),
* try to enable more links
*/
@@ -4608,7 +4643,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
- } else if (fw_ver == 3) {
+ } else if (fw_ver >= 3) {
ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
ROC_ACTIVITY_HOTSPOT);
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 084314bf6..43f3002ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -14,6 +14,8 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* Not much to do here. The stack will not allow interface
@@ -73,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_free_bf;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -92,6 +92,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvm->csme_vif = vif;
}
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
goto out_unlock;
out_free_bf:
@@ -189,17 +192,13 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif)
+static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
{
unsigned int n_active = 0;
int i;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- struct ieee80211_bss_conf *link_conf;
-
- link_conf = link_conf_dereference_protected(vif, i);
- if (link_conf &&
- rcu_access_pointer(link_conf->chanctx_conf))
+ if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
n_active++;
}
@@ -245,18 +244,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
int ret;
- /* if the assigned one was not counted yet, count it now */
- if (!rcu_access_pointer(link_conf->chanctx_conf))
- n_active++;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
+ /* if the assigned one was not counted yet, count it now */
+ if (!mvmvif->link[link_id]->phy_ctxt)
+ n_active++;
+
/* mac parameters such as HE support can change at this stage
* For sta, need first to configure correct state from drv_sta_state
* and only after that update mac config.
@@ -296,13 +295,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
* this needs the phy context assigned (and in FW?), and we cannot
* do it later because it needs to be initialized as soon as we're
* able to TX on the link, i.e. when active.
- *
- * Firmware restart isn't quite correct yet for MLO, but we don't
- * need to do it in that case anyway since it will happen from the
- * normal station state callback.
*/
- if (mvmvif->ap_sta &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (mvmvif->ap_sta) {
struct ieee80211_link_sta *link_sta;
rcu_read_lock();
@@ -416,7 +410,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
/* shouldn't happen, but verify link_id is valid before accessing */
@@ -608,10 +602,49 @@ struct iwl_mvm_link_sel_data {
bool active;
};
-static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a,
+static bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *a,
struct iwl_mvm_link_sel_data *b)
{
- return a->band != b->band;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (a->band == b->band)
+ return false;
+
+ /* BT Coex affects eSR mode only if one of the links is on LB */
+ if (a->band == NL80211_BAND_2GHZ || b->band == NL80211_BAND_2GHZ)
+ return !(mvmvif->esr_disable_reason & IWL_MVM_ESR_DISABLE_COEX);
+
+ return true;
+}
+
+static u8
+iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *data,
+ unsigned long usable_links)
+{
+ u8 n_data = 0;
+ unsigned long link_id;
+
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ data[n_data].width = link_conf->chanreq.oper.width;
+ data[n_data].active = vif->active_links & BIT(link_id);
+ n_data++;
+ }
+
+ rcu_read_unlock();
+
+ return n_data;
}
void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -621,7 +654,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned long usable_links = ieee80211_vif_usable_links(vif);
u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
u16 new_active_links;
- u8 link_id, n_data = 0, i, j;
+ u8 n_data, i, j;
if (!IWL_MVM_AUTO_EML_ENABLE)
return;
@@ -646,23 +679,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (hweight16(vif->active_links) == max_active_links)
return;
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- data[n_data].width = link_conf->chanreq.oper.width;
- data[n_data].active = vif->active_links & BIT(link_id);
- n_data++;
- }
-
- rcu_read_unlock();
+ n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links);
/* this is expected to be the current active link */
if (n_data == 1)
@@ -686,7 +703,8 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (i == j)
continue;
- if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j]))
+ if (iwl_mvm_mld_valid_link_pair(vif, &data[i],
+ &data[j]))
break;
}
@@ -702,7 +720,7 @@ void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (i == j)
continue;
- if (iwl_mvm_mld_valid_link_pair(&data[i],
+ if (iwl_mvm_mld_valid_link_pair(vif, &data[i],
&data[j]))
break;
}
@@ -1264,6 +1282,33 @@ int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
return data[1].link_id;
}
+void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool enable = !mvmvif->esr_disable_reason;
+ int link_id;
+
+ /* Nothing to do */
+ if (mvmvif->esr_active == enable)
+ return;
+
+ if (enable) {
+ /* Try to re-enable eSR */
+ iwl_mvm_mld_select_links(mvm, vif, false);
+ return;
+ }
+
+ /*
+ * Find the primary link, as we want to switch to it and drop the
+ * secondary one.
+ */
+ link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
+ WARN_ON(link_id < 0);
+
+ ieee80211_set_active_links_async(vif,
+ vif->active_links & BIT(link_id));
+}
+
/*
* This function receives a bitmap of usable links and checks if we can enter
* eSR on those links.
@@ -1272,14 +1317,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
unsigned long desired_links)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- desired_links);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
const struct wiphy_iftype_ext_capab *ext_capa;
- bool ret = true;
- int link_id;
+ u8 n_data;
- if (primary_link < 0)
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
+ hweight16(usable_links) <= 1)
return false;
if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
@@ -1291,26 +1335,13 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
!(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
return false;
- for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
+ n_data = iwl_mvm_set_link_selection_data(vif, data, desired_links);
- /* BT Coex effects eSR mode only if one of the link is on LB */
- if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
- continue;
+ if (n_data != 2)
+ return false;
- ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
- primary_link);
- // Mark eSR as disabled for the next time
- if (!ret)
- mvmvif->bt_coex_esr_disabled = true;
- break;
- }
- return ret;
+ return iwl_mvm_mld_valid_link_pair(vif, &data[0], &data[1]);
}
static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 23e64a757..36dc291d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -9,7 +9,9 @@
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id)
{
+ struct ieee80211_link_sta *link_sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
unsigned int link_id;
u32 result = 0;
@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
/* it's easy when the STA is not an MLD */
if (!sta->valid_links)
return BIT(mvmsta->deflink.sta_id);
/* but if it is an MLD, get the mask of all the FW STAs it has ... */
- for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
- struct iwl_mvm_link_sta *link_sta;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
/* unless we have a specific link in mind */
if (filter_link_id >= 0 && link_id != filter_link_id)
continue;
- link_sta =
+ mvm_link_sta =
rcu_dereference_check(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (!link_sta)
+ if (!mvm_link_sta)
continue;
- result |= BIT(link_sta->sta_id);
+ result |= BIT(mvm_link_sta->sta_id);
}
return result;
@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
unsigned int link_id;
int ret;
lockdep_assert_held(&mvm->mutex);
- for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
- if (!rcu_access_pointer(sta->link[link_id]) ||
- mvm_sta->link[link_id])
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON(mvm_sta->link[link_id]))
continue;
ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f0b24f009..d1ab35eb5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -347,6 +347,14 @@ struct iwl_mvm_vif_link_info {
};
/**
+ * enum iwl_mvm_esr_disable_reason - reasons for which we can't enable EMLSR
+ * @IWL_MVM_ESR_DISABLE_COEX: COEX is preventing the enablement of EMLSR
+ */
+enum iwl_mvm_esr_disable_reason {
+ IWL_MVM_ESR_DISABLE_COEX = BIT(0),
+};
+
+/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @mvm: pointer back to the mvm struct
* @id: between 0 and 3
@@ -361,7 +369,6 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -378,6 +385,7 @@ struct iwl_mvm_vif_link_info {
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
+ * @esr_disable_reason: a bitmap of enum iwl_mvm_esr_disable_reason
* @pm_enabled: indicates powersave is enabled
*/
struct iwl_mvm_vif {
@@ -392,7 +400,6 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
- bool bt_coex_esr_disabled;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -400,6 +407,7 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
+ u32 esr_disable_reason;
u32 ap_beacon_time;
struct iwl_mvm_vif_bf_data bf_data;
@@ -1572,15 +1580,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct iwl_trans *trans = mvm->fwrt.trans;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- lockdep_assert_held(&mvm->mutex);
-
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
+ /* Check if HW supports eSR or STR */
if ((iwl_mvm_is_esr_supported(trans) &&
- !mvmvif->bt_coex_esr_disabled) ||
- ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
+ !(mvmvif->esr_disable_reason & ~IWL_MVM_ESR_DISABLE_COEX)) ||
+ (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1776,6 +1783,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
+
/*
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
@@ -2133,12 +2142,9 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id, int primary_link);
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2779,4 +2785,8 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
int duration, u32 activity);
+
+/* EMLSR */
+void iwl_mvm_recalc_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 376b23b40..6cd4ec4d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -122,13 +122,8 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index b1add7942..395aef04f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -889,8 +889,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
if (link_info->phy_ctxt &&
link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
- link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif,
+ link_id);
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ce8d83c77..8ac5c045f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -2456,8 +2456,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
*
* We mark it as the mac header so that upper layers know where
* the radiotap headers end.
+ *
+ * Since putting data on the skb does not move the existing data, and
+ * that is the only way we add data here, data + len is the next place
+ * that a header would be put.
*/
- skb_reset_mac_header(skb);
+ skb_set_mac_header(skb, skb->len);
/*
* Override the nss from the rx_vec since the rate_n_flags has
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 11559563a..525d8efcc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1303,7 +1303,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1405,7 +1405,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
general_params->adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
general_params->adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -3171,8 +3171,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_mvm_vif_link_info *link_info =
scan_vif->link[mvm->scan_link_id];
- if (!WARN_ON(!link_info))
+ /* It is possible that by the time the scan is complete the link
+ * was already removed and is not valid.
+ */
+ if (link_info)
memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
+ else
+ IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_vif = NULL;
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index ce8fea76d..7a4ef5c83 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -2718,7 +2718,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
netdev_hw_addr_list_for_each(ha, mc_list) {
- memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
+ memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 7a2f5d385..14304b063 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -4,6 +4,13 @@
#include "mac.h"
#include "../dma.h"
+static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+};
+
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
@@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
+ u8 tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
- u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
goto free;
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
- val = le32_to_cpu(txd[0]);
- val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
- val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
- txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- if (ieee80211_is_data_qos(hdr->frame_control))
+
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_TAG1D_MASK;
- skb_set_queue_mapping(skb, tid_to_ac[tid]);
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ u8 qid = tid_to_ac[tid];
+ hwq = wmm_queue_map[qid];
+ skb_set_queue_mapping(skb, qid);
+ } else if (ieee80211_is_data(hdr->frame_control)) {
+ skb_set_queue_mapping(skb, IEEE80211_AC_BE);
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ } else {
+ skb_pull(skb, MT_TXD_SIZE);
+ if (!ieee80211_is_bufferable_mmpdu(skb))
+ goto free;
+ skb_push(skb, MT_TXD_SIZE);
+ skb_set_queue_mapping(skb, MT_TXQ_PSD);
+ hwq = MT_TX_HW_QUEUE_MGMT;
+ }
+
ieee80211_sta_set_buffered(sta, tid, true);
+ val = le32_to_cpu(txd[0]);
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
+ txd[0] = cpu_to_le32(val);
+
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
@@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
int mt7603_dma_init(struct mt7603_dev *dev)
{
- static const u8 wmm_queue_map[] = {
- [IEEE80211_AC_BK] = 0,
- [IEEE80211_AC_BE] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
int ret;
int i;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index cf21d0625..dc8a77f0a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
MT_CLIENT_RESET_TX_R_E_2_S);
/* Start PSE client TX abort */
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
MT_CLIENT_RESET_TX_R_E_1_S, 500);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 0971c164b..c27acaf0e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
#endif /* CONFIG_PM */
const struct ieee80211_ops mt7615_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7615_tx,
.start = mt7615_start,
.stop = mt7615_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index af0c2b2aa..fb8bd50eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (is_mt799x(dev) && !wcid->sta)
+ if (wcid && !wcid->sta)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -2527,6 +2527,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
__le16 tag;
__le16 len;
u8 suspend;
+ u8 pad[7];
} __packed hif_suspend;
} req = {
.hif_suspend = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 6c3696c8c..450f4d221 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -1049,6 +1049,7 @@ static ssize_t
mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
+ int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
@@ -1057,7 +1058,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
.band_idx = phy->mt76->band_idx,
};
char buf[100];
- int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
enum mac80211_rx_encoding mode;
u32 offs = 0, len = 0;
@@ -1130,8 +1130,8 @@ skip:
if (ret)
goto out;
- mphy->txpower_cur = max(mphy->txpower_cur,
- max(pwr160, max(pwr80, max(pwr40, pwr20))));
+ pwr = max3(pwr80, pwr40, pwr20);
+ mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr);
out:
mutex_unlock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 867e14f6b..73e42ef42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -663,6 +663,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
int i, ret;
dev_dbg(dev->mt76.dev, "chip reset\n");
+ set_bit(MT76_RESET, &dev->mphy.state);
dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
@@ -691,6 +692,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
}
dev->hw_full_reset = false;
+ clear_bit(MT76_RESET, &dev->mphy.state);
pm->suspended = false;
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index c866144ff..031ba9aaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 389eb0903..1f77cf71c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&dev->mt76.tx_worker);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
mt76_worker_enable(&dev->mt76.tx_worker);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 2a0bbfe7b..b8315a89f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -535,7 +535,7 @@ struct mt7925_wow_pattern_tlv {
u8 offset;
u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
- u8 rsv[4];
+ u8 rsv[7];
} __packed;
static inline enum connac3_mcu_cipher_type
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index b44abe2ac..e86c05d0e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -3729,6 +3729,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
} __packed * res;
struct sk_buff *skb;
int ret;
+ u32 temp;
ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
&req, sizeof(req), true, &skb);
@@ -3736,8 +3737,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
return ret;
res = (void *)skb->data;
+ temp = le32_to_cpu(res->temperature);
+ dev_kfree_skb(skb);
- return le32_to_cpu(res->temperature);
+ return temp;
}
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state)
@@ -4464,7 +4467,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
u8 band_idx;
} __packed req = {
.tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL),
- .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4),
+ .len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4),
.power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL,
.power_limit_type = TX_POWER_LIMIT_TABLE_RATE,
.band_idx = phy->mt76->band_idx,
@@ -4479,7 +4482,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
mphy->txpower_cur = tx_power;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(req) + MT7996_SKU_RATE_NUM);
+ sizeof(req) + MT7996_SKU_PATH_NUM);
if (!skb)
return -ENOMEM;
@@ -4503,6 +4506,9 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
/* eht */
skb_put_data(skb, &la.eht[0], sizeof(la.eht));
+ /* padding */
+ skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM);
+
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WM_UNI_CMD(TXPOWER), true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 304e5fd14..928a9663b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
- u32 i, intr, mask, intr1;
+ u32 i, intr, mask, intr1 = 0;
if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
mtk_wed_device_irq_set_mask(wed_hif2, 0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 36d1f247d..ddeb40d52 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -50,6 +50,7 @@
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7996_SKU_RATE_NUM 417
+#define MT7996_SKU_PATH_NUM 494
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 3e88798df..a4ed00eeb 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
dev = container_of(sdio, struct mt76_dev, sdio);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->phy.state))
+ if (test_bit(MT76_RESET, &dev->phy.state) ||
+ test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index fd92d23c4..16d884a3d 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -122,6 +122,15 @@ enum rtl8xxxu_rx_type {
RX_TYPE_ERROR = -1
};
+enum rtl8xxxu_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
struct rtl8xxxu_rxdesc16 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 4a49f8f9d..fbbc97161 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1505,13 +1505,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
- u8 val8;
+ u8 val8, base;
int group, i;
group = rtl8xxxu_gen1_channel_to_group(channel);
- cck[0] = priv->cck_tx_power_index_A[group] - 1;
- cck[1] = priv->cck_tx_power_index_B[group] - 1;
+ cck[0] = priv->cck_tx_power_index_A[group];
+ cck[1] = priv->cck_tx_power_index_B[group];
if (priv->hi_pa) {
if (cck[0] > 0x20)
@@ -1522,10 +1522,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
- if (ofdm[0])
- ofdm[0] -= 1;
- if (ofdm[1])
- ofdm[1] -= 1;
ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -1614,20 +1610,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
mcs_a + power_base->reg_0e1c);
+ val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
- else
- val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
}
+
rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
mcs_b + power_base->reg_0868);
+ val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
- else
- val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
}
}
@@ -6473,7 +6468,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -6578,7 +6574,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -7998,6 +7995,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 2e60a6991..42b7db12b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
- /* brought up everything changes (changed == ~0) indicates first
- * open, so use our default value instead of that of wiphy.
- */
- if (changed != ~0) {
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&hw->conf.long_frame_max_tx_count));
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
!rtlpriv->proximity.proxim_on) {
struct ieee80211_channel *channel = hw->conf.chandef.chan;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d835a2742..56b5cd032 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -892,8 +892,8 @@ static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
u8 place = chnl;
if (chnl > 14) {
- for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
- if (channel5g[place] == chnl) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
+ if (channel_all[place] == chnl) {
place++;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 192982ec8..cbc7b4dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -35,7 +35,7 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
+ __le32 *pdesc,
struct rx_fwinfo_92d *p_drvinfo,
bool packet_match_bssid,
bool packet_toself,
@@ -50,8 +50,10 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
u8 i, max_spatial_stream;
u32 rssi, total_rssi = 0;
bool is_cck_rate;
+ u8 rxmcs;
- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
+ rxmcs = get_rx_desc_rxmcs(pdesc);
+ is_cck_rate = rxmcs <= DESC_RATE11M;
pstats->packet_matchbssid = packet_match_bssid;
pstats->packet_toself = packet_toself;
pstats->packet_beacon = packet_beacon;
@@ -157,8 +159,8 @@ static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
pstats->rx_pwdb_all = pwdb_all;
pstats->rxpower = rx_pwr_all;
pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
- pdesc->rxmcs <= DESC_RATEMCS15)
+ if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
+ rxmcs <= DESC_RATEMCS15)
max_spatial_stream = 2;
else
max_spatial_stream = 1;
@@ -364,7 +366,7 @@ static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
struct sk_buff *skb,
struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
+ __le32 *pdesc,
struct rx_fwinfo_92d *p_drvinfo)
{
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -413,7 +415,8 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
stats->icv = (u16)get_rx_desc_icv(pdesc);
stats->crc = (u16)get_rx_desc_crc32(pdesc);
stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !get_rx_desc_swdec(pdesc);
+ stats->decrypted = !get_rx_desc_swdec(pdesc) &&
+ get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
@@ -426,8 +429,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
rx_status->band = hw->conf.chandef.chan->band;
if (get_rx_desc_crc32(pdesc))
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!get_rx_desc_swdec(pdesc))
- rx_status->flag |= RX_FLAG_DECRYPTED;
if (get_rx_desc_bw(pdesc))
rx_status->bw = RATE_INFO_BW_40;
if (get_rx_desc_rxht(pdesc))
@@ -441,9 +442,7 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
if (phystatus) {
p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
stats->rx_bufshift);
- _rtl92de_translate_rx_signal_stuff(hw,
- skb, stats,
- (struct rx_desc_92d *)pdesc,
+ _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc,
p_drvinfo);
}
/*rx_status->qual = stats->signal; */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 2992668c1..2d4887490 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -14,6 +14,15 @@
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
+enum rtl92d_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
/* macros to read/write various fields in RX or TX descriptors */
static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
@@ -246,6 +255,11 @@ static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
return le32_get_bits(*__pdesc, GENMASK(19, 16));
}
+static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(22, 20));
+}
+
static inline u32 get_rx_desc_shift(__le32 *__pdesc)
{
return le32_get_bits(*__pdesc, GENMASK(25, 24));
@@ -380,10 +394,17 @@ struct rx_fwinfo_92d {
u8 csi_target[2];
u8 sigevm;
u8 max_ex_pwr;
+#ifdef __LITTLE_ENDIAN
u8 ex_intf_flag:1;
u8 sgi_en:1;
u8 rxsc:2;
u8 reserve:4;
+#else
+ u8 reserve:4;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 ex_intf_flag:1;
+#endif
} __packed;
struct tx_desc_92d {
@@ -488,64 +509,6 @@ struct tx_desc_92d {
u32 reserve_pass_pcie_mm_limit[4];
} __packed;
-struct rx_desc_92d {
- u32 length:14;
- u32 crc32:1;
- u32 icverror:1;
- u32 drv_infosize:4;
- u32 security:3;
- u32 qos:1;
- u32 shift:2;
- u32 phystatus:1;
- u32 swdec:1;
- u32 lastseg:1;
- u32 firstseg:1;
- u32 eor:1;
- u32 own:1;
-
- u32 macid:5;
- u32 tid:4;
- u32 hwrsvd:5;
- u32 paggr:1;
- u32 faggr:1;
- u32 a1_fit:4;
- u32 a2_fit:4;
- u32 pam:1;
- u32 pwr:1;
- u32 moredata:1;
- u32 morefrag:1;
- u32 type:2;
- u32 mc:1;
- u32 bc:1;
-
- u32 seq:12;
- u32 frag:4;
- u32 nextpktlen:14;
- u32 nextind:1;
- u32 rsvd:1;
-
- u32 rxmcs:6;
- u32 rxht:1;
- u32 amsdu:1;
- u32 splcp:1;
- u32 bandwidth:1;
- u32 htc:1;
- u32 tcpchk_rpt:1;
- u32 ipcchk_rpt:1;
- u32 tcpchk_valid:1;
- u32 hwpcerr:1;
- u32 hwpcind:1;
- u32 iv0:16;
-
- u32 iv1;
-
- u32 tsfl;
-
- u32 bufferaddress;
- u32 bufferaddress64;
-
-} __packed;
-
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index d474b8d5d..b8d419a5b 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -4069,6 +4069,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve
}
}
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
+{
+ const struct dmi_system_id *match;
+ enum rtw89_quirks quirk;
+
+ if (!quirks)
+ return;
+
+ for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
+ quirk = (uintptr_t)match->driver_data;
+ if (quirk >= NUM_OF_RTW89_QUIRKS)
+ continue;
+
+ set_bit(quirk, rtwdev->quirks);
+ }
+}
+EXPORT_SYMBOL(rtw89_check_quirks);
+
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2e854c9af..509d84a49 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -7,6 +7,7 @@
#include <linux/average.h>
#include <linux/bitfield.h>
+#include <linux/dmi.h>
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/workqueue.h>
@@ -3977,6 +3978,7 @@ union rtw89_bus_info {
struct rtw89_driver_info {
const struct rtw89_chip_info *chip;
+ const struct dmi_system_id *quirks;
union rtw89_bus_info bus;
};
@@ -4324,6 +4326,12 @@ enum rtw89_flags {
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_quirks {
+ RTW89_QUIRK_PCI_BER,
+
+ NUM_OF_RTW89_QUIRKS,
+};
+
enum rtw89_pkt_drop_sel {
RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
@@ -5084,6 +5092,7 @@ struct rtw89_dev {
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
+ DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS);
struct rtw89_phy_stat phystat;
struct rtw89_rfk_wait_info rfk_wait;
@@ -6129,6 +6138,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct cfg80211_tid_config *tid_config);
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 31d1ffb16..c4707a036 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
return aifsn * slot_time + sifs;
}
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 19001130a..3b0d97da0 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -1089,7 +1089,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
- cnt = min(cnt, wd_ring->curr_num);
+ if (txch != RTW89_TXCH_CH12)
+ cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
@@ -2298,6 +2299,22 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
+{
+ u32 phy_offset;
+
+ if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
+ return;
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+}
+
static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
{
if (rtwdev->chip->chip_id != RTL8852A)
@@ -2695,6 +2712,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
+ rtw89_pci_ber(rtwdev);
rtw89_pci_rxdma_prefth(rtwdev);
rtw89_pci_l1off_pwroff(rtwdev);
rtw89_pci_deglitch_setting(rtwdev);
@@ -4171,6 +4189,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
+ rtw89_check_quirks(rtwdev, info->quirks);
+
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
ret = rtw89_core_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index a63b6b7c9..87e708166 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -26,11 +26,16 @@
#define RAC_REG_FLD_0 0x1D
#define BAC_AUTOK_N_MASK GENMASK(3, 2)
#define PCIE_AUTOK_4 0x3
+#define RAC_ANA1E 0x1E
+#define RAC_ANA1E_G1_VAL 0x66EA
+#define RAC_ANA1E_G2_VAL 0x6EEA
#define RAC_ANA1F 0x1F
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
#define RAC_ANA26 0x26
#define B_AX_RXEN GENMASK(15, 14)
+#define RAC_ANA2E 0x2E
+#define RAC_ANA2E_VAL 0xFFFE
#define RAC_CTRL_PPR_V1 0x30
#define B_AX_CLK_CALIB_EN BIT(12)
#define B_AX_CALIB_EN BIT(13)
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 31290d8cb..92074b73e 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
- if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
+ if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) &&
+ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags))
rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
else
rtw89_mac_power_mode_change(rtwdev, enter);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ca1374a71..ec3629d95 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
static const struct rtw89_driver_info rtw89_8851be_info = {
.chip = &rtw8851b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8851b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 7c6ffedb7..fdee5dd4b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
static const struct rtw89_driver_info rtw89_8852ae_info = {
.chip = &rtw8852a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index ed71364e6..5f9411226 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -63,6 +63,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
static const struct rtw89_driver_info rtw89_8852be_info = {
.chip = &rtw8852b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 583ea673a..e07c7f3ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -68,8 +68,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.recognize_intrs = rtw89_pci_recognize_intrs_v1,
};
+static const struct dmi_system_id rtw8852c_pci_quirks[] = {
+ {
+ .ident = "Dell Inc. Vostro 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {
+ .ident = "Dell Inc. Inspiron 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {},
+};
+
static const struct rtw89_driver_info rtw89_8852ce_info = {
.chip = &rtw8852c_chip_info,
+ .quirks = rtw8852c_pci_quirks,
.bus = {
.pci = &rtw8852c_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 4981b657b..ce8aaa950 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -61,6 +61,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
static const struct rtw89_driver_info rtw89_8922ae_info = {
.chip = &rtw8922a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8922a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index ccad026de..ca4835008 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -457,14 +457,17 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_chip_info *chip = rtwdev->chip;
bool include_bb = !!chip->bbmcu_nr;
+ bool disable_intr_for_dlfw = false;
struct ieee80211_sta *wow_sta;
struct rtw89_sta *rtwsta = NULL;
bool is_conn = true;
int ret;
- rtw89_hci_disable_intr(rtwdev);
+ if (chip_id == RTL8852C || chip_id == RTL8922A)
+ disable_intr_for_dlfw = true;
wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
if (wow_sta)
@@ -472,12 +475,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
else
is_conn = false;
+ if (disable_intr_for_dlfw)
+ rtw89_hci_disable_intr(rtwdev);
+
ret = rtw89_fw_download(rtwdev, fw_type, include_bb);
if (ret) {
rtw89_warn(rtwdev, "download fw failed\n");
return ret;
}
+ if (disable_intr_for_dlfw)
+ rtw89_hci_enable_intr(rtwdev);
+
rtw89_phy_init_rf_reg(rtwdev, true);
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
@@ -520,7 +529,6 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
}
rtw89_mac_hw_mgnt_sec(rtwdev, wow);
- rtw89_hci_enable_intr(rtwdev);
return 0;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 2fe724d62..33c5a46f1 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -210,7 +210,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
rc = PTR_ERR(devlink->cd_regions[i]);
dev_err(devlink->dev, "Devlink region fail,err %d", rc);
/* Delete previously created regions */
- for ( ; i >= 0; i--)
+ for (i--; i >= 0; i--)
devlink_region_destroy(devlink->cd_regions[i]);
goto region_create_fail;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 095f59e7a..d513fd275 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -414,7 +414,15 @@ static inline void nvme_end_req_zoned(struct request *req)
}
}
-static inline void nvme_end_req(struct request *req)
+static inline void __nvme_end_req(struct request *req)
+{
+ nvme_end_req_zoned(req);
+ nvme_trace_bio_complete(req);
+ if (req->cmd_flags & REQ_NVME_MPATH)
+ nvme_mpath_end_request(req);
+}
+
+void nvme_end_req(struct request *req)
{
blk_status_t status = nvme_error_status(nvme_req(req)->status);
@@ -424,10 +432,7 @@ static inline void nvme_end_req(struct request *req)
else
nvme_log_error(req);
}
- nvme_end_req_zoned(req);
- nvme_trace_bio_complete(req);
- if (req->cmd_flags & REQ_NVME_MPATH)
- nvme_mpath_end_request(req);
+ __nvme_end_req(req);
blk_mq_end_request(req, status);
}
@@ -476,7 +481,7 @@ void nvme_complete_batch_req(struct request *req)
{
trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
- nvme_end_req_zoned(req);
+ __nvme_end_req(req);
}
EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d16e976ae..a4e46eb20 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -118,7 +118,8 @@ void nvme_failover_req(struct request *req)
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
- blk_mq_end_request(req, 0);
+ nvme_req(req)->status = 0;
+ nvme_end_req(req);
kblockd_schedule_work(&ns->head->requeue_work);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 05532c281..d7bcc6d51 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -767,6 +767,7 @@ static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
}
}
+void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index e05571b2a..8fa1ffcda 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -77,7 +77,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
if (nvme_is_path_error(nvme_sc))
return PR_STS_PATH_FAILED;
- switch (nvme_sc) {
+ switch (nvme_sc & 0x7ff) {
case NVME_SC_SUCCESS:
return PR_STS_SUCCESS;
case NVME_SC_RESERVATION_CONFLICT:
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 7fda69395..dfdff6aba 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -676,10 +676,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
if (kstrtobool(page, &enable))
return -EINVAL;
+ /*
+ * Take the global nvmet_config_sem because the disable routine has a
+ * window where it releases the subsys lock, giving a parallel enable
+ * a chance to execute concurrently and causing the disable to
+ * misaccount the ns percpu_ref.
+ */
+ down_write(&nvmet_config_sem);
if (enable)
ret = nvmet_ns_enable(ns);
else
nvmet_ns_disable(ns);
+ up_write(&nvmet_config_sem);
return ret ? ret : count;
}
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index bb4a69d53..f003782d4 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
case NVME_ID_CNS_CTRL:
- nvmet_passthru_override_id_ctrl(req);
+ status = nvmet_passthru_override_id_ctrl(req);
break;
case NVME_ID_CNS_NS:
- nvmet_passthru_override_id_ns(req);
+ status = nvmet_passthru_override_id_ns(req);
break;
case NVME_ID_CNS_NS_DESC_LIST:
- nvmet_passthru_override_id_descs(req);
+ status = nvmet_passthru_override_id_descs(req);
break;
}
} else if (status < 0)
diff --git a/drivers/of/module.c b/drivers/of/module.c
index f58e62495..780fd82a7 100644
--- a/drivers/of/module.c
+++ b/drivers/of/module.c
@@ -29,14 +29,15 @@ ssize_t of_modalias(const struct device_node *np, char *str, ssize_t len)
csize = snprintf(str, len, "of:N%pOFn%c%s", np, 'T',
of_node_get_device_type(np));
tsize = csize;
+ if (csize >= len)
+ csize = len > 0 ? len - 1 : 0;
len -= csize;
- if (str)
- str += csize;
+ str += csize;
of_property_for_each_string(np, "compatible", p, compat) {
csize = strlen(compat) + 1;
tsize += csize;
- if (csize > len)
+ if (csize >= len)
continue;
csize = snprintf(str, len, "C%s", compat);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index e233734b7..cb4611fe1 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -2394,7 +2394,8 @@ static void _opp_detach_genpd(struct opp_table *opp_table)
static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
const char * const *names, struct device ***virt_devs)
{
- struct device *virt_dev;
+ struct device *virt_dev, *gdev;
+ struct opp_table *genpd_table;
int index = 0, ret = -EINVAL;
const char * const *name = names;
@@ -2428,6 +2429,34 @@ static int _opp_attach_genpd(struct opp_table *opp_table, struct device *dev,
}
/*
+ * The required_opp_tables parsing is not perfect, as the OPP
+ * core does the parsing solely based on the DT node pointers.
+ * The core sets the required_opp_tables entry to the first OPP
+ * table in the "opp_tables" list that matches the node
+ * pointer.
+ *
+ * If the target DT OPP table is used by multiple devices and
+ * they all create separate instances of 'struct opp_table' from
+ * it, then it is possible that the required_opp_tables entry
+ * may be set to the incorrect sibling device.
+ *
+ * Cross check it again and fix if required.
+ */
+ gdev = dev_to_genpd_dev(virt_dev);
+ if (IS_ERR(gdev))
+ return PTR_ERR(gdev);
+
+ genpd_table = _find_opp_table(gdev);
+ if (!IS_ERR(genpd_table)) {
+ if (genpd_table != opp_table->required_opp_tables[index]) {
+ dev_pm_opp_put_opp_table(opp_table->required_opp_tables[index]);
+ opp_table->required_opp_tables[index] = genpd_table;
+ } else {
+ dev_pm_opp_put_opp_table(genpd_table);
+ }
+ }
+
+ /*
* Add the virtual genpd device as a user of the OPP table, so
* we can call dev_pm_opp_set_opp() on it directly.
*
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 746a11dcb..c43a1479d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -604,11 +604,16 @@ static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap)
int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ struct dw_pcie_ep_func *ep_func;
+ struct device *dev = pci->dev;
+ struct pci_epc *epc = ep->epc;
unsigned int offset, ptm_cap_base;
unsigned int nbars;
u8 hdr_type;
+ u8 func_no;
+ int i, ret;
+ void *addr;
u32 reg;
- int i;
hdr_type = dw_pcie_readb_dbi(pci, PCI_HEADER_TYPE) &
PCI_HEADER_TYPE_MASK;
@@ -619,6 +624,58 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
return -EIO;
}
+ dw_pcie_version_detect(pci);
+
+ dw_pcie_iatu_detect(pci);
+
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ return ret;
+
+ if (!ep->ib_window_map) {
+ ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
+ GFP_KERNEL);
+ if (!ep->ib_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->ob_window_map) {
+ ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
+ GFP_KERNEL);
+ if (!ep->ob_window_map)
+ goto err_remove_edma;
+ }
+
+ if (!ep->outbound_addr) {
+ addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
+ GFP_KERNEL);
+ if (!addr)
+ goto err_remove_edma;
+ ep->outbound_addr = addr;
+ }
+
+ for (func_no = 0; func_no < epc->max_functions; func_no++) {
+
+ ep_func = dw_pcie_ep_get_func_from_ep(ep, func_no);
+ if (ep_func)
+ continue;
+
+ ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
+ if (!ep_func)
+ goto err_remove_edma;
+
+ ep_func->func_no = func_no;
+ ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSI);
+ ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
+ PCI_CAP_ID_MSIX);
+
+ list_add_tail(&ep_func->list, &ep->func_list);
+ }
+
+ if (ep->ops->init)
+ ep->ops->init(ep);
+
offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR);
ptm_cap_base = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_PTM);
@@ -658,14 +715,17 @@ int dw_pcie_ep_init_complete(struct dw_pcie_ep *ep)
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
+
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_init_complete);
int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
int ret;
- void *addr;
- u8 func_no;
struct resource *res;
struct pci_epc *epc;
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@@ -673,7 +733,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
struct platform_device *pdev = to_platform_device(dev);
struct device_node *np = dev->of_node;
const struct pci_epc_features *epc_features;
- struct dw_pcie_ep_func *ep_func;
INIT_LIST_HEAD(&ep->func_list);
@@ -691,26 +750,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ep->ops->pre_init)
ep->ops->pre_init(ep);
- dw_pcie_version_detect(pci);
-
- dw_pcie_iatu_detect(pci);
-
- ep->ib_window_map = devm_bitmap_zalloc(dev, pci->num_ib_windows,
- GFP_KERNEL);
- if (!ep->ib_window_map)
- return -ENOMEM;
-
- ep->ob_window_map = devm_bitmap_zalloc(dev, pci->num_ob_windows,
- GFP_KERNEL);
- if (!ep->ob_window_map)
- return -ENOMEM;
-
- addr = devm_kcalloc(dev, pci->num_ob_windows, sizeof(phys_addr_t),
- GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- ep->outbound_addr = addr;
-
epc = devm_pci_epc_create(dev, &epc_ops);
if (IS_ERR(epc)) {
dev_err(dev, "Failed to create epc device\n");
@@ -724,23 +763,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
if (ret < 0)
epc->max_functions = 1;
- for (func_no = 0; func_no < epc->max_functions; func_no++) {
- ep_func = devm_kzalloc(dev, sizeof(*ep_func), GFP_KERNEL);
- if (!ep_func)
- return -ENOMEM;
-
- ep_func->func_no = func_no;
- ep_func->msi_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSI);
- ep_func->msix_cap = dw_pcie_ep_find_capability(ep, func_no,
- PCI_CAP_ID_MSIX);
-
- list_add_tail(&ep_func->list, &ep->func_list);
- }
-
- if (ep->ops->init)
- ep->ops->init(ep);
-
ret = pci_epc_mem_init(epc, ep->phys_base, ep->addr_size,
ep->page_size);
if (ret < 0) {
@@ -756,25 +778,25 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
- ret = dw_pcie_edma_detect(pci);
- if (ret)
- goto err_free_epc_mem;
-
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
return 0;
}
+ /*
+ * NOTE: Avoid accessing the hardware (e.g. the DBI space) before this
+ * step, as platforms that implement the 'core_init_notifier' feature may
+ * not have the hardware ready (i.e. core initialized) for access
+ * (e.g. tegra194). Any hardware access on such platforms results
+ * in a system hang.
+ */
ret = dw_pcie_ep_init_complete(ep);
if (ret)
- goto err_remove_edma;
+ goto err_free_epc_mem;
return 0;
-err_remove_edma:
- dw_pcie_edma_remove(pci);
-
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 1f7b662cb..e440c09d1 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -2273,11 +2273,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
ret = tegra_pcie_config_ep(pcie, pdev);
if (ret < 0)
goto fail;
+ else
+ return 0;
break;
default:
dev_err(dev, "Invalid PCIe device type %d\n",
pcie->of_data->mode);
+ ret = -EINVAL;
}
fail:
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index c9046e97a..79c3e7e9a 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -98,10 +98,8 @@ static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
/* All functions share the same vendor ID with function 0 */
if (fn == 0) {
- u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) |
- (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16;
-
- rockchip_pcie_write(rockchip, vid_regs,
+ rockchip_pcie_write(rockchip,
+ hdr->vendorid | hdr->subsys_vendor_id << 16,
PCIE_CORE_CONFIG_VENDOR);
}
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index c2c733415..03539e505 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -238,6 +238,8 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
return 0;
int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL);
+ if (!int_map)
+ return -ENOMEM;
mapp = int_map;
list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e5f243dd4..cbbf197df 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1277,6 +1277,11 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
for (;;) {
u32 id;
+ if (pci_dev_is_disconnected(dev)) {
+ pci_dbg(dev, "disconnected; not waiting\n");
+ return -ENOTTY;
+ }
+
pci_read_config_dword(dev, PCI_COMMAND, &id);
if (!PCI_POSSIBLE_ERROR(id))
break;
@@ -2962,6 +2967,18 @@ static const struct dmi_system_id bridge_d3_blacklist[] = {
DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"),
},
},
+ {
+ /*
+ * Changing the power state of the root port that the dGPU is
+ * connected to fails:
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3229
+ */
+ .ident = "Hewlett-Packard HP Pavilion 17 Notebook PC/1972",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_BOARD_NAME, "1972"),
+ DMI_MATCH(DMI_BOARD_VERSION, "95.33"),
+ },
+ },
#endif
{ }
};
@@ -4629,7 +4646,7 @@ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt)
* avoid LTSSM race as recommended in Implementation Note at the
* end of PCIe r6.0.1 sec 7.5.3.7.
*/
- rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt);
+ rc = pcie_wait_for_link_status(pdev, true, false);
if (rc)
return rc;
diff --git a/drivers/pci/pcie/edr.c b/drivers/pci/pcie/edr.c
index 5f4914d31..e86298dbb 100644
--- a/drivers/pci/pcie/edr.c
+++ b/drivers/pci/pcie/edr.c
@@ -32,10 +32,10 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
int status = 0;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * Per PCI Firmware r3.3, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
+ * optional. Return success if it's not implemented.
*/
- if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
1ULL << EDR_PORT_DPC_ENABLE_DSM))
return 0;
@@ -46,12 +46,7 @@ static int acpi_enable_dpc(struct pci_dev *pdev)
argv4.package.count = 1;
argv4.package.elements = &req;
- /*
- * Per Downstream Port Containment Related Enhancements ECN to PCI
- * Firmware Specification r3.2, sec 4.6.12, EDR_PORT_DPC_ENABLE_DSM is
- * optional. Return success if it's not implemented.
- */
- obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
+ obj = acpi_evaluate_dsm(adev->handle, &pci_acpi_dsm_guid, 6,
EDR_PORT_DPC_ENABLE_DSM, &argv4);
if (!obj)
return 0;
@@ -85,8 +80,9 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
u16 port;
/*
- * Behavior when calling unsupported _DSM functions is undefined,
- * so check whether EDR_PORT_DPC_ENABLE_DSM is supported.
+ * If EDR_PORT_LOCATE_DSM is not implemented under the target of
+ * EDR, the target is the port that experienced the containment
+ * event (PCI Firmware r3.3, sec 4.6.13).
*/
if (!acpi_check_dsm(adev->handle, &pci_acpi_dsm_guid, 5,
1ULL << EDR_PORT_LOCATE_DSM))
@@ -104,6 +100,16 @@ static struct pci_dev *acpi_dpc_port_get(struct pci_dev *pdev)
}
/*
+ * Bit 31 represents the success/failure of the operation. If bit
+ * 31 is set, the operation failed.
+ */
+ if (obj->integer.value & BIT(31)) {
+ ACPI_FREE(obj);
+ pci_err(pdev, "Locate Port _DSM failed\n");
+ return NULL;
+ }
+
+ /*
* Firmware returns DPC port BDF details in following format:
* 15:8 = bus
* 7:3 = device
diff --git a/drivers/perf/arm_dmc620_pmu.c b/drivers/perf/arm_dmc620_pmu.c
index 8a81be2dd..88c17c1d6 100644
--- a/drivers/perf/arm_dmc620_pmu.c
+++ b/drivers/perf/arm_dmc620_pmu.c
@@ -542,12 +542,16 @@ static int dmc620_pmu_event_init(struct perf_event *event)
if (event->cpu < 0)
return -EINVAL;
+ hwc->idx = -1;
+
+ if (event->group_leader == event)
+ return 0;
+
/*
* We can't atomically disable all HW counters so only one event allowed,
* although software events are acceptable.
*/
- if (event->group_leader != event &&
- !is_software_event(event->group_leader))
+ if (!is_software_event(event->group_leader))
return -EINVAL;
for_each_sibling_event(sibling, event->group_leader) {
@@ -556,7 +560,6 @@ static int dmc620_pmu_event_init(struct perf_event *event)
return -EINVAL;
}
- hwc->idx = -1;
return 0;
}
diff --git a/drivers/perf/hisilicon/hisi_pcie_pmu.c b/drivers/perf/hisilicon/hisi_pcie_pmu.c
index 5d1f0e9fd..dba399125 100644
--- a/drivers/perf/hisilicon/hisi_pcie_pmu.c
+++ b/drivers/perf/hisilicon/hisi_pcie_pmu.c
@@ -350,15 +350,27 @@ static bool hisi_pcie_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hisi_pcie_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HISI_PCIE_MAX_COUNTERS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HISI_PCIE_MAX_COUNTERS;
+ return true;
}
static int hisi_pcie_pmu_event_init(struct perf_event *event)
diff --git a/drivers/perf/hisilicon/hns3_pmu.c b/drivers/perf/hisilicon/hns3_pmu.c
index 16869bf5b..60062eaa3 100644
--- a/drivers/perf/hisilicon/hns3_pmu.c
+++ b/drivers/perf/hisilicon/hns3_pmu.c
@@ -1085,15 +1085,27 @@ static bool hns3_pmu_validate_event_group(struct perf_event *event)
return false;
for (num = 0; num < counters; num++) {
+ /*
+ * If we find a related event, then it's a valid group
+ * since we don't need to allocate a new counter for it.
+ */
if (hns3_pmu_cmp_event(event_group[num], sibling))
break;
}
+ /*
+ * Otherwise it's a new event but if there's no available counter,
+ * fail the check since we cannot schedule all the events in
+ * the group simultaneously.
+ */
+ if (num == HNS3_PMU_MAX_HW_EVENTS)
+ return false;
+
if (num == counters)
event_group[counters++] = sibling;
}
- return counters <= HNS3_PMU_MAX_HW_EVENTS;
+ return true;
}
static u32 hns3_pmu_get_filter_condition(struct perf_event *event)
@@ -1515,7 +1527,7 @@ static int hns3_pmu_irq_register(struct pci_dev *pdev,
return ret;
}
- ret = devm_add_action(&pdev->dev, hns3_pmu_free_irq, pdev);
+ ret = devm_add_action_or_reset(&pdev->dev, hns3_pmu_free_irq, pdev);
if (ret) {
pci_err(pdev, "failed to add free irq action, ret = %d.\n", ret);
return ret;
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 8cbe6e5f9..3e44d2fb8 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -27,7 +27,7 @@
#define ALT_SBI_PMU_OVERFLOW(__ovl) \
asm volatile(ALTERNATIVE_2( \
- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(CSR_SCOUNTOVF), \
"csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
CONFIG_ERRATA_THEAD_PMU, \
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index c21cdb8db..b8919443e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -187,6 +187,31 @@ static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
+static const unsigned int qmp_v6_n4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V6_N4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V6_N4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V6_N4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_N4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V6_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V6_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+ [QPHY_DP_PHY_VCO_DIV] = QSERDES_V6_DP_PHY_VCO_DIV,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_N4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_N4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_N4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_N4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN,
+};
+
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -997,6 +1022,31 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
@@ -1011,6 +1061,19 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_INTERFACE_SELECT, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_N4_TX_TX_BAND, 0x1),
+};
+
static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
@@ -1059,6 +1122,74 @@ static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
};
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x02),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_n4_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x6b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+};
+
static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
@@ -1273,20 +1404,20 @@ static const struct qmp_phy_init_tbl x1e80100_usb43dp_rx_tbl[] = {
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x55),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_ALIGN_DETECT_CONFIG2, 0x30),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_SIGDET_LVL, 0x55),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_RX_CONFIG, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1, 0xd4),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2, 0x30),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V6_N4_PCS_EQ_CONFIG5, 0x10),
};
static const struct qmp_phy_init_tbl x1e80100_usb43dp_pcs_usb_tbl[] = {
@@ -1382,6 +1513,13 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
{ 0x3f, 0xff, 0xff, 0xff }
};
+static const u8 qmp_dp_v6_voltage_swing_hbr_rbr[4][4] = {
+ { 0x27, 0x2f, 0x36, 0x3f },
+ { 0x31, 0x3e, 0x3f, 0xff },
+ { 0x36, 0x3f, 0xff, 0xff },
+ { 0x3f, 0xff, 0xff, 0xff }
+};
+
static const u8 qmp_dp_v6_pre_emphasis_hbr_rbr[4][4] = {
{ 0x20, 0x2d, 0x34, 0x3a },
{ 0x20, 0x2e, 0x35, 0xff },
@@ -1787,22 +1925,22 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.pcs_usb_tbl = x1e80100_usb43dp_pcs_usb_tbl,
.pcs_usb_tbl_num = ARRAY_SIZE(x1e80100_usb43dp_pcs_usb_tbl),
- .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
- .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
- .dp_tx_tbl = qmp_v6_dp_tx_tbl,
- .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
-
- .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
- .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
- .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
- .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
- .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
- .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
- .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
- .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
-
- .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
- .pre_emphasis_hbr_rbr = &qmp_dp_v5_pre_emphasis_hbr_rbr,
+ .dp_serdes_tbl = qmp_v6_n4_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_n4_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_n4_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_n4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_n4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_n4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_n4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_n4_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
@@ -1815,7 +1953,7 @@ static const struct qmp_phy_cfg x1e80100_usb3dpphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v45_usb3phy_regs_layout,
+ .regs = qmp_v6_n4_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
@@ -2001,6 +2139,51 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
};
+static const struct qmp_phy_cfg sm8650_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = sm8550_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8550_usb3_serdes_tbl),
+ .tx_tbl = sm8550_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl),
+ .rx_tbl = sm8550_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl),
+ .pcs_tbl = sm8550_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v6_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v6_usb3phy_regs_layout,
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
{
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -3631,7 +3814,7 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
},
{
.compatible = "qcom,sm8650-qmp-usb3-dp-phy",
- .data = &sm8550_usb3dpphy_cfg,
+ .data = &sm8650_usb3dpphy_cfg,
},
{
.compatible = "qcom,x1e80100-qmp-usb3-dp-phy",
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
new file mode 100644
index 000000000..b3024714d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6-n4.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_N4_H_
+#define QCOM_PHY_QMP_PCS_V6_N4_H_
+
+/* Only for QMP V6 N4 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_N4_PCS_SW_RESET 0x000
+#define QPHY_V6_N4_PCS_PCS_STATUS1 0x014
+#define QPHY_V6_N4_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V6_N4_PCS_START_CONTROL 0x044
+#define QPHY_V6_N4_PCS_POWER_STATE_CONFIG1 0x090
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG1 0x0c4
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG2 0x0c8
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG3 0x0cc
+#define QPHY_V6_N4_PCS_LOCK_DETECT_CONFIG6 0x0d8
+#define QPHY_V6_N4_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V6_N4_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_V6_N4_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_V6_N4_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V6_N4_PCS_RX_CONFIG 0x1b0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_V6_N4_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_V6_N4_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_V6_N4_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_V6_N4_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_N4_PCS_EQ_CONFIG5 0x1ec
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
index a814ad11a..d37cc0d4f 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_n4.h
@@ -6,11 +6,24 @@
#ifndef QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
#define QCOM_PHY_QMP_QSERDES_TXRX_V6_N4_H_
+#define QSERDES_V6_N4_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_N4_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_N4_TX_TX_DRV_LVL 0x14
+#define QSERDES_V6_N4_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V6_N4_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_TX 0x30
#define QSERDES_V6_N4_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_N4_TX_TRANSCEIVER_BIAS_EN 0x48
+#define QSERDES_V6_N4_TX_HIGHZ_DRVR_EN 0x4c
+#define QSERDES_V6_N4_TX_TX_POL_INV 0x50
+#define QSERDES_V6_N4_TX_PARRATE_REC_DETECT_IDLE_EN 0x54
#define QSERDES_V6_N4_TX_LANE_MODE_1 0x78
#define QSERDES_V6_N4_TX_LANE_MODE_2 0x7c
#define QSERDES_V6_N4_TX_LANE_MODE_3 0x80
+#define QSERDES_V6_N4_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V6_N4_TX_TX_BAND 0xd8
+#define QSERDES_V6_N4_TX_INTERFACE_SELECT 0xe4
+#define QSERDES_V6_N4_TX_VMODE_CTRL1 0xb0
#define QSERDES_V6_N4_RX_UCDR_FO_GAIN_RATE2 0x8
#define QSERDES_V6_N4_RX_UCDR_SO_GAIN_RATE2 0x18
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index d10b8f653..d0f41e4aa 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -46,6 +46,8 @@
#include "phy-qcom-qmp-pcs-v6.h"
+#include "phy-qcom-qmp-pcs-v6-n4.h"
+
#include "phy-qcom-qmp-pcs-v6_20.h"
#include "phy-qcom-qmp-pcs-v7.h"
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index c25357ca1..b9f067de8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -65,7 +65,7 @@ enum {
.intr_detection_width = 2, \
}
-#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+#define SDC_QDSD_PINGROUP(pg_name, _tile, ctl, pull, drv) \
{ \
.grp = PINCTRL_PINGROUP(#pg_name, \
pg_name##_pins, \
@@ -75,7 +75,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = SOUTH, \
+ .tile = _tile, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -101,7 +101,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
- .tile = SOUTH, \
+ .tile = WEST, \
.mux_bit = -1, \
.pull_bit = 3, \
.drv_bit = 0, \
@@ -1199,13 +1199,13 @@ static const struct msm_pingroup sm7150_groups[] = {
[117] = PINGROUP(117, NORTH, _, _, _, _, _, _, _, _, _),
[118] = PINGROUP(118, NORTH, _, _, _, _, _, _, _, _, _),
[119] = UFS_RESET(ufs_reset, 0x9f000),
- [120] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
- [121] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
- [122] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
- [123] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
- [124] = SDC_QDSD_PINGROUP(sdc2_clk, 0x98000, 14, 6),
- [125] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x98000, 11, 3),
- [126] = SDC_QDSD_PINGROUP(sdc2_data, 0x98000, 9, 0),
+ [120] = SDC_QDSD_PINGROUP(sdc1_rclk, WEST, 0x9a000, 15, 0),
+ [121] = SDC_QDSD_PINGROUP(sdc1_clk, WEST, 0x9a000, 13, 6),
+ [122] = SDC_QDSD_PINGROUP(sdc1_cmd, WEST, 0x9a000, 11, 3),
+ [123] = SDC_QDSD_PINGROUP(sdc1_data, WEST, 0x9a000, 9, 0),
+ [124] = SDC_QDSD_PINGROUP(sdc2_clk, SOUTH, 0x98000, 14, 6),
+ [125] = SDC_QDSD_PINGROUP(sdc2_cmd, SOUTH, 0x98000, 11, 3),
+ [126] = SDC_QDSD_PINGROUP(sdc2_data, SOUTH, 0x98000, 9, 0),
};
static const struct msm_gpio_wakeirq_map sm7150_pdc_map[] = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a779h0.c b/drivers/pinctrl/renesas/pfc-r8a779h0.c
index afa8f06c8..0cbfe7637 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779h0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779h0.c
@@ -75,10 +75,10 @@
#define GPSR0_9 F_(MSIOF5_SYNC, IP1SR0_7_4)
#define GPSR0_8 F_(MSIOF5_SS1, IP1SR0_3_0)
#define GPSR0_7 F_(MSIOF5_SS2, IP0SR0_31_28)
-#define GPSR0_6 F_(IRQ0, IP0SR0_27_24)
-#define GPSR0_5 F_(IRQ1, IP0SR0_23_20)
-#define GPSR0_4 F_(IRQ2, IP0SR0_19_16)
-#define GPSR0_3 F_(IRQ3, IP0SR0_15_12)
+#define GPSR0_6 F_(IRQ0_A, IP0SR0_27_24)
+#define GPSR0_5 F_(IRQ1_A, IP0SR0_23_20)
+#define GPSR0_4 F_(IRQ2_A, IP0SR0_19_16)
+#define GPSR0_3 F_(IRQ3_A, IP0SR0_15_12)
#define GPSR0_2 F_(GP0_02, IP0SR0_11_8)
#define GPSR0_1 F_(GP0_01, IP0SR0_7_4)
#define GPSR0_0 F_(GP0_00, IP0SR0_3_0)
@@ -265,10 +265,10 @@
#define IP0SR0_3_0 F_(0, 0) FM(ERROROUTC_N_B) FM(TCLK2_B) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_7_4 F_(0, 0) FM(MSIOF3_SS1) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_11_8 F_(0, 0) FM(MSIOF3_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_15_12 FM(IRQ3) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_19_16 FM(IRQ2) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_23_20 FM(IRQ1) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
-#define IP0SR0_27_24 FM(IRQ0) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_15_12 FM(IRQ3_A) FM(MSIOF3_SCK) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_19_16 FM(IRQ2_A) FM(MSIOF3_TXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_23_20 FM(IRQ1_A) FM(MSIOF3_RXD) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
+#define IP0SR0_27_24 FM(IRQ0_A) FM(MSIOF3_SYNC) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
#define IP0SR0_31_28 FM(MSIOF5_SS2) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0) F_(0, 0)
/* IP1SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
@@ -672,16 +672,16 @@ static const u16 pinmux_data[] = {
PINMUX_IPSR_GPSR(IP0SR0_11_8, MSIOF3_SS2),
- PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3),
+ PINMUX_IPSR_GPSR(IP0SR0_15_12, IRQ3_A),
PINMUX_IPSR_GPSR(IP0SR0_15_12, MSIOF3_SCK),
- PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2),
+ PINMUX_IPSR_GPSR(IP0SR0_19_16, IRQ2_A),
PINMUX_IPSR_GPSR(IP0SR0_19_16, MSIOF3_TXD),
- PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1),
+ PINMUX_IPSR_GPSR(IP0SR0_23_20, IRQ1_A),
PINMUX_IPSR_GPSR(IP0SR0_23_20, MSIOF3_RXD),
- PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0),
+ PINMUX_IPSR_GPSR(IP0SR0_27_24, IRQ0_A),
PINMUX_IPSR_GPSR(IP0SR0_27_24, MSIOF3_SYNC),
PINMUX_IPSR_GPSR(IP0SR0_31_28, MSIOF5_SS2),
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 20425afc6..248ab71b9 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -892,6 +892,8 @@ static int rzg2l_set_power_source(struct rzg2l_pinctrl *pctrl, u32 pin, u32 caps
val = PVDD_1800;
break;
case 2500:
+ if (!(caps & (PIN_CFG_IO_VMC_ETH0 | PIN_CFG_IO_VMC_ETH1)))
+ return -EINVAL;
val = PVDD_2500;
break;
case 3300:
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index badc68bba..47d19f7e2 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -432,6 +432,12 @@ static void cros_ec_send_resume_event(struct cros_ec_device *ec_dev)
void cros_ec_resume_complete(struct cros_ec_device *ec_dev)
{
cros_ec_send_resume_event(ec_dev);
+
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
}
EXPORT_SYMBOL(cros_ec_resume_complete);
@@ -442,12 +448,6 @@ static void cros_ec_enable_irq(struct cros_ec_device *ec_dev)
if (ec_dev->wake_enabled)
disable_irq_wake(ec_dev->irq);
-
- /*
- * Let the mfd devices know about events that occur during
- * suspend. This way the clients know what to do with them.
- */
- cros_ec_report_events_during_suspend(ec_dev);
}
/**
@@ -475,8 +475,8 @@ EXPORT_SYMBOL(cros_ec_resume_early);
*/
int cros_ec_resume(struct cros_ec_device *ec_dev)
{
- cros_ec_enable_irq(ec_dev);
- cros_ec_send_resume_event(ec_dev);
+ cros_ec_resume_early(ec_dev);
+ cros_ec_resume_complete(ec_dev);
return 0;
}
EXPORT_SYMBOL(cros_ec_resume);
diff --git a/drivers/platform/chrome/cros_usbpd_logger.c b/drivers/platform/chrome/cros_usbpd_logger.c
index f618757f8..930c2f472 100644
--- a/drivers/platform/chrome/cros_usbpd_logger.c
+++ b/drivers/platform/chrome/cros_usbpd_logger.c
@@ -7,6 +7,7 @@
#include <linux/ktime.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -249,6 +250,12 @@ static int __maybe_unused cros_usbpd_logger_suspend(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_usbpd_logger_pm_ops, cros_usbpd_logger_suspend,
cros_usbpd_logger_resume);
+static const struct platform_device_id cros_usbpd_logger_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_logger_id);
+
static struct platform_driver cros_usbpd_logger_driver = {
.driver = {
.name = DRV_NAME,
@@ -256,10 +263,10 @@ static struct platform_driver cros_usbpd_logger_driver = {
},
.probe = cros_usbpd_logger_probe,
.remove_new = cros_usbpd_logger_remove,
+ .id_table = cros_usbpd_logger_id,
};
module_platform_driver(cros_usbpd_logger_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Logging driver for ChromeOS EC USBPD Charger.");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
index aacad022f..c83f81d86 100644
--- a/drivers/platform/chrome/cros_usbpd_notify.c
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -6,6 +6,7 @@
*/
#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
@@ -218,12 +219,19 @@ static void cros_usbpd_notify_remove_plat(struct platform_device *pdev)
&pdnotify->nb);
}
+static const struct platform_device_id cros_usbpd_notify_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_notify_id);
+
static struct platform_driver cros_usbpd_notify_plat_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cros_usbpd_notify_probe_plat,
.remove_new = cros_usbpd_notify_remove_plat,
+ .id_table = cros_usbpd_notify_id,
};
static int __init cros_usbpd_notify_init(void)
@@ -258,4 +266,3 @@ module_exit(cros_usbpd_notify_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c
index e61bfaf8b..86b95206c 100644
--- a/drivers/platform/x86/dell/dell-smbios-base.c
+++ b/drivers/platform/x86/dell/dell-smbios-base.c
@@ -11,6 +11,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/container_of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/capability.h>
@@ -25,11 +26,16 @@ static u32 da_supported_commands;
static int da_num_tokens;
static struct platform_device *platform_device;
static struct calling_interface_token *da_tokens;
-static struct device_attribute *token_location_attrs;
-static struct device_attribute *token_value_attrs;
+static struct token_sysfs_data *token_entries;
static struct attribute **token_attrs;
static DEFINE_MUTEX(smbios_mutex);
+struct token_sysfs_data {
+ struct device_attribute location_attr;
+ struct device_attribute value_attr;
+ struct calling_interface_token *token;
+};
+
struct smbios_device {
struct list_head list;
struct device *device;
@@ -416,47 +422,26 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int match_attribute(struct device *dev,
- struct device_attribute *attr)
-{
- int i;
-
- for (i = 0; i < da_num_tokens * 2; i++) {
- if (!token_attrs[i])
- continue;
- if (strcmp(token_attrs[i]->name, attr->attr.name) == 0)
- return i/2;
- }
- dev_dbg(dev, "couldn't match: %s\n", attr->attr.name);
- return -EINVAL;
-}
-
static ssize_t location_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, location_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].location);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->location);
}
static ssize_t value_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int i;
+ struct token_sysfs_data *data = container_of(attr, struct token_sysfs_data, value_attr);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- i = match_attribute(dev, attr);
- if (i > 0)
- return sysfs_emit(buf, "%08x", da_tokens[i].value);
- return 0;
+ return sysfs_emit(buf, "%08x", data->token->value);
}
static struct attribute_group smbios_attribute_group = {
@@ -473,22 +458,15 @@ static int build_tokens_sysfs(struct platform_device *dev)
{
char *location_name;
char *value_name;
- size_t size;
int ret;
int i, j;
- /* (number of tokens + 1 for null terminated */
- size = sizeof(struct device_attribute) * (da_num_tokens + 1);
- token_location_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_location_attrs)
+ token_entries = kcalloc(da_num_tokens, sizeof(*token_entries), GFP_KERNEL);
+ if (!token_entries)
return -ENOMEM;
- token_value_attrs = kzalloc(size, GFP_KERNEL);
- if (!token_value_attrs)
- goto out_allocate_value;
/* need to store both location and value + terminator*/
- size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1);
- token_attrs = kzalloc(size, GFP_KERNEL);
+ token_attrs = kcalloc((2 * da_num_tokens) + 1, sizeof(*token_attrs), GFP_KERNEL);
if (!token_attrs)
goto out_allocate_attrs;
@@ -496,27 +474,32 @@ static int build_tokens_sysfs(struct platform_device *dev)
/* skip empty */
if (da_tokens[i].tokenID == 0)
continue;
+
+ token_entries[i].token = &da_tokens[i];
+
/* add location */
location_name = kasprintf(GFP_KERNEL, "%04x_location",
da_tokens[i].tokenID);
if (location_name == NULL)
goto out_unwind_strings;
- sysfs_attr_init(&token_location_attrs[i].attr);
- token_location_attrs[i].attr.name = location_name;
- token_location_attrs[i].attr.mode = 0444;
- token_location_attrs[i].show = location_show;
- token_attrs[j++] = &token_location_attrs[i].attr;
+
+ sysfs_attr_init(&token_entries[i].location_attr.attr);
+ token_entries[i].location_attr.attr.name = location_name;
+ token_entries[i].location_attr.attr.mode = 0444;
+ token_entries[i].location_attr.show = location_show;
+ token_attrs[j++] = &token_entries[i].location_attr.attr;
/* add value */
value_name = kasprintf(GFP_KERNEL, "%04x_value",
da_tokens[i].tokenID);
if (value_name == NULL)
goto loop_fail_create_value;
- sysfs_attr_init(&token_value_attrs[i].attr);
- token_value_attrs[i].attr.name = value_name;
- token_value_attrs[i].attr.mode = 0444;
- token_value_attrs[i].show = value_show;
- token_attrs[j++] = &token_value_attrs[i].attr;
+
+ sysfs_attr_init(&token_entries[i].value_attr.attr);
+ token_entries[i].value_attr.attr.name = value_name;
+ token_entries[i].value_attr.attr.mode = 0444;
+ token_entries[i].value_attr.show = value_show;
+ token_attrs[j++] = &token_entries[i].value_attr.attr;
continue;
loop_fail_create_value:
@@ -532,14 +515,12 @@ loop_fail_create_value:
out_unwind_strings:
while (i--) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
out_allocate_attrs:
- kfree(token_value_attrs);
-out_allocate_value:
- kfree(token_location_attrs);
+ kfree(token_entries);
return -ENOMEM;
}
@@ -551,12 +532,11 @@ static void free_group(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj,
&smbios_attribute_group);
for (i = 0; i < da_num_tokens; i++) {
- kfree(token_location_attrs[i].attr.name);
- kfree(token_value_attrs[i].attr.name);
+ kfree(token_entries[i].location_attr.attr.name);
+ kfree(token_entries[i].value_attr.attr.name);
}
kfree(token_attrs);
- kfree(token_value_attrs);
- kfree(token_location_attrs);
+ kfree(token_entries);
}
static int __init dell_smbios_init(void)
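The dell-smbios-base change above drops the linear match_attribute() name search and instead wraps each location/value attribute pair in a per-token struct, so the show() callbacks can recover their token with container_of(). A minimal user-space sketch of that pattern, assuming made-up struct and field names rather than the driver's:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the wrapper from a member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct attribute {			/* stand-in for struct device_attribute */
	const char *name;
};

struct token_entry {			/* stand-in for struct token_sysfs_data */
	struct attribute location_attr;
	struct attribute value_attr;
	unsigned int location;
	unsigned int value;
};

/* The callback only sees the attribute, yet reaches its owning token directly. */
static void show_location(struct attribute *attr)
{
	struct token_entry *e = container_of(attr, struct token_entry, location_attr);

	printf("%s -> %08x\n", attr->name, e->location);
}

int main(void)
{
	struct token_entry e = {
		.location_attr = { .name = "0480_location" },
		.value_attr    = { .name = "0480_value" },
		.location = 0x1234,
		.value = 1,
	};

	show_location(&e.location_attr);
	return 0;
}

No per-name scan is needed; the wrapper's layout alone identifies the owning entry, which is why match_attribute() could be deleted in the hunk above.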
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index 910df7c65..003e765de 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -763,8 +763,11 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
* when actual device nodes created outside this
* loop via tpmi_create_devices().
*/
- if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
- tpmi_process_info(tpmi_info, pfs);
+ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID) {
+ ret = tpmi_process_info(tpmi_info, pfs);
+ if (ret)
+ return ret;
+ }
if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
tpmi_set_control_base(auxdev, tpmi_info, pfs);
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
index ef730200a..bb8e72deb 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
@@ -240,6 +240,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
bool read_blocked = 0, write_blocked = 0;
struct intel_tpmi_plat_info *plat_info;
struct tpmi_uncore_struct *tpmi_uncore;
+ bool uncore_sysfs_added = false;
int ret, i, pkg = 0;
int num_resources;
@@ -384,9 +385,15 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
}
/* Point to next cluster offset */
cluster_offset >>= UNCORE_MAX_CLUSTER_PER_DOMAIN;
+ uncore_sysfs_added = true;
}
}
+ if (!uncore_sysfs_added) {
+ ret = -ENODEV;
+ goto remove_clusters;
+ }
+
auxiliary_set_drvdata(auxdev, tpmi_uncore);
tpmi_uncore->root_cluster.root_domain = true;
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index 3d66e1d4e..1ac30034f 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -56,12 +56,9 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
-static bool p2sb_valid_resource(struct resource *res)
+static bool p2sb_valid_resource(const struct resource *res)
{
- if (res->flags)
- return true;
-
- return false;
+ return res->flags & ~IORESOURCE_UNSET;
}
/* Copy resource from the first BAR of the device in question */
@@ -220,16 +217,20 @@ EXPORT_SYMBOL_GPL(p2sb_bar);
static int __init p2sb_fs_init(void)
{
- p2sb_cache_resources();
- return 0;
+ return p2sb_cache_resources();
}
/*
- * pci_rescan_remove_lock to avoid access to unhidden P2SB devices can
- * not be locked in sysfs pci bus rescan path because of deadlock. To
- * avoid the deadlock, access to P2SB devices with the lock at an early
- * step in kernel initialization and cache required resources. This
- * should happen after subsys_initcall which initializes PCI subsystem
- * and before device_initcall which requires P2SB resources.
+ * pci_rescan_remove_lock() can not be locked in sysfs PCI bus rescan path
+ * because of deadlock. To avoid the deadlock, access P2SB devices with the lock
+ * at an early step in kernel initialization and cache required resources.
+ *
+ * We want to run as early as possible. If the P2SB was assigned a bad BAR,
+ * we'll need to wait on pcibios_assign_resources() to fix it. So, our list of
+ * initcall dependencies looks something like this:
+ *
+ * ...
+ * subsys_initcall (pci_subsys_init)
+ * fs_initcall (pcibios_assign_resources)
*/
-fs_initcall(p2sb_fs_init);
+fs_initcall_sync(p2sb_fs_init);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 82429e599..87a4a381b 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3044,10 +3044,9 @@ static void tpacpi_send_radiosw_update(void)
static void hotkey_exit(void)
{
-#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
mutex_lock(&hotkey_mutex);
+#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
hotkey_poll_stop_sync();
- mutex_unlock(&hotkey_mutex);
#endif
dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
"restoring original HKEY status and mask\n");
@@ -3057,6 +3056,8 @@ static void hotkey_exit(void)
hotkey_mask_set(hotkey_orig_mask)) |
hotkey_status_set(false)) != 0)
pr_err("failed to restore hot key mask to BIOS defaults\n");
+
+ mutex_unlock(&hotkey_mutex);
}
static void __init hotkey_unmap(const unsigned int scancode)
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 77244c9aa..16e941449 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -57,6 +57,11 @@ module_param(turn_on_panel_on_resume, int, 0644);
MODULE_PARM_DESC(turn_on_panel_on_resume,
"Call HCI_PANEL_POWER_ON on resume (-1 = auto, 0 = no, 1 = yes");
+static int hci_hotkey_quickstart = -1;
+module_param(hci_hotkey_quickstart, int, 0644);
+MODULE_PARM_DESC(hci_hotkey_quickstart,
+ "Call HCI_HOTKEY_EVENT with value 0x5 for quickstart button support (-1 = auto, 0 = no, 1 = yes");
+
#define TOSHIBA_WMI_EVENT_GUID "59142400-C6A3-40FA-BADB-8A2652834100"
/* Scan code for Fn key on TOS1900 models */
@@ -136,6 +141,7 @@ MODULE_PARM_DESC(turn_on_panel_on_resume,
#define HCI_ACCEL_MASK 0x7fff
#define HCI_ACCEL_DIRECTION_MASK 0x8000
#define HCI_HOTKEY_DISABLE 0x0b
+#define HCI_HOTKEY_ENABLE_QUICKSTART 0x05
#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
@@ -2731,10 +2737,15 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev)
return -ENODEV;
/*
+ * Enable quickstart buttons if supported.
+ *
* Enable the "Special Functions" mode only if they are
* supported and if they are activated.
*/
- if (dev->kbd_function_keys_supported && dev->special_functions)
+ if (hci_hotkey_quickstart)
+ result = hci_write(dev, HCI_HOTKEY_EVENT,
+ HCI_HOTKEY_ENABLE_QUICKSTART);
+ else if (dev->kbd_function_keys_supported && dev->special_functions)
result = hci_write(dev, HCI_HOTKEY_EVENT,
HCI_HOTKEY_SPECIAL_FUNCTIONS);
else
@@ -3258,7 +3269,14 @@ static const char *find_hci_method(acpi_handle handle)
* works. toshiba_acpi_resume() uses HCI_PANEL_POWER_ON to avoid changing
* the configured brightness level.
*/
-static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
+#define QUIRK_TURN_ON_PANEL_ON_RESUME BIT(0)
+/*
+ * Some Toshibas use "quickstart" keys. On these, HCI_HOTKEY_EVENT must use
+ * the value HCI_HOTKEY_ENABLE_QUICKSTART.
+ */
+#define QUIRK_HCI_HOTKEY_QUICKSTART BIT(1)
+
+static const struct dmi_system_id toshiba_dmi_quirks[] = {
{
/* Toshiba Portégé R700 */
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
@@ -3266,6 +3284,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
+ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
},
{
/* Toshiba Satellite/Portégé R830 */
@@ -3275,6 +3294,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "R830"),
},
+ .driver_data = (void *)QUIRK_TURN_ON_PANEL_ON_RESUME,
},
{
/* Toshiba Satellite/Portégé Z830 */
@@ -3282,6 +3302,7 @@ static const struct dmi_system_id turn_on_panel_on_resume_dmi_ids[] = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "Z830"),
},
+ .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART),
},
};
@@ -3290,6 +3311,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
struct toshiba_acpi_dev *dev;
const char *hci_method;
u32 dummy;
+ const struct dmi_system_id *dmi_id;
+ long quirks = 0;
int ret = 0;
if (toshiba_acpi)
@@ -3442,8 +3465,15 @@ iio_error:
}
#endif
+ dmi_id = dmi_first_match(toshiba_dmi_quirks);
+ if (dmi_id)
+ quirks = (long)dmi_id->driver_data;
+
if (turn_on_panel_on_resume == -1)
- turn_on_panel_on_resume = dmi_check_system(turn_on_panel_on_resume_dmi_ids);
+ turn_on_panel_on_resume = !!(quirks & QUIRK_TURN_ON_PANEL_ON_RESUME);
+
+ if (hci_hotkey_quickstart == -1)
+ hci_hotkey_quickstart = !!(quirks & QUIRK_HCI_HOTKEY_QUICKSTART);
toshiba_wwan_available(dev);
if (dev->wwan_supported)
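The toshiba_acpi hunks above merge two DMI quirk mechanisms into one table by packing quirk bits into the match entry's driver_data pointer and testing them after dmi_first_match(). A small user-space sketch of the same flags-in-a-pointer idiom, with illustrative table entries and flag names:

#include <stdio.h>
#include <string.h>

#define QUIRK_TURN_ON_PANEL_ON_RESUME (1L << 0)
#define QUIRK_HOTKEY_QUICKSTART       (1L << 1)

struct quirk_entry {			/* stand-in for struct dmi_system_id */
	const char *product;
	void *driver_data;		/* quirk bits carried through a pointer */
};

static const struct quirk_entry quirks[] = {
	{ "PORTEGE R700", (void *)QUIRK_TURN_ON_PANEL_ON_RESUME },
	{ "Z830", (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HOTKEY_QUICKSTART) },
	{ NULL, NULL },
};

static long lookup_quirks(const char *product)
{
	for (const struct quirk_entry *q = quirks; q->product; q++)
		if (!strcmp(q->product, product))
			return (long)q->driver_data;
	return 0;
}

int main(void)
{
	long q = lookup_quirks("Z830");

	printf("panel-on-resume: %d, quickstart: %d\n",
	       !!(q & QUIRK_TURN_ON_PANEL_ON_RESUME),
	       !!(q & QUIRK_HOTKEY_QUICKSTART));
	return 0;
}

The table only has to be matched once, and several module-parameter defaults can be derived from the same quirks word, as the hunk above does.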
diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
index a3415f1c0..6559bb4ea 100644
--- a/drivers/platform/x86/x86-android-tablets/core.c
+++ b/drivers/platform/x86/x86-android-tablets/core.c
@@ -278,25 +278,25 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
{
int i;
- for (i = 0; i < serdev_count; i++) {
+ for (i = serdev_count - 1; i >= 0; i--) {
if (serdevs[i])
serdev_device_remove(serdevs[i]);
}
kfree(serdevs);
- for (i = 0; i < pdev_count; i++)
+ for (i = pdev_count - 1; i >= 0; i--)
platform_device_unregister(pdevs[i]);
kfree(pdevs);
kfree(buttons);
- for (i = 0; i < spi_dev_count; i++)
+ for (i = spi_dev_count - 1; i >= 0; i--)
spi_unregister_device(spi_devs[i]);
kfree(spi_devs);
- for (i = 0; i < i2c_client_count; i++)
+ for (i = i2c_client_count - 1; i >= 0; i--)
i2c_unregister_device(i2c_clients[i]);
kfree(i2c_clients);
diff --git a/drivers/platform/x86/x86-android-tablets/dmi.c b/drivers/platform/x86/x86-android-tablets/dmi.c
index 5d6c12494..141a2d25e 100644
--- a/drivers/platform/x86/x86-android-tablets/dmi.c
+++ b/drivers/platform/x86/x86-android-tablets/dmi.c
@@ -106,6 +106,24 @@ const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
},
{
/*
+ * Lenovo Yoga Tablet 2 Pro 1380F/L (13") This has more or less
+ * the same BIOS as the 830F/L or 1050F/L (8" and 10") below,
+ * but unlike the 8" / 10" models which share the same mainboard
+ * this model has a different mainboard.
+ * This match for the 13" model MUST come before the 8" + 10"
+ * match since that one will also match the 13" model!
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+ DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+ /* Full match so as to NOT match the 830/1050 BIOS */
+ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
+ },
+ .driver_data = (void *)&lenovo_yoga_tab2_1380_info,
+ },
+ {
+ /*
* Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10"
* Lenovo Yoga Tablet 2 use the same mainboard)
*/
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index c29739195..16fa04d60 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -19,6 +19,7 @@
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/lp855x.h>
#include <linux/platform_device.h>
+#include <linux/power/bq24190_charger.h>
#include <linux/reboot.h>
#include <linux/rmi.h>
#include <linux/spi/spi.h>
@@ -565,6 +566,221 @@ static void lenovo_yoga_tab2_830_1050_exit(void)
}
}
+/*
+ * Lenovo Yoga Tablet 2 Pro 1380F/L
+ *
+ * The Lenovo Yoga Tablet 2 Pro 1380F/L mostly has the same design as the 830F/L
+ * and the 1050F/L so this re-uses some of the handling for that from above.
+ */
+static const char * const lc824206xa_chg_det_psy[] = { "lc824206xa-charger-detect" };
+
+static const struct property_entry lenovo_yoga_tab2_1380_bq24190_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lc824206xa_chg_det_psy),
+ PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node),
+ PROPERTY_ENTRY_BOOL("omit-battery-class"),
+ PROPERTY_ENTRY_BOOL("disable-reset"),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_1380_bq24190_node = {
+ .properties = lenovo_yoga_tab2_1380_bq24190_props,
+};
+
+/* For enabling the bq24190 5V boost based on id-pin */
+static struct regulator_consumer_supply lc824206xa_consumer = {
+ .supply = "vbus",
+ .dev_name = "i2c-lc824206xa",
+};
+
+static const struct regulator_init_data lenovo_yoga_tab2_1380_bq24190_vbus_init_data = {
+ .constraints = {
+ .name = "bq24190_vbus",
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &lc824206xa_consumer,
+ .num_consumer_supplies = 1,
+};
+
+struct bq24190_platform_data lenovo_yoga_tab2_1380_bq24190_pdata = {
+ .regulator_init_data = &lenovo_yoga_tab2_1380_bq24190_vbus_init_data,
+};
+
+static const struct property_entry lenovo_yoga_tab2_1380_lc824206xa_props[] = {
+ PROPERTY_ENTRY_BOOL("onnn,enable-miclr-for-dcp"),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_1380_lc824206xa_node = {
+ .properties = lenovo_yoga_tab2_1380_lc824206xa_props,
+};
+
+static const char * const lenovo_yoga_tab2_1380_lms303d_mount_matrix[] = {
+ "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1"
+};
+
+static const struct property_entry lenovo_yoga_tab2_1380_lms303d_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", lenovo_yoga_tab2_1380_lms303d_mount_matrix),
+ { }
+};
+
+static const struct software_node lenovo_yoga_tab2_1380_lms303d_node = {
+ .properties = lenovo_yoga_tab2_1380_lms303d_props,
+};
+
+static const struct x86_i2c_client_info lenovo_yoga_tab2_1380_i2c_clients[] __initconst = {
+ {
+ /* BQ27541 fuel-gauge */
+ .board_info = {
+ .type = "bq27541",
+ .addr = 0x55,
+ .dev_name = "bq27541",
+ .swnode = &fg_bq24190_supply_node,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ }, {
+ /* bq24292i battery charger */
+ .board_info = {
+ .type = "bq24190",
+ .addr = 0x6b,
+ .dev_name = "bq24292i",
+ .swnode = &lenovo_yoga_tab2_1380_bq24190_node,
+ .platform_data = &lenovo_yoga_tab2_1380_bq24190_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C1",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 2,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ .con_id = "bq24292i_irq",
+ },
+ }, {
+ /* LP8557 Backlight controller */
+ .board_info = {
+ .type = "lp8557",
+ .addr = 0x2c,
+ .dev_name = "lp8557",
+ .platform_data = &lenovo_lp8557_pwm_and_reg_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ }, {
+ /* LC824206XA Micro USB Switch */
+ .board_info = {
+ .type = "lc824206xa",
+ .addr = 0x48,
+ .dev_name = "lc824206xa",
+ .swnode = &lenovo_yoga_tab2_1380_lc824206xa_node,
+ },
+ .adapter_path = "\\_SB_.I2C3",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_GPIOINT,
+ .chip = "INT33FC:02",
+ .index = 1,
+ .trigger = ACPI_LEVEL_SENSITIVE,
+ .polarity = ACPI_ACTIVE_LOW,
+ .con_id = "lc824206xa_irq",
+ },
+ }, {
+ /* AL3320A ambient light sensor */
+ .board_info = {
+ .type = "al3320a",
+ .addr = 0x1c,
+ .dev_name = "al3320a",
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* LSM303DA accelerometer + magnetometer */
+ .board_info = {
+ .type = "lsm303d",
+ .addr = 0x1d,
+ .dev_name = "lsm303d",
+ .swnode = &lenovo_yoga_tab2_1380_lms303d_node,
+ },
+ .adapter_path = "\\_SB_.I2C5",
+ }, {
+ /* Synaptics RMI touchscreen */
+ .board_info = {
+ .type = "rmi4_i2c",
+ .addr = 0x38,
+ .dev_name = "rmi4_i2c",
+ .platform_data = &lenovo_yoga_tab2_830_1050_rmi_pdata,
+ },
+ .adapter_path = "\\_SB_.I2C6",
+ .irq_data = {
+ .type = X86_ACPI_IRQ_TYPE_APIC,
+ .index = 0x45,
+ .trigger = ACPI_EDGE_SENSITIVE,
+ .polarity = ACPI_ACTIVE_HIGH,
+ },
+ }
+};
+
+static const struct platform_device_info lenovo_yoga_tab2_1380_pdevs[] __initconst = {
+ {
+ /* For the Tablet 2 Pro 1380's custom fast charging driver */
+ .name = "lenovo-yoga-tab2-pro-1380-fastcharger",
+ .id = PLATFORM_DEVID_NONE,
+ },
+};
+
+const char * const lenovo_yoga_tab2_1380_modules[] __initconst = {
+ "bq24190_charger", /* For the Vbus regulator for lc824206xa */
+ NULL
+};
+
+static int __init lenovo_yoga_tab2_1380_init(void)
+{
+ int ret;
+
+ /* To verify that the DMI matching works vs the 830 / 1050 models */
+ pr_info("detected Lenovo Yoga Tablet 2 Pro 1380F/L\n");
+
+ ret = lenovo_yoga_tab2_830_1050_init_codec();
+ if (ret)
+ return ret;
+
+ /* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */
+ lenovo_yoga_tab2_830_1050_sys_off_handler =
+ register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1,
+ lenovo_yoga_tab2_830_1050_power_off, NULL);
+ if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler))
+ return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler);
+
+ return 0;
+}
+
+static struct gpiod_lookup_table lenovo_yoga_tab2_1380_fc_gpios = {
+ .dev_id = "serial0-0",
+ .table = {
+ GPIO_LOOKUP("INT33FC:00", 57, "uart3_txd", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("INT33FC:00", 61, "uart3_rxd", GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct gpiod_lookup_table * const lenovo_yoga_tab2_1380_gpios[] = {
+ &lenovo_yoga_tab2_830_1050_codec_gpios,
+ &lenovo_yoga_tab2_1380_fc_gpios,
+ NULL
+};
+
+const struct x86_dev_info lenovo_yoga_tab2_1380_info __initconst = {
+ .i2c_client_info = lenovo_yoga_tab2_1380_i2c_clients,
+ .i2c_client_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_i2c_clients),
+ .pdev_info = lenovo_yoga_tab2_1380_pdevs,
+ .pdev_count = ARRAY_SIZE(lenovo_yoga_tab2_1380_pdevs),
+ .gpio_button = &lenovo_yoga_tab2_830_1050_lid,
+ .gpio_button_count = 1,
+ .gpiod_lookup_tables = lenovo_yoga_tab2_1380_gpios,
+ .bat_swnode = &generic_lipo_hv_4v35_battery_node,
+ .modules = lenovo_yoga_tab2_1380_modules,
+ .init = lenovo_yoga_tab2_1380_init,
+ .exit = lenovo_yoga_tab2_830_1050_exit,
+};
+
/* Lenovo Yoga Tab 3 Pro YT3-X90F */
/*
diff --git a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
index 468993edf..821dc094b 100644
--- a/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
+++ b/drivers/platform/x86/x86-android-tablets/x86-android-tablets.h
@@ -112,6 +112,7 @@ extern const struct x86_dev_info czc_p10t;
extern const struct x86_dev_info lenovo_yogabook_x90_info;
extern const struct x86_dev_info lenovo_yogabook_x91_info;
extern const struct x86_dev_info lenovo_yoga_tab2_830_1050_info;
+extern const struct x86_dev_info lenovo_yoga_tab2_1380_info;
extern const struct x86_dev_info lenovo_yt3_info;
extern const struct x86_dev_info medion_lifetab_s10346_info;
extern const struct x86_dev_info nextbook_ares8_info;
diff --git a/drivers/platform/x86/xiaomi-wmi.c b/drivers/platform/x86/xiaomi-wmi.c
index 54a2546bb..be80f0bda 100644
--- a/drivers/platform/x86/xiaomi-wmi.c
+++ b/drivers/platform/x86/xiaomi-wmi.c
@@ -2,8 +2,10 @@
/* WMI driver for Xiaomi Laptops */
#include <linux/acpi.h>
+#include <linux/device.h>
#include <linux/input.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/wmi.h>
#include <uapi/linux/input-event-codes.h>
@@ -20,12 +22,21 @@
struct xiaomi_wmi {
struct input_dev *input_dev;
+ struct mutex key_lock; /* Protects the key event sequence */
unsigned int key_code;
};
+static void xiaomi_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
{
struct xiaomi_wmi *data;
+ int ret;
if (wdev == NULL || context == NULL)
return -EINVAL;
@@ -35,6 +46,11 @@ static int xiaomi_wmi_probe(struct wmi_device *wdev, const void *context)
return -ENOMEM;
dev_set_drvdata(&wdev->dev, data);
+ mutex_init(&data->key_lock);
+ ret = devm_add_action_or_reset(&wdev->dev, xiaomi_mutex_destroy, &data->key_lock);
+ if (ret < 0)
+ return ret;
+
data->input_dev = devm_input_allocate_device(&wdev->dev);
if (data->input_dev == NULL)
return -ENOMEM;
@@ -59,10 +75,12 @@ static void xiaomi_wmi_notify(struct wmi_device *wdev, union acpi_object *dummy)
if (data == NULL)
return;
+ mutex_lock(&data->key_lock);
input_report_key(data->input_dev, data->key_code, 1);
input_sync(data->input_dev);
input_report_key(data->input_dev, data->key_code, 0);
input_sync(data->input_dev);
+ mutex_unlock(&data->key_lock);
}
static const struct wmi_device_id xiaomi_wmi_id_table[] = {
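The xiaomi-wmi hunk above pairs mutex_init() with a devm_add_action_or_reset() callback so the mutex is destroyed automatically when the device goes away. A user-space sketch of the underlying idea, recording cleanup callbacks next to the resources and running them LIFO on teardown (the names and fixed-size table are illustrative, not the devres implementation):

#include <stdio.h>

typedef void (*cleanup_fn)(void *data);

struct cleanup {
	cleanup_fn fn;
	void *data;
};

static struct cleanup actions[8];
static int n_actions;

/* Analogue of devm_add_action(): remember what to undo and with which argument. */
static int add_action(cleanup_fn fn, void *data)
{
	if (n_actions >= 8)
		return -1;
	actions[n_actions].fn = fn;
	actions[n_actions].data = data;
	n_actions++;
	return 0;
}

/* Run the recorded actions in reverse order, as devres does on detach. */
static void run_actions(void)
{
	while (n_actions--)
		actions[n_actions].fn(actions[n_actions].data);
}

static void destroy_lock(void *data)
{
	printf("destroying lock %s\n", (const char *)data);
}

int main(void)
{
	add_action(destroy_lock, (void *)"key_lock");
	/* ... device lifetime ... */
	run_actions();
	return 0;
}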
diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index 4215ffd9b..c40eda92a 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -184,6 +184,16 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
+struct device *dev_to_genpd_dev(struct device *dev)
+{
+ struct generic_pm_domain *genpd = dev_to_genpd(dev);
+
+ if (IS_ERR(genpd))
+ return ERR_CAST(genpd);
+
+ return &genpd->dev;
+}
+
static int genpd_stop_dev(const struct generic_pm_domain *genpd,
struct device *dev)
{
diff --git a/drivers/pmdomain/ti/ti_sci_pm_domains.c b/drivers/pmdomain/ti/ti_sci_pm_domains.c
index 9dddf227a..1510d5dda 100644
--- a/drivers/pmdomain/ti/ti_sci_pm_domains.c
+++ b/drivers/pmdomain/ti/ti_sci_pm_domains.c
@@ -114,6 +114,18 @@ static const struct of_device_id ti_sci_pm_domain_matches[] = {
};
MODULE_DEVICE_TABLE(of, ti_sci_pm_domain_matches);
+static bool ti_sci_pm_idx_exists(struct ti_sci_genpd_provider *pd_provider, u32 idx)
+{
+ struct ti_sci_pm_domain *pd;
+
+ list_for_each_entry(pd, &pd_provider->pd_list, node) {
+ if (pd->idx == idx)
+ return true;
+ }
+
+ return false;
+}
+
static int ti_sci_pm_domain_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -149,8 +161,14 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev)
break;
if (args.args_count >= 1 && args.np == dev->of_node) {
- if (args.args[0] > max_id)
+ if (args.args[0] > max_id) {
max_id = args.args[0];
+ } else {
+ if (ti_sci_pm_idx_exists(pd_provider, args.args[0])) {
+ index++;
+ continue;
+ }
+ }
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
if (!pd) {
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index b6c963767..8008e31c0 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014 - 2018 Google, Inc
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -711,16 +712,22 @@ static int cros_usbpd_charger_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(cros_usbpd_charger_pm_ops, NULL,
cros_usbpd_charger_resume);
+static const struct platform_device_id cros_usbpd_charger_id[] = {
+ { DRV_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, cros_usbpd_charger_id);
+
static struct platform_driver cros_usbpd_charger_driver = {
.driver = {
.name = DRV_NAME,
.pm = &cros_usbpd_charger_pm_ops,
},
- .probe = cros_usbpd_charger_probe
+ .probe = cros_usbpd_charger_probe,
+ .id_table = cros_usbpd_charger_id,
};
module_platform_driver(cros_usbpd_charger_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ChromeOS EC USBPD charger");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 0d2c3724d..b86e11bdc 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -271,23 +271,6 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
return count;
}
-static ssize_t power_supply_show_charge_behaviour(struct device *dev,
- struct power_supply *psy,
- union power_supply_propval *value,
- char *buf)
-{
- int ret;
-
- ret = power_supply_get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
- value);
- if (ret < 0)
- return ret;
-
- return power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
- value->intval, buf);
-}
-
static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
@@ -321,7 +304,8 @@ static ssize_t power_supply_show_property(struct device *dev,
&value, buf);
break;
case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
- ret = power_supply_show_charge_behaviour(dev, psy, &value, buf);
+ ret = power_supply_charge_behaviour_show(dev, psy->desc->charge_behaviours,
+ value.intval, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
ret = sysfs_emit(buf, "%s\n", value.strval);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 7513018c9..2067b0120 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
}
if (info->verify(info, pin, func, chan)) {
- pr_err("driver cannot use function %u on pin %u\n", func, chan);
+ pr_err("driver cannot use function %u and channel %u on pin %u\n",
+ func, chan, pin);
return -EOPNOTSUPP;
}
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 6506cfb89..ee2ced88a 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4562,7 +4562,7 @@ static int ptp_ocp_dpll_direction_set(const struct dpll_pin *pin,
return -EOPNOTSUPP;
mode = direction == DPLL_PIN_DIRECTION_INPUT ?
SMA_MODE_IN : SMA_MODE_OUT;
- return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr);
+ return ptp_ocp_sma_store_val(bp, 0, mode, sma_nr + 1);
}
static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin,
@@ -4583,7 +4583,7 @@ static int ptp_ocp_dpll_frequency_set(const struct dpll_pin *pin,
tbl = bp->sma_op->tbl[sma->mode];
for (i = 0; tbl[i].name; i++)
if (tbl[i].frequency == frequency)
- return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr);
+ return ptp_ocp_sma_store_val(bp, i, sma->mode, sma_nr + 1);
return -EINVAL;
}
@@ -4600,7 +4600,7 @@ static int ptp_ocp_dpll_frequency_get(const struct dpll_pin *pin,
u32 val;
int i;
- val = bp->sma_op->get(bp, sma_nr);
+ val = bp->sma_op->get(bp, sma_nr + 1);
tbl = bp->sma_op->tbl[sma->mode];
for (i = 0; tbl[i].name; i++)
if (val == tbl[i].value) {
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index a15460aaa..6b1b8f57c 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -296,8 +296,7 @@ static ssize_t max_vclocks_store(struct device *dev,
if (max < ptp->n_vclocks)
goto out;
- size = sizeof(int) * max;
- vclock_index = kzalloc(size, GFP_KERNEL);
+ vclock_index = kcalloc(max, sizeof(int), GFP_KERNEL);
if (!vclock_index) {
err = -ENOMEM;
goto out;
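The ptp_sysfs hunk above (like the dell-smbios one earlier) swaps an open-coded kzalloc(sizeof(x) * n) for kcalloc(n, sizeof(x)); the gain is that the element-count multiplication is overflow-checked. A user-space sketch with calloc(), using an exaggerated count purely to force the wrap:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / sizeof(int) + 1;	/* n * sizeof(int) wraps around */

	/* malloc(n * sizeof(int)) would silently request the tiny, wrapped size. */
	printf("wrapped byte count: %zu\n", n * sizeof(int));

	/* calloc() (like kcalloc()) checks the multiplication and fails instead. */
	int *p = calloc(n, sizeof(int));
	if (!p)
		printf("calloc rejected the overflowing allocation\n");
	free(p);
	return 0;
}

kcalloc() also zeroes the memory, so it is a drop-in replacement for the kzalloc() calls it supersedes here.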
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index a02fdbc61..e12b6ff70 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -147,7 +147,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
struct meson_pwm *meson = to_meson_pwm(chip);
struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm];
unsigned int cnt, duty_cnt;
- unsigned long fin_freq;
+ long fin_freq;
u64 duty, period, freq;
duty = state->duty_cycle;
@@ -167,14 +167,15 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
freq = ULONG_MAX;
fin_freq = clk_round_rate(channel->clk, freq);
- if (fin_freq == 0) {
- dev_err(pwmchip_parent(chip), "invalid source clock frequency\n");
- return -EINVAL;
+ if (fin_freq <= 0) {
+ dev_err(pwmchip_parent(chip),
+ "invalid source clock frequency %llu\n", freq);
+ return fin_freq ? fin_freq : -EINVAL;
}
- dev_dbg(pwmchip_parent(chip), "fin_freq: %lu Hz\n", fin_freq);
+ dev_dbg(pwmchip_parent(chip), "fin_freq: %ld Hz\n", fin_freq);
- cnt = div_u64(fin_freq * period, NSEC_PER_SEC);
+ cnt = mul_u64_u64_div_u64(fin_freq, period, NSEC_PER_SEC);
if (cnt > 0xffff) {
dev_err(pwmchip_parent(chip), "unable to get period cnt\n");
return -EINVAL;
@@ -189,7 +190,7 @@ static int meson_pwm_calc(struct pwm_chip *chip, struct pwm_device *pwm,
channel->hi = 0;
channel->lo = cnt;
} else {
- duty_cnt = div_u64(fin_freq * duty, NSEC_PER_SEC);
+ duty_cnt = mul_u64_u64_div_u64(fin_freq, duty, NSEC_PER_SEC);
dev_dbg(pwmchip_parent(chip), "duty=%llu duty_cnt=%u\n", duty, duty_cnt);
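The pwm-meson hunk above replaces div_u64(fin_freq * period, NSEC_PER_SEC) with mul_u64_u64_div_u64(), which keeps the intermediate product wider than 64 bits. A user-space sketch of why that matters, relying on the GCC/Clang __int128 extension and deliberately oversized example values:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same contract as the kernel helper: (a * b) / c without losing the product's
 * high bits. The 128-bit intermediate is what prevents the wrap. */
static uint64_t mul_u64_u64_div_u64(uint64_t a, uint64_t b, uint64_t c)
{
	return (uint64_t)(((unsigned __int128)a * b) / c);
}

int main(void)
{
	uint64_t fin_freq = 500000000;		/* 500 MHz source clock */
	uint64_t period   = 40000000000ULL;	/* 40 s period in ns (exaggerated) */

	/* The plain 64-bit product overflows: 5e8 * 4e10 = 2e19 > 2^64 - 1. */
	printf("wrapped: %llu\n",
	       (unsigned long long)(fin_freq * period / NSEC_PER_SEC));
	printf("correct: %llu\n",
	       (unsigned long long)mul_u64_u64_div_u64(fin_freq, period, NSEC_PER_SEC));
	return 0;
}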
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 39d80da0e..f07b1126e 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -624,32 +624,20 @@ static int sti_pwm_probe(struct platform_device *pdev)
return ret;
if (cdata->pwm_num_devs) {
- pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+ pc->pwm_clk = devm_clk_get_prepared(dev, "pwm");
if (IS_ERR(pc->pwm_clk)) {
dev_err(dev, "failed to get PWM clock\n");
return PTR_ERR(pc->pwm_clk);
}
-
- ret = clk_prepare(pc->pwm_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
- }
}
if (cdata->cpt_num_devs) {
- pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+ pc->cpt_clk = devm_clk_get_prepared(dev, "capture");
if (IS_ERR(pc->cpt_clk)) {
dev_err(dev, "failed to get PWM capture clock\n");
return PTR_ERR(pc->cpt_clk);
}
- ret = clk_prepare(pc->cpt_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
- }
-
cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
if (!cdata->ddata)
return -ENOMEM;
@@ -664,27 +652,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
mutex_init(&ddata->lock);
}
- ret = pwmchip_add(chip);
- if (ret < 0) {
- clk_unprepare(pc->pwm_clk);
- clk_unprepare(pc->cpt_clk);
- return ret;
- }
-
- platform_set_drvdata(pdev, chip);
-
- return 0;
-}
-
-static void sti_pwm_remove(struct platform_device *pdev)
-{
- struct pwm_chip *chip = platform_get_drvdata(pdev);
- struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
-
- pwmchip_remove(chip);
-
- clk_unprepare(pc->pwm_clk);
- clk_unprepare(pc->cpt_clk);
+ return devm_pwmchip_add(dev, chip);
}
static const struct of_device_id sti_pwm_of_match[] = {
@@ -699,7 +667,6 @@ static struct platform_driver sti_pwm_driver = {
.of_match_table = sti_pwm_of_match,
},
.probe = sti_pwm_probe,
- .remove_new = sti_pwm_remove,
};
module_platform_driver(sti_pwm_driver);
diff --git a/drivers/ras/amd/atl/internal.h b/drivers/ras/amd/atl/internal.h
index 5de69e0bb..196c1c8b5 100644
--- a/drivers/ras/amd/atl/internal.h
+++ b/drivers/ras/amd/atl/internal.h
@@ -224,7 +224,7 @@ int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo);
int get_df_system_info(void);
int determine_node_id(struct addr_ctx *ctx, u8 socket_num, u8 die_num);
-int get_addr_hash_mi300(void);
+int get_umc_info_mi300(void);
int get_address_map(struct addr_ctx *ctx);
diff --git a/drivers/ras/amd/atl/system.c b/drivers/ras/amd/atl/system.c
index 701349e84..6979fa3d4 100644
--- a/drivers/ras/amd/atl/system.c
+++ b/drivers/ras/amd/atl/system.c
@@ -127,7 +127,7 @@ static int df4_determine_df_rev(u32 reg)
if (reg == DF_FUNC0_ID_MI300) {
df_cfg.flags.heterogeneous = 1;
- if (get_addr_hash_mi300())
+ if (get_umc_info_mi300())
return -EINVAL;
}
diff --git a/drivers/ras/amd/atl/umc.c b/drivers/ras/amd/atl/umc.c
index 59b616909..a1b4accf7 100644
--- a/drivers/ras/amd/atl/umc.c
+++ b/drivers/ras/amd/atl/umc.c
@@ -68,6 +68,8 @@ struct xor_bits {
};
#define NUM_BANK_BITS 4
+#define NUM_COL_BITS 5
+#define NUM_SID_BITS 2
static struct {
/* UMC::CH::AddrHashBank */
@@ -80,7 +82,22 @@ static struct {
u8 bank_xor;
} addr_hash;
+static struct {
+ u8 bank[NUM_BANK_BITS];
+ u8 col[NUM_COL_BITS];
+ u8 sid[NUM_SID_BITS];
+ u8 num_row_lo;
+ u8 num_row_hi;
+ u8 row_lo;
+ u8 row_hi;
+ u8 pc;
+} bit_shifts;
+
#define MI300_UMC_CH_BASE 0x90000
+#define MI300_ADDR_CFG (MI300_UMC_CH_BASE + 0x30)
+#define MI300_ADDR_SEL (MI300_UMC_CH_BASE + 0x40)
+#define MI300_COL_SEL_LO (MI300_UMC_CH_BASE + 0x50)
+#define MI300_ADDR_SEL_2 (MI300_UMC_CH_BASE + 0xA4)
#define MI300_ADDR_HASH_BANK0 (MI300_UMC_CH_BASE + 0xC8)
#define MI300_ADDR_HASH_PC (MI300_UMC_CH_BASE + 0xE0)
#define MI300_ADDR_HASH_PC2 (MI300_UMC_CH_BASE + 0xE4)
@@ -90,17 +107,42 @@ static struct {
#define ADDR_HASH_ROW_XOR GENMASK(31, 14)
#define ADDR_HASH_BANK_XOR GENMASK(5, 0)
+#define ADDR_CFG_NUM_ROW_LO GENMASK(11, 8)
+#define ADDR_CFG_NUM_ROW_HI GENMASK(15, 12)
+
+#define ADDR_SEL_BANK0 GENMASK(3, 0)
+#define ADDR_SEL_BANK1 GENMASK(7, 4)
+#define ADDR_SEL_BANK2 GENMASK(11, 8)
+#define ADDR_SEL_BANK3 GENMASK(15, 12)
+#define ADDR_SEL_BANK4 GENMASK(20, 16)
+#define ADDR_SEL_ROW_LO GENMASK(27, 24)
+#define ADDR_SEL_ROW_HI GENMASK(31, 28)
+
+#define COL_SEL_LO_COL0 GENMASK(3, 0)
+#define COL_SEL_LO_COL1 GENMASK(7, 4)
+#define COL_SEL_LO_COL2 GENMASK(11, 8)
+#define COL_SEL_LO_COL3 GENMASK(15, 12)
+#define COL_SEL_LO_COL4 GENMASK(19, 16)
+
+#define ADDR_SEL_2_BANK5 GENMASK(4, 0)
+#define ADDR_SEL_2_CHAN GENMASK(15, 12)
+
/*
* Read UMC::CH::AddrHash{Bank,PC,PC2} registers to get XOR bits used
- * for hashing. Do this during module init, since the values will not
- * change during run time.
+ * for hashing.
+ *
+ * Also, read UMC::CH::Addr{Cfg,Sel,Sel2} and UMC::CH:ColSelLo registers to
+ * get the values needed to reconstruct the normalized address. Apply additional
+ * offsets to the raw register values, as needed.
+ *
+ * Do this during module init, since the values will not change during run time.
*
* These registers are instantiated for each UMC across each AMD Node.
* However, they should be identically programmed due to the fixed hardware
* design of MI300 systems. So read the values from Node 0 UMC 0 and keep a
* single global structure for simplicity.
*/
-int get_addr_hash_mi300(void)
+int get_umc_info_mi300(void)
{
u32 temp;
int ret;
@@ -130,6 +172,44 @@ int get_addr_hash_mi300(void)
addr_hash.bank_xor = FIELD_GET(ADDR_HASH_BANK_XOR, temp);
+ ret = amd_smn_read(0, MI300_ADDR_CFG, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.num_row_hi = FIELD_GET(ADDR_CFG_NUM_ROW_HI, temp);
+ bit_shifts.num_row_lo = 10 + FIELD_GET(ADDR_CFG_NUM_ROW_LO, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.bank[0] = 5 + FIELD_GET(ADDR_SEL_BANK0, temp);
+ bit_shifts.bank[1] = 5 + FIELD_GET(ADDR_SEL_BANK1, temp);
+ bit_shifts.bank[2] = 5 + FIELD_GET(ADDR_SEL_BANK2, temp);
+ bit_shifts.bank[3] = 5 + FIELD_GET(ADDR_SEL_BANK3, temp);
+ /* Use BankBit4 for the SID0 position. */
+ bit_shifts.sid[0] = 5 + FIELD_GET(ADDR_SEL_BANK4, temp);
+ bit_shifts.row_lo = 12 + FIELD_GET(ADDR_SEL_ROW_LO, temp);
+ bit_shifts.row_hi = 24 + FIELD_GET(ADDR_SEL_ROW_HI, temp);
+
+ ret = amd_smn_read(0, MI300_COL_SEL_LO, &temp);
+ if (ret)
+ return ret;
+
+ bit_shifts.col[0] = 2 + FIELD_GET(COL_SEL_LO_COL0, temp);
+ bit_shifts.col[1] = 2 + FIELD_GET(COL_SEL_LO_COL1, temp);
+ bit_shifts.col[2] = 2 + FIELD_GET(COL_SEL_LO_COL2, temp);
+ bit_shifts.col[3] = 2 + FIELD_GET(COL_SEL_LO_COL3, temp);
+ bit_shifts.col[4] = 2 + FIELD_GET(COL_SEL_LO_COL4, temp);
+
+ ret = amd_smn_read(0, MI300_ADDR_SEL_2, &temp);
+ if (ret)
+ return ret;
+
+ /* Use BankBit5 for the SID1 position. */
+ bit_shifts.sid[1] = 5 + FIELD_GET(ADDR_SEL_2_BANK5, temp);
+ bit_shifts.pc = 5 + FIELD_GET(ADDR_SEL_2_CHAN, temp);
+
return 0;
}
@@ -146,9 +226,6 @@ int get_addr_hash_mi300(void)
* The MCA address format is as follows:
* MCA_ADDR[27:0] = {S[1:0], P[0], R[14:0], B[3:0], C[4:0], Z[0]}
*
- * The normalized address format is fixed in hardware and is as follows:
- * NA[30:0] = {S[1:0], R[13:0], C4, B[1:0], B[3:2], C[3:2], P, C[1:0], Z[4:0]}
- *
* Additionally, the PC and Bank bits may be hashed. This must be accounted for before
* reconstructing the normalized address.
*/
@@ -158,18 +235,10 @@ int get_addr_hash_mi300(void)
#define MI300_UMC_MCA_PC BIT(25)
#define MI300_UMC_MCA_SID GENMASK(27, 26)
-#define MI300_NA_COL_1_0 GENMASK(6, 5)
-#define MI300_NA_PC BIT(7)
-#define MI300_NA_COL_3_2 GENMASK(9, 8)
-#define MI300_NA_BANK_3_2 GENMASK(11, 10)
-#define MI300_NA_BANK_1_0 GENMASK(13, 12)
-#define MI300_NA_COL_4 BIT(14)
-#define MI300_NA_ROW GENMASK(28, 15)
-#define MI300_NA_SID GENMASK(30, 29)
-
static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
{
- u16 i, col, row, bank, pc, sid, temp;
+ u16 i, col, row, bank, pc, sid;
+ u32 temp;
col = FIELD_GET(MI300_UMC_MCA_COL, addr);
bank = FIELD_GET(MI300_UMC_MCA_BANK, addr);
@@ -189,49 +258,48 @@ static unsigned long convert_dram_to_norm_addr_mi300(unsigned long addr)
/* Calculate hash for PC bit. */
if (addr_hash.pc.xor_enable) {
- /* Bits SID[1:0] act as Bank[6:5] for PC hash, so apply them here. */
- bank |= sid << 5;
-
temp = bitwise_xor_bits(col & addr_hash.pc.col_xor);
temp ^= bitwise_xor_bits(row & addr_hash.pc.row_xor);
- temp ^= bitwise_xor_bits(bank & addr_hash.bank_xor);
+ /* Bits SID[1:0] act as Bank[5:4] for PC hash, so apply them here. */
+ temp ^= bitwise_xor_bits((bank | sid << NUM_BANK_BITS) & addr_hash.bank_xor);
pc ^= temp;
-
- /* Drop SID bits for the sake of debug printing later. */
- bank &= 0x1F;
}
/* Reconstruct the normalized address starting with NA[4:0] = 0 */
addr = 0;
- /* NA[6:5] = Column[1:0] */
- temp = col & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_1_0, temp);
-
- /* NA[7] = PC */
- addr |= FIELD_PREP(MI300_NA_PC, pc);
-
- /* NA[9:8] = Column[3:2] */
- temp = (col >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_COL_3_2, temp);
+ /* Column bits */
+ for (i = 0; i < NUM_COL_BITS; i++) {
+ temp = (col >> i) & 0x1;
+ addr |= temp << bit_shifts.col[i];
+ }
- /* NA[11:10] = Bank[3:2] */
- temp = (bank >> 2) & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_3_2, temp);
+ /* Bank bits */
+ for (i = 0; i < NUM_BANK_BITS; i++) {
+ temp = (bank >> i) & 0x1;
+ addr |= temp << bit_shifts.bank[i];
+ }
- /* NA[13:12] = Bank[1:0] */
- temp = bank & 0x3;
- addr |= FIELD_PREP(MI300_NA_BANK_1_0, temp);
+ /* Row lo bits */
+ for (i = 0; i < bit_shifts.num_row_lo; i++) {
+ temp = (row >> i) & 0x1;
+ addr |= temp << (i + bit_shifts.row_lo);
+ }
- /* NA[14] = Column[4] */
- temp = (col >> 4) & 0x1;
- addr |= FIELD_PREP(MI300_NA_COL_4, temp);
+ /* Row hi bits */
+ for (i = 0; i < bit_shifts.num_row_hi; i++) {
+ temp = (row >> (i + bit_shifts.num_row_lo)) & 0x1;
+ addr |= temp << (i + bit_shifts.row_hi);
+ }
- /* NA[28:15] = Row[13:0] */
- addr |= FIELD_PREP(MI300_NA_ROW, row);
+ /* PC bit */
+ addr |= pc << bit_shifts.pc;
- /* NA[30:29] = SID[1:0] */
- addr |= FIELD_PREP(MI300_NA_SID, sid);
+ /* SID bits */
+ for (i = 0; i < NUM_SID_BITS; i++) {
+ temp = (sid >> i) & 0x1;
+ addr |= temp << bit_shifts.sid[i];
+ }
pr_debug("Addr=0x%016lx", addr);
pr_debug("Bank=%u Row=%u Column=%u PC=%u SID=%u", bank, row, col, pc, sid);
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 26192d55a..79fbb4529 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -256,7 +256,7 @@ static int buck12_set_hw_dvs_levels(struct device_node *np,
* 10: 2.50mV/usec 10mV 4uS
* 11: 1.25mV/usec 10mV 8uS
*/
-static const unsigned int bd7181x_ramp_table[] = { 1250, 2500, 5000, 10000 };
+static const unsigned int bd7181x_ramp_table[] = { 10000, 5000, 2500, 1250 };
static int bd7181x_led_set_current_limit(struct regulator_dev *rdev,
int min_uA, int max_uA)
diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c
index 08d4ee369..dd871ffe9 100644
--- a/drivers/regulator/bd71828-regulator.c
+++ b/drivers/regulator/bd71828-regulator.c
@@ -206,14 +206,11 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
.suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT,
.suspend_mask = BD71828_MASK_BUCK1267_VOLT,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
- .lpsr_on_mask = BD71828_MASK_LPSR_EN,
/*
* LPSR voltage is same as SUSPEND voltage. Allow
- * setting it so that regulator can be set enabled at
- * LPSR state
+ * only enabling/disabling regulator for LPSR state
*/
- .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT,
- .lpsr_mask = BD71828_MASK_BUCK1267_VOLT,
+ .lpsr_on_mask = BD71828_MASK_LPSR_EN,
},
.reg_inits = buck1_inits,
.reg_init_amnt = ARRAY_SIZE(buck1_inits),
@@ -288,13 +285,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_BUCK3_VOLT,
- .idle_reg = BD71828_REG_BUCK3_VOLT,
- .suspend_reg = BD71828_REG_BUCK3_VOLT,
- .lpsr_reg = BD71828_REG_BUCK3_VOLT,
.run_mask = BD71828_MASK_BUCK3_VOLT,
- .idle_mask = BD71828_MASK_BUCK3_VOLT,
- .suspend_mask = BD71828_MASK_BUCK3_VOLT,
- .lpsr_mask = BD71828_MASK_BUCK3_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -329,13 +320,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_BUCK4_VOLT,
- .idle_reg = BD71828_REG_BUCK4_VOLT,
- .suspend_reg = BD71828_REG_BUCK4_VOLT,
- .lpsr_reg = BD71828_REG_BUCK4_VOLT,
.run_mask = BD71828_MASK_BUCK4_VOLT,
- .idle_mask = BD71828_MASK_BUCK4_VOLT,
- .suspend_mask = BD71828_MASK_BUCK4_VOLT,
- .lpsr_mask = BD71828_MASK_BUCK4_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -370,13 +355,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_BUCK5_VOLT,
- .idle_reg = BD71828_REG_BUCK5_VOLT,
- .suspend_reg = BD71828_REG_BUCK5_VOLT,
- .lpsr_reg = BD71828_REG_BUCK5_VOLT,
.run_mask = BD71828_MASK_BUCK5_VOLT,
- .idle_mask = BD71828_MASK_BUCK5_VOLT,
- .suspend_mask = BD71828_MASK_BUCK5_VOLT,
- .lpsr_mask = BD71828_MASK_BUCK5_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -493,13 +472,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_LDO1_VOLT,
- .idle_reg = BD71828_REG_LDO1_VOLT,
- .suspend_reg = BD71828_REG_LDO1_VOLT,
- .lpsr_reg = BD71828_REG_LDO1_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -533,13 +506,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_LDO2_VOLT,
- .idle_reg = BD71828_REG_LDO2_VOLT,
- .suspend_reg = BD71828_REG_LDO2_VOLT,
- .lpsr_reg = BD71828_REG_LDO2_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -573,13 +540,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_LDO3_VOLT,
- .idle_reg = BD71828_REG_LDO3_VOLT,
- .suspend_reg = BD71828_REG_LDO3_VOLT,
- .lpsr_reg = BD71828_REG_LDO3_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -614,13 +575,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_LDO4_VOLT,
- .idle_reg = BD71828_REG_LDO4_VOLT,
- .suspend_reg = BD71828_REG_LDO4_VOLT,
- .lpsr_reg = BD71828_REG_LDO4_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -655,13 +610,7 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
ROHM_DVS_LEVEL_SUSPEND |
ROHM_DVS_LEVEL_LPSR,
.run_reg = BD71828_REG_LDO5_VOLT,
- .idle_reg = BD71828_REG_LDO5_VOLT,
- .suspend_reg = BD71828_REG_LDO5_VOLT,
- .lpsr_reg = BD71828_REG_LDO5_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
@@ -720,9 +669,6 @@ static const struct bd71828_regulator_data bd71828_rdata[] = {
.suspend_reg = BD71828_REG_LDO7_VOLT,
.lpsr_reg = BD71828_REG_LDO7_VOLT,
.run_mask = BD71828_MASK_LDO_VOLT,
- .idle_mask = BD71828_MASK_LDO_VOLT,
- .suspend_mask = BD71828_MASK_LDO_VOLT,
- .lpsr_mask = BD71828_MASK_LDO_VOLT,
.idle_on_mask = BD71828_MASK_IDLE_EN,
.suspend_on_mask = BD71828_MASK_SUSP_EN,
.lpsr_on_mask = BD71828_MASK_LPSR_EN,
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2c33653ff..51ff3f9ea 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3347,6 +3347,7 @@ struct regmap *regulator_get_regmap(struct regulator *regulator)
return map ? map : ERR_PTR(-EOPNOTSUPP);
}
+EXPORT_SYMBOL_GPL(regulator_get_regmap);
/**
* regulator_get_hardware_vsel_register - get the HW voltage selector register
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index d49268336..6e1ace660 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -161,6 +161,32 @@ int regulator_get_voltage_sel_pickable_regmap(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_pickable_regmap);
+static int write_separate_vsel_and_range(struct regulator_dev *rdev,
+ unsigned int sel, unsigned int range)
+{
+ bool range_updated;
+ int ret;
+
+ ret = regmap_update_bits_base(rdev->regmap, rdev->desc->vsel_range_reg,
+ rdev->desc->vsel_range_mask,
+ range, &range_updated, false, false);
+ if (ret)
+ return ret;
+
+ /*
+	 * Some PMICs treat the vsel_reg the same as an apply-bit. Force it to be
+	 * written if the range changed, even if the old selector was the same as
+	 * the new one.
+ */
+ if (rdev->desc->range_applied_by_vsel && range_updated)
+ return regmap_write_bits(rdev->regmap,
+ rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, sel);
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, sel);
+}
+
/**
* regulator_set_voltage_sel_pickable_regmap - pickable range set_voltage_sel
*
@@ -199,21 +225,12 @@ int regulator_set_voltage_sel_pickable_regmap(struct regulator_dev *rdev,
range = rdev->desc->linear_range_selectors_bitfield[i];
range <<= ffs(rdev->desc->vsel_range_mask) - 1;
- if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg) {
- ret = regmap_update_bits(rdev->regmap,
- rdev->desc->vsel_reg,
+ if (rdev->desc->vsel_reg == rdev->desc->vsel_range_reg)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
rdev->desc->vsel_range_mask |
rdev->desc->vsel_mask, sel | range);
- } else {
- ret = regmap_update_bits(rdev->regmap,
- rdev->desc->vsel_range_reg,
- rdev->desc->vsel_range_mask, range);
- if (ret)
- return ret;
-
- ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
- rdev->desc->vsel_mask, sel);
- }
+ else
+ ret = write_separate_vsel_and_range(rdev, sel, range);
if (ret)
return ret;
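The new helper exists because, on parts flagged with range_applied_by_vsel, the hardware only latches a new range once the voltage-selector register is written. A simplified user-space model of that decision follows; the fake registers and the quirk flag stand in for the regmap calls and are illustrative only:

#include <stdbool.h>
#include <stdio.h>

static unsigned int range_reg, vsel_reg;	/* fake hardware registers */
static bool range_applied_by_vsel = true;	/* assumed device quirk flag */

/* Write a register and report whether the stored value actually changed. */
static bool update_reg(unsigned int *reg, unsigned int val)
{
	bool changed = (*reg != val);

	*reg = val;
	return changed;
}

static void set_vsel_and_range(unsigned int sel, unsigned int range)
{
	bool range_updated = update_reg(&range_reg, range);

	/* Force the selector write if the range changed, even if sel is unchanged. */
	if (range_applied_by_vsel && range_updated)
		printf("forced vsel write: %u\n", sel);
	else if (update_reg(&vsel_reg, sel))
		printf("vsel write: %u\n", sel);

	/* Either way the selector register ends up holding 'sel'. */
	vsel_reg = sel;
}

int main(void)
{
	set_vsel_and_range(10, 0);	/* normal vsel write */
	set_vsel_and_range(10, 1);	/* forced write: range changed, sel did not */
	return 0;
}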
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index 9b7c3d777..3c9d79e00 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -115,6 +115,7 @@ static struct regulator_desc tps6287x_reg = {
.vsel_mask = 0xFF,
.vsel_range_reg = TPS6287X_CTRL2,
.vsel_range_mask = TPS6287X_CTRL2_VRANGE,
+ .range_applied_by_vsel = true,
.ramp_reg = TPS6287X_CTRL1,
.ramp_mask = TPS6287X_CTRL1_VRAMP,
.ramp_delay_table = tps6287x_ramp_table,
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index b7f0c8779..5fad61785 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -287,30 +287,30 @@ static struct tps6594_regulator_irq_type *tps6594_ldos_irq_types[] = {
static const struct regulator_desc multi_regs[] = {
TPS6594_REGULATOR("BUCK12", "buck12", TPS6594_BUCK_1,
REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_VOUT_1(1),
+ TPS6594_REG_BUCKX_VOUT_1(0),
TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_CTRL(1),
+ TPS6594_REG_BUCKX_CTRL(0),
TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
4, 4000, 0, NULL, 0, 0),
TPS6594_REGULATOR("BUCK34", "buck34", TPS6594_BUCK_3,
REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_VOUT_1(3),
+ TPS6594_REG_BUCKX_VOUT_1(2),
TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_CTRL(3),
+ TPS6594_REG_BUCKX_CTRL(2),
TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
4, 0, 0, NULL, 0, 0),
TPS6594_REGULATOR("BUCK123", "buck123", TPS6594_BUCK_1,
REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_VOUT_1(1),
+ TPS6594_REG_BUCKX_VOUT_1(0),
TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_CTRL(1),
+ TPS6594_REG_BUCKX_CTRL(0),
TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
4, 4000, 0, NULL, 0, 0),
TPS6594_REGULATOR("BUCK1234", "buck1234", TPS6594_BUCK_1,
REGULATOR_VOLTAGE, tps6594_bucks_ops, TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_VOUT_1(1),
+ TPS6594_REG_BUCKX_VOUT_1(0),
TPS6594_MASK_BUCKS_VSET,
- TPS6594_REG_BUCKX_CTRL(1),
+ TPS6594_REG_BUCKX_CTRL(0),
TPS6594_BIT_BUCK_EN, 0, 0, bucks_ranges,
4, 4000, 0, NULL, 0, 0),
};
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index ad3415a38..50e486bcf 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -103,12 +103,14 @@ struct k3_r5_soc_data {
* @dev: cached device pointer
* @mode: Mode to configure the Cluster - Split or LockStep
* @cores: list of R5 cores within the cluster
+ * @core_transition: wait queue to sync core state changes
* @soc_data: SoC-specific feature data for a R5FSS
*/
struct k3_r5_cluster {
struct device *dev;
enum cluster_mode mode;
struct list_head cores;
+ wait_queue_head_t core_transition;
const struct k3_r5_soc_data *soc_data;
};
@@ -128,6 +130,7 @@ struct k3_r5_cluster {
* @atcm_enable: flag to control ATCM enablement
* @btcm_enable: flag to control BTCM enablement
* @loczrama: flag to dictate which TCM is at device address 0x0
+ * @released_from_reset: flag to signal when core is out of reset
*/
struct k3_r5_core {
struct list_head elem;
@@ -144,6 +147,7 @@ struct k3_r5_core {
u32 atcm_enable;
u32 btcm_enable;
u32 loczrama;
+ bool released_from_reset;
};
/**
@@ -460,6 +464,8 @@ static int k3_r5_rproc_prepare(struct rproc *rproc)
ret);
return ret;
}
+ core->released_from_reset = true;
+ wake_up_interruptible(&cluster->core_transition);
/*
* Newer IP revisions like on J7200 SoCs support h/w auto-initialization
@@ -542,7 +548,7 @@ static int k3_r5_rproc_start(struct rproc *rproc)
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
struct device *dev = kproc->dev;
- struct k3_r5_core *core;
+ struct k3_r5_core *core0, *core;
u32 boot_addr;
int ret;
@@ -568,6 +574,16 @@ static int k3_r5_rproc_start(struct rproc *rproc)
goto unroll_core_run;
}
} else {
+ /* do not allow core 1 to start before core 0 */
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core,
+ elem);
+ if (core != core0 && core0->rproc->state == RPROC_OFFLINE) {
+ dev_err(dev, "%s: can not start core 1 before core 0\n",
+ __func__);
+ ret = -EPERM;
+ goto put_mbox;
+ }
+
ret = k3_r5_core_run(core);
if (ret)
goto put_mbox;
@@ -613,7 +629,8 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
{
struct k3_r5_rproc *kproc = rproc->priv;
struct k3_r5_cluster *cluster = kproc->cluster;
- struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core1, *core = kproc->core;
int ret;
/* halt all applicable cores */
@@ -626,6 +643,16 @@ static int k3_r5_rproc_stop(struct rproc *rproc)
}
}
} else {
+ /* do not allow core 0 to stop before core 1 */
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core,
+ elem);
+ if (core != core1 && core1->rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "%s: can not stop core 0 before core 1\n",
+ __func__);
+ ret = -EPERM;
+ goto out;
+ }
+
ret = k3_r5_core_halt(core);
if (ret)
goto out;
@@ -1140,6 +1167,12 @@ static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
return ret;
}
+ /*
+ * Skip the waiting mechanism for sequential power-on of cores if the
+ * core has already been booted by another entity.
+ */
+ core->released_from_reset = c_state;
+
ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
&stat);
if (ret < 0) {
@@ -1280,6 +1313,26 @@ init_rmem:
cluster->mode == CLUSTER_MODE_SINGLECPU ||
cluster->mode == CLUSTER_MODE_SINGLECORE)
break;
+
+ /*
+	 * R5 cores need to be powered on sequentially; core0
+	 * should be in a higher power state than core1 in a cluster.
+	 * So, wait for the current core to power up before proceeding
+	 * to the next core, with a 2 sec timeout for each core.
+ *
+ * This waiting mechanism is necessary because
+ * rproc_auto_boot_callback() for core1 can be called before
+ * core0 due to thread execution order.
+ */
+ ret = wait_event_interruptible_timeout(cluster->core_transition,
+ core->released_from_reset,
+ msecs_to_jiffies(2000));
+ if (ret <= 0) {
+ dev_err(dev,
+ "Timed out waiting for %s core to power up!\n",
+ rproc->name);
+ return ret;
+ }
}
return 0;
@@ -1709,6 +1762,7 @@ static int k3_r5_probe(struct platform_device *pdev)
cluster->dev = dev;
cluster->soc_data = data;
INIT_LIST_HEAD(&cluster->cores);
+ init_waitqueue_head(&cluster->core_transition);
ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
if (ret < 0 && ret != -EINVAL) {
diff --git a/drivers/s390/cio/trace.h b/drivers/s390/cio/trace.h
index 86993de25..a4c5c6736 100644
--- a/drivers/s390/cio/trace.h
+++ b/drivers/s390/cio/trace.h
@@ -50,7 +50,7 @@ DECLARE_EVENT_CLASS(s390_class_schib,
__entry->devno = schib->pmcw.dev;
__entry->schib = *schib;
__entry->pmcw_ena = schib->pmcw.ena;
- __entry->pmcw_st = schib->pmcw.ena;
+ __entry->pmcw_st = schib->pmcw.st;
__entry->pmcw_dnv = schib->pmcw.dnv;
__entry->pmcw_dev = schib->pmcw.dev;
__entry->pmcw_lpm = schib->pmcw.lpm;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index cce0bafd4..ca7e626a9 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -767,9 +767,9 @@ static void ap_check_bindings_complete(void)
if (bound == apqns) {
if (!completion_done(&ap_apqn_bindings_complete)) {
complete_all(&ap_apqn_bindings_complete);
+ ap_send_bindings_complete_uevent();
pr_debug("%s all apqn bindings complete\n", __func__);
}
- ap_send_bindings_complete_uevent();
}
}
}
@@ -929,6 +929,12 @@ static int ap_device_probe(struct device *dev)
goto out;
}
+ /*
+ * Rearm the bindings complete completion to trigger
+ * bindings complete when all devices are bound again
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+
/* Add queue/card to list of active queues/cards */
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
@@ -1123,7 +1129,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
*/
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
- int a, i, z;
+ unsigned long a, i, z;
char *np, sign;
/* bits needs to be a multiple of 8 */
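The ap_bus hunks above send the "bindings complete" uevent only when the completion actually transitions to done, and rearm the completion on every probe so that a later appearing APQN makes the bus wait for bindings again. A minimal user-space model of that rearm pattern, with a plain flag standing in for the kernel completion API:

#include <stdbool.h>
#include <stdio.h>

static bool bindings_complete;		/* models the completion's done state */

static void check_bindings_complete(int bound, int total)
{
	if (bound == total && !bindings_complete) {
		bindings_complete = true;
		printf("all %d bindings complete, sending uevent once\n", total);
	}
}

static void device_probed(void)
{
	/* Rearm: a newly probed device means bindings are no longer complete. */
	bindings_complete = false;
}

int main(void)
{
	check_bindings_complete(2, 2);	/* fires once */
	check_bindings_complete(2, 2);	/* does not fire again */
	device_probed();		/* new queue appears: rearm */
	check_bindings_complete(3, 3);	/* fires again after rebinding */
	return 0;
}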
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 613eab729..41fe8a043 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -956,7 +956,7 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
struct dst_entry *dst = skb_dst(skb);
struct rt6_info *rt;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (dst) {
if (proto == htons(ETH_P_IPV6))
dst = dst_check(dst, rt6_get_cookie(rt));
@@ -970,15 +970,14 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *) dst;
-
- return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+ return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
+ ip_hdr(skb)->daddr;
}
static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
return &rt->rt6i_gateway;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 52db147d9..f6dd077d4 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -250,7 +250,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
@@ -317,7 +317,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
unsigned long flags;
void *kern_buf;
- kern_buf = memdup_user(buf, nbytes);
+ kern_buf = memdup_user_nul(buf, nbytes);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
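memdup_user_nul() is used here because the copied buffer is subsequently parsed as a string (for example with sscanf), and memdup_user() does not append a terminating NUL. A user-space sketch of the difference, with malloc/memcpy standing in for the copy-from-user helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy 'len' bytes and append a terminating NUL, like memdup_user_nul(). */
static char *dup_nul(const char *src, size_t len)
{
	char *p = malloc(len + 1);

	if (!p)
		return NULL;
	memcpy(p, src, len);
	p[len] = '\0';
	return p;
}

int main(void)
{
	const char user_buf[] = { '0', 'x', '1', '0', ' ', '4' };	/* no NUL */
	char *kern_buf = dup_nul(user_buf, sizeof(user_buf));
	unsigned int addr, count;

	/* Safe to treat as a C string only because of the added terminator. */
	if (kern_buf && sscanf(kern_buf, "%x %u", &addr, &count) == 2)
		printf("addr=0x%x count=%u\n", addr, count);
	free(kern_buf);
	return 0;
}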
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index af18d20f3..49c57a9c1 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
struct Scsi_Host *sh;
- sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info));
+ sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info *));
if (sh == NULL) {
dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index f6e6db8b8..e97f4e01a 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -239,8 +239,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
/* help some expanders that fail to zero sas_address in the 'no
* device' case
*/
- if (phy->attached_dev_type == SAS_PHY_UNUSED ||
- phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
+ if (phy->attached_dev_type == SAS_PHY_UNUSED)
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
else
memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c
index 55d590b91..6a3db7032 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_app.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_app.c
@@ -2158,10 +2158,72 @@ persistent_id_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(persistent_id);
+/**
+ * sas_ncq_prio_supported_show - Indicate if device supports NCQ priority
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_supported attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_supported_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
+}
+static DEVICE_ATTR_RO(sas_ncq_prio_supported);
+
+/**
+ * sas_ncq_prio_enable_show - send prioritized io commands to device
+ * @dev: pointer to embedded device
+ * @attr: sas_ncq_prio_enable attribute descriptor
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read/write' sdev attribute, only works with SATA devices
+ */
+static ssize_t
+sas_ncq_prio_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+
+ if (!sdev_priv_data)
+ return 0;
+
+ return sysfs_emit(buf, "%d\n", sdev_priv_data->ncq_prio_enable);
+}
+
+static ssize_t
+sas_ncq_prio_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ bool ncq_prio_enable = 0;
+
+ if (kstrtobool(buf, &ncq_prio_enable))
+ return -EINVAL;
+
+ if (!sas_ata_ncq_prio_supported(sdev))
+ return -EINVAL;
+
+ sdev_priv_data->ncq_prio_enable = ncq_prio_enable;
+
+ return strlen(buf);
+}
+static DEVICE_ATTR_RW(sas_ncq_prio_enable);
+
static struct attribute *mpi3mr_dev_attrs[] = {
&dev_attr_sas_address.attr,
&dev_attr_device_handle.attr,
&dev_attr_persistent_id.attr,
+ &dev_attr_sas_ncq_prio_supported.attr,
+ &dev_attr_sas_ncq_prio_enable.attr,
NULL,
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 1b492e9a3..86f553c61 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -8512,6 +8512,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pd_handles_sz++;
+ /*
+ * pd_handles_sz should have, at least, the minimal room for
+	 * set_bit()/test_bit(), otherwise an out-of-bounds memory access may occur.
+ */
+ ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
+
ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
GFP_KERNEL);
if (!ioc->pd_handles) {
@@ -8529,6 +8535,13 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
if (ioc->facts.MaxDevHandle % 8)
ioc->pend_os_device_add_sz++;
+
+ /*
+ * pend_os_device_add_sz should have, at least, the minimal room for
+	 * set_bit()/test_bit(), otherwise an out-of-bounds memory access may occur.
+ */
+ ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
+ sizeof(unsigned long));
ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
GFP_KERNEL);
if (!ioc->pend_os_device_add) {
@@ -8820,6 +8833,12 @@ _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
if (ioc->facts.MaxDevHandle % 8)
pd_handles_sz++;
+ /*
+ * pd_handles should have, at least, the minimal room for
+	 * set_bit()/test_bit(), otherwise an out-of-bounds memory access may
+	 * occur.
+ */
+ pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
GFP_KERNEL);
if (!pd_handles) {
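The ALIGN() rounding added in these hunks matters because set_bit()/test_bit() read and write whole unsigned long words, so a bitmap buffer sized in bytes must be padded up to a word boundary to keep the word-wide accesses inside the allocation. A standalone sketch of the size calculation, with the alignment macro redefined locally and an example handle count chosen for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int max_dev_handle = 1100;	/* example facts.MaxDevHandle */
	unsigned int sz = max_dev_handle / 8;

	if (max_dev_handle % 8)
		sz++;				/* 138 bytes of raw bitmap */

	/* Pad to a multiple of sizeof(unsigned long) so word-wide bitops stay in bounds. */
	sz = ALIGN_UP(sz, sizeof(unsigned long));
	printf("allocation size: %u bytes\n", sz);	/* 144 on a 64-bit build */
	return 0;
}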
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index bf100a4eb..fe1e96fda 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -2048,9 +2048,6 @@ void
mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
struct _raid_device *raid_device, Mpi25SCSIIORequest_t *mpi_request);
-/* NCQ Prio Handling Check */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev);
-
void mpt3sas_setup_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_destroy_debugfs(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_init_debugfs(void);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 1c9fd2619..87784c962 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -4088,7 +4088,7 @@ sas_ncq_prio_supported_show(struct device *dev,
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sysfs_emit(buf, "%d\n", scsih_ncq_prio_supp(sdev));
+ return sysfs_emit(buf, "%d\n", sas_ata_ncq_prio_supported(sdev));
}
static DEVICE_ATTR_RO(sas_ncq_prio_supported);
@@ -4123,7 +4123,7 @@ sas_ncq_prio_enable_store(struct device *dev,
if (kstrtobool(buf, &ncq_prio_enable))
return -EINVAL;
- if (!scsih_ncq_prio_supp(sdev))
+ if (!sas_ata_ncq_prio_supported(sdev))
return -EINVAL;
sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index ef8ee9300..7e923e024 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12573,29 +12573,6 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/**
- * scsih_ncq_prio_supp - Check for NCQ command priority support
- * @sdev: scsi device struct
- *
- * This is called when a user indicates they would like to enable
- * ncq command priorities. This works only on SATA devices.
- */
-bool scsih_ncq_prio_supp(struct scsi_device *sdev)
-{
- struct scsi_vpd *vpd;
- bool ncq_prio_supp = false;
-
- rcu_read_lock();
- vpd = rcu_dereference(sdev->vpd_pg89);
- if (!vpd || vpd->len < 214)
- goto out;
-
- ncq_prio_supp = (vpd->data[213] >> 4) & 1;
-out:
- rcu_read_unlock();
-
- return ncq_prio_supp;
-}
/*
* The pci device ids are defined in mpi/mpi2_cnfg.h.
*/
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index 451fd236b..96174353e 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -170,7 +170,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer,
if (!count || *ppos)
return 0;
- kern_buf = memdup_user(buffer, count);
+ kern_buf = memdup_user_nul(buffer, count);
if (IS_ERR(kern_buf))
return PTR_ERR(kern_buf);
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 8deb2001d..37eed6a27 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -120,15 +120,11 @@ static ssize_t
qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
- size_t cnt = 0;
-
- if (*ppos)
- return 0;
+ char buf[64];
+ int len;
- cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ len = sprintf(buf, "do_not_recover=%d\n", qedi_do_not_recover);
+ return simple_read_from_buffer(buffer, count, ppos, buf, len);
}
static int
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 55ff3d748..a1545dad0 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -274,7 +274,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
iocbs_used, ha->base_qpair->fwres.iocbs_limit);
- seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
+ seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
exch_used, ha->base_qpair->fwres.exch_limit);
if (ql2xenforce_iocb_limit == 2) {
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3e0c03812..ee69bd358 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -350,6 +350,13 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page)
if (result < SCSI_VPD_HEADER_SIZE)
return 0;
+ if (result > sizeof(vpd)) {
+ dev_warn_once(&sdev->sdev_gendev,
+ "%s: long VPD page 0 length: %d bytes\n",
+ __func__, result);
+ result = sizeof(vpd);
+ }
+
result -= SCSI_VPD_HEADER_SIZE;
if (!memchr(&vpd[SCSI_VPD_HEADER_SIZE], page, result))
return 0;
@@ -666,6 +673,13 @@ void scsi_cdl_check(struct scsi_device *sdev)
sdev->use_10_for_rw = 0;
sdev->cdl_supported = 1;
+
+ /*
+ * If the device supports CDL, make sure that the current drive
+ * feature status is consistent with the user controlled
+ * cdl_enable state.
+ */
+ scsi_cdl_enable(sdev, sdev->cdl_enable);
} else {
sdev->cdl_supported = 0;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index d704c484a..7fdd2b61f 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -416,6 +416,29 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *sdev)
}
EXPORT_SYMBOL_GPL(sas_is_tlr_enabled);
+/**
+ * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support
+ * @sdev: SCSI device
+ *
+ * Check if an ATA device supports NCQ priority using VPD page 89h (ATA
+ * Information). Since this VPD page is implemented only for ATA devices,
+ * this function always returns false for SCSI devices.
+ */
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev)
+{
+ struct scsi_vpd *vpd;
+ bool ncq_prio_supported = false;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg89);
+ if (vpd && vpd->len >= 214)
+ ncq_prio_supported = (vpd->data[213] >> 4) & 1;
+ rcu_read_unlock();
+
+ return ncq_prio_supported;
+}
+EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported);
+
/*
* SAS Phy attributes
*/
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 65cdc8b77..a4638ea92 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -63,6 +63,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
@@ -3125,6 +3126,9 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
struct scsi_mode_data data;
int res;
+ if (sdp->sdev_bflags & BLIST_SKIP_IO_HINTS)
+ return;
+
res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
/*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr);
@@ -3572,16 +3576,23 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
static void sd_read_block_zero(struct scsi_disk *sdkp)
{
- unsigned int buf_len = sdkp->device->sector_size;
- char *buffer, cmd[10] = { };
+ struct scsi_device *sdev = sdkp->device;
+ unsigned int buf_len = sdev->sector_size;
+ u8 *buffer, cmd[16] = { };
buffer = kmalloc(buf_len, GFP_KERNEL);
if (!buffer)
return;
- cmd[0] = READ_10;
- put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
- put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ if (sdev->use_16_for_rw) {
+ cmd[0] = READ_16;
+ put_unaligned_be64(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be32(1, &cmd[10]);/* Transfer 1 logical block */
+ } else {
+ cmd[0] = READ_10;
+ put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
+ put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
+ }
scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
SD_TIMEOUT, sdkp->max_retries, NULL);
@@ -3707,8 +3718,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
*/
if (sdkp->first_scan ||
q->limits.max_sectors > q->limits.max_dev_sectors ||
- q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors > q->limits.max_hw_sectors) {
q->limits.max_sectors = rw_max;
+ q->limits.max_user_sectors = rw_max;
+ }
sdkp->first_scan = 0;
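The switch to READ(16) matters for large-capacity devices where use_16_for_rw is set, since READ(10) only carries a 32-bit logical block address. As a rough standalone sketch of the two CDB layouts built above, with the big-endian stores written out by hand and the opcodes taken from SBC (0x28 for READ(10), 0x88 for READ(16)):

#include <stdint.h>
#include <stdio.h>

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32((uint32_t)v, p + 4);
}

int main(void)
{
	uint8_t cmd[16] = { 0 };
	int use_16_for_rw = 1;			/* assumed: large-capacity device */

	if (use_16_for_rw) {
		cmd[0] = 0x88;			/* READ(16) */
		put_be64(0, &cmd[2]);		/* logical block address 0 */
		put_be32(1, &cmd[10]);		/* transfer 1 logical block */
	} else {
		cmd[0] = 0x28;			/* READ(10) */
		put_be32(0, &cmd[2]);		/* logical block address 0 */
		cmd[7] = 0; cmd[8] = 1;		/* transfer 1 logical block */
	}

	for (size_t i = 0; i < sizeof(cmd); i++)
		printf("%02x ", cmd[i]);
	printf("\n");
	return 0;
}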
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index b0cd071c4..0b2e5690d 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -14,7 +14,8 @@
#define CMDQ_POLL_ENABLE_MASK BIT(0)
#define CMDQ_EOC_IRQ_EN BIT(0)
#define CMDQ_REG_TYPE 1
-#define CMDQ_JUMP_RELATIVE 1
+#define CMDQ_JUMP_RELATIVE 0
+#define CMDQ_JUMP_ABSOLUTE 1
struct cmdq_instruction {
union {
@@ -397,7 +398,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr)
struct cmdq_instruction inst = {};
inst.op = CMDQ_CODE_JUMP;
- inst.offset = CMDQ_JUMP_RELATIVE;
+ inst.offset = CMDQ_JUMP_ABSOLUTE;
inst.value = addr >>
cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
return cmdq_pkt_append_command(pkt, inst);
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index a5fd68411..86fb2cd4f 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,6 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -17,6 +21,8 @@
#define MAX_SLV_ID 8
#define SLAVE_ID_MASK 0x7
#define SLAVE_ID_SHIFT 16
+#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr)
+#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr)
/**
* struct entry_header: header for each entry in cmddb
@@ -221,6 +227,30 @@ const void *cmd_db_read_aux_data(const char *id, size_t *len)
EXPORT_SYMBOL_GPL(cmd_db_read_aux_data);
/**
+ * cmd_db_match_resource_addr() - Check if both resource addresses refer to the same resource
+ *
+ * @addr1: Resource address to compare
+ * @addr2: Resource address to compare
+ *
+ * Return: true if two addresses refer to the same resource, false otherwise
+ */
+bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
+{
+ /*
+ * Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
+ * aligned addresses associated with it. Ignore the offset to check
+ * for VRM requests.
+ */
+ if (addr1 == addr2)
+ return true;
+ else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
+
+/**
* cmd_db_read_slave_id - Get the slave ID for a given resource address
*
* @id: Resource id to query the DB for version
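cmd_db_match_resource_addr() treats two TCS command addresses as equal when they point at the same VRM resource even if the low nibble (the per-resource register offset) differs. A standalone sketch of that comparison; the VRM slave id value (taken as 4 here) and the sample addresses are purely illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SLAVE_ID(addr)	(((addr) >> 16) & 0xf)		/* bits [19:16] */
#define VRM_ADDR(addr)	(((addr) >> 4) & 0xffff)	/* bits [19:4] */

#define HW_VRM		4	/* assumed slave id for VRM accelerators */

static bool match_resource_addr(uint32_t a, uint32_t b)
{
	if (a == b)
		return true;
	/* Same VRM resource: ignore the 4-byte register offset in bits [3:0]. */
	return SLAVE_ID(a) == HW_VRM && VRM_ADDR(a) == VRM_ADDR(b);
}

int main(void)
{
	uint32_t base = 0x40120, offset = 0x40124;	/* made-up VRM addresses */

	printf("%d\n", match_resource_addr(base, offset));	/* 1: same resource */
	printf("%d\n", match_resource_addr(base, 0x40200));	/* 0: different resource */
	return 0;
}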
diff --git a/drivers/soc/qcom/pmic_glink.c b/drivers/soc/qcom/pmic_glink.c
index f913e9bd5..823fd108f 100644
--- a/drivers/soc/qcom/pmic_glink.c
+++ b/drivers/soc/qcom/pmic_glink.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/soc/qcom/pmic_glink.h>
+#include <linux/spinlock.h>
enum {
PMIC_GLINK_CLIENT_BATT = 0,
@@ -36,7 +37,7 @@ struct pmic_glink {
unsigned int pdr_state;
/* serializing clients list updates */
- struct mutex client_lock;
+ spinlock_t client_lock;
struct list_head clients;
};
@@ -58,10 +59,11 @@ static void _devm_pmic_glink_release_client(struct device *dev, void *res)
{
struct pmic_glink_client *client = (struct pmic_glink_client *)res;
struct pmic_glink *pg = client->pg;
+ unsigned long flags;
- mutex_lock(&pg->client_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
list_del(&client->node);
- mutex_unlock(&pg->client_lock);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
}
struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
@@ -72,6 +74,7 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
{
struct pmic_glink_client *client;
struct pmic_glink *pg = dev_get_drvdata(dev->parent);
+ unsigned long flags;
client = devres_alloc(_devm_pmic_glink_release_client, sizeof(*client), GFP_KERNEL);
if (!client)
@@ -83,9 +86,14 @@ struct pmic_glink_client *devm_pmic_glink_register_client(struct device *dev,
client->pdr_notify = pdr;
client->priv = priv;
- mutex_lock(&pg->client_lock);
+ mutex_lock(&pg->state_lock);
+ spin_lock_irqsave(&pg->client_lock, flags);
+
list_add(&client->node, &pg->clients);
- mutex_unlock(&pg->client_lock);
+ client->pdr_notify(client->priv, pg->client_state);
+
+ spin_unlock_irqrestore(&pg->client_lock, flags);
+ mutex_unlock(&pg->state_lock);
devres_add(dev, client);
@@ -107,6 +115,7 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
struct pmic_glink_client *client;
struct pmic_glink_hdr *hdr;
struct pmic_glink *pg = dev_get_drvdata(&rpdev->dev);
+ unsigned long flags;
if (len < sizeof(*hdr)) {
dev_warn(pg->dev, "ignoring truncated message\n");
@@ -115,10 +124,12 @@ static int pmic_glink_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
hdr = data;
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node) {
if (client->id == le32_to_cpu(hdr->owner))
client->cb(data, len, client->priv);
}
+ spin_unlock_irqrestore(&pg->client_lock, flags);
return 0;
}
@@ -158,6 +169,7 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
{
struct pmic_glink_client *client;
unsigned int new_state = pg->client_state;
+ unsigned long flags;
if (pg->client_state != SERVREG_SERVICE_STATE_UP) {
if (pg->pdr_state == SERVREG_SERVICE_STATE_UP && pg->ept)
@@ -168,8 +180,10 @@ static void pmic_glink_state_notify_clients(struct pmic_glink *pg)
}
if (new_state != pg->client_state) {
+ spin_lock_irqsave(&pg->client_lock, flags);
list_for_each_entry(client, &pg->clients, node)
client->pdr_notify(client->priv, new_state);
+ spin_unlock_irqrestore(&pg->client_lock, flags);
pg->client_state = new_state;
}
}
@@ -256,7 +270,7 @@ static int pmic_glink_probe(struct platform_device *pdev)
pg->dev = &pdev->dev;
INIT_LIST_HEAD(&pg->clients);
- mutex_init(&pg->client_lock);
+ spin_lock_init(&pg->client_lock);
mutex_init(&pg->state_lock);
match_data = (unsigned long *)of_device_get_match_data(&pdev->dev);
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a021dc718..daf64be96 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
@@ -557,7 +558,7 @@ static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
for (k = 0; k < msg->num_cmds; k++) {
- if (addr == msg->cmds[k].addr)
+ if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
return -EBUSY;
}
}
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 0efc1c3be..3e7cf04aa 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1880,7 +1880,7 @@ struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
/* check if we found a PDI, else find in bi-directional */
if (!pdi)
- pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
+ pdi = cdns_find_pdi(cdns, 0, stream->num_bd, stream->bd,
dai_id);
if (pdi) {
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index aabef9fc8..0d9c948e1 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -21,7 +21,7 @@
#include <linux/units.h>
#define CS42L43_FIFO_SIZE 16
-#define CS42L43_SPI_ROOT_HZ (40 * HZ_PER_MHZ)
+#define CS42L43_SPI_ROOT_HZ 49152000
#define CS42L43_SPI_MAX_LENGTH 65532
enum cs42l43_spi_cmd {
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c3e5cee18..09b6c1b45 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -660,18 +660,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (spi_imx->target_burst * 8 - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else {
- if (spi_imx->usedma) {
- ctrl |= (spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- } else {
- if (spi_imx->count >= MX51_ECSPI_CTRL_MAX_BURST)
- ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- else
- ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
- }
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
}
/* set clock speed */
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index f1e922fd3..955c920c4 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -349,7 +349,7 @@ static int stm32_qspi_wait_poll_status(struct stm32_qspi *qspi)
static int stm32_qspi_get_mode(u8 buswidth)
{
- if (buswidth == 4)
+ if (buswidth >= 4)
return CCR_BUSWIDTH_4;
return buswidth;
@@ -653,9 +653,7 @@ static int stm32_qspi_setup(struct spi_device *spi)
return -EINVAL;
mode = spi->mode & (SPI_TX_OCTAL | SPI_RX_OCTAL);
- if ((mode == SPI_TX_OCTAL || mode == SPI_RX_OCTAL) ||
- ((mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) &&
- gpiod_count(qspi->dev, "cs") == -ENOENT)) {
+ if (mode && gpiod_count(qspi->dev, "cs") == -ENOENT) {
dev_err(qspi->dev, "spi-rx-bus-width\\/spi-tx-bus-width\\/cs-gpios\n");
dev_err(qspi->dev, "configuration not supported\n");
@@ -676,10 +674,10 @@ static int stm32_qspi_setup(struct spi_device *spi)
qspi->cr_reg = CR_APMS | 3 << CR_FTHRES_SHIFT | CR_SSHIFT | CR_EN;
/*
- * Dual flash mode is only enable in case SPI_TX_OCTAL and SPI_TX_OCTAL
- * are both set in spi->mode and "cs-gpios" properties is found in DT
+	 * Dual flash mode is only enabled in case SPI_TX_OCTAL or SPI_RX_OCTAL
+	 * is set in spi->mode and the "cs-gpios" property is found in DT
*/
- if (mode == (SPI_TX_OCTAL | SPI_RX_OCTAL)) {
+ if (mode) {
qspi->cr_reg |= CR_DFM;
dev_dbg(qspi->dev, "Dual flash mode enable");
}
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4a68abcdc..4c4ff074e 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1016,8 +1016,10 @@ end_irq:
static irqreturn_t stm32fx_spi_irq_thread(int irq, void *dev_id)
{
struct spi_controller *ctrl = dev_id;
+ struct stm32_spi *spi = spi_controller_get_devdata(ctrl);
spi_finalize_current_transfer(ctrl);
+ stm32fx_spi_disable(spi);
return IRQ_HANDLED;
}
@@ -1055,7 +1057,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
if (!(sr & mask)) {
- dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
sr, ier);
spin_unlock_irqrestore(&spi->lock, flags);
return IRQ_NONE;
@@ -1185,8 +1187,6 @@ static int stm32_spi_prepare_msg(struct spi_controller *ctrl,
~clrb) | setb,
spi->base + spi->cfg->regs->cpol.reg);
- stm32_spi_enable(spi);
-
spin_unlock_irqrestore(&spi->lock, flags);
return 0;
@@ -1204,6 +1204,7 @@ static void stm32fx_spi_dma_tx_cb(void *data)
if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
spi_finalize_current_transfer(spi->ctrl);
+ stm32fx_spi_disable(spi);
}
}
@@ -1218,6 +1219,7 @@ static void stm32_spi_dma_rx_cb(void *data)
struct stm32_spi *spi = data;
spi_finalize_current_transfer(spi->ctrl);
+ spi->cfg->disable(spi);
}
/**
@@ -1305,6 +1307,8 @@ static int stm32fx_spi_transfer_one_irq(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, cr2);
+ stm32_spi_enable(spi);
+
/* starting data transfer when buffer is loaded */
if (spi->tx_buf)
spi->cfg->write_tx(spi);
@@ -1341,6 +1345,8 @@ static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
spin_lock_irqsave(&spi->lock, flags);
+ stm32_spi_enable(spi);
+
/* Be sure to have data in fifo before starting data transfer */
if (spi->tx_buf)
stm32h7_spi_write_txfifo(spi);
@@ -1372,6 +1378,8 @@ static void stm32fx_spi_transfer_one_dma_start(struct stm32_spi *spi)
*/
stm32_spi_set_bits(spi, STM32FX_SPI_CR2, STM32FX_SPI_CR2_ERRIE);
}
+
+ stm32_spi_enable(spi);
}
/**
@@ -1405,6 +1413,8 @@ static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
stm32_spi_set_bits(spi, STM32H7_SPI_IER, ier);
+ stm32_spi_enable(spi);
+
if (STM32_SPI_HOST_MODE(spi))
stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a2c467d9e..c349d6012 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -685,10 +685,12 @@ static int __spi_add_device(struct spi_device *spi)
* Make sure that multiple logical CS doesn't map to the same physical CS.
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
- if (status)
- return status;
+ if (!spi_controller_is_target(ctlr)) {
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
+ }
}
/* Set the bus ID string */
@@ -1242,6 +1244,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
else
rx_dev = ctlr->dev.parent;
+ ret = -ENOMSG;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
/* The sync is done before each transfer. */
unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
@@ -1271,6 +1274,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
}
}
}
+ /* No transfer has been mapped, bail out with success */
+ if (ret)
+ return 0;
ctlr->cur_rx_dma_dev = rx_dev;
ctlr->cur_tx_dma_dev = tx_dev;
diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c
index 674a350cc..fa068b34b 100644
--- a/drivers/spmi/hisi-spmi-controller.c
+++ b/drivers/spmi/hisi-spmi-controller.c
@@ -300,7 +300,6 @@ static int spmi_controller_probe(struct platform_device *pdev)
spin_lock_init(&spmi_controller->lock);
- ctrl->nr = spmi_controller->channel;
ctrl->dev.parent = pdev->dev.parent;
ctrl->dev.of_node = of_node_get(pdev->dev.of_node);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 9ed1180fe..937c15324 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -1462,8 +1462,8 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
*/
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
core = devm_ioremap(&ctrl->dev, res->start, resource_size(res));
- if (IS_ERR(core))
- return PTR_ERR(core);
+ if (!core)
+ return -ENOMEM;
pmic_arb->core_size = resource_size(res);
@@ -1495,15 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
"obsrvr");
pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
- if (IS_ERR(pmic_arb->rd_base))
- return PTR_ERR(pmic_arb->rd_base);
+ if (!pmic_arb->rd_base)
+ return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"chnls");
pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start,
resource_size(res));
- if (IS_ERR(pmic_arb->wr_base))
- return PTR_ERR(pmic_arb->wr_base);
+ if (!pmic_arb->wr_base)
+ return -ENOMEM;
}
pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 9f30e0eda..bdb6595ff 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -341,11 +341,13 @@ static int ssb_bus_match(struct device *dev, struct device_driver *drv)
static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
+ const struct ssb_device *ssb_dev;
if (!dev)
return -ENODEV;
+ ssb_dev = dev_to_ssb_dev(dev);
+
return add_uevent_var(env,
"MODALIAS=ssb:v%04Xid%04Xrev%02X",
ssb_dev->id.vendor, ssb_dev->id.coreid,
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
index 854199500..aa6f266b6 100644
--- a/drivers/staging/greybus/arche-apb-ctrl.c
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -466,6 +466,7 @@ static const struct of_device_id arche_apb_ctrl_of_match[] = {
{ .compatible = "usbffff,2", },
{ },
};
+MODULE_DEVICE_TABLE(of, arche_apb_ctrl_of_match);
static struct platform_driver arche_apb_ctrl_device_driver = {
.probe = arche_apb_ctrl_probe,
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index 891b75327..b33977ccd 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -619,14 +619,7 @@ static const struct of_device_id arche_platform_of_match[] = {
{ .compatible = "google,arche-platform", },
{ },
};
-
-static const struct of_device_id arche_combined_id[] = {
- /* Use PID/VID of SVC device */
- { .compatible = "google,arche-platform", },
- { .compatible = "usbffff,2", },
- { },
-};
-MODULE_DEVICE_TABLE(of, arche_combined_id);
+MODULE_DEVICE_TABLE(of, arche_platform_of_match);
static struct platform_driver arche_platform_device_driver = {
.probe = arche_platform_probe,
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index a5c2fe963..00360f4a0 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -142,6 +142,9 @@ static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
channel = get_channel_from_mode(channel->light,
GB_CHANNEL_MODE_TORCH);
+ if (!channel)
+ return -EINVAL;
+
/* For not flash we need to convert brightness to intensity */
intensity = channel->intensity_uA.min +
(channel->intensity_uA.step * channel->led->brightness);
@@ -528,7 +531,10 @@ static int gb_lights_light_v4l2_register(struct gb_light *light)
}
channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
- WARN_ON(!channel_flash);
+ if (!channel_flash) {
+ dev_err(dev, "failed to get flash channel from mode\n");
+ return -EINVAL;
+ }
fled = &channel_flash->fled;
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 938a4ea89..8c30191b2 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -4690,6 +4690,7 @@ static int load_video_binaries(struct ia_css_pipe *pipe)
sizeof(struct ia_css_binary),
GFP_KERNEL);
if (!mycs->yuv_scaler_binary) {
+ mycs->num_yuv_scaler = 0;
err = -ENOMEM;
return err;
}
diff --git a/drivers/staging/media/starfive/camss/stf-camss.c b/drivers/staging/media/starfive/camss/stf-camss.c
index a587f8601..323aa70fd 100644
--- a/drivers/staging/media/starfive/camss/stf-camss.c
+++ b/drivers/staging/media/starfive/camss/stf-camss.c
@@ -162,6 +162,12 @@ err_isp_unregister:
static void stfcamss_unregister_devs(struct stfcamss *stfcamss)
{
+ struct stf_capture *cap_yuv = &stfcamss->captures[STF_CAPTURE_YUV];
+ struct stf_isp_dev *isp_dev = &stfcamss->isp_dev;
+
+ media_entity_remove_links(&isp_dev->subdev.entity);
+ media_entity_remove_links(&cap_yuv->video.vdev.entity);
+
stf_isp_unregister(&stfcamss->isp_dev);
stf_capture_unregister(stfcamss);
}
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 14e34eabc..4a1bfebb1 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -150,7 +150,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
{
struct proc_thermal_pci *pci_info = devid;
struct proc_thermal_device *proc_priv;
- int ret = IRQ_HANDLED;
+ int ret = IRQ_NONE;
u32 status;
proc_priv = pci_info->proc_priv;
@@ -175,6 +175,7 @@ static irqreturn_t proc_thermal_irq_handler(int irq, void *devid)
/* Disable enable interrupt flag */
proc_thermal_mmio_write(pci_info, PROC_THERMAL_MMIO_INT_ENABLE_0, 0);
pkg_thermal_schedule_work(&pci_info->work);
+ ret = IRQ_HANDLED;
}
pci_write_config_byte(pci_info->pdev, 0xdc, 0x01);
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index fd4bd650c..6b9422bd8 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -740,7 +740,11 @@ static int lvts_golden_temp_init(struct device *dev, u32 *value, int temp_offset
gt = (*value) >> 24;
- if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
+	/* A zero value for gt means that the device has invalid efuse data */
+ if (!gt)
+ return -ENODATA;
+
+ if (gt < LVTS_GOLDEN_TEMP_MAX)
golden_temp = gt;
golden_temp_offset = golden_temp * 500 + temp_offset;
@@ -1530,11 +1534,15 @@ static const struct lvts_data mt7988_lvts_ap_data = {
static const struct lvts_data mt8192_lvts_mcu_data = {
.lvts_ctrl = mt8192_lvts_mcu_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_mcu_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
};
static const struct lvts_data mt8192_lvts_ap_data = {
.lvts_ctrl = mt8192_lvts_ap_data_ctrl,
.num_lvts_ctrl = ARRAY_SIZE(mt8192_lvts_ap_data_ctrl),
+ .temp_factor = LVTS_COEFF_A_MT8195,
+ .temp_offset = LVTS_COEFF_B_MT8195,
};
static const struct lvts_data mt8195_lvts_mcu_data = {
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index f6edb12ec..5225b3621 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -95,6 +95,9 @@ static int lmh_probe(struct platform_device *pdev)
unsigned int enable_alg;
u32 node_id;
+ if (!qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
lmh_data = devm_kzalloc(dev, sizeof(*lmh_data), GFP_KERNEL);
if (!lmh_data)
return -ENOMEM;
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 6d7c16ccb..4edee8d92 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -264,7 +264,7 @@ void compute_intercept_slope(struct tsens_priv *priv, u32 *p1,
for (i = 0; i < priv->num_sensors; i++) {
dev_dbg(priv->dev,
"%s: sensor%d - data_point1:%#x data_point2:%#x\n",
- __func__, i, p1[i], p2[i]);
+ __func__, i, p1[i], p2 ? p2[i] : 0);
if (!priv->sensor[i].slope)
priv->sensor[i].slope = SLOPE_DEFAULT;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 34a31bc72..28888ac43 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -431,7 +431,6 @@ static void update_temperature(struct thermal_zone_device *tz)
trace_thermal_temperature(tz);
thermal_genl_sampling_temp(tz->id, temp);
- thermal_debug_update_temp(tz);
}
static void thermal_zone_device_check(struct work_struct *work)
@@ -475,6 +474,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
for_each_trip(tz, trip)
handle_thermal_trip(tz, trip);
+ thermal_debug_update_temp(tz);
+
monitor_thermal_zone(tz);
}
@@ -897,6 +898,7 @@ __thermal_cooling_device_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
+ unsigned long current_state;
int id, ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
@@ -934,6 +936,18 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret)
goto out_cdev_type;
+ /*
+ * The cooling device's current state is only needed for debug
+ * initialization below, so a failure to get it does not cause
+ * the entire cooling device initialization to fail. However,
+ * the debug will not work for the device if its initial state
+ * cannot be determined and drivers are responsible for ensuring
+ * that this will not happen.
+ */
+ ret = cdev->ops->get_cur_state(cdev, &current_state);
+ if (ret)
+ current_state = ULONG_MAX;
+
thermal_cooling_device_setup_sysfs(cdev);
ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -947,6 +961,9 @@ __thermal_cooling_device_register(struct device_node *np,
return ERR_PTR(ret);
}
+ if (current_state <= cdev->max_state)
+ thermal_debug_cdev_add(cdev, current_state);
+
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -962,8 +979,6 @@ __thermal_cooling_device_register(struct device_node *np,
mutex_unlock(&thermal_list_lock);
- thermal_debug_cdev_add(cdev);
-
return cdev;
out_cooling_dev:
@@ -1618,6 +1633,12 @@ static int thermal_pm_notify(struct notifier_block *nb,
static struct notifier_block thermal_pm_nb = {
.notifier_call = thermal_pm_notify,
+ /*
+ * Run at the lowest priority to avoid interference between the thermal
+ * zone resume work items spawned by thermal_pm_notify() and the other
+ * PM notifiers.
+ */
+ .priority = INT_MIN,
};
static int __init thermal_init(void)
diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c
index 5693cc8b2..403f74d66 100644
--- a/drivers/thermal/thermal_debugfs.c
+++ b/drivers/thermal/thermal_debugfs.c
@@ -435,6 +435,14 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
}
cdev_dbg->current_state = new_state;
+
+ /*
+ * Create a record for the new state if it is not there, so its
+ * duration will be printed by cdev_dt_seq_show() as expected if it
+ * runs before the next state transition.
+ */
+ thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, new_state);
+
transition = (old_state << 16) | new_state;
/*
@@ -460,8 +468,9 @@ void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
* Allocates a cooling device object for debug, initializes the
* statistics and create the entries in sysfs.
* @cdev: a pointer to a cooling device
+ * @state: current state of the cooling device
*/
-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state)
{
struct thermal_debugfs *thermal_dbg;
struct cdev_debugfs *cdev_dbg;
@@ -478,9 +487,16 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
INIT_LIST_HEAD(&cdev_dbg->durations[i]);
}
- cdev_dbg->current_state = 0;
+ cdev_dbg->current_state = state;
cdev_dbg->timestamp = ktime_get();
+ /*
+ * Create a record for the initial cooling device state, so its
+ * duration will be printed by cdev_dt_seq_show() as expected if it
+ * runs before the first state transition.
+ */
+ thermal_debugfs_cdev_record_get(thermal_dbg, cdev_dbg->durations, state);
+
debugfs_create_file("trans_table", 0400, thermal_dbg->d_top,
thermal_dbg, &tt_fops);
@@ -555,7 +571,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
struct tz_episode *tze;
struct tz_debugfs *tz_dbg;
struct thermal_debugfs *thermal_dbg = tz->debugfs;
- int temperature = tz->temperature;
int trip_id = thermal_zone_trip_id(tz, trip);
ktime_t now = ktime_get();
@@ -624,12 +639,6 @@ void thermal_debug_tz_trip_up(struct thermal_zone_device *tz,
tze = list_first_entry(&tz_dbg->tz_episodes, struct tz_episode, node);
tze->trip_stats[trip_id].timestamp = now;
- tze->trip_stats[trip_id].max = max(tze->trip_stats[trip_id].max, temperature);
- tze->trip_stats[trip_id].min = min(tze->trip_stats[trip_id].min, temperature);
- tze->trip_stats[trip_id].count++;
- tze->trip_stats[trip_id].avg = tze->trip_stats[trip_id].avg +
- (temperature - tze->trip_stats[trip_id].avg) /
- tze->trip_stats[trip_id].count;
unlock:
mutex_unlock(&thermal_dbg->lock);
diff --git a/drivers/thermal/thermal_debugfs.h b/drivers/thermal/thermal_debugfs.h
index 155b9af5f..c28bd4c11 100644
--- a/drivers/thermal/thermal_debugfs.h
+++ b/drivers/thermal/thermal_debugfs.h
@@ -2,7 +2,7 @@
#ifdef CONFIG_THERMAL_DEBUGFS
void thermal_debug_init(void);
-void thermal_debug_cdev_add(struct thermal_cooling_device *cdev);
+void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state);
void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev);
void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev, int state);
void thermal_debug_tz_add(struct thermal_zone_device *tz);
@@ -14,7 +14,7 @@ void thermal_debug_tz_trip_down(struct thermal_zone_device *tz,
void thermal_debug_update_temp(struct thermal_zone_device *tz);
#else
static inline void thermal_debug_init(void) {}
-static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) {}
+static inline void thermal_debug_cdev_add(struct thermal_cooling_device *cdev, int state) {}
static inline void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) {}
static inline void thermal_debug_cdev_state_update(const struct thermal_cooling_device *cdev,
int state) {}
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index e324cd899..0754fe76e 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -943,8 +943,9 @@ static void margining_port_init(struct tb_port *port)
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
- if (independent_voltage_margins(usb4) ||
- (supports_time(usb4) && independent_time_margins(usb4)))
+ if (independent_voltage_margins(usb4) == USB4_MARGIN_CAP_0_VOLTAGE_HL ||
+ (supports_time(usb4) &&
+ independent_time_margins(usb4) == USB4_MARGIN_CAP_1_TIME_LR))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 4036566fe..afbf7837b 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -245,16 +245,18 @@ enum gsm_encoding {
enum gsm_mux_state {
GSM_SEARCH,
- GSM_START,
- GSM_ADDRESS,
- GSM_CONTROL,
- GSM_LEN,
- GSM_DATA,
- GSM_FCS,
- GSM_OVERRUN,
- GSM_LEN0,
- GSM_LEN1,
- GSM_SSOF,
+ GSM0_ADDRESS,
+ GSM0_CONTROL,
+ GSM0_LEN0,
+ GSM0_LEN1,
+ GSM0_DATA,
+ GSM0_FCS,
+ GSM0_SSOF,
+ GSM1_START,
+ GSM1_ADDRESS,
+ GSM1_CONTROL,
+ GSM1_DATA,
+ GSM1_OVERRUN,
};
/*
@@ -2847,6 +2849,30 @@ invalid:
return;
}
+/**
+ * gsm0_receive_state_check_and_fix - check and correct receive state
+ * @gsm: gsm data for this ldisc instance
+ *
+ * Ensures that the current receive state is valid for basic option mode.
+ */
+
+static void gsm0_receive_state_check_and_fix(struct gsm_mux *gsm)
+{
+ switch (gsm->state) {
+ case GSM_SEARCH:
+ case GSM0_ADDRESS:
+ case GSM0_CONTROL:
+ case GSM0_LEN0:
+ case GSM0_LEN1:
+ case GSM0_DATA:
+ case GSM0_FCS:
+ case GSM0_SSOF:
+ break;
+ default:
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+}
/**
* gsm0_receive - perform processing for non-transparency
@@ -2860,26 +2886,27 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
{
unsigned int len;
+ gsm0_receive_state_check_and_fix(gsm);
switch (gsm->state) {
case GSM_SEARCH: /* SOF marker */
if (c == GSM0_SOF) {
- gsm->state = GSM_ADDRESS;
+ gsm->state = GSM0_ADDRESS;
gsm->address = 0;
gsm->len = 0;
gsm->fcs = INIT_FCS;
}
break;
- case GSM_ADDRESS: /* Address EA */
+ case GSM0_ADDRESS: /* Address EA */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
- gsm->state = GSM_CONTROL;
+ gsm->state = GSM0_CONTROL;
break;
- case GSM_CONTROL: /* Control Byte */
+ case GSM0_CONTROL: /* Control Byte */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->control = c;
- gsm->state = GSM_LEN0;
+ gsm->state = GSM0_LEN0;
break;
- case GSM_LEN0: /* Length EA */
+ case GSM0_LEN0: /* Length EA */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->len, c)) {
if (gsm->len > gsm->mru) {
@@ -2889,14 +2916,14 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
}
gsm->count = 0;
if (!gsm->len)
- gsm->state = GSM_FCS;
+ gsm->state = GSM0_FCS;
else
- gsm->state = GSM_DATA;
+ gsm->state = GSM0_DATA;
break;
}
- gsm->state = GSM_LEN1;
+ gsm->state = GSM0_LEN1;
break;
- case GSM_LEN1:
+ case GSM0_LEN1:
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
len = c;
gsm->len |= len << 7;
@@ -2907,26 +2934,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
}
gsm->count = 0;
if (!gsm->len)
- gsm->state = GSM_FCS;
+ gsm->state = GSM0_FCS;
else
- gsm->state = GSM_DATA;
+ gsm->state = GSM0_DATA;
break;
- case GSM_DATA: /* Data */
+ case GSM0_DATA: /* Data */
gsm->buf[gsm->count++] = c;
- if (gsm->count == gsm->len) {
+ if (gsm->count >= MAX_MRU) {
+ gsm->bad_size++;
+ gsm->state = GSM_SEARCH;
+ } else if (gsm->count >= gsm->len) {
/* Calculate final FCS for UI frames over all data */
if ((gsm->control & ~PF) != UIH) {
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf,
gsm->count);
}
- gsm->state = GSM_FCS;
+ gsm->state = GSM0_FCS;
}
break;
- case GSM_FCS: /* FCS follows the packet */
+ case GSM0_FCS: /* FCS follows the packet */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
- gsm->state = GSM_SSOF;
+ gsm->state = GSM0_SSOF;
break;
- case GSM_SSOF:
+ case GSM0_SSOF:
gsm->state = GSM_SEARCH;
if (c == GSM0_SOF)
gsm_queue(gsm);
@@ -2940,6 +2970,29 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
}
/**
+ * gsm1_receive_state_check_and_fix - check and correct receive state
+ * @gsm: gsm data for this ldisc instance
+ *
+ * Ensures that the current receive state is valid for advanced option mode.
+ */
+
+static void gsm1_receive_state_check_and_fix(struct gsm_mux *gsm)
+{
+ switch (gsm->state) {
+ case GSM_SEARCH:
+ case GSM1_START:
+ case GSM1_ADDRESS:
+ case GSM1_CONTROL:
+ case GSM1_DATA:
+ case GSM1_OVERRUN:
+ break;
+ default:
+ gsm->state = GSM_SEARCH;
+ break;
+ }
+}
+
+/**
* gsm1_receive - perform processing for non-transparency
* @gsm: gsm data for this ldisc instance
* @c: character
@@ -2949,6 +3002,7 @@ static void gsm0_receive(struct gsm_mux *gsm, u8 c)
static void gsm1_receive(struct gsm_mux *gsm, u8 c)
{
+ gsm1_receive_state_check_and_fix(gsm);
/* handle XON/XOFF */
if ((c & ISO_IEC_646_MASK) == XON) {
gsm->constipated = true;
@@ -2961,11 +3015,11 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
}
if (c == GSM1_SOF) {
/* EOF is only valid in frame if we have got to the data state */
- if (gsm->state == GSM_DATA) {
+ if (gsm->state == GSM1_DATA) {
if (gsm->count < 1) {
/* Missing FCS */
gsm->malformed++;
- gsm->state = GSM_START;
+ gsm->state = GSM1_START;
return;
}
/* Remove the FCS from data */
@@ -2981,14 +3035,14 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->buf[gsm->count]);
gsm->len = gsm->count;
gsm_queue(gsm);
- gsm->state = GSM_START;
+ gsm->state = GSM1_START;
return;
}
/* Any partial frame was a runt so go back to start */
- if (gsm->state != GSM_START) {
+ if (gsm->state != GSM1_START) {
if (gsm->state != GSM_SEARCH)
gsm->malformed++;
- gsm->state = GSM_START;
+ gsm->state = GSM1_START;
}
/* A SOF in GSM_START means we are still reading idling or
framing bytes */
@@ -3009,30 +3063,30 @@ static void gsm1_receive(struct gsm_mux *gsm, u8 c)
gsm->escape = false;
}
switch (gsm->state) {
- case GSM_START: /* First byte after SOF */
+ case GSM1_START: /* First byte after SOF */
gsm->address = 0;
- gsm->state = GSM_ADDRESS;
+ gsm->state = GSM1_ADDRESS;
gsm->fcs = INIT_FCS;
fallthrough;
- case GSM_ADDRESS: /* Address continuation */
+ case GSM1_ADDRESS: /* Address continuation */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
if (gsm_read_ea(&gsm->address, c))
- gsm->state = GSM_CONTROL;
+ gsm->state = GSM1_CONTROL;
break;
- case GSM_CONTROL: /* Control Byte */
+ case GSM1_CONTROL: /* Control Byte */
gsm->fcs = gsm_fcs_add(gsm->fcs, c);
gsm->control = c;
gsm->count = 0;
- gsm->state = GSM_DATA;
+ gsm->state = GSM1_DATA;
break;
- case GSM_DATA: /* Data */
- if (gsm->count > gsm->mru) { /* Allow one for the FCS */
- gsm->state = GSM_OVERRUN;
+ case GSM1_DATA: /* Data */
+ if (gsm->count > gsm->mru || gsm->count > MAX_MRU) { /* Allow one for the FCS */
+ gsm->state = GSM1_OVERRUN;
gsm->bad_size++;
} else
gsm->buf[gsm->count++] = c;
break;
- case GSM_OVERRUN: /* Over-long - eg a dropped SOF */
+ case GSM1_OVERRUN: /* Over-long - eg a dropped SOF */
break;
default:
pr_debug("%s: unhandled state: %d\n", __func__, gsm->state);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index f252d0b5a..5e9ca4376 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1619,15 +1619,25 @@ static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_closing(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_closing(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_closing(tty, cp, fp, count, false);
} else {
- if (la_count > 0)
+ if (la_count > 0) {
n_tty_receive_buf_standard(tty, cp, fp, la_count, true);
- if (count > la_count)
- n_tty_receive_buf_standard(tty, cp, fp, count - la_count, false);
+ cp += la_count;
+ if (fp)
+ fp += la_count;
+ count -= la_count;
+ }
+ if (count > 0)
+ n_tty_receive_buf_standard(tty, cp, fp, count, false);
flush_echoes(tty);
if (tty->ops->flush_chars)
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 5daa38d9c..61d81b11f 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -675,18 +675,46 @@ static void init_real_clk_rates(struct device *dev, struct brcmuart_priv *priv)
clk_set_rate(priv->baud_mux_clk, priv->default_mux_rate);
}
+static u32 find_quot(struct device *dev, u32 freq, u32 baud, u32 *percent)
+{
+ u32 quot;
+ u32 rate;
+ u64 hires_rate;
+ u64 hires_baud;
+ u64 hires_err;
+
+ rate = freq / 16;
+ quot = DIV_ROUND_CLOSEST(rate, baud);
+ if (!quot)
+ return 0;
+
+ /* increase resolution to get xx.xx percent */
+ hires_rate = div_u64((u64)rate * 10000, (u64)quot);
+ hires_baud = (u64)baud * 10000;
+
+ /* get the delta */
+ if (hires_rate > hires_baud)
+ hires_err = (hires_rate - hires_baud);
+ else
+ hires_err = (hires_baud - hires_rate);
+
+ *percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
+
+ dev_dbg(dev, "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
+ baud, freq, *percent / 100, *percent % 100);
+
+ return quot;
+}
+
static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
u32 baud)
{
u32 percent;
u32 best_percent = UINT_MAX;
u32 quot;
+ u32 freq;
u32 best_quot = 1;
- u32 rate;
- int best_index = -1;
- u64 hires_rate;
- u64 hires_baud;
- u64 hires_err;
+ u32 best_freq = 0;
int rc;
int i;
int real_baud;
@@ -695,44 +723,35 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
if (priv->baud_mux_clk == NULL)
return;
- /* Find the closest match for specified baud */
- for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
- if (priv->real_rates[i] == 0)
- continue;
- rate = priv->real_rates[i] / 16;
- quot = DIV_ROUND_CLOSEST(rate, baud);
- if (!quot)
- continue;
-
- /* increase resolution to get xx.xx percent */
- hires_rate = (u64)rate * 10000;
- hires_baud = (u64)baud * 10000;
-
- hires_err = div_u64(hires_rate, (u64)quot);
-
- /* get the delta */
- if (hires_err > hires_baud)
- hires_err = (hires_err - hires_baud);
- else
- hires_err = (hires_baud - hires_err);
-
- percent = (unsigned long)DIV_ROUND_CLOSEST_ULL(hires_err, baud);
- dev_dbg(up->dev,
- "Baud rate: %u, MUX Clk: %u, Error: %u.%u%%\n",
- baud, priv->real_rates[i], percent / 100,
- percent % 100);
- if (percent < best_percent) {
- best_percent = percent;
- best_index = i;
- best_quot = quot;
+ /* Try default_mux_rate first */
+ quot = find_quot(up->dev, priv->default_mux_rate, baud, &percent);
+ if (quot) {
+ best_percent = percent;
+ best_freq = priv->default_mux_rate;
+ best_quot = quot;
+ }
+ /* If more than 1% error, find the closest match for specified baud */
+ if (best_percent > 100) {
+ for (i = 0; i < ARRAY_SIZE(priv->real_rates); i++) {
+ freq = priv->real_rates[i];
+ if (freq == 0 || freq == priv->default_mux_rate)
+ continue;
+ quot = find_quot(up->dev, freq, baud, &percent);
+ if (!quot)
+ continue;
+
+ if (percent < best_percent) {
+ best_percent = percent;
+ best_freq = freq;
+ best_quot = quot;
+ }
}
}
- if (best_index == -1) {
+ if (!best_freq) {
dev_err(up->dev, "Error, %d BAUD rate is too fast.\n", baud);
return;
}
- rate = priv->real_rates[best_index];
- rc = clk_set_rate(priv->baud_mux_clk, rate);
+ rc = clk_set_rate(priv->baud_mux_clk, best_freq);
if (rc)
dev_err(up->dev, "Error selecting BAUD MUX clock\n");
@@ -741,8 +760,8 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
dev_err(up->dev, "Error, baud: %d has %u.%u%% error\n",
baud, percent / 100, percent % 100);
- real_baud = rate / 16 / best_quot;
- dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", rate);
+ real_baud = best_freq / 16 / best_quot;
+ dev_dbg(up->dev, "Selecting BAUD MUX rate: %u\n", best_freq);
dev_dbg(up->dev, "Requested baud: %u, Actual baud: %u\n",
baud, real_baud);
@@ -751,7 +770,7 @@ static void set_clock_mux(struct uart_port *up, struct brcmuart_priv *priv,
i += (i / 2);
priv->char_wait = ns_to_ktime(i);
- up->uartclk = rate;
+ up->uartclk = best_freq;
}
static void brcmstb_set_termios(struct uart_port *up,
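For reference, the new find_quot() helper above scales both the achievable rate and the requested baud by 10000 so the mismatch can be reported in hundredths of a percent, and set_clock_mux() only scans the full rate table when the default MUX clock misses by more than 1%. A minimal standalone sketch of that arithmetic follows; the 81 MHz clock and 115200 baud figures are made-up example inputs, and DIV_ROUND_CLOSEST()/div_u64() are open-coded rather than taken from the kernel headers.

#include <stdio.h>

/* Open-coded stand-in for the kernel's DIV_ROUND_CLOSEST()/div_u64(). */
static unsigned long long div_round_closest(unsigned long long n, unsigned long long d)
{
        return (n + d / 2) / d;
}

/* Mirrors the arithmetic in find_quot(): returns the divisor and stores the
 * baud rate error in hundredths of a percent. */
static unsigned int find_quot_example(unsigned int freq, unsigned int baud,
                                      unsigned int *percent)
{
        unsigned int rate = freq / 16;
        unsigned int quot = (unsigned int)div_round_closest(rate, baud);
        unsigned long long hires_rate, hires_baud, hires_err;

        if (!quot)
                return 0;

        /* scale by 10000 so the delta can be expressed as xx.xx percent */
        hires_rate = (unsigned long long)rate * 10000 / quot;
        hires_baud = (unsigned long long)baud * 10000;
        hires_err = hires_rate > hires_baud ? hires_rate - hires_baud
                                            : hires_baud - hires_rate;

        *percent = (unsigned int)div_round_closest(hires_err, baud);
        return quot;
}

int main(void)
{
        unsigned int percent = 0;
        unsigned int quot = find_quot_example(81000000, 115200, &percent);

        printf("quot=%u error=%u.%02u%%\n", quot, percent / 100, percent % 100);
        return 0;
}

With those example numbers the divisor comes out as 44 and the reported error as 0.12%, i.e. well under the 1% threshold, so the rate table scan would be skipped.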
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 1300c92b8..a58890fd5 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -55,6 +55,34 @@
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
#define DW_UART_QUIRK_APMC0D08 BIT(4)
+#define DW_UART_QUIRK_CPR_VALUE BIT(5)
+
+struct dw8250_platform_data {
+ u8 usr_reg;
+ u32 cpr_value;
+ unsigned int quirks;
+};
+
+struct dw8250_data {
+ struct dw8250_port_data data;
+ const struct dw8250_platform_data *pdata;
+
+ int msr_mask_on;
+ int msr_mask_off;
+ struct clk *clk;
+ struct clk *pclk;
+ struct notifier_block clk_notifier;
+ struct work_struct clk_work;
+ struct reset_control *rst;
+
+ unsigned int skip_autocfg:1;
+ unsigned int uart_16550_compatible:1;
+};
+
+static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
+{
+ return container_of(data, struct dw8250_data, data);
+}
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -445,6 +473,10 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
+ u32 cpr_value = data->pdata ? data->pdata->cpr_value : 0;
+
+ if (quirks & DW_UART_QUIRK_CPR_VALUE)
+ data->data.cpr_value = cpr_value;
#ifdef CONFIG_64BIT
if (quirks & DW_UART_QUIRK_OCTEON) {
@@ -727,8 +759,8 @@ static const struct dw8250_platform_data dw8250_armada_38x_data = {
static const struct dw8250_platform_data dw8250_renesas_rzn1_data = {
.usr_reg = DW_UART_USR,
- .cpr_val = 0x00012f32,
- .quirks = DW_UART_QUIRK_IS_DMA_FC,
+ .cpr_value = 0x00012f32,
+ .quirks = DW_UART_QUIRK_CPR_VALUE | DW_UART_QUIRK_IS_DMA_FC,
};
static const struct dw8250_platform_data dw8250_starfive_jh7100_data = {
diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c
index 3e33ddf7b..5a2520943 100644
--- a/drivers/tty/serial/8250/8250_dwlib.c
+++ b/drivers/tty/serial/8250/8250_dwlib.c
@@ -242,7 +242,6 @@ static const struct serial_rs485 dw8250_rs485_supported = {
void dw8250_setup_port(struct uart_port *p)
{
struct dw8250_port_data *pd = p->private_data;
- struct dw8250_data *data = to_dw8250_data(pd);
struct uart_8250_port *up = up_to_u8250p(p);
u32 reg, old_dlf;
@@ -278,7 +277,7 @@ void dw8250_setup_port(struct uart_port *p)
reg = dw8250_readl_ext(p, DW_UART_CPR);
if (!reg) {
- reg = data->pdata->cpr_val;
+ reg = pd->cpr_value;
dev_dbg(p->dev, "CPR is not available, using 0x%08x instead\n", reg);
}
if (!reg)
diff --git a/drivers/tty/serial/8250/8250_dwlib.h b/drivers/tty/serial/8250/8250_dwlib.h
index f13e91f2c..7dd2a8e7b 100644
--- a/drivers/tty/serial/8250/8250_dwlib.h
+++ b/drivers/tty/serial/8250/8250_dwlib.h
@@ -2,15 +2,10 @@
/* Synopsys DesignWare 8250 library header file. */
#include <linux/io.h>
-#include <linux/notifier.h>
#include <linux/types.h>
-#include <linux/workqueue.h>
#include "8250.h"
-struct clk;
-struct reset_control;
-
struct dw8250_port_data {
/* Port properties */
int line;
@@ -19,42 +14,16 @@ struct dw8250_port_data {
struct uart_8250_dma dma;
/* Hardware configuration */
+ u32 cpr_value;
u8 dlf_size;
/* RS485 variables */
bool hw_rs485_support;
};
-struct dw8250_platform_data {
- u8 usr_reg;
- u32 cpr_val;
- unsigned int quirks;
-};
-
-struct dw8250_data {
- struct dw8250_port_data data;
- const struct dw8250_platform_data *pdata;
-
- int msr_mask_on;
- int msr_mask_off;
- struct clk *clk;
- struct clk *pclk;
- struct notifier_block clk_notifier;
- struct work_struct clk_work;
- struct reset_control *rst;
-
- unsigned int skip_autocfg:1;
- unsigned int uart_16550_compatible:1;
-};
-
void dw8250_do_set_termios(struct uart_port *p, struct ktermios *termios, const struct ktermios *old);
void dw8250_setup_port(struct uart_port *p);
-static inline struct dw8250_data *to_dw8250_data(struct dw8250_port_data *data)
-{
- return container_of(data, struct dw8250_data, data);
-}
-
static inline u32 dw8250_readl_ext(struct uart_port *p, int offset)
{
if (p->iotype == UPIO_MEM32BE)
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 0440df7de..4d1e07343 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -46,8 +46,50 @@
#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
+#define PCI_VENDOR_ID_CONNECT_TECH 0x12c4
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_SP_OPTO 0x0340
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_A 0x0341
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_SP_OPTO_B 0x0342
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS 0x0350
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_A 0x0351
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_B 0x0352
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS 0x0353
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_A 0x0354
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_16_XPRS_B 0x0355
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XPRS_OPTO 0x0360
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_A 0x0361
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XPRS_OPTO_B 0x0362
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP 0x0370
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232 0x0371
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_485 0x0372
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_SP 0x0373
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_6_2_SP 0x0374
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_6_SP 0x0375
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_SP_232_NS 0x0376
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_LEFT 0x0380
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_2_XP_OPTO_RIGHT 0x0381
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_XP_OPTO 0x0382
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_4_4_XPRS_OPTO 0x0392
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP 0x03A0
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232 0x03A1
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_485 0x03A2
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_XPRS_LP_232_NS 0x03A3
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XEG001 0x0602
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_BASE 0x1000
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_2 0x1002
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_4 0x1004
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_8 0x1008
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_12 0x100C
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_PCIE_XR35X_16 0x1010
+#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG00X 0x110c
+#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_12_XIG01X 0x110d
+#define PCI_DEVICE_ID_CONNECT_TECH_PCI_XR79X_16 0x1110
+
#define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+#define PCI_DEVICE_ID_EXAR_XR17V252 0x0252
+#define PCI_DEVICE_ID_EXAR_XR17V254 0x0254
+#define PCI_DEVICE_ID_EXAR_XR17V258 0x0258
#define PCI_SUBDEVICE_ID_USR_2980 0x0128
#define PCI_SUBDEVICE_ID_USR_2981 0x0129
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 9ff6bbe9c..d14988d14 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -209,15 +209,19 @@ static int mtk8250_startup(struct uart_port *port)
static void mtk8250_shutdown(struct uart_port *port)
{
-#ifdef CONFIG_SERIAL_8250_DMA
struct uart_8250_port *up = up_to_u8250p(port);
struct mtk8250_data *data = port->private_data;
+ int irq = data->rx_wakeup_irq;
+#ifdef CONFIG_SERIAL_8250_DMA
if (up->dma)
data->rx_status = DMA_RX_SHUTDOWN;
#endif
- return serial8250_do_shutdown(port);
+ serial8250_do_shutdown(port);
+
+ if (irq >= 0)
+ serial8250_do_set_mctrl(&up->port, TIOCM_RTS);
}
static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask)
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index f1a51b00b..ba96fa913 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -125,6 +125,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
uart.port.iotype = UPIO_MEM32;
uart.port.regshift = 2;
uart.port.fifosize = 64;
+ uart.tx_loadsz = 32;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e14813250..09c1678dd 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>
@@ -2010,7 +2011,7 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
struct imx_port *sport = imx_uart_ports[co->index];
struct imx_port_ucrs old_ucr;
unsigned long flags;
- unsigned int ucr1;
+ unsigned int ucr1, usr2;
int locked = 1;
if (sport->port.sysrq)
@@ -2041,8 +2042,8 @@ imx_uart_console_write(struct console *co, const char *s, unsigned int count)
* Finally, wait for transmitter to become empty
* and restore UCR1/2/3
*/
- while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
-
+ read_poll_timeout_atomic(imx_uart_readl, usr2, usr2 & USR2_TXDC,
+ 0, USEC_PER_SEC, false, sport, USR2);
imx_uart_ucrs_restore(sport, &old_ucr);
if (locked)
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 5efb2b593..3d2b83d6a 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -45,6 +45,9 @@
#include <linux/freezer.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
#include <linux/serial_max3100.h>
@@ -191,7 +194,7 @@ static void max3100_timeout(struct timer_list *t)
static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
{
struct spi_message message;
- u16 etx, erx;
+ __be16 etx, erx;
int status;
struct spi_transfer tran = {
.tx_buf = &etx,
@@ -213,7 +216,7 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
return 0;
}
-static int max3100_handlerx(struct max3100_port *s, u16 rx)
+static int max3100_handlerx_unlocked(struct max3100_port *s, u16 rx)
{
unsigned int status = 0;
int ret = 0, cts;
@@ -254,6 +257,17 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx)
return ret;
}
+static int max3100_handlerx(struct max3100_port *s, u16 rx)
+{
+ unsigned long flags;
+ int ret;
+
+ uart_port_lock_irqsave(&s->port, &flags);
+ ret = max3100_handlerx_unlocked(s, rx);
+ uart_port_unlock_irqrestore(&s->port, flags);
+ return ret;
+}
+
static void max3100_work(struct work_struct *w)
{
struct max3100_port *s = container_of(w, struct max3100_port, work);
@@ -738,13 +752,14 @@ static int max3100_probe(struct spi_device *spi)
mutex_lock(&max3100s_lock);
if (!uart_driver_registered) {
- uart_driver_registered = 1;
retval = uart_register_driver(&max3100_uart_driver);
if (retval) {
printk(KERN_ERR "Couldn't register max3100 uart driver\n");
mutex_unlock(&max3100s_lock);
return retval;
}
+
+ uart_driver_registered = 1;
}
for (i = 0; i < MAX_MAX3100; i++)
@@ -830,6 +845,7 @@ static void max3100_remove(struct spi_device *spi)
}
pr_debug("removing max3100 driver\n");
uart_unregister_driver(&max3100_uart_driver);
+ uart_driver_registered = 0;
mutex_unlock(&max3100s_lock);
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 929206a9a..ace2c4b33 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -25,7 +26,6 @@
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
#include <linux/units.h>
-#include <uapi/linux/sched/types.h>
#define SC16IS7XX_NAME "sc16is7xx"
#define SC16IS7XX_MAX_DEVS 8
@@ -554,16 +554,28 @@ static bool sc16is7xx_regmap_noinc(struct device *dev, unsigned int reg)
return reg == SC16IS7XX_RHR_REG;
}
+/*
+ * Configure programmable baud rate generator (divisor) according to the
+ * desired baud rate.
+ *
+ * From the datasheet, the divisor is computed according to:
+ *
+ *              XTAL1 input frequency
+ *             -----------------------
+ *                    prescaler
+ * divisor = ---------------------------
+ *            baud-rate x sampling-rate
+ */
static int sc16is7xx_set_baud(struct uart_port *port, int baud)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
u8 lcr;
- u8 prescaler = 0;
+ unsigned int prescaler = 1;
unsigned long clk = port->uartclk, div = clk / 16 / baud;
if (div >= BIT(16)) {
- prescaler = SC16IS7XX_MCR_CLKSEL_BIT;
- div /= 4;
+ prescaler = 4;
+ div /= prescaler;
}
/* Enable enhanced features */
@@ -573,9 +585,10 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
SC16IS7XX_EFR_ENABLE_BIT);
sc16is7xx_efr_unlock(port);
+ /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
- prescaler);
+ prescaler == 1 ? 0 : SC16IS7XX_MCR_CLKSEL_BIT);
/* Backup LCR and access special register set (DLL/DLH) */
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
@@ -591,7 +604,7 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
/* Restore LCR and access to general register set */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
- return DIV_ROUND_CLOSEST(clk / 16, div);
+ return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div);
}
static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
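Worked numbers may make the divisor formula above easier to follow: the prescaler stays at 1 unless the divisor would overflow the 16-bit DLL/DLH pair, in which case the MCR clock-select bit switches in a divide-by-4 prescaler. The sketch below mirrors that selection in plain C; the 14.7456 MHz clock and the baud rates in main() are example values only, not taken from any particular board or from the driver itself.

#include <stdio.h>

/* Illustrative only: follows the divisor/prescaler selection used by
 * sc16is7xx_set_baud() and returns the actual baud rate achieved. */
static int set_baud_example(unsigned long clk, int baud)
{
        unsigned int prescaler = 1;
        unsigned long div = clk / 16 / baud;

        if (div >= (1UL << 16)) {       /* divisor must fit in DLL/DLH */
                prescaler = 4;          /* divide-by-4 prescaler enabled */
                div /= prescaler;
        }

        /* Actual baud rate obtained with this divisor/prescaler pair. */
        return (int)(((clk / prescaler) / 16 + div / 2) / div);
}

int main(void)
{
        printf("9600   -> %d\n", set_baud_example(14745600, 9600));
        printf("115200 -> %d\n", set_baud_example(14745600, 115200));
        return 0;
}

With a 14.7456 MHz clock both requests divide evenly (divisors 96 and 8), so the achieved rates match the requested ones exactly.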
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 7e3a1c7b0..66fd117d8 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -63,6 +63,13 @@ static int serial_port_runtime_suspend(struct device *dev)
if (port->flags & UPF_DEAD)
return 0;
+ /*
+ * Nothing to do on pm_runtime_force_suspend(), see
+ * DEFINE_RUNTIME_DEV_PM_OPS.
+ */
+ if (!pm_runtime_enabled(dev))
+ return 0;
+
uart_port_lock_irqsave(port, &flags);
if (!port_dev->tx_enabled) {
uart_port_unlock_irqrestore(port, flags);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index e512eaa57..a6f3517dc 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1271,9 +1271,14 @@ static void sci_dma_rx_chan_invalidate(struct sci_port *s)
static void sci_dma_rx_release(struct sci_port *s)
{
struct dma_chan *chan = s->chan_rx_saved;
+ struct uart_port *port = &s->port;
+ unsigned long flags;
+ uart_port_lock_irqsave(port, &flags);
s->chan_rx_saved = NULL;
sci_dma_rx_chan_invalidate(s);
+ uart_port_unlock_irqrestore(port, flags);
+
dmaengine_terminate_sync(chan);
dma_free_coherent(chan->device->dev, s->buf_len_rx * 2, s->rx_buf[0],
sg_dma_address(&s->sg_rx[0]));
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3f68e213d..d80e9d4c9 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -545,6 +545,12 @@ int tty_set_ldisc(struct tty_struct *tty, int disc)
goto out;
}
+ if (tty->ops->ldisc_ok) {
+ retval = tty->ops->ldisc_ok(tty, disc);
+ if (retval)
+ goto out;
+ }
+
old_ldisc = tty->ldisc;
/* Shutdown the old discipline. */
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 9b5b98dfc..cd87e3d12 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3576,6 +3576,15 @@ static void con_cleanup(struct tty_struct *tty)
tty_port_put(&vc->port);
}
+/*
+ * We can't deal with anything but the N_TTY ldisc,
+ * because we can sleep in our write() routine.
+ */
+static int con_ldisc_ok(struct tty_struct *tty, int ldisc)
+{
+ return ldisc == N_TTY ? 0 : -EINVAL;
+}
+
static int default_color = 7; /* white */
static int default_italic_color = 2; // green (ASCII)
static int default_underline_color = 3; // cyan (ASCII)
@@ -3695,6 +3704,7 @@ static const struct tty_operations con_ops = {
.resize = vt_resize,
.shutdown = con_shutdown,
.cleanup = con_cleanup,
+ .ldisc_ok = con_ldisc_ok,
};
static struct cdev vc0_cdev;
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 768bf87cd..8944548c3 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -601,8 +601,7 @@ static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
while (sq_head_slot != hwq->sq_tail_slot) {
- utrd = hwq->sqe_base_addr +
- sq_head_slot * sizeof(struct utp_transfer_req_desc);
+ utrd = hwq->sqe_base_addr + sq_head_slot;
match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
if (addr == match) {
ufshcd_mcq_nullify_sqe(utrd);
@@ -635,20 +634,20 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
unsigned long flags;
- int err = FAILED;
+ int err;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
dev_err(hba->dev,
"%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag);
- goto out;
+ return FAILED;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
- goto out;
+ return FAILED;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
@@ -660,7 +659,7 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
- goto out;
+ return FAILED;
}
/*
@@ -668,18 +667,17 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
- if (ufshcd_try_to_abort_task(hba, tag)) {
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (err) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
- goto out;
+ return FAILED;
}
- err = SUCCESS;
spin_lock_irqsave(&hwq->cq_lock, flags);
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
-out:
- return err;
+ return SUCCESS;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index a0f8e9301..f7d04f7c0 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1392,7 +1392,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
- ufshcd_scsi_block_requests(hba);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
@@ -1401,7 +1401,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
goto out;
}
@@ -1422,7 +1422,7 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
mutex_unlock(&hba->wb_mutex);
- ufshcd_scsi_unblock_requests(hba);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
ufshcd_release(hba);
}
@@ -4289,7 +4289,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
* Make sure UIC command completion interrupt is disabled before
* issuing UIC command.
*/
- wmb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -8972,6 +8972,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
(hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
+ ufs_put_device_desc(hba);
ufshcd_hba_stop(hba);
ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
@@ -10400,7 +10401,7 @@ int ufshcd_system_restore(struct device *dev)
* are updated with the latest queue addresses. Only after
* updating these addresses, we can queue the new commands.
*/
- mb();
+ ufshcd_readl(hba, REG_UTP_TASK_REQ_LIST_BASE_H);
/* Resuming from hibernate, assume that link was OFF */
ufshcd_set_link_off(hba);
@@ -10621,7 +10622,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
* Make sure that UFS interrupts are disabled and any pending interrupt
* status is cleared before registering UFS interrupt handler.
*/
- mb();
+ ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index bb30267da..66811d8d1 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -136,7 +136,7 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
* Make sure the register was updated,
* UniPro layer will not work with an incorrect value.
*/
- mb();
+ ufshcd_readl(hba, CDNS_UFS_REG_HCLKDIV);
return 0;
}
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 7a00004bf..62c343444 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -284,9 +284,6 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
-
- /* make sure above configuration is applied before we return */
- mb();
}
/*
@@ -415,7 +412,7 @@ static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG2);
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -507,7 +504,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* make sure above write gets applied before we return from
* this function.
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_SYS1CLK_1US);
}
return 0;
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 9dd9a391e..b9de17098 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -151,10 +151,10 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
/*
- * Make sure assertion of ufs phy reset is written to
- * register before returning
+ * Dummy read to ensure the write takes effect before doing any sort
+ * of delay
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG1);
}
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
@@ -162,10 +162,10 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, 0, REG_UFS_CFG1);
/*
- * Make sure de-assertion of ufs phy reset is written to
- * register before returning
+ * Dummy read to ensure the write takes effect before doing any sort
+ * of delay
*/
- mb();
+ ufshcd_readl(hba, REG_UFS_CFG1);
}
/* Host controller hardware version: major.minor.step */
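The pattern running through these UFS changes is to replace a heavyweight mb() with a read-back of the register that was just written: on buses with posted writes, the read cannot complete until the preceding write has reached the device, so it serves as a flush before any delay or dependent operation. A rough sketch of the idea is below; the accessors, the register offset and the bit value are illustrative assumptions, not the driver's real API, and a real kernel driver would use writel()/readl() on an ioremapped region.

#include <stdint.h>

/* Illustrative MMIO accessors; stand-ins for writel()/readl(). */
static inline void mmio_write32(volatile uint32_t *base, unsigned int off, uint32_t val)
{
        base[off / 4] = val;
}

static inline uint32_t mmio_read32(volatile uint32_t *base, unsigned int off)
{
        return base[off / 4];
}

#define EXAMPLE_CFG1    0x0c            /* hypothetical register offset */
#define EXAMPLE_RESET   (1u << 16)      /* hypothetical reset bit */

void assert_reset_example(volatile uint32_t *base)
{
        mmio_write32(base, EXAMPLE_CFG1, EXAMPLE_RESET);

        /*
         * Dummy read of the register just written: the read forces the
         * posted write out to the device before any subsequent delay or
         * dependent step, without resorting to a full mb().
         */
        (void)mmio_read32(base, EXAMPLE_CFG1);
}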
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 3a9a0dd4b..949eca0ad 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_FSL_USB2) += host/
obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_MAX3421_HCD) += host/
+obj-$(CONFIG_USB_XEN_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index c553decb5..6830be441 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -266,14 +266,14 @@ static void wdm_int_callback(struct urb *urb)
dev_err(&desc->intf->dev, "Stall on int endpoint\n");
goto sw; /* halt is cleared in work */
default:
- dev_err(&desc->intf->dev,
+ dev_err_ratelimited(&desc->intf->dev,
"nonzero urb status received: %d\n", status);
break;
}
}
if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
- dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
+ dev_err_ratelimited(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
urb->actual_length);
goto exit;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index c0e005670..fb1aa0d4f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1623,6 +1623,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
+ unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1640,13 +1641,14 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
/*
- * This function can be called in task context inside another remote
- * coverage collection section, but kcov doesn't support that kind of
- * recursion yet. Only collect coverage in softirq context for now.
+ * Only collect coverage in the softirq context and disable interrupts
+ * to avoid scenarios with nested remote coverage collection sections
+ * that KCOV does not support.
+ * See the comment next to kcov_remote_start_usb_softirq() for details.
*/
- kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
+ flags = kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop_softirq();
+ kcov_remote_stop_softirq(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 497deed38..9ef821ca2 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -8,6 +8,7 @@
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -220,6 +221,7 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
struct gpio_desc *gpio;
+ const char *bios_ver;
int ret;
/* On BYT the FW does not always enable the refclock */
@@ -277,8 +279,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc,
* detection. These can be identified by them _not_
* using the standard ACPI battery and ac drivers.
*/
+ bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
if (acpi_dev_present("INT33FD", "1", 2) &&
- acpi_quirk_skip_acpi_ac_and_battery()) {
+ acpi_quirk_skip_acpi_ac_and_battery() &&
+ /* Lenovo Yoga Tablet 2 Pro 1380 uses LC824206XA instead */
+ !(bios_ver &&
+ strstarts(bios_ver, "BLADE_21.X64.0005.R00.1504101516"))) {
dev_info(&pdev->dev, "Using TUSB1211 phy for charger detection\n");
swnode = &dwc3_pci_intel_phy_charger_detect_swnode;
}
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index 958fc40ea..0655afe7f 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -95,6 +95,7 @@ static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
/**
* fotg210_vbus() - Called by gadget driver to enable/disable VBUS
+ * @fotg: pointer to a private fotg210 object
* @enable: true to enable VBUS, false to disable VBUS
*/
void fotg210_vbus(struct fotg210 *fotg, bool enable)
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 3c8a9dd58..2db01e03b 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -1029,9 +1029,9 @@ static inline int hidg_get_minor(void)
{
int ret;
- ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&hidg_ida, GFP_KERNEL);
if (ret >= HIDG_MINORS) {
- ida_simple_remove(&hidg_ida, ret);
+ ida_free(&hidg_ida, ret);
ret = -ENODEV;
}
@@ -1176,7 +1176,7 @@ static const struct config_item_type hid_func_type = {
static inline void hidg_put_minor(int minor)
{
- ida_simple_remove(&hidg_ida, minor);
+ ida_free(&hidg_ida, minor);
}
static void hidg_free_inst(struct usb_function_instance *f)
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 076dd4c1b..ba7d180cc 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1312,9 +1312,9 @@ static inline int gprinter_get_minor(void)
{
int ret;
- ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&printer_ida, GFP_KERNEL);
if (ret >= PRINTER_MINORS) {
- ida_simple_remove(&printer_ida, ret);
+ ida_free(&printer_ida, ret);
ret = -ENODEV;
}
@@ -1323,7 +1323,7 @@ static inline int gprinter_get_minor(void)
static inline void gprinter_put_minor(int minor)
{
- ida_simple_remove(&printer_ida, minor);
+ ida_free(&printer_ida, minor);
}
static int gprinter_setup(int);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 29bf8664b..12c5d9cf4 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -869,12 +869,12 @@ EXPORT_SYMBOL_GPL(rndis_msg_parser);
static inline int rndis_get_nr(void)
{
- return ida_simple_get(&rndis_ida, 0, 1000, GFP_KERNEL);
+ return ida_alloc_max(&rndis_ida, 999, GFP_KERNEL);
}
static inline void rndis_put_nr(int nr)
{
- ida_simple_remove(&rndis_ida, nr);
+ ida_free(&rndis_ida, nr);
}
struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 4a42574b4..ec1dceb08 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -57,13 +57,13 @@ struct uac_rtd_params {
/* Volume/Mute controls and their state */
int fu_id; /* Feature Unit ID */
- struct snd_kcontrol *snd_kctl_volume;
- struct snd_kcontrol *snd_kctl_mute;
+ struct snd_ctl_elem_id snd_kctl_volume_id;
+ struct snd_ctl_elem_id snd_kctl_mute_id;
s16 volume_min, volume_max, volume_res;
s16 volume;
int mute;
- struct snd_kcontrol *snd_kctl_rate; /* read-only current rate */
+ struct snd_ctl_elem_id snd_kctl_rate_id; /* read-only current rate */
int srate; /* selected samplerate */
int active; /* playback/capture running */
@@ -494,14 +494,13 @@ static inline void free_ep_fback(struct uac_rtd_params *prm, struct usb_ep *ep)
static void set_active(struct uac_rtd_params *prm, bool active)
{
// notifying through the Rate ctrl
- struct snd_kcontrol *kctl = prm->snd_kctl_rate;
unsigned long flags;
spin_lock_irqsave(&prm->lock, flags);
if (prm->active != active) {
prm->active = active;
snd_ctl_notify(prm->uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
- &kctl->id);
+ &prm->snd_kctl_rate_id);
}
spin_unlock_irqrestore(&prm->lock, flags);
}
@@ -807,7 +806,7 @@ int u_audio_set_volume(struct g_audio *audio_dev, int playback, s16 val)
if (change)
snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
- &prm->snd_kctl_volume->id);
+ &prm->snd_kctl_volume_id);
return 0;
}
@@ -856,7 +855,7 @@ int u_audio_set_mute(struct g_audio *audio_dev, int playback, int val)
if (change)
snd_ctl_notify(uac->card, SNDRV_CTL_EVENT_MASK_VALUE,
- &prm->snd_kctl_mute->id);
+ &prm->snd_kctl_mute_id);
return 0;
}
@@ -1331,7 +1330,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
- prm->snd_kctl_mute = kctl;
+ prm->snd_kctl_mute_id = kctl->id;
prm->mute = 0;
}
@@ -1359,7 +1358,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
- prm->snd_kctl_volume = kctl;
+ prm->snd_kctl_volume_id = kctl->id;
prm->volume = fu->volume_max;
prm->volume_max = fu->volume_max;
prm->volume_min = fu->volume_min;
@@ -1383,7 +1382,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
err = snd_ctl_add(card, kctl);
if (err < 0)
goto snd_fail;
- prm->snd_kctl_rate = kctl;
+ prm->snd_kctl_rate_id = kctl->id;
}
strscpy(card->driver, card_name, sizeof(card->driver));
@@ -1420,6 +1419,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
return;
uac = g_audio->uac;
+ g_audio->uac = NULL;
+
card = uac->card;
if (card)
snd_card_free_when_closed(card);
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index a4377df61..6fac696ea 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -13,6 +13,7 @@
#include "uvc_configfs.h"
#include <linux/sort.h>
+#include <linux/usb/uvc.h>
#include <linux/usb/video.h>
/* -----------------------------------------------------------------------------
@@ -2260,6 +2261,8 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
struct f_uvc_opts *opts;
struct config_item *opts_item;
struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex;
+ const struct uvc_format_desc *format;
+ u8 tmpguidFormat[sizeof(ch->desc.guidFormat)];
int ret;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
@@ -2273,7 +2276,16 @@ static ssize_t uvcg_uncompressed_guid_format_store(struct config_item *item,
goto end;
}
- memcpy(ch->desc.guidFormat, page,
+ memcpy(tmpguidFormat, page,
+ min(sizeof(tmpguidFormat), len));
+
+ format = uvc_format_by_guid(tmpguidFormat);
+ if (!format) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ memcpy(ch->desc.guidFormat, tmpguidFormat,
min(sizeof(ch->desc.guidFormat), len));
ret = sizeof(ch->desc.guidFormat);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 69dd86669..990008aeb 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2269,24 +2269,24 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
}
static struct xhci_interrupter *
-xhci_alloc_interrupter(struct xhci_hcd *xhci, int segs, gfp_t flags)
+xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int segs, gfp_t flags)
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
- unsigned int num_segs = segs;
+ unsigned int max_segs;
int ret;
+ if (!segs)
+ segs = ERST_DEFAULT_SEGS;
+
+ max_segs = BIT(HCS_ERST_MAX(xhci->hcs_params2));
+ segs = min(segs, max_segs);
+
ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
if (!ir)
return NULL;
- /* number of ring segments should be greater than 0 */
- if (segs <= 0)
- num_segs = min_t(unsigned int, 1 << HCS_ERST_MAX(xhci->hcs_params2),
- ERST_MAX_SEGS);
-
- ir->event_ring = xhci_ring_alloc(xhci, num_segs, 1, TYPE_EVENT, 0,
- flags);
+ ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
kfree(ir);
@@ -2344,7 +2344,7 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
}
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_interrupter *ir;
@@ -2354,7 +2354,7 @@ xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg)
if (!xhci->interrupters || xhci->max_interrupters <= 1)
return NULL;
- ir = xhci_alloc_interrupter(xhci, num_seg, GFP_KERNEL);
+ ir = xhci_alloc_interrupter(xhci, segs, GFP_KERNEL);
if (!ir)
return NULL;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 93b697648..febf64723 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -36,6 +36,7 @@
#define PCI_VENDOR_ID_ETRON 0x1b6f
#define PCI_DEVICE_ID_EJ168 0x7023
+#define PCI_DEVICE_ID_EJ188 0x7052
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
@@ -270,17 +271,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Fresco Logic revision %u "
"has broken MSI implementation",
pdev->revision);
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
- pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1100)
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
-
if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST;
@@ -307,11 +303,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
- if (pdev->vendor == PCI_VENDOR_ID_AMD) {
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
- if (pdev->device == 0x43f7)
- xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
- }
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43f7)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
@@ -399,12 +392,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_BROKEN_STREAMS;
}
+ if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ pdev->device == PCI_DEVICE_ID_EJ188) {
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
+ }
+
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
pdev->device == 0x0014) {
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_ZERO_64B_REGS;
}
if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
@@ -434,7 +431,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) {
- xhci->quirks |= XHCI_TRUST_TX_LENGTH;
xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index ab9c5969e..8b3576477 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -214,8 +214,7 @@ static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
*/
#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
.firmware_name = firmware, \
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
- XHCI_SLOW_SUSPEND, \
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND, \
.init_quirk = xhci_rcar_init_quirk, \
.plat_start = xhci_rcar_start, \
.resume_quirk = xhci_rcar_resume_quirk,
@@ -229,8 +228,7 @@ static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
};
static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
- XHCI_SLOW_SUSPEND,
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND,
.init_quirk = xhci_rzv2m_init_quirk,
.plat_start = xhci_rzv2m_start,
};
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 575f0fd9c..48d745e9f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1034,13 +1034,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
break;
case TD_DIRTY: /* TD is cached, clear it */
case TD_HALTED:
+ case TD_CLEARING_CACHE_DEFERRED:
+ if (cached_td) {
+ if (cached_td->urb->stream_id != td->urb->stream_id) {
+ /* Multiple streams case, defer move dq */
+ xhci_dbg(xhci,
+ "Move dq deferred: stream %u URB %p\n",
+ td->urb->stream_id, td->urb);
+ td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+ break;
+ }
+
+ /* Should never happen, but clear the TD if it does */
+ xhci_warn(xhci,
+ "Found multiple active URBs %p and %p in stream %u?\n",
+ td->urb, cached_td->urb,
+ td->urb->stream_id);
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
+ }
+
td->cancel_status = TD_CLEARING_CACHE;
- if (cached_td)
- /* FIXME stream case, several stopped rings */
- xhci_dbg(xhci,
- "Move dq past stream %u URB %p instead of stream %u URB %p\n",
- td->urb->stream_id, td->urb,
- cached_td->urb->stream_id, cached_td->urb);
cached_td = td;
break;
}
@@ -1060,10 +1074,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
if (err) {
/* Failed to move past cached td, just set cached TDs to no-op */
list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
- if (td->cancel_status != TD_CLEARING_CACHE)
+ /*
+ * Deferred TDs need to have the deq pointer set after the above command
+ * completes, so if that failed we just give up on all of them (and
+ * complain loudly since this could cause issues due to caching).
+ */
+ if (td->cancel_status != TD_CLEARING_CACHE &&
+ td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
continue;
- xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
- td->urb);
+ xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+ td->urb);
td_to_noop(xhci, ring, td, false);
td->cancel_status = TD_CLEARED;
}
@@ -1350,6 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_td *td, *tmp_td;
+ bool deferred = false;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1436,6 +1457,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
__func__, td->urb);
xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+ deferred = true;
} else {
xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
__func__, td->urb, td->cancel_status);
@@ -1445,8 +1468,17 @@ cleanup:
ep->ep_state &= ~SET_DEQ_PENDING;
ep->queued_deq_seg = NULL;
ep->queued_deq_ptr = NULL;
- /* Restart any rings with pending URBs */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+ if (deferred) {
+ /* We have more streams to clear */
+ xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+ __func__);
+ xhci_invalidate_cancelled_tds(ep);
+ } else {
+ /* Restart any rings with pending URBs */
+ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ }
}
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -2399,8 +2431,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
break;
if (remaining) {
frame->status = short_framestatus;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
- sum_trbs_for_length = true;
+ sum_trbs_for_length = true;
break;
}
frame->status = 0;
@@ -2535,9 +2566,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
goto finish_td;
case COMP_STOPPED_LENGTH_INVALID:
/* stopped on ep trb with invalid length, exclude it */
- ep_trb_len = 0;
- remaining = 0;
- break;
+ td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+ goto finish_td;
case COMP_USB_TRANSACTION_ERROR:
if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
(ep->err_count++ > MAX_SOFT_RETRY) ||
@@ -2650,15 +2680,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* transfer type
*/
case COMP_SUCCESS:
- if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
- break;
- if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
- ep_ring->last_td_was_short)
+ if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
trb_comp_code = COMP_SHORT_PACKET;
- else
- xhci_warn_ratelimited(xhci,
- "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
- slot_id, ep_index);
+ xhci_dbg(xhci, "Successful completion on short TX for slot %u ep %u with last td short %d\n",
+ slot_id, ep_index, ep_ring->last_td_was_short);
+ }
break;
case COMP_SHORT_PACKET:
break;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f4bf98a6..1683d779e 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1276,6 +1276,7 @@ enum xhci_cancelled_td_status {
TD_DIRTY = 0,
TD_HALTED,
TD_CLEARING_CACHE,
+ TD_CLEARING_CACHE_DEFERRED,
TD_CLEARED,
};
@@ -1392,8 +1393,8 @@ struct urb_priv {
struct xhci_td td[] __counted_by(num_tds);
};
-/* Reasonable limit for number of Event Ring segments (spec allows 32k) */
-#define ERST_MAX_SEGS 2
+/* Number of Event Ring segments to allocate, when amount is not specified. (spec allows 32k) */
+#define ERST_DEFAULT_SEGS 2
/* Poll every 60 seconds */
#define POLL_TIMEOUT 60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
@@ -1589,7 +1590,7 @@ struct xhci_hcd {
#define XHCI_RESET_ON_RESUME BIT_ULL(7)
#define XHCI_SW_BW_CHECKING BIT_ULL(8)
#define XHCI_AMD_0x96_HOST BIT_ULL(9)
-#define XHCI_TRUST_TX_LENGTH BIT_ULL(10)
+#define XHCI_TRUST_TX_LENGTH BIT_ULL(10) /* Deprecated */
#define XHCI_LPM_SUPPORT BIT_ULL(11)
#define XHCI_INTEL_HOST BIT_ULL(12)
#define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
@@ -1729,8 +1730,6 @@ static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
-#define xhci_warn_ratelimited(xhci, fmt, args...) \
- dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_info(xhci, fmt, args...) \
dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
@@ -1833,7 +1832,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx);
struct xhci_interrupter *
-xhci_create_secondary_interrupter(struct usb_hcd *hcd, int num_seg);
+xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs);
void xhci_remove_secondary_interrupter(struct usb_hcd
*hcd, struct xhci_interrupter *ir);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index b00d92db5..eb5a8e0d9 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -677,7 +677,7 @@ static int uss720_probe(struct usb_interface *intf,
struct parport_uss720_private *priv;
struct parport *pp;
unsigned char reg;
- int i;
+ int ret;
dev_dbg(&intf->dev, "probe: vendor id 0x%x, device id 0x%x\n",
le16_to_cpu(usbdev->descriptor.idVendor),
@@ -688,8 +688,8 @@ static int uss720_probe(struct usb_interface *intf,
usb_put_dev(usbdev);
return -ENODEV;
}
- i = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
- dev_dbg(&intf->dev, "set interface result %d\n", i);
+ ret = usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 2);
+ dev_dbg(&intf->dev, "set interface result %d\n", ret);
interface = intf->cur_altsetting;
@@ -725,12 +725,18 @@ static int uss720_probe(struct usb_interface *intf,
set_1284_register(pp, 7, 0x00, GFP_KERNEL);
set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */
set_1284_register(pp, 2, 0x0c, GFP_KERNEL);
- /* debugging */
- get_1284_register(pp, 0, &reg, GFP_KERNEL);
+
+ /* The Belkin F5U002 Rev 2 P80453-B USB parallel port adapter shares the
+ * device ID 050d:0002 with some other device that works with this
+ * driver, but it itself does not. Detect and handle the bad cable
+ * here. */
+ ret = get_1284_register(pp, 0, &reg, GFP_KERNEL);
dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
+ if (ret < 0)
+ return ret;
- i = usb_find_last_int_in_endpoint(interface, &epd);
- if (!i) {
+ ret = usb_find_last_int_in_endpoint(interface, &epd);
+ if (!ret) {
dev_dbg(&intf->dev, "epaddr %d interval %d\n",
epd->bEndpointAddress, epd->bInterval);
}
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 115f05a62..40d34cc28 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -105,6 +105,8 @@ struct alauda_info {
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
+
+ bool media_initialized;
};
#define short_pack(lsb,msb) ( ((u16)(lsb)) | ( ((u16)(msb))<<8 ) )
@@ -476,11 +478,12 @@ static int alauda_check_media(struct us_data *us)
}
/* Check for media change */
- if (status[0] & 0x08) {
+ if (status[0] & 0x08 || !info->media_initialized) {
usb_stor_dbg(us, "Media change detected\n");
alauda_free_maps(&MEDIA_INFO(us));
- alauda_init_media(us);
-
+ rc = alauda_init_media(us);
+ if (rc == USB_STOR_TRANSPORT_GOOD)
+ info->media_initialized = true;
info->sense_key = UNIT_ATTENTION;
info->sense_asc = 0x28;
info->sense_ascq = 0x00;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 12cf9940e..7d87e2a52 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -86,6 +86,12 @@ static int slave_alloc (struct scsi_device *sdev)
if (us->protocol == USB_PR_BULK && us->max_lun > 0)
sdev->sdev_bflags |= BLIST_FORCELUN;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
return 0;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 08953f0d4..1ed2bdda0 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -21,6 +21,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
@@ -820,6 +821,12 @@ static int uas_slave_alloc(struct scsi_device *sdev)
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
+ /*
+ * Some USB storage devices reset if the IO advice hints grouping mode
+ * page is queried. Hence skip that mode page.
+ */
+ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS;
+
sdev->hostdata = devinfo;
/*
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index d3958c061..501eddb29 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -41,7 +41,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
const struct pmic_typec_resources *res;
struct regmap *regmap;
- struct device *bridge_dev;
+ struct auxiliary_device *bridge_dev;
u32 base;
int ret;
@@ -92,7 +92,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
if (!tcpm->tcpc.fwnode)
return -EINVAL;
- bridge_dev = drm_dp_hpd_bridge_register(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
+ bridge_dev = devm_drm_dp_hpd_bridge_alloc(tcpm->dev, to_of_node(tcpm->tcpc.fwnode));
if (IS_ERR(bridge_dev))
return PTR_ERR(bridge_dev);
@@ -110,8 +110,14 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
if (ret)
goto port_stop;
+ ret = devm_drm_dp_hpd_bridge_add(tcpm->dev, bridge_dev);
+ if (ret)
+ goto pdphy_stop;
+
return 0;
+pdphy_stop:
+ tcpm->pdphy_stop(tcpm);
port_stop:
tcpm->port_stop(tcpm);
port_unregister:
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 8a1af08f7..5d4da962a 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -3014,8 +3014,10 @@ static int tcpm_register_source_caps(struct tcpm_port *port)
memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
caps.role = TYPEC_SOURCE;
- if (cap)
+ if (cap) {
usb_power_delivery_unregister_capabilities(cap);
+ port->partner_source_caps = NULL;
+ }
cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
if (IS_ERR(cap))
@@ -6172,6 +6174,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
port->tcpc->set_bist_data(port->tcpc, false);
switch (port->state) {
+ case TOGGLING:
case ERROR_RECOVERY:
case PORT_RESET:
case PORT_RESET_WAIT_OFF:
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bd6ae92aa..780150183 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -619,7 +619,8 @@ static int ucsi_read_pdos(struct ucsi_connector *con,
u64 command;
int ret;
- if (ucsi->quirks & UCSI_NO_PARTNER_PDOS)
+ if (is_partner &&
+ ucsi->quirks & UCSI_NO_PARTNER_PDOS)
return 0;
command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
@@ -823,12 +824,6 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
return PTR_ERR(cap);
con->partner_source_caps = cap;
-
- ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
- if (ret) {
- usb_power_delivery_unregister_capabilities(con->partner_source_caps);
- return ret;
- }
}
ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
@@ -843,15 +838,9 @@ static int ucsi_register_partner_pdos(struct ucsi_connector *con)
return PTR_ERR(cap);
con->partner_sink_caps = cap;
-
- ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
- if (ret) {
- usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
- return ret;
- }
}
- return 0;
+ return typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
}
static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
@@ -1572,7 +1561,6 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
}
con->port_source_caps = pd_cap;
- typec_port_set_usb_power_delivery(con->port, con->pd);
}
memset(&pd_caps, 0, sizeof(pd_caps));
@@ -1589,9 +1577,10 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
}
con->port_sink_caps = pd_cap;
- typec_port_set_usb_power_delivery(con->port, con->pd);
}
+ typec_port_set_usb_power_delivery(con->port, con->pd);
+
/* Alternate modes */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
if (ret) {
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index ce08eb33e..1d0e2d87e 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -176,7 +176,8 @@ static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset,
left = wait_for_completion_timeout(&ucsi->sync_ack, 5 * HZ);
if (!left) {
dev_err(ucsi->dev, "timeout waiting for UCSI sync write response\n");
- ret = -ETIMEDOUT;
+ /* return 0 here and let core UCSI code handle the CCI_BUSY */
+ ret = 0;
} else if (ucsi->sync_val) {
dev_err(ucsi->dev, "sync write returned: %d\n", ucsi->sync_val);
}
@@ -243,10 +244,7 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
ucsi_connector_change(ucsi->ucsi, con_num);
}
- if (ucsi->sync_pending && cci & UCSI_CCI_BUSY) {
- ucsi->sync_val = -EBUSY;
- complete(&ucsi->sync_ack);
- } else if (ucsi->sync_pending &&
+ if (ucsi->sync_pending &&
(cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))) {
complete(&ucsi->sync_ack);
}
@@ -311,12 +309,14 @@ static void pmic_glink_ucsi_destroy(void *data)
mutex_unlock(&ucsi->lock);
}
+static unsigned long quirk_sc8180x = UCSI_NO_PARTNER_PDOS;
+
static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
- { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
- { .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
- { .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
- { .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
- { .compatible = "qcom,sm8550-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
+ { .compatible = "qcom,qcm6490-pmic-glink", .data = &quirk_sc8180x, },
+ { .compatible = "qcom,sc8180x-pmic-glink", .data = &quirk_sc8180x, },
+ { .compatible = "qcom,sc8280xp-pmic-glink", .data = &quirk_sc8180x, },
+ { .compatible = "qcom,sm8350-pmic-glink", .data = &quirk_sc8180x, },
+ { .compatible = "qcom,sm8550-pmic-glink", .data = &quirk_sc8180x, },
{}
};
@@ -354,7 +354,7 @@ static int pmic_glink_ucsi_probe(struct auxiliary_device *adev,
match = of_match_device(pmic_glink_ucsi_of_quirks, dev->parent);
if (match)
- ucsi->ucsi->quirks = (unsigned long)match->data;
+ ucsi->ucsi->quirks = *(unsigned long *)match->data;
ucsi_set_drvdata(ucsi->ucsi, ucsi);
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index d94d61b92..d8c95cc16 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -778,25 +778,26 @@ static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
}
struct vfio_pci_fill_info {
- struct vfio_pci_dependent_device __user *devices;
- struct vfio_pci_dependent_device __user *devices_end;
struct vfio_device *vdev;
+ struct vfio_pci_dependent_device *devices;
+ int nr_devices;
u32 count;
u32 flags;
};
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
- struct vfio_pci_dependent_device info = {
- .segment = pci_domain_nr(pdev->bus),
- .bus = pdev->bus->number,
- .devfn = pdev->devfn,
- };
+ struct vfio_pci_dependent_device *info;
struct vfio_pci_fill_info *fill = data;
- fill->count++;
- if (fill->devices >= fill->devices_end)
- return 0;
+ /* The topology changed since we counted devices */
+ if (fill->count >= fill->nr_devices)
+ return -EAGAIN;
+
+ info = &fill->devices[fill->count++];
+ info->segment = pci_domain_nr(pdev->bus);
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
if (fill->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) {
struct iommufd_ctx *iommufd = vfio_iommufd_device_ictx(fill->vdev);
@@ -809,19 +810,19 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
*/
vdev = vfio_find_device_in_devset(dev_set, &pdev->dev);
if (!vdev) {
- info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ info->devid = VFIO_PCI_DEVID_NOT_OWNED;
} else {
int id = vfio_iommufd_get_dev_id(vdev, iommufd);
if (id > 0)
- info.devid = id;
+ info->devid = id;
else if (id == -ENOENT)
- info.devid = VFIO_PCI_DEVID_OWNED;
+ info->devid = VFIO_PCI_DEVID_OWNED;
else
- info.devid = VFIO_PCI_DEVID_NOT_OWNED;
+ info->devid = VFIO_PCI_DEVID_NOT_OWNED;
}
/* If devid is VFIO_PCI_DEVID_NOT_OWNED, clear owned flag. */
- if (info.devid == VFIO_PCI_DEVID_NOT_OWNED)
+ if (info->devid == VFIO_PCI_DEVID_NOT_OWNED)
fill->flags &= ~VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED;
} else {
struct iommu_group *iommu_group;
@@ -830,13 +831,10 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
if (!iommu_group)
return -EPERM; /* Cannot reset non-isolated devices */
- info.group_id = iommu_group_id(iommu_group);
+ info->group_id = iommu_group_id(iommu_group);
iommu_group_put(iommu_group);
}
- if (copy_to_user(fill->devices, &info, sizeof(info)))
- return -EFAULT;
- fill->devices++;
return 0;
}
@@ -1258,10 +1256,11 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
{
unsigned long minsz =
offsetofend(struct vfio_pci_hot_reset_info, count);
+ struct vfio_pci_dependent_device *devices = NULL;
struct vfio_pci_hot_reset_info hdr;
struct vfio_pci_fill_info fill = {};
bool slot = false;
- int ret = 0;
+ int ret, count;
if (copy_from_user(&hdr, arg, minsz))
return -EFAULT;
@@ -1277,9 +1276,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
else if (pci_probe_reset_bus(vdev->pdev->bus))
return -ENODEV;
- fill.devices = arg->devices;
- fill.devices_end = arg->devices +
- (hdr.argsz - sizeof(hdr)) / sizeof(arg->devices[0]);
+ ret = vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
+ &count, slot);
+ if (ret)
+ return ret;
+
+ if (count > (hdr.argsz - sizeof(hdr)) / sizeof(*devices)) {
+ hdr.count = count;
+ ret = -ENOSPC;
+ goto header;
+ }
+
+ devices = kcalloc(count, sizeof(*devices), GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
+ fill.devices = devices;
+ fill.nr_devices = count;
fill.vdev = &vdev->vdev;
if (vfio_device_cdev_opened(&vdev->vdev))
@@ -1291,16 +1304,23 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
&fill, slot);
mutex_unlock(&vdev->vdev.dev_set->lock);
if (ret)
- return ret;
+ goto out;
+
+ if (copy_to_user(arg->devices, devices,
+ sizeof(*devices) * fill.count)) {
+ ret = -EFAULT;
+ goto out;
+ }
hdr.count = fill.count;
hdr.flags = fill.flags;
- if (copy_to_user(arg, &hdr, minsz))
- return -EFAULT;
- if (fill.count > fill.devices - arg->devices)
- return -ENOSPC;
- return 0;
+header:
+ if (copy_to_user(arg, &hdr, minsz))
+ ret = -EFAULT;
+out:
+ kfree(devices);
+ return ret;
}
static int
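
The VFIO_DEVICE_GET_PCI_HOT_RESET_INFO rework above replaces the per-device copy_to_user() with a count / allocate / fill / single-copy sequence, returning -ENOSPC together with the required count when the caller's buffer is too small. A minimal userspace-style sketch of that shape follows; the struct, the helper names and the negative-errno convention are illustrative stand-ins, not the kernel's definitions.

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct dep_dev { unsigned int segment, bus, devfn; };	/* stand-in type */

static int count_devs(void) { return 4; }		/* stand-in for the counting walk */

static void fill_dev(struct dep_dev *d, int i)		/* stand-in for vfio_pci_fill_devs() */
{
	d->segment = 0;
	d->bus = 0;
	d->devfn = (unsigned int)i;
}

int get_hot_reset_info(struct dep_dev *ubuf, size_t ubuf_len, unsigned int *ucount)
{
	int count = count_devs();
	struct dep_dev *devs;
	int i;

	/* Buffer too small: report the required element count via -ENOSPC. */
	if ((size_t)count > ubuf_len / sizeof(*devs)) {
		*ucount = (unsigned int)count;
		return -ENOSPC;
	}

	devs = calloc((size_t)count, sizeof(*devs));
	if (!devs)
		return -ENOMEM;

	for (i = 0; i < count; i++)
		fill_dev(&devs[i], i);

	/* One bulk copy out instead of one copy per filled device. */
	memcpy(ubuf, devs, (size_t)count * sizeof(*devs));
	*ucount = (unsigned int)count;
	free(devs);
	return 0;
}
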
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index fb5392b74..e80c5d75b 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -277,8 +277,10 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev,
return -ENOMEM;
ctx = vfio_irq_ctx_alloc(vdev, 0);
- if (!ctx)
+ if (!ctx) {
+ kfree(name);
return -ENOMEM;
+ }
ctx->name = name;
ctx->trigger = trigger;
diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c
index c80a1481e..4e98e6041 100644
--- a/drivers/video/backlight/mp3309c.c
+++ b/drivers/video/backlight/mp3309c.c
@@ -205,8 +205,9 @@ static int mp3309c_parse_fwnode(struct mp3309c_chip *chip,
struct mp3309c_platform_data *pdata)
{
int ret, i;
- unsigned int num_levels, tmp_value;
+ unsigned int tmp_value;
struct device *dev = chip->dev;
+ int num_levels;
if (!dev_fwnode(dev))
return dev_err_probe(dev, -ENODEV, "failed to get firmware node\n");
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 197b6d526..3f46663aa 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1648,8 +1648,8 @@ config FB_COBALT
select FB_IOMEM_HELPERS
config FB_SH7760
- bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
- depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ tristate "SH7760/SH7763/SH7720/SH7721 LCDC support"
+ depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_IOMEM_HELPERS
help
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index db09fe87f..0ab8848ba 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -144,6 +144,12 @@ config FB_DMAMEM_HELPERS
select FB_SYS_IMAGEBLIT
select FB_SYSMEM_FOPS
+config FB_DMAMEM_HELPERS_DEFERRED
+ bool
+ depends on FB_CORE
+ select FB_DEFERRED_IO
+ select FB_DMAMEM_HELPERS
+
config FB_IOMEM_FOPS
tristate
depends on FB_CORE
diff --git a/drivers/video/fbdev/savage/savagefb_driver.c b/drivers/video/fbdev/savage/savagefb_driver.c
index ebc9aeffd..ac41f8f37 100644
--- a/drivers/video/fbdev/savage/savagefb_driver.c
+++ b/drivers/video/fbdev/savage/savagefb_driver.c
@@ -2276,7 +2276,10 @@ static int savagefb_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (info->var.xres_virtual > 0x1000)
info->var.xres_virtual = 0x1000;
#endif
- savagefb_check_var(&info->var, info);
+ err = savagefb_check_var(&info->var, info);
+ if (err)
+ goto failed;
+
savagefb_set_fix(info);
/*
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index eb2297b37..d35d2cf99 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1575,7 +1575,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
*/
info->fix = sh_mobile_lcdc_overlay_fix;
snprintf(info->fix.id, sizeof(info->fix.id),
- "SH Mobile LCDC Overlay %u", ovl->index);
+ "SHMobile ovl %u", ovl->index);
info->fix.smem_start = ovl->dma_handle;
info->fix.smem_len = ovl->fb_size;
info->fix.line_length = ovl->pitch;
diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
index a8fb41f1a..093290720 100644
--- a/drivers/video/fbdev/sis/init301.c
+++ b/drivers/video/fbdev/sis/init301.c
@@ -172,7 +172,7 @@ static const unsigned char SiS_HiTVGroup3_2[] = {
};
/* 301C / 302ELV extended Part2 TV registers (4 tap scaler) */
-
+#ifdef CONFIG_FB_SIS_315
static const unsigned char SiS_Part2CLVX_1[] = {
0x00,0x00,
0x00,0x20,0x00,0x00,0x7F,0x20,0x02,0x7F,0x7D,0x20,0x04,0x7F,0x7D,0x1F,0x06,0x7E,
@@ -245,7 +245,6 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */
0xFF,0xFF,
};
-#ifdef CONFIG_FB_SIS_315
/* 661 et al LCD data structure (2.03.00) */
static const unsigned char SiS_LCDStruct661[] = {
/* 1024x768 */
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index fa5d9ca6b..9c75de065 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
{
struct vm_memory_region_batch *regions_info;
- int nr_pages, i = 0, order, nr_regions = 0;
+ int nr_pages, i, order, nr_regions = 0;
struct vm_memory_mapping *region_mapping;
struct vm_memory_region_op *vm_region;
struct page **pages = NULL, *page;
void *remap_vaddr;
int ret, pinned;
u64 user_vm_pa;
- unsigned long pfn;
struct vm_area_struct *vma;
if (!vm || !memmap)
return -EINVAL;
+ /* Get the page number of the map region */
+ nr_pages = memmap->len >> PAGE_SHIFT;
+ if (!nr_pages)
+ return -EINVAL;
+
mmap_read_lock(current->mm);
vma = vma_lookup(current->mm, memmap->vma_base);
if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+ unsigned long start_pfn, cur_pfn;
+ spinlock_t *ptl;
+ bool writable;
+ pte_t *ptep;
+
if ((memmap->vma_base + memmap->len) > vma->vm_end) {
mmap_read_unlock(current->mm);
return -EINVAL;
}
- ret = follow_pfn(vma, memmap->vma_base, &pfn);
+ for (i = 0; i < nr_pages; i++) {
+ ret = follow_pte(vma->vm_mm,
+ memmap->vma_base + i * PAGE_SIZE,
+ &ptep, &ptl);
+ if (ret)
+ break;
+
+ cur_pfn = pte_pfn(ptep_get(ptep));
+ if (i == 0)
+ start_pfn = cur_pfn;
+ writable = !!pte_write(ptep_get(ptep));
+ pte_unmap_unlock(ptep, ptl);
+
+ /* Disallow write access if the PTE is not writable. */
+ if (!writable &&
+ (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Disallow refcounted pages. */
+ if (pfn_valid(cur_pfn) &&
+ !PageReserved(pfn_to_page(cur_pfn))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ /* Disallow non-contiguous ranges. */
+ if (cur_pfn != start_pfn + i) {
+ ret = -EINVAL;
+ break;
+ }
+ }
mmap_read_unlock(current->mm);
- if (ret < 0) {
+
+ if (ret) {
dev_dbg(acrn_dev.this_device,
"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
return ret;
}
return acrn_mm_region_add(vm, memmap->user_vm_pa,
- PFN_PHYS(pfn), memmap->len,
+ PFN_PHYS(start_pfn), memmap->len,
ACRN_MEM_TYPE_WB, memmap->attr);
}
mmap_read_unlock(current->mm);
- /* Get the page number of the map region */
- nr_pages = memmap->len >> PAGE_SHIFT;
pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
if (!pages)
return -ENOMEM;
@@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
mutex_unlock(&vm->regions_mapping_lock);
/* Calculate count of vm_memory_region_op */
- while (i < nr_pages) {
+ for (i = 0; i < nr_pages; i += 1 << order) {
page = pages[i];
VM_BUG_ON_PAGE(PageTail(page), page);
order = compound_order(page);
nr_regions++;
- i += 1 << order;
}
/* Prepare the vm_memory_region_batch */
@@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
regions_info->vmid = vm->vmid;
regions_info->regions_gpa = virt_to_phys(vm_region);
user_vm_pa = memmap->user_vm_pa;
- i = 0;
- while (i < nr_pages) {
+ for (i = 0; i < nr_pages; i += 1 << order) {
u32 region_size;
page = pages[i];
@@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
vm_region++;
user_vm_pa += region_size;
- i += 1 << order;
}
/* Inform the ACRN Hypervisor to set up EPT mappings */
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 1f5b3dd31..89bc8da80 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -450,7 +450,7 @@ static void start_update_balloon_size(struct virtio_balloon *vb)
vb->adjustment_signal_pending = true;
if (!vb->adjustment_in_progress) {
vb->adjustment_in_progress = true;
- pm_stay_awake(vb->vdev->dev.parent);
+ pm_stay_awake(&vb->vdev->dev);
}
spin_unlock_irqrestore(&vb->adjustment_lock, flags);
@@ -462,7 +462,7 @@ static void end_update_balloon_size(struct virtio_balloon *vb)
spin_lock_irq(&vb->adjustment_lock);
if (!vb->adjustment_signal_pending && vb->adjustment_in_progress) {
vb->adjustment_in_progress = false;
- pm_relax(vb->vdev->dev.parent);
+ pm_relax(&vb->vdev->dev);
}
spin_unlock_irq(&vb->adjustment_lock);
}
@@ -1029,6 +1029,15 @@ static int virtballoon_probe(struct virtio_device *vdev)
spin_lock_init(&vb->adjustment_lock);
+ /*
+ * The virtio balloon itself can't wake up the device, but it is
+ * responsible for processing wakeup events passed up from the transport
+ * layer. Wakeup sources don't support nesting/chaining calls, so we use
+ * our own wakeup source to ensure wakeup events are properly handled
+ * without trampling on the transport layer's wakeup source.
+ */
+ device_set_wakeup_capable(&vb->vdev->dev, true);
+
virtio_device_ready(vdev);
if (towards_target(vb))
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index b655fccaf..584af7816 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -348,8 +348,10 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
vring_interrupt, 0,
vp_dev->msix_names[msix_vec],
vqs[i]);
- if (err)
+ if (err) {
+ vp_del_vq(vqs[i]);
goto error_find;
+ }
}
return 0;
diff --git a/drivers/watchdog/bd9576_wdt.c b/drivers/watchdog/bd9576_wdt.c
index 4a20e07fb..f00ea1b4e 100644
--- a/drivers/watchdog/bd9576_wdt.c
+++ b/drivers/watchdog/bd9576_wdt.c
@@ -29,7 +29,6 @@ struct bd9576_wdt_priv {
struct gpio_desc *gpiod_en;
struct device *dev;
struct regmap *regmap;
- bool always_running;
struct watchdog_device wdd;
};
@@ -62,10 +61,7 @@ static int bd9576_wdt_stop(struct watchdog_device *wdd)
{
struct bd9576_wdt_priv *priv = watchdog_get_drvdata(wdd);
- if (!priv->always_running)
- bd9576_wdt_disable(priv);
- else
- set_bit(WDOG_HW_RUNNING, &wdd->status);
+ bd9576_wdt_disable(priv);
return 0;
}
@@ -264,9 +260,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->always_running = device_property_read_bool(dev->parent,
- "always-running");
-
watchdog_set_drvdata(&priv->wdd, priv);
priv->wdd.info = &bd957x_wdt_ident;
@@ -281,9 +274,6 @@ static int bd9576_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_reboot(&priv->wdd);
- if (priv->always_running)
- bd9576_wdt_start(&priv->wdd);
-
return devm_watchdog_register_device(dev, &priv->wdd);
}
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index 688b112e7..9f279c0e1 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -252,7 +252,7 @@ static void cpu5wdt_exit(void)
if (cpu5wdt_device.queue) {
cpu5wdt_device.queue = 0;
wait_for_completion(&cpu5wdt_device.stop);
- del_timer(&cpu5wdt_device.timer);
+ timer_shutdown_sync(&cpu5wdt_device.timer);
}
misc_deregister(&cpu5wdt_misc);
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 9215793a1..4895a6901 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -59,6 +59,8 @@
#define PON_REASON_EOF_NUM 0xCCCCBBBB
#define RESERVED_MEM_MIN_SIZE 12
+#define MAX_HW_ERROR 250
+
static int heartbeat = DEFAULT_HEARTBEAT;
/*
@@ -97,7 +99,7 @@ static int rti_wdt_start(struct watchdog_device *wdd)
* to be 50% or less than that; we obviously want to configure the open
* window as large as possible so we select the 50% option.
*/
- wdd->min_hw_heartbeat_ms = 500 * wdd->timeout;
+ wdd->min_hw_heartbeat_ms = 520 * wdd->timeout + MAX_HW_ERROR;
/* Generate NMI when wdt expires */
writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
@@ -131,31 +133,33 @@ static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize)
* be petted during the open window; not too early or not too late.
* The HW configuration options only allow for the open window size
* to be 50% or less than that.
+ * To avoid any glitches, we add a safety margin of 2% plus the maximum
+ * hardware error.
*/
switch (wsize) {
case RTIWWDSIZE_50P:
- /* 50% open window => 50% min heartbeat */
- wdd->min_hw_heartbeat_ms = 500 * heartbeat;
+ /* 50% open window => 52% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 520 * heartbeat + MAX_HW_ERROR;
break;
case RTIWWDSIZE_25P:
- /* 25% open window => 75% min heartbeat */
- wdd->min_hw_heartbeat_ms = 750 * heartbeat;
+ /* 25% open window => 77% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 770 * heartbeat + MAX_HW_ERROR;
break;
case RTIWWDSIZE_12P5:
- /* 12.5% open window => 87.5% min heartbeat */
- wdd->min_hw_heartbeat_ms = 875 * heartbeat;
+ /* 12.5% open window => 89.5% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 895 * heartbeat + MAX_HW_ERROR;
break;
case RTIWWDSIZE_6P25:
- /* 6.5% open window => 93.5% min heartbeat */
- wdd->min_hw_heartbeat_ms = 935 * heartbeat;
+ /* 6.5% open window => 95.5% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 955 * heartbeat + MAX_HW_ERROR;
break;
case RTIWWDSIZE_3P125:
- /* 3.125% open window => 96.9% min heartbeat */
- wdd->min_hw_heartbeat_ms = 969 * heartbeat;
+ /* 3.125% open window => 98.9% min heartbeat */
+ wdd->min_hw_heartbeat_ms = 989 * heartbeat + MAX_HW_ERROR;
break;
default:
@@ -233,14 +237,6 @@ static int rti_wdt_probe(struct platform_device *pdev)
return -EINVAL;
}
- /*
- * If watchdog is running at 32k clock, it is not accurate.
- * Adjust frequency down in this case so that we don't pet
- * the watchdog too often.
- */
- if (wdt->freq < 32768)
- wdt->freq = wdt->freq * 9 / 10;
-
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
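
Worked example for the adjusted window maths above, using an illustrative 60-second timeout: with the 50% open window the minimum heartbeat becomes 520 * 60 + 250 = 31450 ms rather than the previous 500 * 60 = 30000 ms, so the watchdog core now waits an extra 2% of the timeout plus up to 250 ms of hardware error before petting the watchdog again.
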
diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c
index 5d2df008b..34a917221 100644
--- a/drivers/watchdog/sa1100_wdt.c
+++ b/drivers/watchdog/sa1100_wdt.c
@@ -191,9 +191,8 @@ static int sa1100dog_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
reg_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- ret = PTR_ERR_OR_ZERO(reg_base);
- if (ret)
- return ret;
+ if (!reg_base)
+ return -ENOMEM;
clk = clk_get(NULL, "OSTIMER0");
if (IS_ERR(clk)) {
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 3205e5d72..1a9ded0cd 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -65,13 +65,17 @@
#include "xenbus.h"
-static int xs_init_irq;
+static int xs_init_irq = -1;
int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);
struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);
+#define XS_INTERFACE_READY \
+ ((xen_store_interface != NULL) && \
+ (xen_store_interface->connection == XENSTORE_CONNECTED))
+
enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);
@@ -751,19 +755,19 @@ static void xenbus_probe(void)
{
xenstored_ready = 1;
- if (!xen_store_interface) {
+ if (!xen_store_interface)
xen_store_interface = memremap(xen_store_gfn << XEN_PAGE_SHIFT,
XEN_PAGE_SIZE, MEMREMAP_WB);
- /*
- * Now it is safe to free the IRQ used for xenstore late
- * initialization. No need to unbind: it is about to be
- * bound again from xb_init_comms. Note that calling
- * unbind_from_irqhandler now would result in xen_evtchn_close()
- * being called and the event channel not being enabled again
- * afterwards, resulting in missed event notifications.
- */
+ /*
+ * Now it is safe to free the IRQ used for xenstore late
+ * initialization. No need to unbind: it is about to be
+ * bound again from xb_init_comms. Note that calling
+ * unbind_from_irqhandler now would result in xen_evtchn_close()
+ * being called and the event channel not being enabled again
+ * afterwards, resulting in missed event notifications.
+ */
+ if (xs_init_irq >= 0)
free_irq(xs_init_irq, &xb_waitq);
- }
/*
* In the HVM case, xenbus_init() deferred its call to
@@ -822,7 +826,7 @@ static int __init xenbus_probe_initcall(void)
if (xen_store_domain_type == XS_PV ||
(xen_store_domain_type == XS_HVM &&
!xs_hvm_defer_init_for_callback() &&
- xen_store_interface != NULL))
+ XS_INTERFACE_READY))
xenbus_probe();
/*
@@ -831,7 +835,7 @@ static int __init xenbus_probe_initcall(void)
* started, then probe. It will be triggered when communication
* starts happening, by waiting on xb_waitq.
*/
- if (xen_store_domain_type == XS_LOCAL || xen_store_interface == NULL) {
+ if (xen_store_domain_type == XS_LOCAL || !XS_INTERFACE_READY) {
struct task_struct *probe_task;
probe_task = kthread_run(xenbus_probe_thread, NULL,
@@ -1014,6 +1018,12 @@ static int __init xenbus_init(void)
xen_store_interface =
memremap(xen_store_gfn << XEN_PAGE_SHIFT,
XEN_PAGE_SIZE, MEMREMAP_WB);
+ if (!xen_store_interface) {
+ pr_err("%s: cannot map HVM_PARAM_STORE_PFN=%llx\n",
+ __func__, v);
+ err = -EINVAL;
+ goto out_error;
+ }
if (xen_store_interface->connection != XENSTORE_CONNECTED)
wait = true;
}
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index f16f73581..01338d4c2 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -48,12 +48,17 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry)
static void v9fs_dentry_release(struct dentry *dentry)
{
struct hlist_node *p, *n;
+ struct hlist_head head;
p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n",
dentry, dentry);
- hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata)
+
+ spin_lock(&dentry->d_lock);
+ hlist_move_list((struct hlist_head *)&dentry->d_fsdata, &head);
+ spin_unlock(&dentry->d_lock);
+
+ hlist_for_each_safe(p, n, &head)
p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
- dentry->d_fsdata = NULL;
}
static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
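
The v9fs_dentry_release() change above takes d_lock, moves the whole fid list off dentry->d_fsdata onto a local list head, and only then drops the fids with the lock released. A minimal userspace sketch of the same detach-under-lock pattern, with a pthread mutex and a hand-rolled singly linked list standing in for d_lock and the hlist helpers:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };		/* stand-in for the fid list entries */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for d_lock */
static struct node *head;			/* stand-in for dentry->d_fsdata */

void release_all(void)
{
	struct node *local, *n;

	/* Detach the whole list while holding the lock... */
	pthread_mutex_lock(&lock);
	local = head;
	head = NULL;
	pthread_mutex_unlock(&lock);

	/* ...then walk and drop the entries without the lock held. */
	while ((n = local) != NULL) {
		local = n->next;
		free(n);
	}
}
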
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 97f50e9fd..297487ee8 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -140,6 +140,11 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt)
put_page(page);
if (ret < 0)
return ret;
+
+ /* Don't cross a backup volume mountpoint from a backup volume */
+ if (src_as->volume && src_as->volume->type == AFSVL_BACKVOL &&
+ ctx->type == AFSVL_BACKVOL)
+ return -ENODEV;
}
return 0;
diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c
index 477f350a8..e3a57196b 100644
--- a/fs/btrfs/bio.c
+++ b/fs/btrfs/bio.c
@@ -741,7 +741,9 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
ret = btrfs_bio_csum(bbio);
if (ret)
goto fail_put_bio;
- } else if (use_append) {
+ } else if (use_append ||
+ (btrfs_is_zoned(fs_info) && inode &&
+ inode->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_alloc_dummy_sum(bbio);
if (ret)
goto fail_put_bio;
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1e09aeea6..1a66be33b 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1785,6 +1785,7 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
struct btrfs_block_group *bg;
struct btrfs_space_info *space_info;
+ LIST_HEAD(retry_list);
if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
return;
@@ -1921,8 +1922,11 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
}
next:
- if (ret)
- btrfs_mark_bg_to_reclaim(bg);
+ if (ret) {
+ /* Refcount held by the reclaim_bgs list after splice. */
+ btrfs_get_block_group(bg);
+ list_add_tail(&bg->bg_list, &retry_list);
+ }
btrfs_put_block_group(bg);
mutex_unlock(&fs_info->reclaim_bgs_lock);
@@ -1942,6 +1946,9 @@ next:
spin_unlock(&fs_info->unused_bgs_lock);
mutex_unlock(&fs_info->reclaim_bgs_lock);
end:
+ spin_lock(&fs_info->unused_bgs_lock);
+ list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
+ spin_unlock(&fs_info->unused_bgs_lock);
btrfs_exclop_finish(fs_info);
sb_end_write(fs_info->sb);
}
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3df5477d4..3a47eec87 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -4544,18 +4544,10 @@ static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_fs_info *fs_info)
{
struct rb_node *node;
- struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
struct btrfs_delayed_ref_node *ref;
- delayed_refs = &trans->delayed_refs;
-
spin_lock(&delayed_refs->lock);
- if (atomic_read(&delayed_refs->num_entries) == 0) {
- spin_unlock(&delayed_refs->lock);
- btrfs_debug(fs_info, "delayed_refs has NO entry");
- return;
- }
-
while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
struct btrfs_delayed_ref_head *head;
struct rb_node *n;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2776112db..41173701f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2773,13 +2773,19 @@ static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *p
goto out;
}
- /* See the comment at fiemap_search_slot() about why we clone. */
- copy_extent_buffer_full(clone, path->nodes[0]);
/*
* Important to preserve the start field, for the optimizations when
* checking if extents are shared (see extent_fiemap()).
+ *
+ * We must set ->start before calling copy_extent_buffer_full(). If we
+ * are on sub-pagesize blocksize, we use ->start to determine the offset
+ * into the folio where our eb exists, and if we update ->start after
+ * the fact then any subsequent reads of the eb may read from a
+ * different offset in the folio than where we originally copied into.
*/
clone->start = path->nodes[0]->start;
+ /* See the comment at fiemap_search_slot() about why we clone. */
+ copy_extent_buffer_full(clone, path->nodes[0]);
slot = path->slots[0];
btrfs_release_path(path);
@@ -3656,6 +3662,8 @@ static struct extent_buffer *grab_extent_buffer(
struct folio *folio = page_folio(page);
struct extent_buffer *exists;
+ lockdep_assert_held(&page->mapping->i_private_lock);
+
/*
* For subpage case, we completely rely on radix tree to ensure we
* don't try to insert two ebs for the same bytenr. So here we always
@@ -3723,13 +3731,14 @@ static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
* The caller needs to free the existing folios and retry using the same order.
*/
static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i,
+ struct btrfs_subpage *prealloc,
struct extent_buffer **found_eb_ret)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
const unsigned long index = eb->start >> PAGE_SHIFT;
- struct folio *existing_folio;
+ struct folio *existing_folio = NULL;
int ret;
ASSERT(found_eb_ret);
@@ -3741,12 +3750,14 @@ retry:
ret = filemap_add_folio(mapping, eb->folios[i], index + i,
GFP_NOFS | __GFP_NOFAIL);
if (!ret)
- return 0;
+ goto finish;
existing_folio = filemap_lock_folio(mapping, index + i);
/* The page cache only exists for a very short time, just retry. */
- if (IS_ERR(existing_folio))
+ if (IS_ERR(existing_folio)) {
+ existing_folio = NULL;
goto retry;
+ }
/* For now, we should only have single-page folios for btree inode. */
ASSERT(folio_nr_pages(existing_folio) == 1);
@@ -3757,14 +3768,13 @@ retry:
return -EAGAIN;
}
- if (fs_info->nodesize < PAGE_SIZE) {
- /*
- * We're going to reuse the existing page, can drop our page
- * and subpage structure now.
- */
+finish:
+ spin_lock(&mapping->i_private_lock);
+ if (existing_folio && fs_info->nodesize < PAGE_SIZE) {
+ /* We're going to reuse the existing page, can drop our folio now. */
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
- } else {
+ } else if (existing_folio) {
struct extent_buffer *existing_eb;
existing_eb = grab_extent_buffer(fs_info,
@@ -3772,6 +3782,7 @@ retry:
if (existing_eb) {
/* The extent buffer still exists, we can use it directly. */
*found_eb_ret = existing_eb;
+ spin_unlock(&mapping->i_private_lock);
folio_unlock(existing_folio);
folio_put(existing_folio);
return 1;
@@ -3780,6 +3791,22 @@ retry:
__free_page(folio_page(eb->folios[i], 0));
eb->folios[i] = existing_folio;
}
+ eb->folio_size = folio_size(eb->folios[i]);
+ eb->folio_shift = folio_shift(eb->folios[i]);
+ /* Should not fail, as we have preallocated the memory. */
+ ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);
+ ASSERT(!ret);
+ /*
+ * To inform we have an extra eb under allocation, so that
+ * detach_extent_buffer_page() won't release the folio private when the
+ * eb hasn't been inserted into radix tree yet.
+ *
+ * The ref will be decreased when the eb releases the page, in
+ * detach_extent_buffer_page(). Thus needs no special handling in the
+ * error path.
+ */
+ btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
@@ -3791,7 +3818,6 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
int attached = 0;
struct extent_buffer *eb;
struct extent_buffer *existing_eb = NULL;
- struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct btrfs_subpage *prealloc = NULL;
u64 lockdep_owner = owner_root;
bool page_contig = true;
@@ -3857,7 +3883,7 @@ reallocate:
for (int i = 0; i < num_folios; i++) {
struct folio *folio;
- ret = attach_eb_folio_to_filemap(eb, i, &existing_eb);
+ ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb);
if (ret > 0) {
ASSERT(existing_eb);
goto out;
@@ -3894,24 +3920,6 @@ reallocate:
* and free the allocated page.
*/
folio = eb->folios[i];
- eb->folio_size = folio_size(folio);
- eb->folio_shift = folio_shift(folio);
- spin_lock(&mapping->i_private_lock);
- /* Should not fail, as we have preallocated the memory */
- ret = attach_extent_buffer_folio(eb, folio, prealloc);
- ASSERT(!ret);
- /*
- * To inform we have extra eb under allocation, so that
- * detach_extent_buffer_page() won't release the folio private
- * when the eb hasn't yet been inserted into radix tree.
- *
- * The ref will be decreased when the eb released the page, in
- * detach_extent_buffer_page().
- * Thus needs no special handling in error path.
- */
- btrfs_folio_inc_eb_refs(fs_info, folio);
- spin_unlock(&mapping->i_private_lock);
-
WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
/*
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 40e5f7f2f..1167899a1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -468,6 +468,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
}
if (!qgroup) {
struct btrfs_qgroup *prealloc;
+ struct btrfs_root *tree_root = fs_info->tree_root;
prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
if (!prealloc) {
@@ -475,6 +476,25 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
goto out;
}
qgroup = add_qgroup_rb(fs_info, prealloc, found_key.offset);
+ /*
+ * If a qgroup exists for a subvolume ID, it is possible
+ * that subvolume has been deleted, in which case
+ * re-using that ID would lead to incorrect accounting.
+ *
+ * Ensure that we skip any such subvol ids.
+ *
+ * We don't need to lock because this is only called
+ * during mount before we start doing things like creating
+ * subvolumes.
+ */
+ if (is_fstree(qgroup->qgroupid) &&
+ qgroup->qgroupid > tree_root->free_objectid)
+ /*
+ * Don't need to check against BTRFS_LAST_FREE_OBJECTID,
+ * as it will get checked on the next call to
+ * btrfs_get_free_objectid.
+ */
+ tree_root->free_objectid = qgroup->qgroupid + 1;
}
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
if (ret < 0)
@@ -3129,7 +3149,7 @@ static int qgroup_auto_inherit(struct btrfs_fs_info *fs_info,
qgids = res->qgroups;
list_for_each_entry(qg_list, &inode_qg->groups, next_group)
- qgids[i] = qg_list->group->qgroupid;
+ qgids[i++] = qg_list->group->qgroupid;
*inherit = res;
return 0;
@@ -3826,14 +3846,14 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
/* we're resuming qgroup rescan at mount time */
if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup rescan is not queued");
ret = -EINVAL;
} else if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_ON)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- ret = -EINVAL;
+ ret = -ENOTCONN;
}
if (ret)
@@ -3844,14 +3864,12 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
if (init_flags) {
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
- btrfs_warn(fs_info,
- "qgroup rescan is already in progress");
ret = -EINPROGRESS;
} else if (!(fs_info->qgroup_flags &
BTRFS_QGROUP_STATUS_FLAG_ON)) {
- btrfs_warn(fs_info,
+ btrfs_debug(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- ret = -EINVAL;
+ ret = -ENOTCONN;
} else if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_DISABLED) {
/* Quota disable is in progress */
ret = -EBUSY;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 7e44ccaf3..fa6964de3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -119,6 +119,7 @@ enum {
Opt_thread_pool,
Opt_treelog,
Opt_user_subvol_rm_allowed,
+ Opt_norecovery,
/* Rescue options */
Opt_rescue,
@@ -245,6 +246,8 @@ static const struct fs_parameter_spec btrfs_fs_parameters[] = {
__fsparam(NULL, "nologreplay", Opt_nologreplay, fs_param_deprecated, NULL),
/* Deprecated, with alias rescue=usebackuproot */
__fsparam(NULL, "usebackuproot", Opt_usebackuproot, fs_param_deprecated, NULL),
+ /* For compatibility only, alias for "rescue=nologreplay". */
+ fsparam_flag("norecovery", Opt_norecovery),
/* Debugging options. */
fsparam_flag_no("enospc_debug", Opt_enospc_debug),
@@ -438,6 +441,11 @@ static int btrfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
"'nologreplay' is deprecated, use 'rescue=nologreplay' instead");
btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
break;
+ case Opt_norecovery:
+ btrfs_info(NULL,
+"'norecovery' is for compatibility only, recommended to use 'rescue=nologreplay'");
+ btrfs_set_opt(ctx->mount_opt, NOLOGREPLAY);
+ break;
case Opt_flushoncommit:
if (result.negated)
btrfs_clear_opt(ctx->mount_opt, FLUSHONCOMMIT);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 472918a5b..d4fc5fedd 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4856,18 +4856,23 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
path->slots[0]++;
continue;
}
- if (!dropped_extents) {
- /*
- * Avoid logging extent items logged in past fsync calls
- * and leading to duplicate keys in the log tree.
- */
+ /*
+ * Avoid overlapping items in the log tree. The first time we
+ * get here, get rid of everything from a past fsync. After
+ * that, if the current extent starts before the end of the last
+ * extent we copied, truncate the last one. This can happen if
+ * an ordered extent completion modifies the subvolume tree
+ * while btrfs_next_leaf() has the tree unlocked.
+ */
+ if (!dropped_extents || key.offset < truncate_offset) {
ret = truncate_inode_items(trans, root->log_root, inode,
- truncate_offset,
+ min(key.offset, truncate_offset),
BTRFS_EXTENT_DATA_KEY);
if (ret)
goto out;
dropped_extents = true;
}
+ truncate_offset = btrfs_file_extent_end(path);
if (ins_nr == 0)
start_slot = slot;
ins_nr++;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 4cba80b34..459514da1 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1290,7 +1290,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
struct btrfs_chunk_map *map)
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- struct btrfs_device *device = map->stripes[zone_idx].dev;
+ struct btrfs_device *device;
int dev_replace_is_ongoing = 0;
unsigned int nofs_flag;
struct blk_zone zone;
@@ -1298,7 +1298,11 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
info->physical = map->stripes[zone_idx].physical;
+ down_read(&dev_replace->rwsem);
+ device = map->stripes[zone_idx].dev;
+
if (!device->bdev) {
+ up_read(&dev_replace->rwsem);
info->alloc_offset = WP_MISSING_DEV;
return 0;
}
@@ -1308,6 +1312,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
__set_bit(zone_idx, active);
if (!btrfs_dev_is_sequential(device, info->physical)) {
+ up_read(&dev_replace->rwsem);
info->alloc_offset = WP_CONVENTIONAL;
return 0;
}
@@ -1315,11 +1320,9 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
/* This zone will be used for allocation, so mark this zone non-empty. */
btrfs_dev_clear_zone_empty(device, info->physical);
- down_read(&dev_replace->rwsem);
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
- up_read(&dev_replace->rwsem);
/*
* The group is mapped to a sequential zone. Get the zone write pointer
@@ -1330,6 +1333,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
ret = btrfs_get_dev_zone(device, info->physical, &zone);
memalloc_nofs_restore(nofs_flag);
if (ret) {
+ up_read(&dev_replace->rwsem);
if (ret != -EIO && ret != -EOPNOTSUPP)
return ret;
info->alloc_offset = WP_MISSING_DEV;
@@ -1341,6 +1345,7 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
device->devid);
+ up_read(&dev_replace->rwsem);
return -EIO;
}
@@ -1368,6 +1373,8 @@ static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx,
break;
}
+ up_read(&dev_replace->rwsem);
+
return 0;
}
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 6465e2574..06cdf1a8a 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -133,7 +133,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
return 0;
}
-static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
+void cachefiles_flush_reqs(struct cachefiles_cache *cache)
{
struct xarray *xa = &cache->reqs;
struct cachefiles_req *req;
@@ -159,6 +159,7 @@ static void cachefiles_flush_reqs(struct cachefiles_cache *cache)
xa_for_each(xa, index, req) {
req->error = -EIO;
complete(&req->done);
+ __xa_erase(xa, index);
}
xa_unlock(xa);
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index d33169f00..6845a90cd 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -55,6 +55,7 @@ struct cachefiles_ondemand_info {
int ondemand_id;
enum cachefiles_object_state state;
struct cachefiles_object *object;
+ spinlock_t lock;
};
/*
@@ -138,6 +139,7 @@ static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
struct cachefiles_req {
struct cachefiles_object *object;
struct completion done;
+ refcount_t ref;
int error;
struct cachefiles_msg msg;
};
@@ -186,6 +188,7 @@ extern int cachefiles_has_space(struct cachefiles_cache *cache,
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
+extern void cachefiles_flush_reqs(struct cachefiles_cache *cache);
extern void cachefiles_get_unbind_pincount(struct cachefiles_cache *cache);
extern void cachefiles_put_unbind_pincount(struct cachefiles_cache *cache);
@@ -424,6 +427,8 @@ do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+ if (cachefiles_in_ondemand_mode(___cache)) \
+ cachefiles_flush_reqs(___cache); \
} while (0)
#define cachefiles_io_error_obj(object, FMT, ...) \
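
The internal.h hunk above adds a reference count to struct cachefiles_req, paired with a cachefiles_req_put() helper in ondemand.c that frees the request only on the final put, so neither the submitter nor the daemon has to be the single owner of the object. A bare-bones userspace analogue using C11 atomics (the kernel's refcount_t additionally saturates and warns on over/underflow):

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int ref;		/* stand-in for the new refcount_t in struct cachefiles_req */
	int error;
};

struct req *req_alloc(void)
{
	struct req *r = calloc(1, sizeof(*r));

	if (r)
		atomic_init(&r->ref, 1);	/* one reference for the submitter */
	return r;
}

void req_get(struct req *r)
{
	atomic_fetch_add_explicit(&r->ref, 1, memory_order_relaxed);
}

void req_put(struct req *r)
{
	/* Free only when the last holder (submitter or daemon) drops its reference. */
	if (atomic_fetch_sub_explicit(&r->ref, 1, memory_order_acq_rel) == 1)
		free(r);
}
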
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 4ba42f1fa..89f118d68 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -4,19 +4,40 @@
#include <linux/uio.h>
#include "internal.h"
+struct ondemand_anon_file {
+ struct file *file;
+ int fd;
+};
+
+static inline void cachefiles_req_put(struct cachefiles_req *req)
+{
+ if (refcount_dec_and_test(&req->ref))
+ kfree(req);
+}
+
static int cachefiles_ondemand_fd_release(struct inode *inode,
struct file *file)
{
struct cachefiles_object *object = file->private_data;
- struct cachefiles_cache *cache = object->volume->cache;
- struct cachefiles_ondemand_info *info = object->ondemand;
- int object_id = info->ondemand_id;
+ struct cachefiles_cache *cache;
+ struct cachefiles_ondemand_info *info;
+ int object_id;
struct cachefiles_req *req;
- XA_STATE(xas, &cache->reqs, 0);
+ XA_STATE(xas, NULL, 0);
+
+ if (!object)
+ return 0;
+
+ info = object->ondemand;
+ cache = object->volume->cache;
+ xas.xa = &cache->reqs;
xa_lock(&cache->reqs);
+ spin_lock(&info->lock);
+ object_id = info->ondemand_id;
info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED;
cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&info->lock);
/* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */
xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
@@ -116,6 +137,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
{
struct cachefiles_req *req;
struct fscache_cookie *cookie;
+ struct cachefiles_ondemand_info *info;
char *pid, *psize;
unsigned long id;
long size;
@@ -166,6 +188,33 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
goto out;
}
+ info = req->object->ondemand;
+ spin_lock(&info->lock);
+ /*
+ * The anonymous fd was closed before copen ? Fail the request.
+ *
+ * t1 | t2
+ * ---------------------------------------------------------
+ * cachefiles_ondemand_copen
+ * req = xa_erase(&cache->reqs, id)
+ * // Anon fd is maliciously closed.
+ * cachefiles_ondemand_fd_release
+ * xa_lock(&cache->reqs)
+ * cachefiles_ondemand_set_object_close(object)
+ * xa_unlock(&cache->reqs)
+ * cachefiles_ondemand_set_object_open
+ * // No one will ever close it again.
+ * cachefiles_ondemand_daemon_read
+ * cachefiles_ondemand_select_req
+ *
+ * Get a read req but its fd is already closed. The daemon can't
+ * issue a cread ioctl with a closed fd, so it hangs.
+ */
+ if (info->ondemand_id == CACHEFILES_ONDEMAND_ID_CLOSED) {
+ spin_unlock(&info->lock);
+ req->error = -EBADFD;
+ goto out;
+ }
cookie = req->object->cookie;
cookie->object_size = size;
if (size)
@@ -175,6 +224,7 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
trace_cachefiles_ondemand_copen(req->object, id, size);
cachefiles_ondemand_set_object_open(req->object);
+ spin_unlock(&info->lock);
wake_up_all(&cache->daemon_pollwq);
out:
@@ -205,14 +255,14 @@ int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args)
return 0;
}
-static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
+static int cachefiles_ondemand_get_fd(struct cachefiles_req *req,
+ struct ondemand_anon_file *anon_file)
{
struct cachefiles_object *object;
struct cachefiles_cache *cache;
struct cachefiles_open *load;
- struct file *file;
u32 object_id;
- int ret, fd;
+ int ret;
object = cachefiles_grab_object(req->object,
cachefiles_obj_get_ondemand_fd);
@@ -224,35 +274,53 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req)
if (ret < 0)
goto err;
- fd = get_unused_fd_flags(O_WRONLY);
- if (fd < 0) {
- ret = fd;
+ anon_file->fd = get_unused_fd_flags(O_WRONLY);
+ if (anon_file->fd < 0) {
+ ret = anon_file->fd;
goto err_free_id;
}
- file = anon_inode_getfile("[cachefiles]", &cachefiles_ondemand_fd_fops,
- object, O_WRONLY);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
+ anon_file->file = anon_inode_getfile("[cachefiles]",
+ &cachefiles_ondemand_fd_fops, object, O_WRONLY);
+ if (IS_ERR(anon_file->file)) {
+ ret = PTR_ERR(anon_file->file);
goto err_put_fd;
}
- file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
- fd_install(fd, file);
+ spin_lock(&object->ondemand->lock);
+ if (object->ondemand->ondemand_id > 0) {
+ spin_unlock(&object->ondemand->lock);
+ /* Pair with check in cachefiles_ondemand_fd_release(). */
+ anon_file->file->private_data = NULL;
+ ret = -EEXIST;
+ goto err_put_file;
+ }
+
+ anon_file->file->f_mode |= FMODE_PWRITE | FMODE_LSEEK;
load = (void *)req->msg.data;
- load->fd = fd;
+ load->fd = anon_file->fd;
object->ondemand->ondemand_id = object_id;
+ spin_unlock(&object->ondemand->lock);
cachefiles_get_unbind_pincount(cache);
trace_cachefiles_ondemand_open(object, &req->msg, load);
return 0;
+err_put_file:
+ fput(anon_file->file);
+ anon_file->file = NULL;
err_put_fd:
- put_unused_fd(fd);
+ put_unused_fd(anon_file->fd);
+ anon_file->fd = ret;
err_free_id:
xa_erase(&cache->ondemand_ids, object_id);
err:
+ spin_lock(&object->ondemand->lock);
+ /* Avoid marking an opened object as closed. */
+ if (object->ondemand->ondemand_id <= 0)
+ cachefiles_ondemand_set_object_close(object);
+ spin_unlock(&object->ondemand->lock);
cachefiles_put_object(object, cachefiles_obj_put_ondemand_fd);
return ret;
}
@@ -299,9 +367,9 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
{
struct cachefiles_req *req;
struct cachefiles_msg *msg;
- unsigned long id = 0;
size_t n;
int ret = 0;
+ struct ondemand_anon_file anon_file;
XA_STATE(xas, &cache->reqs, cache->req_id_next);
xa_lock(&cache->reqs);
@@ -330,42 +398,45 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
cache->req_id_next = xas.xa_index + 1;
+ refcount_inc(&req->ref);
+ cachefiles_grab_object(req->object, cachefiles_obj_get_read_req);
xa_unlock(&cache->reqs);
- id = xas.xa_index;
-
if (msg->opcode == CACHEFILES_OP_OPEN) {
- ret = cachefiles_ondemand_get_fd(req);
- if (ret) {
- cachefiles_ondemand_set_object_close(req->object);
- goto error;
- }
+ ret = cachefiles_ondemand_get_fd(req, &anon_file);
+ if (ret)
+ goto out;
}
- msg->msg_id = id;
+ msg->msg_id = xas.xa_index;
msg->object_id = req->object->ondemand->ondemand_id;
- if (copy_to_user(_buffer, msg, n) != 0) {
+ if (copy_to_user(_buffer, msg, n) != 0)
ret = -EFAULT;
- goto err_put_fd;
- }
- /* CLOSE request has no reply */
- if (msg->opcode == CACHEFILES_OP_CLOSE) {
- xa_erase(&cache->reqs, id);
- complete(&req->done);
+ if (msg->opcode == CACHEFILES_OP_OPEN) {
+ if (ret < 0) {
+ fput(anon_file.file);
+ put_unused_fd(anon_file.fd);
+ goto out;
+ }
+ fd_install(anon_file.fd, anon_file.file);
}
-
- return n;
-
-err_put_fd:
- if (msg->opcode == CACHEFILES_OP_OPEN)
- close_fd(((struct cachefiles_open *)msg->data)->fd);
-error:
- xa_erase(&cache->reqs, id);
- req->error = ret;
- complete(&req->done);
- return ret;
+out:
+ cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
+ /* Remove the request on error; a CLOSE request needs no reply */
+ if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
+ xas_reset(&xas);
+ xas_lock(&xas);
+ if (xas_load(&xas) == req) {
+ req->error = ret;
+ complete(&req->done);
+ xas_store(&xas, NULL);
+ }
+ xas_unlock(&xas);
+ }
+ cachefiles_req_put(req);
+ return ret ? ret : n;
}
typedef int (*init_req_fn)(struct cachefiles_req *req, void *private);
@@ -395,6 +466,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
goto out;
}
+ refcount_set(&req->ref, 1);
req->object = object;
init_completion(&req->done);
req->msg.opcode = opcode;
@@ -456,7 +528,7 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
wake_up_all(&cache->daemon_pollwq);
wait_for_completion(&req->done);
ret = req->error;
- kfree(req);
+ cachefiles_req_put(req);
return ret;
out:
/* Reset the object to close state in error handling path.
@@ -578,6 +650,7 @@ int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
return -ENOMEM;
object->ondemand->object = object;
+ spin_lock_init(&object->ondemand->lock);
INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker);
return 0;
}
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 1f2f70a1b..decedc4ee 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -12,6 +12,7 @@
#include <trace/events/dlm.h>
#include "dlm_internal.h"
+#include "lvb_table.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
@@ -42,6 +43,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
struct dlm_callback *cb;
+ int copy_lvb = 0;
int prev_mode;
if (flags & DLM_CB_BAST) {
@@ -73,6 +75,17 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
goto out;
}
}
+ } else if (flags & DLM_CB_CAST) {
+ if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+ if (lkb->lkb_last_cast)
+ prev_mode = lkb->lkb_last_cb->mode;
+ else
+ prev_mode = -1;
+
+ if (!status && lkb->lkb_lksb->sb_lvbptr &&
+ dlm_lvb_operations[prev_mode + 1][mode + 1])
+ copy_lvb = 1;
+ }
}
cb = dlm_allocate_cb();
@@ -85,6 +98,7 @@ int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
cb->mode = mode;
cb->sb_status = status;
cb->sb_flags = (sbflags & 0x000000FF);
+ cb->copy_lvb = copy_lvb;
kref_init(&cb->ref);
if (!test_and_set_bit(DLM_IFL_CB_PENDING_BIT, &lkb->lkb_iflags))
rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 3b4dbce84..a9137c90f 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -222,6 +222,7 @@ struct dlm_callback {
int sb_status; /* copy to lksb status */
uint8_t sb_flags; /* copy to lksb flags */
int8_t mode; /* rq mode of bast, gr mode of cast */
+ int copy_lvb;
struct list_head list;
struct kref ref;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 9f9b68448..12a483dee 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -21,7 +21,6 @@
#include "dlm_internal.h"
#include "lockspace.h"
#include "lock.h"
-#include "lvb_table.h"
#include "user.h"
#include "ast.h"
#include "config.h"
@@ -806,8 +805,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
struct dlm_lkb *lkb;
DECLARE_WAITQUEUE(wait, current);
struct dlm_callback *cb;
- int rv, ret, copy_lvb = 0;
- int old_mode, new_mode;
+ int rv, ret;
if (count == sizeof(struct dlm_device_version)) {
rv = copy_version_to_user(buf, count);
@@ -864,9 +862,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
lkb = list_first_entry(&proc->asts, struct dlm_lkb, lkb_cb_list);
- /* rem_lkb_callback sets a new lkb_last_cast */
- old_mode = lkb->lkb_last_cast->mode;
-
rv = dlm_dequeue_lkb_callback(lkb, &cb);
switch (rv) {
case DLM_DEQUEUE_CALLBACK_EMPTY:
@@ -895,12 +890,6 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
if (cb->flags & DLM_CB_BAST) {
trace_dlm_bast(lkb->lkb_resource->res_ls, lkb, cb->mode);
} else if (cb->flags & DLM_CB_CAST) {
- new_mode = cb->mode;
-
- if (!cb->sb_status && lkb->lkb_lksb->sb_lvbptr &&
- dlm_lvb_operations[old_mode + 1][new_mode + 1])
- copy_lvb = 1;
-
lkb->lkb_lksb->sb_status = cb->sb_status;
lkb->lkb_lksb->sb_flags = cb->sb_flags;
trace_dlm_ast(lkb->lkb_resource->res_ls, lkb);
@@ -908,7 +897,7 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
ret = copy_result_to_user(lkb->lkb_ua,
test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags),
- cb->flags, cb->mode, copy_lvb, buf, count);
+ cb->flags, cb->mode, cb->copy_lvb, buf, count);
kref_put(&cb->ref, dlm_release_callback);
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3fe41964c..7f9f68c00 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -300,9 +300,11 @@ write_tag_66_packet(char *signature, u8 cipher_code,
* | Key Identifier Size | 1 or 2 bytes |
* | Key Identifier | arbitrary |
* | File Encryption Key Size | 1 or 2 bytes |
+ * | Cipher Code | 1 byte |
* | File Encryption Key | arbitrary |
+ * | Checksum | 2 bytes |
*/
- data_len = (5 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
+ data_len = (8 + ECRYPTFS_SIG_SIZE_HEX + crypt_stat->key_size);
*packet = kmalloc(data_len, GFP_KERNEL);
message = *packet;
if (!message) {
diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c
index 81e65c453..3a3461561 100644
--- a/fs/erofs/decompressor_deflate.c
+++ b/fs/erofs/decompressor_deflate.c
@@ -46,39 +46,15 @@ int __init z_erofs_deflate_init(void)
/* by default, use # of possible CPUs instead */
if (!z_erofs_deflate_nstrms)
z_erofs_deflate_nstrms = num_possible_cpus();
-
- for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
- ++z_erofs_deflate_avail_strms) {
- struct z_erofs_deflate *strm;
-
- strm = kzalloc(sizeof(*strm), GFP_KERNEL);
- if (!strm)
- goto out_failed;
-
- /* XXX: in-kernel zlib cannot shrink windowbits currently */
- strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
- if (!strm->z.workspace) {
- kfree(strm);
- goto out_failed;
- }
-
- spin_lock(&z_erofs_deflate_lock);
- strm->next = z_erofs_deflate_head;
- z_erofs_deflate_head = strm;
- spin_unlock(&z_erofs_deflate_lock);
- }
return 0;
-
-out_failed:
- erofs_err(NULL, "failed to allocate zlib workspace");
- z_erofs_deflate_exit();
- return -ENOMEM;
}
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
{
struct z_erofs_deflate_cfgs *dfl = data;
+ static DEFINE_MUTEX(deflate_resize_mutex);
+ static bool inited;
if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
erofs_err(sb, "invalid deflate cfgs, size=%u", size);
@@ -89,9 +65,36 @@ int z_erofs_load_deflate_config(struct super_block *sb,
erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
return -EOPNOTSUPP;
}
+ mutex_lock(&deflate_resize_mutex);
+ if (!inited) {
+ for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+ ++z_erofs_deflate_avail_strms) {
+ struct z_erofs_deflate *strm;
+
+ strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+ if (!strm)
+ goto failed;
+ /* XXX: in-kernel zlib cannot customize windowbits */
+ strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+ if (!strm->z.workspace) {
+ kfree(strm);
+ goto failed;
+ }
+ spin_lock(&z_erofs_deflate_lock);
+ strm->next = z_erofs_deflate_head;
+ z_erofs_deflate_head = strm;
+ spin_unlock(&z_erofs_deflate_lock);
+ }
+ inited = true;
+ }
+ mutex_unlock(&deflate_resize_mutex);
erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
return 0;
+failed:
+ mutex_unlock(&deflate_resize_mutex);
+ z_erofs_deflate_exit();
+ return -ENOMEM;
}
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
diff --git a/fs/exec.c b/fs/exec.c
index cf1df7f16..0c5f06d08 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -67,6 +67,7 @@
#include <linux/time_namespace.h>
#include <linux/user_events.h>
#include <linux/rseq.h>
+#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -268,6 +269,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
}
/*
+ * Needs to be called with the mmap write lock
+ * held, to avoid racing with ksmd.
+ */
+ err = ksm_execve(mm);
+ if (err)
+ goto err_ksm;
+
+ /*
* Place the stack at the largest stack address the architecture
* supports. Later, we'll move this to an appropriate place. We don't
* use STACK_TOP because that can depend on attributes which aren't
@@ -288,6 +297,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
+ ksm_exit(mm);
+err_ksm:
mmap_write_unlock(mm);
err_free:
bprm->vma = NULL;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 537803250..30e824866 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2334,7 +2334,7 @@ static int mpage_journal_page_buffers(handle_t *handle,
if (folio_pos(folio) + len > size &&
!ext4_verity_in_progress(inode))
- len = size - folio_pos(folio);
+ len = size & (len - 1);
return ext4_journal_folio_buffers(handle, folio, len);
}
@@ -2887,9 +2887,6 @@ retry:
if (IS_ERR(folio))
return PTR_ERR(folio);
- /* In case writeback began while the folio was unlocked */
- folio_wait_stable(folio);
-
#ifdef CONFIG_FS_ENCRYPTION
ret = ext4_block_write_begin(folio, pos, len, ext4_da_get_block_prep);
#else
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 12b3f1960..66b5a68b0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -831,6 +831,8 @@ static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
return 0;
if (order == MB_NUM_ORDERS(sb))
order--;
+ if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb)))
+ order = MB_NUM_ORDERS(sb) - 1;
return order;
}
@@ -1008,6 +1010,8 @@ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context
* goal length.
*/
order = fls(ac->ac_g_ex.fe_len) - 1;
+ if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb)))
+ order = MB_NUM_ORDERS(ac->ac_sb);
min_order = order - sbi->s_mb_best_avail_max_trim_order;
if (min_order < 0)
min_order = 0;
@@ -6113,6 +6117,7 @@ ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp)
ext4_mb_mark_bb(sb, block, 1, true);
ar->len = 1;
+ *errp = 0;
return block;
}
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 56938532b..7bfc5fb5a 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -193,8 +193,8 @@ struct ext4_allocation_context {
ext4_grpblk_t ac_orig_goal_len;
__u32 ac_flags; /* allocation hints */
+ __u32 ac_groups_linear_remaining;
__u16 ac_groups_scanned;
- __u16 ac_groups_linear_remaining;
__u16 ac_found;
__u16 ac_cX_found[EXT4_MB_NUM_CRS];
__u16 ac_tail;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5e4f65c14..a630b27a4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2897,7 +2897,7 @@ retry:
inode = ext4_new_inode_start_handle(idmap, dir, mode,
NULL, 0, NULL,
EXT4_HT_DIR,
- EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb) +
+ EXT4_MAXQUOTAS_TRANS_BLOCKS(dir->i_sb) +
4 + EXT4_XATTR_TRANS_BLOCKS);
handle = ext4_journal_current_handle();
err = PTR_ERR(inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 044135796..4b368f4db 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5551,19 +5551,15 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err)
goto failed_mount6;
- err = ext4_register_sysfs(sb);
- if (err)
- goto failed_mount7;
-
err = ext4_init_orphan_info(sb);
if (err)
- goto failed_mount8;
+ goto failed_mount7;
#ifdef CONFIG_QUOTA
/* Enable quota usage during mount. */
if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
err = ext4_enable_quotas(sb);
if (err)
- goto failed_mount9;
+ goto failed_mount8;
}
#endif /* CONFIG_QUOTA */
@@ -5589,7 +5585,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
ext4_msg(sb, KERN_INFO, "recovery complete");
err = ext4_mark_recovery_complete(sb, es);
if (err)
- goto failed_mount10;
+ goto failed_mount9;
}
if (test_opt(sb, DISCARD) && !bdev_max_discard_sectors(sb->s_bdev))
@@ -5606,15 +5602,17 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
atomic_set(&sbi->s_warning_count, 0);
atomic_set(&sbi->s_msg_count, 0);
+ /* Register sysfs after all initializations are complete. */
+ err = ext4_register_sysfs(sb);
+ if (err)
+ goto failed_mount9;
+
return 0;
-failed_mount10:
+failed_mount9:
ext4_quotas_off(sb, EXT4_MAXQUOTAS);
-failed_mount9: __maybe_unused
+failed_mount8: __maybe_unused
ext4_release_orphan_info(sb);
-failed_mount8:
- ext4_unregister_sysfs(sb);
- kobject_put(&sbi->s_kobj);
failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 6d332dff7..63cbda370 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -29,6 +29,7 @@ typedef enum {
attr_trigger_test_error,
attr_first_error_time,
attr_last_error_time,
+ attr_clusters_in_group,
attr_feature,
attr_pointer_ui,
attr_pointer_ul,
@@ -104,7 +105,7 @@ static ssize_t reserved_clusters_store(struct ext4_sb_info *sbi,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (ret || val >= clusters)
+ if (ret || val >= clusters || (s64)val < 0)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
@@ -207,13 +208,14 @@ EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
ext4_sb_info, s_inode_readahead_blks);
+EXT4_ATTR_OFFSET(mb_group_prealloc, 0644, clusters_in_group,
+ ext4_sb_info, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
-EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
EXT4_RW_ATTR_SBI_UI(mb_max_linear_groups, s_mb_max_linear_groups);
EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error);
@@ -392,6 +394,7 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
(unsigned long long)
percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
case attr_inode_readahead:
+ case attr_clusters_in_group:
case attr_pointer_ui:
if (!ptr)
return 0;
@@ -451,7 +454,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
s_kobj);
struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
void *ptr = calc_ptr(a, sbi);
- unsigned long t;
+ unsigned int t;
+ unsigned long lt;
int ret;
switch (a->attr_id) {
@@ -460,7 +464,7 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
case attr_pointer_ui:
if (!ptr)
return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
if (ret)
return ret;
if (a->attr_ptr == ptr_ext4_super_block_offset)
@@ -468,13 +472,21 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
else
*((unsigned int *) ptr) = t;
return len;
+ case attr_clusters_in_group:
+ ret = kstrtouint(skip_spaces(buf), 0, &t);
+ if (ret)
+ return ret;
+ if (t > sbi->s_clusters_per_group)
+ return -EINVAL;
+ *((unsigned int *) ptr) = t;
+ return len;
case attr_pointer_ul:
if (!ptr)
return 0;
- ret = kstrtoul(skip_spaces(buf), 0, &t);
+ ret = kstrtoul(skip_spaces(buf), 0, &lt);
if (ret)
return ret;
- *((unsigned long *) ptr) = t;
+ *((unsigned long *) ptr) = lt;
return len;
case attr_inode_readahead:
return inode_readahead_blks_store(sbi, buf, len);
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b67a176bf..78f06f86c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1619,6 +1619,7 @@ out_err:
static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
struct ext4_xattr_search *s,
handle_t *handle, struct inode *inode,
+ struct inode *new_ea_inode,
bool is_block)
{
struct ext4_xattr_entry *last, *next;
@@ -1626,7 +1627,6 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
size_t min_offs = s->end - s->base, name_len = strlen(i->name);
int in_inode = i->in_inode;
struct inode *old_ea_inode = NULL;
- struct inode *new_ea_inode = NULL;
size_t old_size, new_size;
int ret;
@@ -1711,38 +1711,11 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
old_ea_inode = NULL;
goto out;
}
- }
- if (i->value && in_inode) {
- WARN_ON_ONCE(!i->value_len);
-
- new_ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
- i->value, i->value_len);
- if (IS_ERR(new_ea_inode)) {
- ret = PTR_ERR(new_ea_inode);
- new_ea_inode = NULL;
- goto out;
- }
- }
- if (old_ea_inode) {
/* We are ready to release ref count on the old_ea_inode. */
ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
- if (ret) {
- /* Release newly required ref count on new_ea_inode. */
- if (new_ea_inode) {
- int err;
-
- err = ext4_xattr_inode_dec_ref(handle,
- new_ea_inode);
- if (err)
- ext4_warning_inode(new_ea_inode,
- "dec ref new_ea_inode err=%d",
- err);
- ext4_xattr_inode_free_quota(inode, new_ea_inode,
- i->value_len);
- }
+ if (ret)
goto out;
- }
ext4_xattr_inode_free_quota(inode, old_ea_inode,
le32_to_cpu(here->e_value_size));
@@ -1866,7 +1839,6 @@ update_hash:
ret = 0;
out:
iput(old_ea_inode);
- iput(new_ea_inode);
return ret;
}
@@ -1929,9 +1901,21 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
size_t old_ea_inode_quota = 0;
unsigned int ea_ino;
-
#define header(x) ((struct ext4_xattr_header *)(x))
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode)) {
+ error = PTR_ERR(ea_inode);
+ ea_inode = NULL;
+ goto cleanup;
+ }
+ }
+
if (s->base) {
int offset = (char *)s->here - bs->bh->b_data;
@@ -1940,6 +1924,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
EXT4_JTR_NONE);
if (error)
goto cleanup;
+
lock_buffer(bs->bh);
if (header(s->base)->h_refcount == cpu_to_le32(1)) {
@@ -1966,7 +1951,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
}
ea_bdebug(bs->bh, "modifying in-place");
error = ext4_xattr_set_entry(i, s, handle, inode,
- true /* is_block */);
+ ea_inode, true /* is_block */);
ext4_xattr_block_csum_set(inode, bs->bh);
unlock_buffer(bs->bh);
if (error == -EFSCORRUPTED)
@@ -2034,29 +2019,13 @@ clone_block:
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ true /* is_block */);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
goto cleanup;
- if (i->value && s->here->e_value_inum) {
- /*
- * A ref count on ea_inode has been taken as part of the call to
- * ext4_xattr_set_entry() above. We would like to drop this
- * extra ref but we have to wait until the xattr block is
- * initialized and has its own ref count on the ea_inode.
- */
- ea_ino = le32_to_cpu(s->here->e_value_inum);
- error = ext4_xattr_inode_iget(inode, ea_ino,
- le32_to_cpu(s->here->e_hash),
- &ea_inode);
- if (error) {
- ea_inode = NULL;
- goto cleanup;
- }
- }
-
inserted:
if (!IS_LAST_ENTRY(s->first)) {
new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
@@ -2198,17 +2167,16 @@ getblk_failed:
cleanup:
if (ea_inode) {
- int error2;
-
- error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
- if (error2)
- ext4_warning_inode(ea_inode, "dec ref error=%d",
- error2);
+ if (error) {
+ int error2;
- /* If there was an error, revert the quota charge. */
- if (error)
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
ext4_xattr_inode_free_quota(inode, ea_inode,
i_size_read(ea_inode));
+ }
iput(ea_inode);
}
if (ce)
@@ -2266,14 +2234,38 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
{
struct ext4_xattr_ibody_header *header;
struct ext4_xattr_search *s = &is->s;
+ struct inode *ea_inode = NULL;
int error;
if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
- if (error)
+ /* If we need EA inode, prepare it before locking the buffer */
+ if (i->value && i->in_inode) {
+ WARN_ON_ONCE(!i->value_len);
+
+ ea_inode = ext4_xattr_inode_lookup_create(handle, inode,
+ i->value, i->value_len);
+ if (IS_ERR(ea_inode))
+ return PTR_ERR(ea_inode);
+ }
+ error = ext4_xattr_set_entry(i, s, handle, inode, ea_inode,
+ false /* is_block */);
+ if (error) {
+ if (ea_inode) {
+ int error2;
+
+ error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
+ if (error2)
+ ext4_warning_inode(ea_inode, "dec ref error=%d",
+ error2);
+
+ ext4_xattr_inode_free_quota(inode, ea_inode,
+ i_size_read(ea_inode));
+ iput(ea_inode);
+ }
return error;
+ }
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2282,6 +2274,7 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
header->h_magic = cpu_to_le32(0);
ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
}
+ iput(ea_inode);
return 0;
}
@@ -3113,8 +3106,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
if (IS_ERR(bh)) {
- if (PTR_ERR(bh) == -ENOMEM)
+ if (PTR_ERR(bh) == -ENOMEM) {
+ mb_cache_entry_put(ea_block_cache, ce);
return NULL;
+ }
bh = NULL;
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long)ce->e_value);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index eac698b8d..b01320502 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -179,22 +179,22 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
break;
case META_SIT:
if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
- goto err;
+ goto check_only;
break;
case META_SSA:
if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
blkaddr < SM_I(sbi)->ssa_blkaddr))
- goto err;
+ goto check_only;
break;
case META_CP:
if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
blkaddr < __start_cp_addr(sbi)))
- goto err;
+ goto check_only;
break;
case META_POR:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
blkaddr < MAIN_BLKADDR(sbi)))
- goto err;
+ goto check_only;
break;
case DATA_GENERIC:
case DATA_GENERIC_ENHANCE:
@@ -228,6 +228,7 @@ static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
return true;
err:
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+check_only:
return false;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index d9494b5fc..5f77f9df2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3896,15 +3896,14 @@ static int check_swap_activate(struct swap_info_struct *sis,
struct address_space *mapping = swap_file->f_mapping;
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- sector_t cur_lblock;
- sector_t last_lblock;
- sector_t pblock;
- sector_t lowest_pblock = -1;
- sector_t highest_pblock = 0;
+ block_t cur_lblock;
+ block_t last_lblock;
+ block_t pblock;
+ block_t lowest_pblock = -1;
+ block_t highest_pblock = 0;
int nr_extents = 0;
- unsigned long nr_pblocks;
+ unsigned int nr_pblocks;
unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
- unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
unsigned int not_aligned = 0;
int ret = 0;
@@ -3942,8 +3941,8 @@ retry:
pblock = map.m_pblk;
nr_pblocks = map.m_len;
- if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
- nr_pblocks & sec_blks_mask ||
+ if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
+ nr_pblocks % blks_per_sec ||
!f2fs_valid_pinned_area(sbi, pblock)) {
bool last_extent = false;
@@ -4185,7 +4184,7 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
return -EINVAL;
- if (map.m_pblk != NULL_ADDR) {
+ if (map.m_flags & F2FS_MAP_MAPPED) {
iomap->length = blks_to_bytes(inode, map.m_len);
iomap->type = IOMAP_MAPPED;
iomap->flags |= IOMAP_F_MERGED;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index fced2b765..07b3675ea 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -2309,7 +2309,7 @@ static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count, bool partial)
{
- blkcnt_t diff = 0, release = 0;
+ long long diff = 0, release = 0;
block_t avail_user_block_count;
int ret;
@@ -2329,26 +2329,27 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
spin_lock(&sbi->stat_lock);
- sbi->total_valid_block_count += (block_t)(*count);
- avail_user_block_count = get_available_block_count(sbi, inode, true);
- if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
+ avail_user_block_count = get_available_block_count(sbi, inode, true);
+ diff = (long long)sbi->total_valid_block_count + *count -
+ avail_user_block_count;
+ if (unlikely(diff > 0)) {
if (!partial) {
spin_unlock(&sbi->stat_lock);
+ release = *count;
goto enospc;
}
-
- diff = sbi->total_valid_block_count - avail_user_block_count;
if (diff > *count)
diff = *count;
*count -= diff;
release = diff;
- sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
goto enospc;
}
}
+ sbi->total_valid_block_count += (block_t)(*count);
+
spin_unlock(&sbi->stat_lock);
if (unlikely(release)) {
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 1761ad125..208dedc16 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -952,9 +952,14 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
ATTR_GID | ATTR_TIMES_SET))))
return -EPERM;
- if ((attr->ia_valid & ATTR_SIZE) &&
- !f2fs_is_compress_backend_ready(inode))
- return -EOPNOTSUPP;
+ if ((attr->ia_valid & ATTR_SIZE)) {
+ if (!f2fs_is_compress_backend_ready(inode))
+ return -EOPNOTSUPP;
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) &&
+ !IS_ALIGNED(attr->ia_size,
+ F2FS_BLK_TO_BYTES(F2FS_I(inode)->i_cluster_size)))
+ return -EINVAL;
+ }
err = setattr_prepare(idmap, dentry, attr);
if (err)
@@ -1325,6 +1330,9 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
f2fs_put_page(psrc, 1);
return PTR_ERR(pdst);
}
+
+ f2fs_wait_on_page_writeback(pdst, DATA, true, true);
+
memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE);
set_page_dirty(pdst);
set_page_private_gcing(pdst);
@@ -1817,15 +1825,6 @@ static long f2fs_fallocate(struct file *file, int mode,
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
- /*
- * Pinned file should not support partial truncation since the block
- * can be used by applications.
- */
- if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
- (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
- FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
- return -EOPNOTSUPP;
-
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
FALLOC_FL_INSERT_RANGE))
@@ -1833,6 +1832,17 @@ static long f2fs_fallocate(struct file *file, int mode,
inode_lock(inode);
+ /*
+ * Pinned file should not support partial truncation since the block
+ * can be used by applications.
+ */
+ if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
+ (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
+ FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = file_modified(file);
if (ret)
goto out;
@@ -2837,7 +2847,8 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
goto out;
}
- if (f2fs_compressed_file(src) || f2fs_compressed_file(dst)) {
+ if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) ||
+ f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) {
ret = -EOPNOTSUPP;
goto out_unlock;
}
@@ -3522,9 +3533,6 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
if (f2fs_readonly(sbi->sb))
return -EROFS;
@@ -3543,7 +3551,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
goto out;
}
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@@ -3570,9 +3579,12 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
struct dnode_of_data dn;
pgoff_t end_offset, count;
+ f2fs_lock_op(sbi);
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
+ f2fs_unlock_op(sbi);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
@@ -3590,6 +3602,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+
if (ret < 0)
break;
@@ -3641,7 +3655,8 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
while (count) {
int compr_blocks = 0;
- blkcnt_t reserved;
+ blkcnt_t reserved = 0;
+ blkcnt_t to_reserved;
int ret;
for (i = 0; i < cluster_size; i++) {
@@ -3661,20 +3676,26 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
* fails in release_compress_blocks(), so NEW_ADDR
* is a possible case.
*/
- if (blkaddr == NEW_ADDR ||
- __is_valid_data_blkaddr(blkaddr)) {
+ if (blkaddr == NEW_ADDR) {
+ reserved++;
+ continue;
+ }
+ if (__is_valid_data_blkaddr(blkaddr)) {
compr_blocks++;
continue;
}
}
- reserved = cluster_size - compr_blocks;
+ to_reserved = cluster_size - compr_blocks - reserved;
/* for the case all blocks in cluster were reserved */
- if (reserved == 1)
+ if (to_reserved == 1) {
+ dn->ofs_in_node += cluster_size;
goto next;
+ }
- ret = inc_valid_block_count(sbi, dn->inode, &reserved, false);
+ ret = inc_valid_block_count(sbi, dn->inode,
+ &to_reserved, false);
if (unlikely(ret))
return ret;
@@ -3685,7 +3706,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count,
f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
- *reserved_blocks += reserved;
+ *reserved_blocks += to_reserved;
next:
count -= cluster_size;
}
@@ -3704,9 +3725,6 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
if (!f2fs_sb_has_compression(sbi))
return -EOPNOTSUPP;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
if (f2fs_readonly(sbi->sb))
return -EROFS;
@@ -3718,7 +3736,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
inode_lock(inode);
- if (!is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto unlock_inode;
}
@@ -3735,9 +3754,12 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
struct dnode_of_data dn;
pgoff_t end_offset, count;
+ f2fs_lock_op(sbi);
+
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
if (ret) {
+ f2fs_unlock_op(sbi);
if (ret == -ENOENT) {
page_idx = f2fs_get_next_page_offset(&dn,
page_idx);
@@ -3755,6 +3777,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
f2fs_put_dnode(&dn);
+ f2fs_unlock_op(sbi);
+
if (ret < 0)
break;
@@ -4119,9 +4143,6 @@ static int f2fs_ioc_decompress_file(struct file *filp)
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
f2fs_balance_fs(sbi, true);
file_start_write(filp);
@@ -4132,7 +4153,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
goto out;
}
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
@@ -4197,9 +4219,6 @@ static int f2fs_ioc_compress_file(struct file *filp)
if (!(filp->f_mode & FMODE_WRITE))
return -EBADF;
- if (!f2fs_compressed_file(inode))
- return -EINVAL;
-
f2fs_balance_fs(sbi, true);
file_start_write(filp);
@@ -4210,7 +4229,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
goto out;
}
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
+ if (!f2fs_compressed_file(inode) ||
+ is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
ret = -EINVAL;
goto out;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 8852814da..e86c7f015 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -1554,10 +1554,15 @@ next_step:
int err;
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode) ||
- special_file(inode->i_mode))
+ if (IS_ERR(inode))
continue;
+ if (is_bad_inode(inode) ||
+ special_file(inode->i_mode)) {
+ iput(inode);
+ continue;
+ }
+
err = f2fs_gc_pinned_control(inode, gc_type, segno);
if (err == -EAGAIN) {
iput(inode);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index c26effdce..2af50bc34 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -361,6 +361,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
+ if (fi->i_xattr_nid && f2fs_check_nid_range(sbi, fi->i_xattr_nid)) {
+ f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_xattr_nid: %u, run fsck to fix.",
+ __func__, inode->i_ino, fi->i_xattr_nid);
+ return false;
+ }
+
return true;
}
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b3de6d6cd..15c9a9f57 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1187,7 +1187,17 @@ skip_partial:
default:
BUG();
}
- if (err < 0 && err != -ENOENT)
+ if (err == -ENOENT) {
+ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
+ f2fs_err_ratelimited(sbi,
+ "truncate node fail, ino:%lu, nid:%u, "
+ "offset[0]:%d, offset[1]:%d, nofs:%d",
+ inode->i_ino, dn.nid, offset[0],
+ offset[1], nofs);
+ err = 0;
+ }
+ if (err < 0)
goto fail;
if (offset[1] == 0 &&
ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
@@ -1319,6 +1329,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
}
if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
err = -EFSCORRUPTED;
+ dec_valid_node_count(sbi, dn->inode, !ofs);
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
goto fail;
@@ -1345,7 +1356,6 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
if (ofs == 0)
inc_valid_inode_count(sbi);
return page;
-
fail:
clear_node_page_dirty(page);
f2fs_put_page(page, 1);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 4fd76e867..6474b7338 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3559,6 +3559,8 @@ int f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
if (segment_full) {
if (type == CURSEG_COLD_DATA_PINNED &&
!((curseg->segno + 1) % sbi->segs_per_sec)) {
+ write_sum_page(sbi, curseg->sum_blk,
+ GET_SUM_BLOCK(sbi, curseg->segno));
reset_curseg_fields(curseg);
goto skip_new_segment;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index a4bc26dfd..2f75a7dfc 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2132,8 +2132,6 @@ static void default_options(struct f2fs_sb_info *sbi, bool remount)
F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE;
- sbi->sb->s_flags &= ~SB_INLINECRYPT;
-
set_opt(sbi, INLINE_XATTR);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, INLINE_DENTRY);
@@ -4131,9 +4129,15 @@ void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason,
if (shutdown)
set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
- /* continue filesystem operators if errors=continue */
- if (continue_fs || f2fs_readonly(sb))
+ /*
+ * Continue filesystem operations if errors=continue. Do not set the
+ * filesystem read-only on shutdown, since that bypasses thaw_super,
+ * which can hang the system.
+ */
+ if (continue_fs || f2fs_readonly(sb) || shutdown) {
+ f2fs_warn(sbi, "Stopped filesystem due to reason: %d", reason);
return;
+ }
f2fs_warn(sbi, "Remounting filesystem read-only");
/*
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e4f17c53d..d31853032 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2069,6 +2069,7 @@ static long wb_writeback(struct bdi_writeback *wb,
struct inode *inode;
long progress;
struct blk_plug plug;
+ bool queued = false;
blk_start_plug(&plug);
for (;;) {
@@ -2111,8 +2112,10 @@ static long wb_writeback(struct bdi_writeback *wb,
dirtied_before = jiffies;
trace_writeback_start(wb, work);
- if (list_empty(&wb->b_io))
+ if (list_empty(&wb->b_io)) {
queue_io(wb, work, dirtied_before);
+ queued = true;
+ }
if (work->sb)
progress = writeback_sb_inodes(work->sb, wb, work);
else
@@ -2127,7 +2130,7 @@ static long wb_writeback(struct bdi_writeback *wb,
* mean the overall work is done. So we keep looping as long
* as made some progress on cleaning pages or inodes.
*/
- if (progress) {
+ if (progress || !queued) {
spin_unlock(&wb->list_lock);
continue;
}
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 3ec8bb5e6..9eb191b5c 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1813,7 +1813,8 @@ static void fuse_resend(struct fuse_conn *fc)
spin_unlock(&fc->lock);
list_for_each_entry_safe(req, next, &to_queue, list) {
- __set_bit(FR_PENDING, &req->flags);
+ set_bit(FR_PENDING, &req->flags);
+ clear_bit(FR_SENT, &req->flags);
/* mark the request as resend request */
req->in.h.unique |= FUSE_UNIQUE_RESEND;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 34540f9d0..2507fe34c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
return true;
}
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void __gfs2_glock_free(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
- gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ __gfs2_glock_free(gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_kill_wait);
+}
+
+void gfs2_glock_free_later(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ spin_lock(&lru_lock);
+ list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
+ spin_unlock(&lru_lock);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_kill_wait);
}
+static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
+{
+ struct list_head *list = &sdp->sd_dead_glocks;
+
+ while (!list_empty(list)) {
+ struct gfs2_glock *gl;
+
+ gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+ list_del_init(&gl->gl_lru);
+ __gfs2_glock_free(gl);
+ }
+}
+
/**
* gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold
@@ -591,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK;
- spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state);
state_change(gl, state);
gh = find_first_waiter(gl);
@@ -639,7 +664,6 @@ retry:
gl->gl_target, state);
GLOCK_BUG_ON(gl, 1);
}
- spin_unlock(&gl->gl_lockref.lock);
return;
}
@@ -662,7 +686,6 @@ retry:
}
out:
clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_lockref.lock);
}
static bool is_system_glock(struct gfs2_glock *gl)
@@ -690,6 +713,7 @@ __acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret;
@@ -718,6 +742,9 @@ __acquires(&gl->gl_lockref.lock)
(gl->gl_state == LM_ST_EXCLUSIVE) ||
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags);
+ if (!glops->go_inval && !glops->go_sync)
+ goto skip_inval;
+
spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync) {
ret = glops->go_sync(gl);
@@ -730,6 +757,7 @@ __acquires(&gl->gl_lockref.lock)
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
+ spin_lock(&gl->gl_lockref.lock);
goto skip_inval;
}
}
@@ -750,9 +778,10 @@ __acquires(&gl->gl_lockref.lock)
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
+ spin_lock(&gl->gl_lockref.lock);
skip_inval:
- gfs2_glock_hold(gl);
+ gl->gl_lockref.count++;
/*
* Check for an error encountered since we called go_sync and go_inval.
* If so, we can't withdraw from the glock code because the withdraw
@@ -794,31 +823,37 @@ skip_inval:
*/
clear_bit(GLF_LOCK, &gl->gl_flags);
clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
- gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
- goto out;
+ __gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+ return;
} else {
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
}
- if (sdp->sd_lockstruct.ls_ops->lm_lock) {
- /* lock_dlm */
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+ if (ls->ls_ops->lm_lock) {
+ spin_unlock(&gl->gl_lockref.lock);
+ ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+ spin_lock(&gl->gl_lockref.lock);
+
if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
target == LM_ST_UNLOCKED &&
- test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
+ test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ /*
+ * The lockspace has been released and the lock has
+ * been unlocked implicitly.
+ */
} else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret);
- GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+ target = gl->gl_state | LM_OUT_ERROR;
+ } else {
+ /* The operation will be completed asynchronously. */
+ return;
}
- } else { /* lock_nolock */
- finish_xmote(gl, target);
- gfs2_glock_queue_work(gl, 0);
}
-out:
- spin_lock(&gl->gl_lockref.lock);
+
+ /* Complete the operation now. */
+ finish_xmote(gl, target);
+ __gfs2_glock_queue_work(gl, 0);
}
/**
@@ -1071,11 +1106,12 @@ static void glock_work_func(struct work_struct *work)
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
unsigned int drop_refs = 1;
- if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ spin_lock(&gl->gl_lockref.lock);
+ if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+ clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
finish_xmote(gl, gl->gl_reply);
drop_refs++;
}
- spin_lock(&gl->gl_lockref.lock);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2148,8 +2184,11 @@ static void thaw_glock(struct gfs2_glock *gl)
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
+
+ spin_lock(&gl->gl_lockref.lock);
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- gfs2_glock_queue_work(gl, 0);
+ __gfs2_glock_queue_work(gl, 0);
+ spin_unlock(&gl->gl_lockref.lock);
}
/**
@@ -2225,6 +2264,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
+ gfs2_lm_unmount(sdp);
+ gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp);
}
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 0114f3e0e..86987a59a 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_free_later(struct gfs2_glock *gl);
int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 45653cbc8..e0e8dfeee 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp);
+
+ if (gfs2_withdrawing(sdp))
+ gfs2_withdraw(sdp);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 95a334d64..60abd7050 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -838,6 +838,7 @@ struct gfs2_sbd {
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_mutex;
+ struct list_head sd_dead_glocks;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
char sd_table_name[GFS2_FSNAME_LEN];
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index d1ac5d067..e028e55e6 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ /* If the glock is dead, we only react to a dlm_unlock() reply. */
+ if (__lockref_is_dead(&gl->gl_lockref) &&
+ gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+ return;
+
gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
{
struct gfs2_glock *gl = arg;
+ if (__lockref_is_dead(&gl->gl_lockref))
+ return;
+
switch (mode) {
case DLM_LOCK_EX:
gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
- if (gl->gl_lksb.sb_lkid == 0)
- goto out_free;
+ BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+ gfs2_glock_free(gl);
+ return;
+ }
clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_update_request_times(gl);
/* don't want to call dlm if we've unmounted the lock protocol */
- if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
- goto out_free;
+ if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+ gfs2_glock_free(gl);
+ return;
+ }
/* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
- !gl->gl_lksb.sb_lvbptr)
- goto out_free;
+ !gl->gl_lksb.sb_lvbptr) {
+ gfs2_glock_free_later(gl);
+ return;
+ }
again:
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -321,10 +337,6 @@ again:
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error);
}
- return;
-
-out_free:
- gfs2_glock_free(gl);
}
static void gdlm_cancel(struct gfs2_glock *gl)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 572d58e86..cde711859 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait);
mutex_init(&sdp->sd_freeze_mutex);
+ INIT_LIST_HEAD(&sdp->sd_dead_glocks);
return sdp;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index e5f794663..2d780b470 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -646,10 +646,7 @@ restart:
gfs2_gl_hash_clear(sdp);
truncate_inode_pages_final(&sdp->sd_aspace);
gfs2_delete_debugfs_file(sdp);
- /* Unmount the locking protocol */
- gfs2_lm_unmount(sdp);
- /* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp);
free_sbd(sdp);
}
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f52141ce9..fc3ecb180 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
fs_err(sdp, "telling LM to unmount\n");
lm->lm_unmount(sdp);
}
- set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
fs_err(sdp, "File system withdrawn\n");
dump_stack();
clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 4e8e41c8b..4ac6c8c40 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -909,11 +909,11 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
loff_t length = iomap_length(iter);
- size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
loff_t pos = iter->pos;
ssize_t written = 0;
long status = 0;
struct address_space *mapping = iter->inode->i_mapping;
+ size_t chunk = mapping_max_folio_size(mapping);
unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
do {
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 00224f3a8..defb4162c 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -1110,6 +1110,9 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
return rc;
request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
+ if (request > c->sector_size - c->cleanmarker_size)
+ return -ERANGE;
+
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
if (rc) {
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 0fb7afac2..998705529 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -557,9 +557,11 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
size_check:
if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
+ int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size);
+
printk(KERN_ERR "ea_get: invalid extended attribute\n");
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
- ea_buf->xattr, ea_size, 1);
+ ea_buf->xattr, size, 1);
ea_release(inode, ea_buf);
rc = -EIO;
goto clean_up;
diff --git a/fs/libfs.c b/fs/libfs.c
index 3a6f2cb36..b635ee5ad 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -295,6 +295,18 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry)
return 0;
}
+static int simple_offset_replace(struct offset_ctx *octx, struct dentry *dentry,
+ long offset)
+{
+ int ret;
+
+ ret = mtree_store(&octx->mt, offset, dentry, GFP_KERNEL);
+ if (ret)
+ return ret;
+ offset_set(dentry, offset);
+ return 0;
+}
+
/**
* simple_offset_remove - Remove an entry from a directory's offset map
* @octx: directory offset ctx to be updated
@@ -346,12 +358,45 @@ int simple_offset_empty(struct dentry *dentry)
}
/**
+ * simple_offset_rename - handle directory offsets for rename
+ * @old_dir: parent directory of source entry
+ * @old_dentry: dentry of source entry
+ * @new_dir: parent_directory of destination entry
+ * @new_dentry: dentry of destination
+ *
+ * Caller provides appropriate serialization.
+ *
+ * User space expects the directory offset value of the replaced
+ * (new) directory entry to be unchanged after a rename.
+ *
+ * Returns zero on success, a negative errno value on failure.
+ */
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
+ struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
+ long new_offset = dentry2offset(new_dentry);
+
+ simple_offset_remove(old_ctx, old_dentry);
+
+ if (new_offset) {
+ offset_set(new_dentry, 0);
+ return simple_offset_replace(new_ctx, old_dentry, new_offset);
+ }
+ return simple_offset_add(new_ctx, old_dentry);
+}
+
+/**
* simple_offset_rename_exchange - exchange rename with directory offsets
* @old_dir: parent of dentry being moved
* @old_dentry: dentry being moved
* @new_dir: destination parent
* @new_dentry: destination dentry
*
+ * This API preserves the directory offset values. Caller provides
+ * appropriate serialization.
+ *
* Returns zero on success. Otherwise a negative errno is returned and the
* rename is rolled back.
*/
@@ -369,11 +414,11 @@ int simple_offset_rename_exchange(struct inode *old_dir,
simple_offset_remove(old_ctx, old_dentry);
simple_offset_remove(new_ctx, new_dentry);
- ret = simple_offset_add(new_ctx, old_dentry);
+ ret = simple_offset_replace(new_ctx, old_dentry, new_index);
if (ret)
goto out_restore;
- ret = simple_offset_add(old_ctx, new_dentry);
+ ret = simple_offset_replace(old_ctx, new_dentry, old_index);
if (ret) {
simple_offset_remove(new_ctx, old_dentry);
goto out_restore;
@@ -388,10 +433,8 @@ int simple_offset_rename_exchange(struct inode *old_dir,
return 0;
out_restore:
- offset_set(old_dentry, old_index);
- mtree_store(&old_ctx->mt, old_index, old_dentry, GFP_KERNEL);
- offset_set(new_dentry, new_index);
- mtree_store(&new_ctx->mt, new_index, new_dentry, GFP_KERNEL);
+ (void)simple_offset_replace(old_ctx, old_dentry, old_index);
+ (void)simple_offset_replace(new_ctx, new_dentry, new_index);
return ret;
}
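
simple_offset_rename() above keeps the replaced entry's directory offset stable by storing the surviving dentry at the target's old offset rather than allocating a fresh one. A toy userspace model of that behaviour, with a plain array standing in for the maple tree and all names invented for illustration:

#include <stdio.h>

#define MAX_ENTRIES 8

/* slot 0 is never used; slots 1..MAX_ENTRIES-1 are directory offsets */
static const char *dir[MAX_ENTRIES];

static long offset_add(const char *name)
{
	for (long off = 1; off < MAX_ENTRIES; off++) {
		if (!dir[off]) {
			dir[off] = name;
			return off;
		}
	}
	return -1;
}

static void offset_remove(long off)
{
	dir[off] = NULL;
}

/* Store `name` at a caller-chosen offset, as simple_offset_replace() does. */
static void offset_replace(long off, const char *name)
{
	dir[off] = name;
}

/* Rename `src` over `dst`: the surviving entry keeps dst's old offset. */
static long rename_over(long src_off, long dst_off)
{
	const char *name = dir[src_off];

	offset_remove(src_off);
	offset_remove(dst_off);
	offset_replace(dst_off, name);
	return dst_off;
}

int main(void)
{
	long a = offset_add("a");
	long b = offset_add("b");
	long kept = rename_over(a, b);   /* mv a b */

	printf("'%s' now sits at offset %ld (b's old offset %ld)\n",
	       dir[kept], kept, b);
	return 0;
}
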
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 267b622d9..912ad0a1d 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -163,7 +163,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
struct folio *folio;
enum netfs_how_to_modify howto;
enum netfs_folio_trace trace;
- unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
+ unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index ac505671e..bdd6cb33a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1802,9 +1802,10 @@ __nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
} else {
- /* Wait for unlink to complete */
+ /* Wait for unlink to complete - see unblock_revalidate() */
wait_var_event(&dentry->d_fsdata,
- dentry->d_fsdata != NFS_FSDATA_BLOCKED);
+ smp_load_acquire(&dentry->d_fsdata)
+ != NFS_FSDATA_BLOCKED);
parent = dget_parent(dentry);
ret = reval(d_inode(parent), dentry, flags);
dput(parent);
@@ -1817,6 +1818,29 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
+static void block_revalidate(struct dentry *dentry)
+{
+ /* old devname - just in case */
+ kfree(dentry->d_fsdata);
+
+ /* Any new reference that could lead to an open
+ * will take ->d_lock in lookup_open() -> d_lookup().
+ * Holding this lock ensures we cannot race with
+ * __nfs_lookup_revalidate() and removes any need
+ * for further barriers.
+ */
+ lockdep_assert_held(&dentry->d_lock);
+
+ dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+}
+
+static void unblock_revalidate(struct dentry *dentry)
+{
+ /* store_release ensures wait_var_event() sees the update */
+ smp_store_release(&dentry->d_fsdata, NULL);
+ wake_up_var(&dentry->d_fsdata);
+}
+
/*
* A weaker form of d_revalidate for revalidating just the d_inode(dentry)
* when we don't really care about the dentry name. This is called when a
@@ -2501,15 +2525,12 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
goto out;
}
- /* old devname */
- kfree(dentry->d_fsdata);
- dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(dentry);
spin_unlock(&dentry->d_lock);
error = nfs_safe_remove(dentry);
nfs_dentry_remove_handle_error(dir, dentry, error);
- dentry->d_fsdata = NULL;
- wake_up_var(&dentry->d_fsdata);
+ unblock_revalidate(dentry);
out:
trace_nfs_unlink_exit(dir, dentry, error);
return error;
@@ -2616,8 +2637,7 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data)
{
struct dentry *new_dentry = data->new_dentry;
- new_dentry->d_fsdata = NULL;
- wake_up_var(&new_dentry->d_fsdata);
+ unblock_revalidate(new_dentry);
}
/*
@@ -2679,11 +2699,6 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (WARN_ON(new_dentry->d_flags & DCACHE_NFSFS_RENAMED) ||
WARN_ON(new_dentry->d_fsdata == NFS_FSDATA_BLOCKED))
goto out;
- if (new_dentry->d_fsdata) {
- /* old devname */
- kfree(new_dentry->d_fsdata);
- new_dentry->d_fsdata = NULL;
- }
spin_lock(&new_dentry->d_lock);
if (d_count(new_dentry) > 2) {
@@ -2705,7 +2720,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
new_dentry = dentry;
new_inode = NULL;
} else {
- new_dentry->d_fsdata = NFS_FSDATA_BLOCKED;
+ block_revalidate(new_dentry);
must_unblock = true;
spin_unlock(&new_dentry->d_lock);
}
@@ -2717,6 +2732,8 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir,
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry,
must_unblock ? nfs_unblock_rename : NULL);
if (IS_ERR(task)) {
+ if (must_unblock)
+ unblock_revalidate(new_dentry);
error = PTR_ERR(task);
goto out;
}
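
block_revalidate() and unblock_revalidate() above pair a store made under ->d_lock with an smp_store_release() that __nfs_lookup_revalidate() observes through smp_load_acquire() before it stops waiting. A small C11 analogue of that release/acquire handshake, assuming POSIX threads; the spin loop stands in for wait_var_event() and is purely illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int blocked = 1;   /* stands in for d_fsdata == NFS_FSDATA_BLOCKED */
static int payload;              /* data published before unblocking             */

static void *waiter(void *arg)
{
	(void)arg;
	/* Acquire load: once blocked reads as 0, the store to `payload`
	 * made before the release store is guaranteed visible as well.
	 * The kernel side sleeps in wait_var_event() instead of spinning. */
	while (atomic_load_explicit(&blocked, memory_order_acquire))
		;
	printf("payload = %d\n", payload);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, waiter, NULL);
	payload = 42;                                               /* prepare data */
	atomic_store_explicit(&blocked, 0, memory_order_release);   /* unblock      */
	pthread_join(t, NULL);
	return 0;
}
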
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index ce8f8934b..569ae4ec6 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -883,7 +883,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
NFS4_MAX_UINT64,
IOMODE_READ,
false,
- GFP_KERNEL);
+ nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
@@ -907,7 +907,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
NFS4_MAX_UINT64,
IOMODE_RW,
false,
- GFP_NOFS);
+ nfs_io_gfp_mask());
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
pgio->pg_lseg = NULL;
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index d0a0956f8..cac1157be 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -1112,9 +1112,12 @@ static int nfs23_parse_monolithic(struct fs_context *fc,
ctx->acdirmax = data->acdirmax;
ctx->need_mount = false;
- memcpy(sap, &data->addr, sizeof(data->addr));
- ctx->nfs_server.addrlen = sizeof(data->addr);
- ctx->nfs_server.port = ntohs(data->addr.sin_port);
+ if (!is_remount_fc(fc)) {
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ ctx->nfs_server.addrlen = sizeof(data->addr);
+ ctx->nfs_server.port = ntohs(data->addr.sin_port);
+ }
+
if (sap->ss_family != AF_INET ||
!nfs_verify_server_address(sap))
goto out_no_address;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 06253695f..2ff0c35d8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -710,9 +710,9 @@ unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
if ((bsize & (bsize - 1)) || nrbitsp) {
unsigned char nrbits;
- for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
+ for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
;
- bsize = 1 << nrbits;
+ bsize = 1UL << nrbits;
if (nrbitsp)
*nrbitsp = nrbits;
}
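
The 1UL change above matters because a plain int shifted by 31 cannot represent a 2 GiB block size and the shift itself is undefined for a 32-bit signed int. A short, self-contained demonstration of the corrected computation:

#include <stdio.h>

int main(void)
{
	unsigned long bsize = 0x80000000UL;  /* 2 GiB: bit 31 set */
	unsigned char nrbits;

	for (nrbits = 31; nrbits && !(bsize & (1UL << nrbits)); nrbits--)
		;

	/* 1 << 31 on a 32-bit int is undefined; 1UL << 31 stays well defined. */
	printf("nrbits = %u, rounded size = %lu\n", nrbits, 1UL << nrbits);
	return 0;
}
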
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ea390db94..3a816c4a6 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4023,6 +4023,23 @@ static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
}
}
+static bool _is_same_nfs4_pathname(struct nfs4_pathname *path1,
+ struct nfs4_pathname *path2)
+{
+ int i;
+
+ if (path1->ncomponents != path2->ncomponents)
+ return false;
+ for (i = 0; i < path1->ncomponents; i++) {
+ if (path1->components[i].len != path2->components[i].len)
+ return false;
+ if (memcmp(path1->components[i].data, path2->components[i].data,
+ path1->components[i].len))
+ return false;
+ }
+ return true;
+}
+
static int _nfs4_discover_trunking(struct nfs_server *server,
struct nfs_fh *fhandle)
{
@@ -4056,9 +4073,13 @@ static int _nfs4_discover_trunking(struct nfs_server *server,
if (status)
goto out_free_3;
- for (i = 0; i < locations->nlocations; i++)
+ for (i = 0; i < locations->nlocations; i++) {
+ if (!_is_same_nfs4_pathname(&locations->fs_path,
+ &locations->locations[i].rootpath))
+ continue;
test_fs_location_for_trunking(&locations->locations[i], clp,
server);
+ }
out_free_3:
kfree(locations->fattr);
out_free_2:
@@ -5456,7 +5477,7 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
struct rpc_message *msg = &task->tk_msg;
if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS] &&
- server->caps & NFS_CAP_READ_PLUS && task->tk_status == -ENOTSUPP) {
+ task->tk_status == -ENOTSUPP) {
server->caps &= ~NFS_CAP_READ_PLUS;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
rpc_restart_call_prepare(task);
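
_is_same_nfs4_pathname() above compares paths component by component, since the components are counted strings rather than NUL-terminated ones. An equivalent userspace check with simplified, illustrative types:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct component { const char *data; unsigned int len; };
struct pathname  { unsigned int ncomponents; const struct component *components; };

static bool same_pathname(const struct pathname *a, const struct pathname *b)
{
	if (a->ncomponents != b->ncomponents)
		return false;
	for (unsigned int i = 0; i < a->ncomponents; i++) {
		if (a->components[i].len != b->components[i].len)
			return false;
		if (memcmp(a->components[i].data, b->components[i].data,
			   a->components[i].len))
			return false;
	}
	return true;
}

int main(void)
{
	const struct component x[] = { { "export", 6 }, { "home", 4 } };
	const struct component y[] = { { "export", 6 }, { "homes", 5 } };
	struct pathname p = { 2, x }, q = { 2, y };

	printf("%d %d\n", same_pathname(&p, &p), same_pathname(&p, &q));  /* 1 0 */
	return 0;
}
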
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 662e86ea3..5b452411e 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2116,6 +2116,7 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_fs_locations *locations = NULL;
+ struct nfs_fattr *fattr;
struct inode *inode;
struct page *page;
int status, result;
@@ -2125,19 +2126,16 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
(unsigned long long)server->fsid.minor,
clp->cl_hostname);
- result = 0;
page = alloc_page(GFP_KERNEL);
locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
- if (page == NULL || locations == NULL) {
- dprintk("<-- %s: no memory\n", __func__);
- goto out;
- }
- locations->fattr = nfs_alloc_fattr();
- if (locations->fattr == NULL) {
+ fattr = nfs_alloc_fattr();
+ if (page == NULL || locations == NULL || fattr == NULL) {
dprintk("<-- %s: no memory\n", __func__);
+ result = 0;
goto out;
}
+ locations->fattr = fattr;
inode = d_inode(server->super->s_root);
result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
page, cred);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index ecd18bffe..4d23bb1d0 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -48,12 +48,10 @@ enum {
NFSD_MaxBlkSize,
NFSD_MaxConnections,
NFSD_Filecache,
-#ifdef CONFIG_NFSD_V4
NFSD_Leasetime,
NFSD_Gracetime,
NFSD_RecoveryDir,
NFSD_V4EndGrace,
-#endif
NFSD_MaxReserved
};
@@ -1360,7 +1358,9 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
+#ifdef CONFIG_NFSD_LEGACY_CLIENT_TRACKING
[NFSD_RecoveryDir] = {"nfsv4recoverydir", &transaction_ops, S_IWUSR|S_IRUSR},
+#endif
[NFSD_V4EndGrace] = {"v4_end_grace", &transaction_ops, S_IWUSR|S_IRUGO},
#endif
/* last one */ {""}
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 40fecf7b2..0b75305fb 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -573,7 +573,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
_fh_update(fhp, exp, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID) {
fh_put(fhp);
- return nfserr_opnotsupp;
+ return nfserr_stale;
}
return 0;
@@ -599,7 +599,7 @@ fh_update(struct svc_fh *fhp)
_fh_update(fhp, fhp->fh_export, dentry);
if (fhp->fh_handle.fh_fileid_type == FILEID_INVALID)
- return nfserr_opnotsupp;
+ return nfserr_stale;
return 0;
out_bad:
printk(KERN_ERR "fh_update: fh not verified!\n");
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index aee40db7a..35e6c55a0 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -608,7 +608,7 @@ int nilfs_empty_dir(struct inode *inode)
kaddr = nilfs_get_folio(inode, i, &folio);
if (IS_ERR(kaddr))
- continue;
+ return 0;
de = (struct nilfs_dir_entry *)kaddr;
kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f1a01c191..8be471ce4 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -60,7 +60,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_nmembs == 0)
return 0;
- if (argv->v_size > PAGE_SIZE)
+ if ((size_t)argv->v_size > PAGE_SIZE)
return -EINVAL;
/*
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index aa5290cb7..a1b011cb4 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1652,6 +1652,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh->b_folio != bd_folio) {
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1665,6 +1666,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
if (bh == segbuf->sb_super_root) {
if (bh->b_folio != bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -1681,6 +1683,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
}
if (bd_folio) {
folio_lock(bd_folio);
+ folio_wait_writeback(bd_folio);
folio_clear_dirty_for_io(bd_folio);
folio_start_writeback(bd_folio);
folio_unlock(bd_folio);
@@ -2124,8 +2127,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
{
spin_lock(&sci->sc_state_lock);
if (!(sci->sc_state & NILFS_SEGCTOR_COMMIT)) {
- sci->sc_timer.expires = jiffies + sci->sc_interval;
- add_timer(&sci->sc_timer);
+ if (sci->sc_task) {
+ sci->sc_timer.expires = jiffies + sci->sc_interval;
+ add_timer(&sci->sc_timer);
+ }
sci->sc_state |= NILFS_SEGCTOR_COMMIT;
}
spin_unlock(&sci->sc_state_lock);
@@ -2172,19 +2177,36 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
struct nilfs_segctor_wait_request wait_req;
int err = 0;
- spin_lock(&sci->sc_state_lock);
init_wait(&wait_req.wq);
wait_req.err = 0;
atomic_set(&wait_req.done, 0);
+ init_waitqueue_entry(&wait_req.wq, current);
+
+ /*
+ * To prevent a race issue where completion notifications from the
+ * log writer thread are missed, increment the request sequence count
+ * "sc_seq_request" and insert a wait queue entry using the current
+ * sequence number into the "sc_wait_request" queue at the same time
+ * within the lock section of "sc_state_lock".
+ */
+ spin_lock(&sci->sc_state_lock);
wait_req.seq = ++sci->sc_seq_request;
+ add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
spin_unlock(&sci->sc_state_lock);
- init_waitqueue_entry(&wait_req.wq, current);
- add_wait_queue(&sci->sc_wait_request, &wait_req.wq);
- set_current_state(TASK_INTERRUPTIBLE);
wake_up(&sci->sc_wait_daemon);
for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ /*
+ * Synchronize only while the log writer thread is alive.
+ * Leave flushing out after the log writer thread exits to
+ * the cleanup work in nilfs_segctor_destroy().
+ */
+ if (!sci->sc_task)
+ break;
+
if (atomic_read(&wait_req.done)) {
err = wait_req.err;
break;
@@ -2200,7 +2222,7 @@ static int nilfs_segctor_sync(struct nilfs_sc_info *sci)
return err;
}
-static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
+static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
{
struct nilfs_segctor_wait_request *wrq, *n;
unsigned long flags;
@@ -2208,7 +2230,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
if (!atomic_read(&wrq->done) &&
- nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
+ (force || nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq))) {
wrq->err = err;
atomic_set(&wrq->done, 1);
}
@@ -2326,10 +2348,21 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
*/
static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
{
+ bool thread_is_alive;
+
spin_lock(&sci->sc_state_lock);
sci->sc_seq_accepted = sci->sc_seq_request;
+ thread_is_alive = (bool)sci->sc_task;
spin_unlock(&sci->sc_state_lock);
- del_timer_sync(&sci->sc_timer);
+
+ /*
+ * This function does not race with the log writer thread's
+ * termination. Therefore, deleting sc_timer, which should not be
+ * done after the log writer thread exits, can be done safely outside
+ * the area protected by sc_state_lock.
+ */
+ if (thread_is_alive)
+ del_timer_sync(&sci->sc_timer);
}
/**
@@ -2346,7 +2379,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
if (mode == SC_LSEG_SR) {
sci->sc_state &= ~NILFS_SEGCTOR_COMMIT;
sci->sc_seq_done = sci->sc_seq_accepted;
- nilfs_segctor_wakeup(sci, err);
+ nilfs_segctor_wakeup(sci, err, false);
sci->sc_flush_request = 0;
} else {
if (mode == SC_FLUSH_FILE)
@@ -2355,7 +2388,7 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
sci->sc_flush_request &= ~FLUSH_DAT_BIT;
/* re-enable timer if checkpoint creation was not done */
- if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) &&
+ if ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && sci->sc_task &&
time_before(jiffies, sci->sc_timer.expires))
add_timer(&sci->sc_timer);
}
@@ -2545,6 +2578,7 @@ static int nilfs_segctor_thread(void *arg)
int timeout = 0;
sci->sc_timer_task = current;
+ timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
/* start sync. */
sci->sc_task = current;
@@ -2612,6 +2646,7 @@ static int nilfs_segctor_thread(void *arg)
end_thread:
/* end sync. */
sci->sc_task = NULL;
+ timer_shutdown_sync(&sci->sc_timer);
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_kill_thread() */
spin_unlock(&sci->sc_state_lock);
return 0;
@@ -2675,7 +2710,6 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
INIT_LIST_HEAD(&sci->sc_gc_inodes);
INIT_LIST_HEAD(&sci->sc_iput_queue);
INIT_WORK(&sci->sc_iput_work, nilfs_iput_work_func);
- timer_setup(&sci->sc_timer, nilfs_construction_timeout, 0);
sci->sc_interval = HZ * NILFS_SC_DEFAULT_TIMEOUT;
sci->sc_mjcp_freq = HZ * NILFS_SC_DEFAULT_SR_FREQ;
@@ -2729,6 +2763,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
+ /*
+ * Forcibly wake up tasks waiting in nilfs_segctor_sync(), which can
+ * be called from delayed iput() via nilfs_evict_inode() and can race
+ * with the above log writer thread termination.
+ */
+ nilfs_segctor_wakeup(sci, 0, true);
+
if (flush_work(&sci->sc_iput_work))
flag = true;
@@ -2754,7 +2795,6 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
down_write(&nilfs->ns_segctor_sem);
- timer_shutdown_sync(&sci->sc_timer);
kfree(sci);
}
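
The reworked nilfs_segctor_sync() above registers the waiter on sc_wait_request inside the same sc_state_lock section that bumps sc_seq_request, so the log writer cannot complete the request before the waiter is visible. A userspace analogue of that missed-wakeup fix, using a mutex and condition variable purely for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int request_seq, done_seq;

static void *writer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	done_seq = request_seq;          /* complete whatever was requested */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Fix: publish the request and become a waiter under the same lock,
	 * then recheck the condition before sleeping; the writer's completion
	 * can no longer slip in between those two steps unnoticed. */
	pthread_mutex_lock(&lock);
	request_seq++;
	pthread_create(&t, NULL, writer, NULL);
	while (done_seq < request_seq)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	printf("request %d completed\n", done_seq);
	return 0;
}
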
diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c
index 263635199..1937e8e61 100644
--- a/fs/ntfs3/dir.c
+++ b/fs/ntfs3/dir.c
@@ -475,6 +475,7 @@ static int ntfs_readdir(struct file *file, struct dir_context *ctx)
vbo = (u64)bit << index_bits;
if (vbo >= i_size) {
ntfs_inode_err(dir, "Looks like your dir is corrupt");
+ ctx->pos = eod;
err = -EINVAL;
goto out;
}
diff --git a/fs/ntfs3/fslog.c b/fs/ntfs3/fslog.c
index 855519713..4085fe30b 100644
--- a/fs/ntfs3/fslog.c
+++ b/fs/ntfs3/fslog.c
@@ -1184,7 +1184,8 @@ out:
static int log_read_rst(struct ntfs_log *log, bool first,
struct restart_info *info)
{
- u32 skip, vbo;
+ u32 skip;
+ u64 vbo;
struct RESTART_HDR *r_page = NULL;
/* Determine which restart area we are looking for. */
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index daabaad63..14284f0ed 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -1533,6 +1533,11 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
goto out1;
}
+ if (data_size <= le64_to_cpu(alloc->nres.data_size)) {
+ /* Reuse index. */
+ goto out;
+ }
+
/* Increase allocation. */
err = attr_set_size(ni, ATTR_ALLOC, in->name, in->name_len,
&indx->alloc_run, data_size, &data_size, true,
@@ -1546,6 +1551,7 @@ static int indx_add_allocate(struct ntfs_index *indx, struct ntfs_inode *ni,
if (in->name == I30_NAME)
i_size_write(&ni->vfs_inode, data_size);
+out:
*vbn = bit << indx->idx2vbn_bits;
return 0;
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index d273eda1c..90d4e9a98 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -37,7 +37,7 @@ static struct inode *ntfs_read_mft(struct inode *inode,
bool is_dir;
unsigned long ino = inode->i_ino;
u32 rp_fa = 0, asize, t32;
- u16 roff, rsize, names = 0;
+ u16 roff, rsize, names = 0, links = 0;
const struct ATTR_FILE_NAME *fname = NULL;
const struct INDEX_ROOT *root;
struct REPARSE_DATA_BUFFER rp; // 0x18 bytes
@@ -200,11 +200,12 @@ next_attr:
rsize < SIZEOF_ATTRIBUTE_FILENAME)
goto out;
+ names += 1;
fname = Add2Ptr(attr, roff);
if (fname->type == FILE_NAME_DOS)
goto next_attr;
- names += 1;
+ links += 1;
if (name && name->len == fname->name_len &&
!ntfs_cmp_names_cpu(name, (struct le_str *)&fname->name_len,
NULL, false))
@@ -429,7 +430,7 @@ end_enum:
ni->mi.dirty = true;
}
- set_nlink(inode, names);
+ set_nlink(inode, links);
if (S_ISDIR(mode)) {
ni->std_fa |= FILE_ATTRIBUTE_DIRECTORY;
@@ -576,13 +577,18 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
clear_buffer_uptodate(bh);
if (is_resident(ni)) {
- ni_lock(ni);
- err = attr_data_read_resident(ni, &folio->page);
- ni_unlock(ni);
-
- if (!err)
- set_buffer_uptodate(bh);
+ bh->b_blocknr = RESIDENT_LCN;
bh->b_size = block_size;
+ if (!folio) {
+ err = 0;
+ } else {
+ ni_lock(ni);
+ err = attr_data_read_resident(ni, &folio->page);
+ ni_unlock(ni);
+
+ if (!err)
+ set_buffer_uptodate(bh);
+ }
return err;
}
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 9c7478150..3d6143c7a 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -59,7 +59,7 @@ struct GUID {
struct cpu_str {
u8 len;
u8 unused;
- u16 name[10];
+ u16 name[];
};
struct le_str {
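
Turning name[10] into a flexible array member means every cpu_str must now be allocated with its trailing name storage. A standalone C sketch of that allocation pattern, with types simplified from the ntfs3 ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct cpu_str {
	uint8_t len;
	uint8_t unused;
	uint16_t name[];          /* flexible array member, sized per string */
};

static struct cpu_str *cpu_str_alloc(const char *ascii)
{
	size_t n = strlen(ascii);
	struct cpu_str *s = malloc(sizeof(*s) + n * sizeof(s->name[0]));

	if (!s)
		return NULL;
	s->len = (uint8_t)n;
	s->unused = 0;
	for (size_t i = 0; i < n; i++)
		s->name[i] = (uint16_t)ascii[i];
	return s;
}

int main(void)
{
	struct cpu_str *s = cpu_str_alloc("example");

	if (s) {
		printf("len=%u first=%c\n", s->len, (char)s->name[0]);
		free(s);
	}
	return 0;
}
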
diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
index 6aa3a9d44..6c76503ed 100644
--- a/fs/ntfs3/record.c
+++ b/fs/ntfs3/record.c
@@ -534,16 +534,9 @@ bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi,
if (aoff + asize > used)
return false;
- if (ni && is_attr_indexed(attr)) {
+ if (ni && is_attr_indexed(attr) && attr->type == ATTR_NAME) {
u16 links = le16_to_cpu(ni->mi.mrec->hard_links);
- struct ATTR_FILE_NAME *fname =
- attr->type != ATTR_NAME ?
- NULL :
- resident_data_ex(attr,
- SIZEOF_ATTRIBUTE_FILENAME);
- if (fname && fname->type == FILE_NAME_DOS) {
- /* Do not decrease links count deleting DOS name. */
- } else if (!links) {
+ if (!links) {
/* minor error. Not critical. */
} else {
ni->mi.mrec->hard_links = cpu_to_le16(links - 1);
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index b26d95a8d..0ed534f75 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -1861,8 +1861,6 @@ static int __init init_ntfs_fs(void)
{
int err;
- pr_info("ntfs3: Max link count %u\n", NTFS_LINK_MAX);
-
if (IS_ENABLED(CONFIG_NTFS3_FS_POSIX_ACL))
pr_info("ntfs3: Enabled Linux POSIX ACLs support\n");
if (IS_ENABLED(CONFIG_NTFS3_64BIT_CLUSTER))
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 0da8e7bd3..ccc57038a 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1936,6 +1936,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
inode_lock(inode);
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
+ inode_dio_wait(inode);
/*
* This prevents concurrent writes on other nodes
*/
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 604fea3a2..86807086b 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -479,12 +479,6 @@ bail:
return status;
}
-
-struct ocfs2_triggers {
- struct jbd2_buffer_trigger_type ot_triggers;
- int ot_offset;
-};
-
static inline struct ocfs2_triggers *to_ocfs2_trigger(struct jbd2_buffer_trigger_type *triggers)
{
return container_of(triggers, struct ocfs2_triggers, ot_triggers);
@@ -548,85 +542,76 @@ static void ocfs2_db_frozen_trigger(struct jbd2_buffer_trigger_type *triggers,
static void ocfs2_abort_trigger(struct jbd2_buffer_trigger_type *triggers,
struct buffer_head *bh)
{
+ struct ocfs2_triggers *ot = to_ocfs2_trigger(triggers);
+
mlog(ML_ERROR,
"ocfs2_abort_trigger called by JBD2. bh = 0x%lx, "
"bh->b_blocknr = %llu\n",
(unsigned long)bh,
(unsigned long long)bh->b_blocknr);
- ocfs2_error(bh->b_assoc_map->host->i_sb,
+ ocfs2_error(ot->sb,
"JBD2 has aborted our journal, ocfs2 cannot continue\n");
}
-static struct ocfs2_triggers di_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dinode, i_check),
-};
-
-static struct ocfs2_triggers eb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_extent_block, h_check),
-};
-
-static struct ocfs2_triggers rb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_refcount_block, rf_check),
-};
-
-static struct ocfs2_triggers gd_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_group_desc, bg_check),
-};
-
-static struct ocfs2_triggers db_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_db_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+static void ocfs2_setup_csum_triggers(struct super_block *sb,
+ enum ocfs2_journal_trigger_type type,
+ struct ocfs2_triggers *ot)
+{
+ BUG_ON(type >= OCFS2_JOURNAL_TRIGGER_COUNT);
-static struct ocfs2_triggers xb_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_xattr_block, xb_check),
-};
+ switch (type) {
+ case OCFS2_JTR_DI:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dinode, i_check);
+ break;
+ case OCFS2_JTR_EB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_extent_block, h_check);
+ break;
+ case OCFS2_JTR_RB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_refcount_block, rf_check);
+ break;
+ case OCFS2_JTR_GD:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_group_desc, bg_check);
+ break;
+ case OCFS2_JTR_DB:
+ ot->ot_triggers.t_frozen = ocfs2_db_frozen_trigger;
+ break;
+ case OCFS2_JTR_XB:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_xattr_block, xb_check);
+ break;
+ case OCFS2_JTR_DQ:
+ ot->ot_triggers.t_frozen = ocfs2_dq_frozen_trigger;
+ break;
+ case OCFS2_JTR_DR:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check);
+ break;
+ case OCFS2_JTR_DL:
+ ot->ot_triggers.t_frozen = ocfs2_frozen_trigger;
+ ot->ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check);
+ break;
+ case OCFS2_JTR_NONE:
+ /* To make compiler happy... */
+ return;
+ }
-static struct ocfs2_triggers dq_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_dq_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
-};
+ ot->ot_triggers.t_abort = ocfs2_abort_trigger;
+ ot->sb = sb;
+}
-static struct ocfs2_triggers dr_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_root_block, dr_check),
-};
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[])
+{
+ enum ocfs2_journal_trigger_type type;
-static struct ocfs2_triggers dl_triggers = {
- .ot_triggers = {
- .t_frozen = ocfs2_frozen_trigger,
- .t_abort = ocfs2_abort_trigger,
- },
- .ot_offset = offsetof(struct ocfs2_dx_leaf, dl_check),
-};
+ for (type = OCFS2_JTR_DI; type < OCFS2_JOURNAL_TRIGGER_COUNT; type++)
+ ocfs2_setup_csum_triggers(sb, type, &triggers[type]);
+}
static int __ocfs2_journal_access(handle_t *handle,
struct ocfs2_caching_info *ci,
@@ -708,56 +693,91 @@ static int __ocfs2_journal_access(handle_t *handle,
int ocfs2_journal_access_di(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &di_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DI],
+ type);
}
int ocfs2_journal_access_eb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &eb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_EB],
+ type);
}
int ocfs2_journal_access_rb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &rb_triggers,
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_RB],
type);
}
int ocfs2_journal_access_gd(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &gd_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_GD],
+ type);
}
int ocfs2_journal_access_db(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &db_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DB],
+ type);
}
int ocfs2_journal_access_xb(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &xb_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_XB],
+ type);
}
int ocfs2_journal_access_dq(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dq_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DQ],
+ type);
}
int ocfs2_journal_access_dr(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dr_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DR],
+ type);
}
int ocfs2_journal_access_dl(handle_t *handle, struct ocfs2_caching_info *ci,
struct buffer_head *bh, int type)
{
- return __ocfs2_journal_access(handle, ci, bh, &dl_triggers, type);
+ struct ocfs2_super *osb = OCFS2_SB(ocfs2_metadata_cache_get_super(ci));
+
+ return __ocfs2_journal_access(handle, ci, bh,
+ &osb->s_journal_triggers[OCFS2_JTR_DL],
+ type);
}
int ocfs2_journal_access(handle_t *handle, struct ocfs2_caching_info *ci,
@@ -778,13 +798,15 @@ void ocfs2_journal_dirty(handle_t *handle, struct buffer_head *bh)
if (!is_handle_aborted(handle)) {
journal_t *journal = handle->h_transaction->t_journal;
- mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed. "
- "Aborting transaction and journal.\n");
+ mlog(ML_ERROR, "jbd2_journal_dirty_metadata failed: "
+ "handle type %u started at line %u, credits %u/%u "
+ "errcode %d. Aborting transaction and journal.\n",
+ handle->h_type, handle->h_line_no,
+ handle->h_requested_credits,
+ jbd2_handle_buffer_credits(handle), status);
handle->h_err = status;
jbd2_journal_abort_handle(handle);
jbd2_journal_abort(journal, status);
- ocfs2_abort(bh->b_assoc_map->host->i_sb,
- "Journal already aborted.\n");
}
}
}
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index c803c10dd..33aeaaa05 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -863,14 +863,8 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
numfound = bitoff = startoff = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
- if (bitoff == left) {
- /* mlog(0, "bitoff (%d) == left", bitoff); */
- break;
- }
- /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
- "numfound = %d\n", bitoff, startoff, numfound);*/
-
+ while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) <
+ left) {
/* Ok, we found a zero bit... is it contig. or do we
* start over?*/
if (bitoff == startoff) {
@@ -976,9 +970,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
start = count = 0;
left = le32_to_cpu(alloc->id1.bitmap1.i_total);
- while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
- != -1) {
- if ((bit_off < left) && (bit_off == start)) {
+ while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
+ left) {
+ if (bit_off == start) {
count++;
start++;
continue;
@@ -1002,8 +996,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
goto bail;
}
}
- if (bit_off >= left)
- break;
+
count = 1;
start = bit_off + 1;
}
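
Both ocfs2 loops above rely on ocfs2_find_next_zero_bit() returning the bitmap length, not -1, when no clear bit remains, which makes "< left" the natural termination test. A small userspace analogue of that convention; next_zero_bit() here is invented for illustration:

#include <stdio.h>

/* Return the index of the next 0 bit at or after `start`,
 * or `size` if every remaining bit is set. */
static unsigned int next_zero_bit(const unsigned char *bitmap,
				  unsigned int size, unsigned int start)
{
	for (unsigned int i = start; i < size; i++)
		if (!(bitmap[i / 8] & (1u << (i % 8))))
			return i;
	return size;
}

int main(void)
{
	unsigned char bitmap[2] = { 0xff, 0xfb };   /* only bit 10 is clear */
	unsigned int size = 16, start = 0, bit;

	while ((bit = next_zero_bit(bitmap, size, start)) < size) {
		printf("free bit at %u\n", bit);
		start = bit + 1;
	}
	return 0;
}
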
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 9221a33f9..4d1ea8703 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
fe->i_last_eb_blk = 0;
strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
- ktime_get_real_ts64(&ts);
+ ktime_get_coarse_real_ts64(&ts);
fe->i_atime = fe->i_ctime = fe->i_mtime =
cpu_to_le64(ts.tv_sec);
fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
@@ -797,6 +797,7 @@ static int ocfs2_link(struct dentry *old_dentry,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
err = ocfs2_add_entry(handle, dentry, inode,
@@ -993,6 +994,7 @@ static int ocfs2_unlink(struct inode *dir,
drop_nlink(inode);
drop_nlink(inode);
ocfs2_set_links_count(fe, inode->i_nlink);
+ ocfs2_update_inode_fsync_trans(handle, inode, 0);
ocfs2_journal_dirty(handle, fe_bh);
inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index a503c553b..8fe826143 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -284,6 +284,30 @@ enum ocfs2_mount_options
#define OCFS2_OSB_ERROR_FS 0x0004
#define OCFS2_DEFAULT_ATIME_QUANTUM 60
+struct ocfs2_triggers {
+ struct jbd2_buffer_trigger_type ot_triggers;
+ int ot_offset;
+ struct super_block *sb;
+};
+
+enum ocfs2_journal_trigger_type {
+ OCFS2_JTR_DI,
+ OCFS2_JTR_EB,
+ OCFS2_JTR_RB,
+ OCFS2_JTR_GD,
+ OCFS2_JTR_DB,
+ OCFS2_JTR_XB,
+ OCFS2_JTR_DQ,
+ OCFS2_JTR_DR,
+ OCFS2_JTR_DL,
+ OCFS2_JTR_NONE /* This must be the last entry */
+};
+
+#define OCFS2_JOURNAL_TRIGGER_COUNT OCFS2_JTR_NONE
+
+void ocfs2_initialize_journal_triggers(struct super_block *sb,
+ struct ocfs2_triggers triggers[]);
+
struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
@@ -351,6 +375,9 @@ struct ocfs2_super
struct ocfs2_journal *journal;
unsigned long osb_commit_interval;
+ /* Journal triggers for checksum */
+ struct ocfs2_triggers s_journal_triggers[OCFS2_JOURNAL_TRIGGER_COUNT];
+
struct delayed_work la_enable_wq;
/*
diff --git a/fs/ocfs2/reservations.c b/fs/ocfs2/reservations.c
index a9d1296d7..1fe61974d 100644
--- a/fs/ocfs2/reservations.c
+++ b/fs/ocfs2/reservations.c
@@ -414,7 +414,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,
start = search_start;
while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
- start)) != -1) {
+ start)) < resmap->m_bitmap_len) {
/* Search reached end of the region */
if (offset >= (search_start + search_len))
break;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 166c8918c..961998415 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1290,10 +1290,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
found = start = best_offset = best_size = 0;
bitmap = bg->bg_bitmap;
- while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
- if (offset == total_bits)
- break;
-
+ while ((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) <
+ total_bits) {
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
/* We found a zero, but we can't use it as it
* hasn't been put to disk yet! */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 8aabaed2c..afee70125 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1075,9 +1075,11 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
debugfs_create_file("fs_state", S_IFREG|S_IRUSR, osb->osb_debug_root,
osb, &ocfs2_osb_debug_fops);
- if (ocfs2_meta_ecc(osb))
+ if (ocfs2_meta_ecc(osb)) {
+ ocfs2_initialize_journal_triggers(sb, osb->s_journal_triggers);
ocfs2_blockcheck_stats_debugfs_install( &osb->osb_ecc_stats,
osb->osb_debug_root);
+ }
status = ocfs2_mount_volume(sb);
if (status < 0)
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 4a0779e3e..a7b527ea5 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -355,10 +355,10 @@ static struct inode *openprom_iget(struct super_block *sb, ino_t ino)
return inode;
}
-static int openprom_remount(struct super_block *sb, int *flags, char *data)
+static int openpromfs_reconfigure(struct fs_context *fc)
{
- sync_filesystem(sb);
- *flags |= SB_NOATIME;
+ sync_filesystem(fc->root->d_sb);
+ fc->sb_flags |= SB_NOATIME;
return 0;
}
@@ -366,7 +366,6 @@ static const struct super_operations openprom_sops = {
.alloc_inode = openprom_alloc_inode,
.free_inode = openprom_free_inode,
.statfs = simple_statfs,
- .remount_fs = openprom_remount,
};
static int openprom_fill_super(struct super_block *s, struct fs_context *fc)
@@ -415,6 +414,7 @@ static int openpromfs_get_tree(struct fs_context *fc)
static const struct fs_context_operations openpromfs_context_ops = {
.get_tree = openpromfs_get_tree,
+ .reconfigure = openpromfs_reconfigure,
};
static int openpromfs_init_fs_context(struct fs_context *fc)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 0f8b4a719..02d89a285 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -327,9 +327,6 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
struct dentry *newdentry;
int err;
- if (!attr->hardlink && !IS_POSIXACL(udir))
- attr->mode &= ~current_umask();
-
inode_lock_nested(udir, I_MUTEX_PARENT);
newdentry = ovl_create_real(ofs, udir,
ovl_lookup_upper(ofs, dentry->d_name.name,
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 063409069..5868cb222 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -181,6 +181,10 @@ static int ovl_check_encode_origin(struct dentry *dentry)
struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
bool decodable = ofs->config.nfs_export;
+ /* No upper layer? */
+ if (!ovl_upper_mnt(ofs))
+ return 1;
+
/* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable)
return 1;
@@ -209,7 +213,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && ovl_upper_mnt(ofs) && decodable)
+ if (d_is_dir(dentry) && decodable)
return ovl_connect_layer(dentry);
/* Lower file handle for indexed and non-upper dir/non-dir */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18550c071..72a1acd03 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
- seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+ seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 6e72e5ad4..f4b1c8b42 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -74,7 +74,18 @@ out:
return 0;
}
-static int proc_fdinfo_access_allowed(struct inode *inode)
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show, inode);
+}
+
+/**
+ * Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
+ * that the current task has PTRACE_MODE_READ in addition to the normal
+ * POSIX-like checks.
+ */
+static int proc_fdinfo_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
{
bool allowed = false;
struct task_struct *task = get_proc_task(inode);
@@ -88,18 +99,13 @@ static int proc_fdinfo_access_allowed(struct inode *inode)
if (!allowed)
return -EACCES;
- return 0;
+ return generic_permission(idmap, inode, mask);
}
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return single_open(file, seq_show, inode);
-}
+static const struct inode_operations proc_fdinfo_file_inode_operations = {
+ .permission = proc_fdinfo_permission,
+ .setattr = proc_setattr,
+};
static const struct file_operations proc_fdinfo_file_operations = {
.open = seq_fdinfo_open,
@@ -388,6 +394,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
ei = PROC_I(inode);
ei->fd = data->fd;
+ inode->i_op = &proc_fdinfo_file_inode_operations;
+
inode->i_fop = &proc_fdinfo_file_operations;
tid_fd_update_inode(task, inode, 0);
@@ -407,23 +415,13 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
proc_fdinfo_instantiate);
}
-static int proc_open_fdinfo(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return 0;
-}
-
const struct inode_operations proc_fdinfo_inode_operations = {
.lookup = proc_lookupfdinfo,
+ .permission = proc_fdinfo_permission,
.setattr = proc_setattr,
};
const struct file_operations proc_fdinfo_operations = {
- .open = proc_open_fdinfo,
.read = generic_read_dir,
.iterate_shared = proc_readfdinfo,
.llseek = generic_file_llseek,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 102f48668..a097c9a65 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -965,12 +965,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
break;
/* Case 1 and 2 above */
- if (vma->vm_start >= last_vma_end)
+ if (vma->vm_start >= last_vma_end) {
+ smap_gather_stats(vma, &mss, 0);
+ last_vma_end = vma->vm_end;
continue;
+ }
/* Case 4 above */
- if (vma->vm_end > last_vma_end)
+ if (vma->vm_end > last_vma_end) {
smap_gather_stats(vma, &mss, last_vma_end);
+ last_vma_end = vma->vm_end;
+ }
}
} for_each_vma(vmi, vma);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 1fb213f37..d06607a1f 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
/* leave now if filled buffer already */
if (!iov_iter_count(iter))
return acc;
+
+ cond_resched();
}
list_for_each_entry(m, &vmcore_list, list) {
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 39277c371..d8ede6826 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -134,7 +134,7 @@ module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(enable_gcm_256, bool, 0644);
-MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
+MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
@@ -1277,7 +1277,7 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
struct cifsFileInfo *smb_file_src = src_file->private_data;
struct cifsFileInfo *smb_file_target = dst_file->private_data;
struct cifs_tcon *target_tcon, *src_tcon;
- unsigned long long destend, fstart, fend, new_size;
+ unsigned long long destend, fstart, fend, old_size, new_size;
unsigned int xid;
int rc;
@@ -1342,6 +1342,9 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
if (rc)
goto unlock;
+ if (fend > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = fend + 1;
+ old_size = target_cifsi->netfs.remote_i_size;
/* Discard all the folios that overlap the destination region. */
cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
@@ -1354,12 +1357,13 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
if (target_tcon->ses->server->ops->duplicate_extents) {
rc = target_tcon->ses->server->ops->duplicate_extents(xid,
smb_file_src, smb_file_target, off, len, destoff);
- if (rc == 0 && new_size > i_size_read(target_inode)) {
+ if (rc == 0 && new_size > old_size) {
truncate_setsize(target_inode, new_size);
- netfs_resize_file(&target_cifsi->netfs, new_size, true);
fscache_resize_cookie(cifs_inode_cookie(target_inode),
new_size);
}
+ if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = new_size;
}
/* force revalidate of size and timestamps of target file now
@@ -1451,6 +1455,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
if (rc)
goto unlock;
+ if (fend > target_cifsi->netfs.zero_point)
+ target_cifsi->netfs.zero_point = fend + 1;
/* Discard all the folios that overlap the destination region. */
truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index c46d418c1..a2072ab9e 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2574,7 +2574,7 @@ typedef struct {
struct win_dev {
- unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO*/
+ unsigned char type[8]; /* IntxCHR or IntxBLK or LnxFIFO or LnxSOCK */
__le64 major;
__le64 minor;
} __attribute__((packed));
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 60afab5c8..d0e695913 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -591,6 +591,10 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
fattr->cf_rdev = MKDEV(mjr, mnr);
}
+ } else if (memcmp("LnxSOCK", pbuf, 8) == 0) {
+ cifs_dbg(FYI, "Socket\n");
+ fattr->cf_mode |= S_IFSOCK;
+ fattr->cf_dtype = DT_SOCK;
} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
cifs_dbg(FYI, "Symlink\n");
fattr->cf_mode |= S_IFLNK;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 28f0b7d19..d35c45f3a 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -2028,6 +2028,7 @@ smb2_duplicate_extents(const unsigned int xid,
* size will be queried on next revalidate, but it is important
* to make sure that file's cached size is updated immediately
*/
+ netfs_resize_file(netfs_inode(inode), dest_off + len, true);
cifs_setsize(inode, dest_off + len);
}
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
@@ -4995,6 +4996,9 @@ static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
pdev.major = cpu_to_le64(MAJOR(dev));
pdev.minor = cpu_to_le64(MINOR(dev));
break;
+ case S_IFSOCK:
+ strscpy(pdev.type, "LnxSOCK");
+ break;
case S_IFIFO:
strscpy(pdev.type, "LnxFIFO");
break;
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 02135a605..1476c445c 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -216,8 +216,8 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
}
tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
if (!tcon) {
- cifs_put_smb_ses(ses);
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_put_smb_ses(ses);
return NULL;
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index a2f0a2edc..e0a6b7580 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -165,8 +165,12 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
share->path = kstrndup(ksmbd_share_config_path(resp), path_len,
GFP_KERNEL);
- if (share->path)
+ if (share->path) {
share->path_sz = strlen(share->path);
+ while (share->path_sz > 1 &&
+ share->path[share->path_sz - 1] == '/')
+ share->path[--share->path_sz] = '\0';
+ }
share->create_mask = resp->create_mask;
share->directory_mask = resp->directory_mask;
share->force_create_mode = resp->force_create_mode;
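
The share-config fix above trims trailing '/' characters from the exported path while never shortening it below one character, so a bare "/" share survives. The same trimming as a standalone C helper:

#include <stdio.h>
#include <string.h>

/* Strip trailing slashes in place, but keep at least one character so a
 * root share of "/" is left untouched, mirroring the bound in the hunk. */
static size_t strip_trailing_slashes(char *path)
{
	size_t len = strlen(path);

	while (len > 1 && path[len - 1] == '/')
		path[--len] = '\0';
	return len;
}

int main(void)
{
	char a[] = "/srv/share///";
	char b[] = "/";

	strip_trailing_slashes(a);
	strip_trailing_slashes(b);
	printf("'%s' '%s'\n", a, b);   /* prints '/srv/share' '/' */
	return 0;
}
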
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index b9d9116fc..a8f52c4eb 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -610,19 +610,24 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level)
if (opinfo->op_state == OPLOCK_CLOSING)
return -ENOENT;
else if (opinfo->level <= req_op_level) {
- if (opinfo->is_lease &&
- opinfo->o_lease->state !=
- (SMB2_LEASE_HANDLE_CACHING_LE |
- SMB2_LEASE_READ_CACHING_LE))
+ if (opinfo->is_lease == false)
+ return 1;
+
+ if (opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE))
return 1;
}
}
if (opinfo->level <= req_op_level) {
- if (opinfo->is_lease &&
- opinfo->o_lease->state !=
- (SMB2_LEASE_HANDLE_CACHING_LE |
- SMB2_LEASE_READ_CACHING_LE)) {
+ if (opinfo->is_lease == false) {
+ wake_up_oplock_break(opinfo);
+ return 1;
+ }
+ if (opinfo->o_lease->state !=
+ (SMB2_LEASE_HANDLE_CACHING_LE |
+ SMB2_LEASE_READ_CACHING_LE)) {
wake_up_oplock_break(opinfo);
return 1;
}
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index b6c5a8ea3..e7e078917 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -630,6 +630,12 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
+ if (*name == '\\') {
+ pr_err("not allow directory name included leading slash\n");
+ kfree(name);
+ return ERR_PTR(-EINVAL);
+ }
+
ksmbd_conv_path_to_unix(name);
ksmbd_strip_last_slash(name);
return name;
@@ -2361,7 +2367,8 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
if (rc > 0) {
rc = ksmbd_vfs_remove_xattr(idmap,
path,
- attr_name);
+ attr_name,
+ get_write);
if (rc < 0) {
ksmbd_debug(SMB,
@@ -2376,7 +2383,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
} else {
rc = ksmbd_vfs_setxattr(idmap, path, attr_name, value,
le16_to_cpu(eabuf->EaValueLength),
- 0, true);
+ 0, get_write);
if (rc < 0) {
ksmbd_debug(SMB,
"ksmbd_vfs_setxattr is failed(%d)\n",
@@ -2468,7 +2475,7 @@ static int smb2_remove_smb_xattrs(const struct path *path)
!strncmp(&name[XATTR_USER_PREFIX_LEN], STREAM_PREFIX,
STREAM_PREFIX_LEN)) {
err = ksmbd_vfs_remove_xattr(idmap, path,
- name);
+ name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n",
name);
@@ -2842,20 +2849,11 @@ int smb2_open(struct ksmbd_work *work)
}
if (req->NameLength) {
- if ((req->CreateOptions & FILE_DIRECTORY_FILE_LE) &&
- *(char *)req->Buffer == '\\') {
- pr_err("not allow directory name included leading slash\n");
- rc = -EINVAL;
- goto err_out2;
- }
-
name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
le16_to_cpu(req->NameLength),
work->conn->local_nls);
if (IS_ERR(name)) {
rc = PTR_ERR(name);
- if (rc != -ENOMEM)
- rc = -ENOENT;
name = NULL;
goto err_out2;
}
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 51b1b0bed..9e859ba01 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -1058,16 +1058,21 @@ int ksmbd_vfs_fqar_lseek(struct ksmbd_file *fp, loff_t start, loff_t length,
}
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name)
+ const struct path *path, char *attr_name,
+ bool get_write)
{
int err;
- err = mnt_want_write(path->mnt);
- if (err)
- return err;
+ if (get_write == true) {
+ err = mnt_want_write(path->mnt);
+ if (err)
+ return err;
+ }
err = vfs_removexattr(idmap, path->dentry, attr_name);
- mnt_drop_write(path->mnt);
+
+ if (get_write == true)
+ mnt_drop_write(path->mnt);
return err;
}
@@ -1380,7 +1385,7 @@ int ksmbd_vfs_remove_sd_xattrs(struct mnt_idmap *idmap, const struct path *path)
ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
if (!strncmp(name, XATTR_NAME_SD, XATTR_NAME_SD_LEN)) {
- err = ksmbd_vfs_remove_xattr(idmap, path, name);
+ err = ksmbd_vfs_remove_xattr(idmap, path, name, true);
if (err)
ksmbd_debug(SMB, "remove xattr failed : %s\n", name);
}
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index cfe1c8092..cb76f4b5b 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -114,7 +114,8 @@ int ksmbd_vfs_setxattr(struct mnt_idmap *idmap,
int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
- const struct path *path, char *attr_name);
+ const struct path *path, char *attr_name,
+ bool get_write);
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
unsigned int flags, struct path *parent_path,
struct path *path, bool caseless);
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 6cb599cd2..8b2e37c87 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -254,7 +254,8 @@ static void __ksmbd_inode_close(struct ksmbd_file *fp)
ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
&filp->f_path,
- fp->stream.name);
+ fp->stream.name,
+ true);
if (err)
pr_err("remove xattr failed : %s\n",
fp->stream.name);
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index a878cea70..129d0f54b 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -50,8 +50,12 @@ static struct eventfs_root_inode *get_root_inode(struct eventfs_inode *ei)
/* Just try to make something consistent and unique */
static int eventfs_dir_ino(struct eventfs_inode *ei)
{
- if (!ei->ino)
+ if (!ei->ino) {
ei->ino = get_next_ino();
+ /* Must not have the file inode number */
+ if (ei->ino == EVENTFS_FILE_INODE_INO)
+ ei->ino = get_next_ino();
+ }
return ei->ino;
}
@@ -301,33 +305,60 @@ static const struct file_operations eventfs_file_operations = {
.llseek = generic_file_llseek,
};
-/*
- * On a remount of tracefs, if UID or GID options are set, then
- * the mount point inode permissions should be used.
- * Reset the saved permission flags appropriately.
- */
-void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
+static void eventfs_set_attrs(struct eventfs_inode *ei, bool update_uid, kuid_t uid,
+ bool update_gid, kgid_t gid, int level)
{
- struct eventfs_inode *ei = ti->private;
+ struct eventfs_inode *ei_child;
- if (!ei)
+ /* Update events/<system>/<event> */
+ if (WARN_ON_ONCE(level > 3))
return;
- if (update_uid)
+ if (update_uid) {
ei->attr.mode &= ~EVENTFS_SAVE_UID;
+ ei->attr.uid = uid;
+ }
- if (update_gid)
+ if (update_gid) {
ei->attr.mode &= ~EVENTFS_SAVE_GID;
+ ei->attr.gid = gid;
+ }
+
+ list_for_each_entry(ei_child, &ei->children, list) {
+ eventfs_set_attrs(ei_child, update_uid, uid, update_gid, gid, level + 1);
+ }
if (!ei->entry_attrs)
return;
for (int i = 0; i < ei->nr_entries; i++) {
- if (update_uid)
+ if (update_uid) {
ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID;
- if (update_gid)
+ ei->entry_attrs[i].uid = uid;
+ }
+ if (update_gid) {
ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID;
+ ei->entry_attrs[i].gid = gid;
+ }
}
+
+}
+
+/*
+ * On a remount of tracefs, if UID or GID options are set, then
+ * the mount point inode permissions should be used.
+ * Reset the saved permission flags appropriately.
+ */
+void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid)
+{
+ struct eventfs_inode *ei = ti->private;
+
+ /* Only the events directory does the updates */
+ if (!ei || !ei->is_events || ei->is_freed)
+ return;
+
+ eventfs_set_attrs(ei, update_uid, ti->vfs_inode.i_uid,
+ update_gid, ti->vfs_inode.i_gid, 0);
}
/* Return the evenfs_inode of the "events" directory */
@@ -345,10 +376,9 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
* If the ei is being freed, the ownership of the children
* doesn't matter.
*/
- if (ei->is_freed) {
- ei = NULL;
- break;
- }
+ if (ei->is_freed)
+ return NULL;
+
// Walk upwards until you find the events inode
} while (!ei->is_events);
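
The eventfs_set_attrs() hunk above propagates a remounted uid/gid from the events directory down through events/<system>/<event>, bounding the recursion with the level > 3 warning. A minimal userspace sketch of that depth-capped walk, using a hypothetical struct node tree rather than eventfs_inode:

#include <stdio.h>

/* Toy analog of eventfs_set_attrs(): push an attribute down a small tree,
 * refusing to recurse deeper than the events/<system>/<event> layout allows. */
struct node {
	const char *name;
	struct node *children;
	int nr_children;
	unsigned int uid;
};

static void set_uid(struct node *n, unsigned int uid, int level)
{
	if (level > 3) {            /* mirrors the WARN_ON_ONCE(level > 3) guard */
		fprintf(stderr, "unexpected depth at %s\n", n->name);
		return;
	}
	n->uid = uid;
	for (int i = 0; i < n->nr_children; i++)
		set_uid(&n->children[i], uid, level + 1);
}

int main(void)
{
	struct node event  = { .name = "sched_switch" };
	struct node system = { .name = "sched",  .children = &event,  .nr_children = 1 };
	struct node events = { .name = "events", .children = &system, .nr_children = 1 };

	set_uid(&events, 1000, 0);
	printf("%s uid=%u, %s uid=%u\n", events.name, events.uid, event.name, event.uid);
	return 0;
}
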
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 417c840e6..d8c60c2b7 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -439,10 +439,26 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
+static int tracefs_drop_inode(struct inode *inode)
+{
+ struct tracefs_inode *ti = get_tracefs(inode);
+
+ /*
+ * This inode is being freed and cannot be used for
+ * eventfs. Clear the flag so that it doesn't call into
+ * eventfs during the remount flag updates. The eventfs_inode
+ * gets freed after an RCU cycle, so the content will still
+ * be safe if the iteration is going on now.
+ */
+ ti->flags &= ~TRACEFS_EVENT_INODE;
+
+ return 1;
+}
+
static const struct super_operations tracefs_super_operations = {
.alloc_inode = tracefs_alloc_inode,
.free_inode = tracefs_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = tracefs_drop_inode,
.statfs = simple_statfs,
.remount_fs = tracefs_remount,
.show_options = tracefs_show_options,
@@ -469,22 +485,7 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags)
return !(ei && ei->is_freed);
}
-static void tracefs_d_iput(struct dentry *dentry, struct inode *inode)
-{
- struct tracefs_inode *ti = get_tracefs(inode);
-
- /*
- * This inode is being freed and cannot be used for
- * eventfs. Clear the flag so that it doesn't call into
- * eventfs during the remount flag updates. The eventfs_inode
- * gets freed after an RCU cycle, so the content will still
- * be safe if the iteration is going on now.
- */
- ti->flags &= ~TRACEFS_EVENT_INODE;
-}
-
static const struct dentry_operations tracefs_dentry_operations = {
- .d_iput = tracefs_d_iput,
.d_revalidate = tracefs_d_revalidate,
.d_release = tracefs_d_release,
};
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 2f831a3a9..bbf891841 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -341,7 +341,7 @@ const struct address_space_operations udf_aops = {
*/
int udf_expand_file_adinicb(struct inode *inode)
{
- struct page *page;
+ struct folio *folio;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
@@ -357,12 +357,13 @@ int udf_expand_file_adinicb(struct inode *inode)
return 0;
}
- page = find_or_create_page(inode->i_mapping, 0, GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ folio = __filemap_get_folio(inode->i_mapping, 0,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
+ if (IS_ERR(folio))
+ return PTR_ERR(folio);
- if (!PageUptodate(page))
- udf_adinicb_readpage(page);
+ if (!folio_test_uptodate(folio))
+ udf_adinicb_readpage(&folio->page);
down_write(&iinfo->i_data_sem);
memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
iinfo->i_lenAlloc);
@@ -371,22 +372,22 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
else
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
- set_page_dirty(page);
- unlock_page(page);
+ folio_mark_dirty(folio);
+ folio_unlock(folio);
up_write(&iinfo->i_data_sem);
err = filemap_fdatawrite(inode->i_mapping);
if (err) {
/* Restore everything back so that we don't lose data... */
- lock_page(page);
+ folio_lock(folio);
down_write(&iinfo->i_data_sem);
- memcpy_to_page(page, 0, iinfo->i_data + iinfo->i_lenEAttr,
- inode->i_size);
- unlock_page(page);
+ memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
+ folio, 0, inode->i_size);
+ folio_unlock(folio);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
iinfo->i_lenAlloc = inode->i_size;
up_write(&iinfo->i_data_sem);
}
- put_page(page);
+ folio_put(folio);
mark_inode_dirty(inode);
return err;
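
Besides the page-to-folio rename, the hunk above switches error conventions: find_or_create_page() reported failure with NULL, while __filemap_get_folio() returns an ERR_PTR-encoded errno, hence the IS_ERR()/PTR_ERR() handling. A small standalone sketch of that encoding, re-creating simplified versions of the helpers the kernel keeps in <linux/err.h>:

#include <stdio.h>

/* Simplified userspace re-creation of ERR_PTR()/IS_ERR()/PTR_ERR():
 * error codes ride in the topmost addresses of the pointer range. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

#define ENOMEM_ANALOG 12

/* Stand-in for __filemap_get_folio(): fails by returning an encoded errno. */
static void *get_folio(int should_fail)
{
	static int folio;                 /* fake folio object */

	if (should_fail)
		return ERR_PTR(-ENOMEM_ANALOG);
	return &folio;
}

int main(void)
{
	void *folio = get_folio(1);

	if (IS_ERR(folio))                /* new convention: check IS_ERR, not NULL */
		printf("lookup failed: %ld\n", PTR_ERR(folio));
	return 0;
}
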
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 758163af3..78ecc6336 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -46,13 +46,18 @@ udf_disk_stamp_to_time(struct timespec64 *dest, struct timestamp src)
dest->tv_sec = mktime64(year, src.month, src.day, src.hour, src.minute,
src.second);
dest->tv_sec -= offset * 60;
- dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
- src.hundredsOfMicroseconds * 100 + src.microseconds);
+
/*
* Sanitize nanosecond field since reportedly some filesystems are
* recorded with bogus sub-second values.
*/
- dest->tv_nsec %= NSEC_PER_SEC;
+ if (src.centiseconds < 100 && src.hundredsOfMicroseconds < 100 &&
+ src.microseconds < 100) {
+ dest->tv_nsec = 1000 * (src.centiseconds * 10000 +
+ src.hundredsOfMicroseconds * 100 + src.microseconds);
+ } else {
+ dest->tv_nsec = 0;
+ }
}
void
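
The udf_disk_stamp_to_time() change only trusts the sub-second fields when each one is within its on-disk range of 0-99; in that case the computed value can never reach NSEC_PER_SEC, so the old modulo is unnecessary. A quick standalone check of that arithmetic (the field names here are illustrative):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

/* Sketch of the sanitized conversion: only trust the sub-second fields when
 * each one is in its on-disk range (0-99); otherwise drop them entirely. */
static long udf_subsecond_to_ns(unsigned cs, unsigned hundreds_us, unsigned us)
{
	if (cs < 100 && hundreds_us < 100 && us < 100)
		return 1000L * (cs * 10000 + hundreds_us * 100 + us);
	return 0;
}

int main(void)
{
	/* Worst legal case: 99 cs + 99*100 us + 99 us = 999,999 us < 1 s. */
	printf("max legal: %ld ns (< %ld)\n",
	       udf_subsecond_to_ns(99, 99, 99), NSEC_PER_SEC);
	/* Bogus on-disk value: previously folded with a modulo, now discarded. */
	printf("bogus:     %ld ns\n", udf_subsecond_to_ns(250, 0, 0));
	return 0;
}
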
diff --git a/fs/verity/init.c b/fs/verity/init.c
index cb2c9aac6..f440f0e61 100644
--- a/fs/verity/init.c
+++ b/fs/verity/init.c
@@ -10,8 +10,6 @@
#include <linux/ratelimit.h>
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fsverity_sysctl_header;
-
static struct ctl_table fsverity_sysctl_table[] = {
#ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES
{
@@ -28,10 +26,7 @@ static struct ctl_table fsverity_sysctl_table[] = {
static void __init fsverity_init_sysctl(void)
{
- fsverity_sysctl_header = register_sysctl("fs/verity",
- fsverity_sysctl_table);
- if (!fsverity_sysctl_header)
- panic("fsverity sysctl registration failed");
+ register_sysctl_init("fs/verity", fsverity_sysctl_table);
}
#else /* CONFIG_SYSCTL */
static inline void fsverity_init_sysctl(void)
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 3d90716f9..f5fbc15e5 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -663,6 +663,10 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_adr_space_type
space_id))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_execute_orphan_reg_method(acpi_handle device,
+ acpi_adr_space_type
+ space_id))
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_remove_address_space_handler(acpi_handle
device,
acpi_adr_space_type
diff --git a/include/drm/bridge/aux-bridge.h b/include/drm/bridge/aux-bridge.h
index 445390610..c2f5a8555 100644
--- a/include/drm/bridge/aux-bridge.h
+++ b/include/drm/bridge/aux-bridge.h
@@ -33,7 +33,7 @@ static inline struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct devic
return NULL;
}
-static inline int devm_drm_dp_hpd_bridge_add(struct auxiliary_device *adev)
+static inline int devm_drm_dp_hpd_bridge_add(struct device *dev, struct auxiliary_device *adev)
{
return 0;
}
diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h
index 9b19d8bd5..6c9145abc 100644
--- a/include/drm/display/drm_dp_mst_helper.h
+++ b/include/drm/display/drm_dp_mst_helper.h
@@ -851,7 +851,6 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
struct drm_dp_mst_atomic_payload *payload);
int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_atomic_state *state,
struct drm_dp_mst_atomic_payload *payload);
void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 566497eeb..bc1f6b378 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -30,7 +30,6 @@ struct drm_edid;
#define VESA_IEEE_OUI 0x3a0292
/* DisplayID Structure versions */
-#define DISPLAY_ID_STRUCTURE_VER_12 0x12
#define DISPLAY_ID_STRUCTURE_VER_20 0x20
/* DisplayID Structure v1r2 Data Blocks */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index c0aec0d4d..3011d33ec 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -241,9 +241,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
-ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
-ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
- const struct drm_dsc_picture_parameter_set *pps);
+int mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+int mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps);
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 34829f2c5..168201e4c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -573,9 +573,13 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
#define OSC_SB_CPCV2_SUPPORT 0x00000040
#define OSC_SB_PCLPI_SUPPORT 0x00000080
#define OSC_SB_OSLPI_SUPPORT 0x00000100
+#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200
+#define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400
+#define OSC_SB_GED_SUPPORT 0x00000800
#define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000
-#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000
+#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000
#define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000
+#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000
#define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000
#define OSC_SB_PRM_SUPPORT 0x00200000
#define OSC_SB_FFH_OPR_SUPPORT 0x00400000
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 956bcba5d..2f9d36b72 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -2242,7 +2242,7 @@ raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* raw_atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4368,7 +4368,7 @@ raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* raw_atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4690,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
+// b565db590afeeff0d7c9485ccbca5bb6e155749f
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index debd487fe..9409a6ddf 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1349,7 +1349,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
/**
* atomic_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: int value to add
+ * @i: int value to subtract
* @v: pointer to atomic_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -2927,7 +2927,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
/**
* atomic64_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: s64 value to add
+ * @i: s64 value to subtract
* @v: pointer to atomic64_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -4505,7 +4505,7 @@ atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -5050,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// ce5b65e0f1f8a276268b667194581d24bed219d4
+// 8829b337928e9508259079d32581775ececd415b
diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h
index 3ef844b3a..f86b29d90 100644
--- a/include/linux/atomic/atomic-long.h
+++ b/include/linux/atomic/atomic-long.h
@@ -1535,7 +1535,7 @@ raw_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
/**
* raw_atomic_long_sub_and_test() - atomic subtract and test if zero with full ordering
- * @i: long value to add
+ * @i: long value to subtract
* @v: pointer to atomic_long_t
*
* Atomically updates @v to (@v - @i) with full ordering.
@@ -1809,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
+// eadf183c3600b8b92b91839dd3be6bcc560c752d
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 2ba557e06..f7f5a783d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -80,6 +80,7 @@ __check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);
+__check_bitop_pr(test_bit_acquire);
#undef __check_bitop_pr
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 7cb1b75ee..e742db470 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -737,6 +737,8 @@ struct bpf_verifier_env {
/* Same as scratched_regs but for stack slots */
u64 scratched_stack_slots;
u64 prev_log_pos, prev_insn_print_pos;
+ /* buffer used to temporary hold constants as scalar registers */
+ /* buffer used to temporarily hold constants as scalar registers */
+ struct bpf_reg_state fake_reg[2];
/* buffer used to generate temporary string representations,
* e.g., in reg_type_str() to generate reg_type string
*/
diff --git a/include/linux/counter.h b/include/linux/counter.h
index 702e9108b..b767b5c82 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -359,7 +359,6 @@ struct counter_ops {
* @num_counts: number of Counts specified in @counts
* @ext: optional array of Counter device extensions
* @num_ext: number of Counter device extensions specified in @ext
- * @priv: optional private data supplied by driver
* @dev: internal device structure
* @chrdev: internal character device structure
* @events_list: list of current watching Counter events
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 0ce6ff0d9..de4cf0ee9 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -70,7 +70,6 @@ extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
-extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
@@ -185,8 +184,6 @@ static inline void cpuset_update_active_cpus(void)
partition_sched_domains(1, NULL, NULL);
}
-static inline void cpuset_wait_for_hotplug(void) { }
-
static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 6bfe70dec..ae80a303c 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -130,6 +130,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
})
/*
+ * Dummy dev_printk for disabled debugging statements to use whilst maintaining
+ * gcc's format checking.
+ */
+#define dev_no_printk(level, dev, fmt, ...) \
+ ({ \
+ if (0) \
+ _dev_printk(level, dev, fmt, ##__VA_ARGS__); \
+ })
+
+/*
* #defines for all the dev_<level> macros to prefix with whatever
* possible use of #define dev_fmt(fmt) ...
*/
@@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#else
#define dev_dbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef CONFIG_PRINTK
@@ -247,20 +254,14 @@ do { \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
#define dev_vdbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
+ dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#endif
/*
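
dev_no_printk() centralizes the usual disabled-debug trick: the call sits inside if (0) so it compiles away entirely, yet the arguments are still checked against the format string. A userspace sketch of the same pattern with a hypothetical my_log()/my_dbg() pair; the printf format attribute is what preserves the checking:

#include <stdio.h>

/* Hypothetical helper mirroring _dev_printk(): format-checked by the compiler. */
__attribute__((format(printf, 1, 2)))
static void my_log(const char *fmt, ...) { (void)fmt; }

/* Disabled-debug macro: the if (0) keeps format checking but emits no code. */
#define my_dbg(fmt, ...)                            \
	({                                          \
		if (0)                              \
			my_log(fmt, ##__VA_ARGS__); \
	})

int main(void)
{
	int x = 42;

	my_dbg("x = %d\n", x);   /* compiles to nothing, arguments still type-checked */
	/* my_dbg("x = %d\n");   would warn: too few arguments for format */
	printf("done\n");
	return 0;
}
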
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 297231854..e44913a82 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -632,6 +632,14 @@ static inline void eth_skb_pkt_type(struct sk_buff *skb,
}
}
+static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+
+ skb_pull_inline(skb, ETH_HLEN);
+ return eth;
+}
+
/**
* eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
* @skb: Buffer to pad
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 0dd27364d..811e47f9d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -694,6 +694,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
+ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
+
/*
* Initializes struct fb_ops for deferred I/O.
*/
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 6aeebe0a6..9754f97e7 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -71,17 +71,30 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning("
__ret; \
})
-#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#if defined(__SANITIZE_ADDRESS__)
+
+#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
+#elif defined(CONFIG_KASAN_GENERIC)
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__asan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__asan_memcpy);
+#else /* CONFIG_KASAN_SW_TAGS */
+extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(__hwasan_memset);
+extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memmove);
+extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(__hwasan_memcpy);
+#endif
+
extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr);
extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp);
-extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy);
-extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove);
-extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset);
extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat);
extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy);
extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen);
extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat);
extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy);
+
#else
#if defined(__SANITIZE_MEMORY__)
@@ -106,6 +119,7 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#define __underlying_strlen __builtin_strlen
#define __underlying_strncat __builtin_strncat
#define __underlying_strncpy __builtin_strncpy
+
#endif
/**
@@ -734,7 +748,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
if (__compiletime_lessthan(p_size, size))
__read_overflow();
if (p_size < size)
- fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL);
+ fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size,
+ __real_kmemdup(p, 0, gfp));
return __real_kmemdup(p, size, gfp);
}
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 223da48a6..94c4edd04 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -45,6 +45,7 @@ struct fpga_bridge_info {
* @dev: FPGA bridge device
* @mutex: enforces exclusive reference to bridge
* @br_ops: pointer to struct of FPGA bridge ops
+ * @br_ops_owner: module containing the br_ops
* @info: fpga image specific information
* @node: FPGA bridge list node
* @priv: low level driver private date
@@ -54,6 +55,7 @@ struct fpga_bridge {
struct device dev;
struct mutex mutex; /* for exclusive reference to bridge */
const struct fpga_bridge_ops *br_ops;
+ struct module *br_ops_owner;
struct fpga_image_info *info;
struct list_head node;
void *priv;
@@ -79,10 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
+#define fpga_bridge_register(parent, name, br_ops, priv) \
+ __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE)
struct fpga_bridge *
-fpga_bridge_register(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv);
+__fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv,
+ struct module *owner);
void fpga_bridge_unregister(struct fpga_bridge *br);
#endif /* _LINUX_FPGA_BRIDGE_H */
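
The fpga_bridge_register() wrapper (and the analogous fpga-mgr and fpga-region changes below) turns the registration call into a macro so THIS_MODULE is captured at the caller's site and recorded as the owner of the ops. A rough userspace analogy, using __FILE__ as a stand-in for the caller's module:

#include <stdio.h>

struct ops { void (*do_thing)(void); };

/* Analog of __fpga_bridge_register(): the extra owner parameter is recorded. */
static void ops_register_owned(const char *name, const struct ops *ops,
			       const char *owner)
{
	(void)ops;
	printf("registered %s (ops owned by %s)\n", name, owner);
}

/* Analog of the kernel macro: capture the caller's identity automatically,
 * the way fpga_bridge_register() passes THIS_MODULE on the caller's behalf. */
#define ops_register(name, ops) ops_register_owned(name, ops, __FILE__)

static void do_thing(void) { puts("doing the thing"); }

int main(void)
{
	static const struct ops my_ops = { .do_thing = do_thing };

	ops_register("bridge0", &my_ops);
	return 0;
}
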
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 54f63459e..0d4fe068f 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -201,6 +201,7 @@ struct fpga_manager_ops {
* @state: state of fpga manager
* @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
+ * @mops_owner: module containing the mops
* @priv: low level driver private date
*/
struct fpga_manager {
@@ -210,6 +211,7 @@ struct fpga_manager {
enum fpga_mgr_states state;
struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
+ struct module *mops_owner;
void *priv;
};
@@ -230,18 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
+#define fpga_mgr_register_full(parent, info) \
+ __fpga_mgr_register_full(parent, info, THIS_MODULE)
struct fpga_manager *
-fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
+#define fpga_mgr_register(parent, name, mops, priv) \
+ __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
struct fpga_manager *
-fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv);
+__fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv, struct module *owner);
+
void fpga_mgr_unregister(struct fpga_manager *mgr);
+#define devm_fpga_mgr_register_full(parent, info) \
+ __devm_fpga_mgr_register_full(parent, info, THIS_MODULE)
struct fpga_manager *
-devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info,
+ struct module *owner);
+#define devm_fpga_mgr_register(parent, name, mops, priv) \
+ __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE)
struct fpga_manager *
-devm_fpga_mgr_register(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops, void *priv);
+__devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv,
+ struct module *owner);
#endif /*_LINUX_FPGA_MGR_H */
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 9d4d32909..5fbc05fe7 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -36,6 +36,7 @@ struct fpga_region_info {
* @mgr: FPGA manager
* @info: FPGA image info
* @compat_id: FPGA region id for compatibility check.
+ * @ops_owner: module containing the get_bridges function
* @priv: private data
* @get_bridges: optional function to get bridges to a list
*/
@@ -46,6 +47,7 @@ struct fpga_region {
struct fpga_manager *mgr;
struct fpga_image_info *info;
struct fpga_compat_id *compat_id;
+ struct module *ops_owner;
void *priv;
int (*get_bridges)(struct fpga_region *region);
};
@@ -58,12 +60,17 @@ fpga_region_class_find(struct device *start, const void *data,
int fpga_region_program_fpga(struct fpga_region *region);
+#define fpga_region_register_full(parent, info) \
+ __fpga_region_register_full(parent, info, THIS_MODULE)
struct fpga_region *
-fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
+__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info,
+ struct module *owner);
+#define fpga_region_register(parent, mgr, get_bridges) \
+ __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE)
struct fpga_region *
-fpga_region_register(struct device *parent, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
+__fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *), struct module *owner);
void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8dfd53b52..b09f14132 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3340,6 +3340,8 @@ void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
int simple_offset_empty(struct dentry *dentry);
+int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 3385a2cc5..ac5be38d8 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -5302,7 +5302,7 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data,
info_len += 1;
return prof->sta_info_len >= info_len &&
- fixed + prof->sta_info_len <= len;
+ fixed + prof->sta_info_len - 1 <= len;
}
/**
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index ac333ea81..327d7f43c 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -653,7 +653,7 @@ struct io_kiocb {
struct io_rsrc_node *rsrc_node;
atomic_t refs;
- atomic_t poll_refs;
+ bool cancel_seq_set;
struct io_task_work io_task_work;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
struct hlist_node hash_node;
@@ -662,6 +662,7 @@ struct io_kiocb {
/* opcode allocated if it needs to store data for async defer */
void *async_data;
/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
+ atomic_t poll_refs;
struct io_kiocb *link;
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2e925b5eb..3b67d59a3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1537,7 +1537,7 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index b851ba415..75a2fb8b1 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -21,6 +21,8 @@ enum kcov_mode {
KCOV_MODE_TRACE_PC = 2,
/* Collecting comparison operands mode. */
KCOV_MODE_TRACE_CMP = 3,
+ /* The process owns a KCOV remote reference. */
+ KCOV_MODE_REMOTE = 4,
};
#define KCOV_IN_CTXSW (1 << 30)
@@ -55,21 +57,47 @@ static inline void kcov_remote_start_usb(u64 id)
/*
* The softirq flavor of kcov_remote_*() functions is introduced as a temporary
- * work around for kcov's lack of nested remote coverage sections support in
- * task context. Adding support for nested sections is tracked in:
- * https://bugzilla.kernel.org/show_bug.cgi?id=210337
+ * workaround for KCOV's lack of nested remote coverage sections support.
+ *
+ * Adding support is tracked in https://bugzilla.kernel.org/show_bug.cgi?id=210337.
+ *
+ * kcov_remote_start_usb_softirq():
+ *
+ * 1. Only collects coverage when called in the softirq context. This allows
+ * avoiding nested remote coverage collection sections in the task context.
+ * For example, USB/IP calls usb_hcd_giveback_urb() in the task context
+ * within an existing remote coverage collection section. Thus, KCOV should
+ * not attempt to start collecting coverage within the coverage collection
+ * section in __usb_hcd_giveback_urb() in this case.
+ *
+ * 2. Disables interrupts for the duration of the coverage collection section.
+ * This allows avoiding nested remote coverage collection sections in the
+ * softirq context (a softirq might occur during the execution of a work in
+ * the BH workqueue, which runs with in_serving_softirq() > 0).
+ * For example, usb_giveback_urb_bh() runs in the BH workqueue with
+ * interrupts enabled, so __usb_hcd_giveback_urb() might be interrupted in
+ * the middle of its remote coverage collection section, and the interrupt
+ * handler might invoke __usb_hcd_giveback_urb() again.
*/
-static inline void kcov_remote_start_usb_softirq(u64 id)
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
{
- if (in_serving_softirq())
+ unsigned long flags = 0;
+
+ if (in_serving_softirq()) {
+ local_irq_save(flags);
kcov_remote_start_usb(id);
+ }
+
+ return flags;
}
-static inline void kcov_remote_stop_softirq(void)
+static inline void kcov_remote_stop_softirq(unsigned long flags)
{
- if (in_serving_softirq())
+ if (in_serving_softirq()) {
kcov_remote_stop();
+ local_irq_restore(flags);
+ }
}
#ifdef CONFIG_64BIT
@@ -103,8 +131,11 @@ static inline u64 kcov_common_handle(void)
}
static inline void kcov_remote_start_common(u64 id) {}
static inline void kcov_remote_start_usb(u64 id) {}
-static inline void kcov_remote_start_usb_softirq(u64 id) {}
-static inline void kcov_remote_stop_softirq(void) {}
+static inline unsigned long kcov_remote_start_usb_softirq(u64 id)
+{
+ return 0;
+}
+static inline void kcov_remote_stop_softirq(unsigned long flags) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
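
The reworked kcov_remote_start_usb_softirq() returns the interrupt flags it saved so the matching kcov_remote_stop_softirq() can restore them, keeping the coverage section free of nested softirq entries. A toy sketch of that start/stop pairing with a fake irq-save state (the names here are invented for illustration):

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-ins for local_irq_save()/local_irq_restore(). */
static bool irqs_enabled = true;

static unsigned long fake_irq_save(void)
{
	unsigned long prev = irqs_enabled;

	irqs_enabled = false;
	return prev;
}

static void fake_irq_restore(unsigned long prev)
{
	irqs_enabled = prev;
}

/* Sketch of the new pairing: start returns the state the caller must hand
 * back to stop, exactly once per coverage section. */
static unsigned long section_start(void)
{
	unsigned long flags = fake_irq_save();

	printf("section entered, irqs %s\n", irqs_enabled ? "on" : "off");
	return flags;
}

static void section_stop(unsigned long flags)
{
	fake_irq_restore(flags);
	printf("section left, irqs %s\n", irqs_enabled ? "on" : "off");
}

int main(void)
{
	unsigned long flags = section_start();
	/* ... coverage-collecting work runs with interrupts masked ... */
	section_stop(flags);
	return 0;
}
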
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 060835bb8..f31bd304d 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -461,10 +461,8 @@ static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) {
extern bool kexec_file_dbg_print;
-#define kexec_dprintk(fmt, ...) \
- printk("%s" fmt, \
- kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG, \
- ##__VA_ARGS__)
+#define kexec_dprintk(fmt, arg...) \
+ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0ff44d663..5fcbc254d 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -378,11 +378,15 @@ static inline void wait_for_kprobe_optimizer(void) { }
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+/* Set when ftrace has been killed: kprobes on ftrace must be disabled for safety */
+extern bool kprobe_ftrace_disabled __read_mostly;
+extern void kprobe_ftrace_kill(void);
#else
static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
return -EINVAL;
}
+static inline void kprobe_ftrace_kill(void) {}
#endif /* CONFIG_KPROBES_ON_FTRACE */
/* Get the kprobe at this addr (if any) - called with preemption disabled */
@@ -495,6 +499,9 @@ static inline void kprobe_flush_task(struct task_struct *tk)
static inline void kprobe_free_init_mem(void)
{
}
+static inline void kprobe_ftrace_kill(void)
+{
+}
static inline int disable_kprobe(struct kprobe *kp)
{
return -EOPNOTSUPP;
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 401348e9f..f4692ec36 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
*/
#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+ atomic_long_inc(&ksm_zero_pages);
+ atomic_long_inc(&mm->ksm_zero_pages);
+}
static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
{
if (is_ksm_zero_pte(pte)) {
- ksm_zero_pages--;
- mm->ksm_zero_pages--;
+ atomic_long_dec(&ksm_zero_pages);
+ atomic_long_dec(&mm->ksm_zero_pages);
}
}
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+ return atomic_long_read(&mm->ksm_zero_pages);
+}
+
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
int ret;
@@ -59,6 +70,14 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
return 0;
}
+static inline int ksm_execve(struct mm_struct *mm)
+{
+ if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ return __ksm_enter(mm);
+
+ return 0;
+}
+
static inline void ksm_exit(struct mm_struct *mm)
{
if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
@@ -107,6 +126,11 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
return 0;
}
+static inline int ksm_execve(struct mm_struct *mm)
+{
+ return 0;
+}
+
static inline void ksm_exit(struct mm_struct *mm)
{
}
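
ksm_zero_pages moves from a plain unsigned long to atomic_long_t because the counter is incremented and decremented from different contexts, where unsynchronized read-modify-write updates can be lost. A standalone C11 analog of the new helpers:

#include <stdatomic.h>
#include <stdio.h>

/* Toy analog of the ksm_zero_pages counter: bumped and dropped from
 * different contexts, so plain long arithmetic could lose updates. */
static atomic_long zero_pages;

static void map_zero_page(void)   { atomic_fetch_add(&zero_pages, 1); }
static void unmap_zero_page(void) { atomic_fetch_sub(&zero_pages, 1); }

int main(void)
{
	map_zero_page();
	map_zero_page();
	unmap_zero_page();
	printf("zero pages: %ld\n", (long)atomic_load(&zero_pages));
	return 0;
}
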
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 334e00efb..7e539f6f8 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -412,7 +412,7 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring,
#ifdef CONFIG_AUDIT
LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule)
LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule)
LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index bf9324a31..80452bd98 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -862,6 +862,7 @@ struct mlx5_cmd_work_ent {
void *context;
int idx;
struct completion handling;
+ struct completion slotted;
struct completion done;
struct mlx5_cmd *cmd;
struct work_struct work;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index c940b329a..5e5a9c677 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -10267,9 +10267,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mfrl[0x1];
u8 regs_39_to_32[0x8];
- u8 regs_31_to_10[0x16];
+ u8 regs_31_to_11[0x15];
u8 mtmp[0x1];
- u8 regs_8_to_0[0x9];
+ u8 regs_9_to_0[0xa];
};
struct mlx5_ifc_mcam_access_reg_bits1 {
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5240bd7bc..962e7e05e 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -988,7 +988,7 @@ struct mm_struct {
* Represent how many empty pages are merged with kernel zero
* pages when enabling KSM use_zero_pages.
*/
- unsigned long ksm_zero_pages;
+ atomic_long_t ksm_zero_pages;
#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN_WALKS_MMU
struct {
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 5d3d15e97..66272fdce 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -21,6 +21,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
unsigned int idx, unsigned int debounce);
+int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 7a9a07ea4..4338b1b4a 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -690,6 +690,8 @@ struct x86_cpu_id {
__u16 model;
__u16 steppings;
__u16 feature; /* bit index */
+ /* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
+ __u16 flags;
kernel_ulong_t driver_data;
};
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 915033a75..1d43371fa 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -36,12 +36,7 @@ int memory_add_physaddr_to_nid(u64 start);
int phys_to_target_node(u64 start);
#endif
-#ifndef numa_fill_memblks
-static inline int __init numa_fill_memblks(u64 start, u64 end)
-{
- return NUMA_NO_MEMBLK;
-}
-#endif
+int numa_fill_memblks(u64 start, u64 end);
#else /* !CONFIG_NUMA */
static inline int numa_nearest_node(int node, unsigned int state)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 2df35e655..d31e59e2e 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -344,6 +344,19 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
+/*
+ * There are some parts of the kernel which assume that PMD entries
+ * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
+ * limit the maximum allocation order to PMD size. I'm not aware of any
+ * assumptions about maximum order if THP are disabled, but 8 seems like
+ * a good order (that's 1MB if you're using 4kB pages)
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
+#else
+#define MAX_PAGECACHE_ORDER 8
+#endif
+
/**
* mapping_set_large_folios() - Indicate the file supports large folios.
* @mapping: The file.
@@ -366,10 +379,22 @@ static inline void mapping_set_large_folios(struct address_space *mapping)
*/
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
+ /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */
+ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+ "Anonymous mapping always supports large folio");
+
return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
+/* Return the maximum folio size for this pagecache mapping, in bytes. */
+static inline size_t mapping_max_folio_size(struct address_space *mapping)
+{
+ if (mapping_large_folio_support(mapping))
+ return PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ return PAGE_SIZE;
+}
+
static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -528,19 +553,6 @@ static inline void *detach_page_private(struct page *page)
return folio_detach_private(page_folio(page));
}
-/*
- * There are some parts of the kernel which assume that PMD entries
- * are exactly HPAGE_PMD_ORDER. Those should be fixed, but until then,
- * limit the maximum allocation order to PMD size. I'm not aware of any
- * assumptions about maximum order if THP are disabled, but 8 seems like
- * a good order (that's 1MB if you're using 4kB pages)
- */
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER
-#else
-#define MAX_PAGECACHE_ORDER 8
-#endif
-
#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
#else
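
mapping_max_folio_size() simply converts MAX_PAGECACHE_ORDER into bytes when the mapping supports large folios and falls back to PAGE_SIZE otherwise. A quick standalone sketch of that calculation, assuming 4 KiB pages and an HPAGE_PMD_ORDER of 9 (a common x86-64 value):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_BYTES   4096UL
#define MAX_ORDER_THP     9      /* stand-in for HPAGE_PMD_ORDER with 4 KiB pages */
#define MAX_ORDER_NO_THP  8      /* the fallback order from the comment above */

/* Sketch of mapping_max_folio_size(): largest folio the pagecache may use. */
static unsigned long max_folio_size(bool large_folio_support, bool thp_enabled)
{
	unsigned long order = thp_enabled ? MAX_ORDER_THP : MAX_ORDER_NO_THP;

	if (large_folio_support)
		return PAGE_SIZE_BYTES << order;
	return PAGE_SIZE_BYTES;
}

int main(void)
{
	printf("THP on:          %lu bytes\n", max_folio_size(true, true));   /* 2 MiB */
	printf("THP off:         %lu bytes\n", max_folio_size(true, false));  /* 1 MiB */
	printf("no large folios: %lu bytes\n", max_folio_size(false, true));
	return 0;
}
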
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 16493426a..6f9c5ed5e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2519,7 +2519,12 @@ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
- return dev->error_state == pci_channel_io_perm_failure;
+ /*
+ * error_state is set in pci_dev_set_io_state() using xchg/cmpxchg()
+ * and read w/o common lock. READ_ONCE() ensures compiler cannot cache
+ * the value (e.g. inside the loop in pci_dev_wait()).
+ */
+ return READ_ONCE(dev->error_state) == pci_channel_io_perm_failure;
}
void pci_request_acs(void);
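
The READ_ONCE() in pci_dev_is_disconnected() forces a fresh load of error_state on every call, so a caller polling in a loop (such as pci_dev_wait()) cannot have the value cached by the compiler. A userspace illustration of the idea, using a volatile-qualified access as a stand-in:

#include <stdio.h>

/* Minimal stand-in for the kernel's READ_ONCE(): force a fresh load each time. */
#define READ_ONCE_ANALOG(x) (*(const volatile typeof(x) *)&(x))

enum channel_state { CHANNEL_OK, CHANNEL_PERM_FAILURE };

static enum channel_state error_state = CHANNEL_OK;

/* Without the volatile read, a compiler could cache error_state across the
 * iterations of a wait loop and never observe a failure written elsewhere. */
static int dev_is_disconnected(void)
{
	return READ_ONCE_ANALOG(error_state) == CHANNEL_PERM_FAILURE;
}

int main(void)
{
	printf("disconnected? %d\n", dev_is_disconnected());
	error_state = CHANNEL_PERM_FAILURE;   /* in the kernel: another CPU via cmpxchg */
	printf("disconnected? %d\n", dev_is_disconnected());
	return 0;
}
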
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 772d3280d..f24546a3d 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -260,6 +260,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
int pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
int pm_genpd_remove(struct generic_pm_domain *genpd);
+struct device *dev_to_genpd_dev(struct device *dev);
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
int dev_pm_genpd_add_notifier(struct device *dev, struct notifier_block *nb);
int dev_pm_genpd_remove_notifier(struct device *dev);
@@ -307,6 +308,11 @@ static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
return -EOPNOTSUPP;
}
+static inline struct device *dev_to_genpd_dev(struct device *dev)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int dev_pm_genpd_set_performance_state(struct device *dev,
unsigned int state)
{
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 955e31860..2fde40cc9 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -126,7 +126,7 @@ struct va_format {
#define no_printk(fmt, ...) \
({ \
if (0) \
- printk(fmt, ##__VA_ARGS__); \
+ _printk(fmt, ##__VA_ARGS__); \
0; \
})
diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h
index fb724c65c..5ce0cd769 100644
--- a/include/linux/pse-pd/pse.h
+++ b/include/linux/pse-pd/pse.h
@@ -114,14 +114,14 @@ static inline int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 22a07c090..f230a472c 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -299,6 +299,8 @@ enum regulator_type {
* @vsel_range_reg: Register for range selector when using pickable ranges
* and ``regulator_map_*_voltage_*_pickable`` functions.
* @vsel_range_mask: Mask for register bitfield used for range selector
+ * @range_applied_by_vsel: A flag to indicate that changes to vsel_range_reg
+ * are only effective after vsel_reg is written
* @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
* @vsel_mask: Mask for register bitfield used for selector
* @vsel_step: Specify the resolution of selector stepping when setting
@@ -389,6 +391,7 @@ struct regulator_desc {
unsigned int vsel_range_reg;
unsigned int vsel_range_mask;
+ bool range_applied_by_vsel;
unsigned int vsel_reg;
unsigned int vsel_mask;
unsigned int vsel_step;
diff --git a/include/linux/security.h b/include/linux/security.h
index 41a8f667b..5122e3ad8 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2048,7 +2048,8 @@ static inline void security_key_post_create_or_update(struct key *keyring,
#ifdef CONFIG_AUDIT
#ifdef CONFIG_SECURITY
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule);
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp);
int security_audit_rule_known(struct audit_krule *krule);
int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule);
void security_audit_rule_free(void *lsmrule);
@@ -2056,7 +2057,7 @@ void security_audit_rule_free(void *lsmrule);
#else
static inline int security_audit_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4ff48eda3..5b1078c16 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1174,15 +1174,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb)
return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}
-/**
- * skb_rtable - Returns the skb &rtable
- * @skb: buffer
- */
-static inline struct rtable *skb_rtable(const struct sk_buff *skb)
-{
- return (struct rtable *)skb_dst(skb);
-}
-
/* For mangling skb->pkt_type from user space side from applications
* such as nft, tc, etc, we only allow a conservative subset of
* possible pkt_types to be set.
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dfa1828cd..c0d74f97f 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -117,7 +117,6 @@ struct stmmac_axi {
#define EST_GCL 1024
struct stmmac_est {
- struct mutex lock;
int enable;
u32 btr_reserve[2];
u32 btr_offset[2];
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 7372124fb..dd4b31ce6 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -154,6 +154,13 @@ struct serial_struct;
*
* Optional. Called under the @tty->termios_rwsem. May sleep.
*
+ * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)``
+ *
+ * This routine allows the @tty driver to decide if it can deal
+ * with a particular @ldisc.
+ *
+ * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
+ *
* @set_ldisc: ``void ()(struct tty_struct *tty)``
*
* This routine allows the @tty driver to be notified when the device's
@@ -372,6 +379,7 @@ struct tty_operations {
void (*hangup)(struct tty_struct *tty);
int (*break_ctl)(struct tty_struct *tty, int state);
void (*flush_buffer)(struct tty_struct *tty);
+ int (*ldisc_ok)(struct tty_struct *tty, int ldisc);
void (*set_ldisc)(struct tty_struct *tty);
void (*wait_until_sent)(struct tty_struct *tty, int timeout);
void (*send_xchar)(struct tty_struct *tty, u8 ch);
diff --git a/include/media/cec.h b/include/media/cec.h
index 10c9cf605..cc3fcd049 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -258,6 +258,7 @@ struct cec_adapter {
u16 phys_addr;
bool needs_hpd;
bool is_enabled;
+ bool is_claiming_log_addrs;
bool is_configuring;
bool must_reconfigure;
bool is_configured;
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 0d939e5ae..c2a85fd3f 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -216,7 +216,7 @@ typedef struct {
struct ctl_table;
typedef struct ax25_dev {
- struct ax25_dev *next;
+ struct list_head list;
struct net_device *dev;
netdevice_tracker dev_tracker;
@@ -330,7 +330,6 @@ int ax25_addr_size(const ax25_digi *);
void ax25_digi_invert(const ax25_digi *, ax25_digi *);
/* ax25_dev.c */
-extern ax25_dev *ax25_dev_list;
extern spinlock_t ax25_dev_lock;
#if IS_ENABLED(CONFIG_AX25)
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index eaec5d6ca..b3228bd6c 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -285,7 +285,7 @@ void bt_err_ratelimited(const char *fmt, ...);
bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__)
/* Connection and socket states */
-enum {
+enum bt_sock_state {
BT_CONNECTED = 1, /* Equal to TCP_ESTABLISHED to make net code happy */
BT_OPEN,
BT_BOUND,
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 07198beb3..f18751042 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -33,9 +33,6 @@
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
#define HCI_LINK_KEY_SIZE 16
-#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
-
-#define HCI_MAX_AMP_ASSOC_SIZE 672
#define HCI_MAX_CPB_DATA_SIZE 252
@@ -71,26 +68,6 @@
#define HCI_SMD 9
#define HCI_VIRTIO 10
-/* HCI controller types */
-#define HCI_PRIMARY 0x00
-#define HCI_AMP 0x01
-
-/* First BR/EDR Controller shall have ID = 0 */
-#define AMP_ID_BREDR 0x00
-
-/* AMP controller types */
-#define AMP_TYPE_BREDR 0x00
-#define AMP_TYPE_80211 0x01
-
-/* AMP controller status */
-#define AMP_STATUS_POWERED_DOWN 0x00
-#define AMP_STATUS_BLUETOOTH_ONLY 0x01
-#define AMP_STATUS_NO_CAPACITY 0x02
-#define AMP_STATUS_LOW_CAPACITY 0x03
-#define AMP_STATUS_MEDIUM_CAPACITY 0x04
-#define AMP_STATUS_HIGH_CAPACITY 0x05
-#define AMP_STATUS_FULL_CAPACITY 0x06
-
/* HCI device quirks */
enum {
/* When this quirk is set, the HCI Reset command is send when
@@ -528,7 +505,6 @@ enum {
#define ESCO_LINK 0x02
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
-#define AMP_LINK 0x81
#define ISO_LINK 0x82
#define INVALID_LINK 0xff
@@ -944,56 +920,6 @@ struct hci_cp_io_capability_neg_reply {
__u8 reason;
} __packed;
-#define HCI_OP_CREATE_PHY_LINK 0x0435
-struct hci_cp_create_phy_link {
- __u8 phy_handle;
- __u8 key_len;
- __u8 key_type;
- __u8 key[HCI_AMP_LINK_KEY_SIZE];
-} __packed;
-
-#define HCI_OP_ACCEPT_PHY_LINK 0x0436
-struct hci_cp_accept_phy_link {
- __u8 phy_handle;
- __u8 key_len;
- __u8 key_type;
- __u8 key[HCI_AMP_LINK_KEY_SIZE];
-} __packed;
-
-#define HCI_OP_DISCONN_PHY_LINK 0x0437
-struct hci_cp_disconn_phy_link {
- __u8 phy_handle;
- __u8 reason;
-} __packed;
-
-struct ext_flow_spec {
- __u8 id;
- __u8 stype;
- __le16 msdu;
- __le32 sdu_itime;
- __le32 acc_lat;
- __le32 flush_to;
-} __packed;
-
-#define HCI_OP_CREATE_LOGICAL_LINK 0x0438
-#define HCI_OP_ACCEPT_LOGICAL_LINK 0x0439
-struct hci_cp_create_accept_logical_link {
- __u8 phy_handle;
- struct ext_flow_spec tx_flow_spec;
- struct ext_flow_spec rx_flow_spec;
-} __packed;
-
-#define HCI_OP_DISCONN_LOGICAL_LINK 0x043a
-struct hci_cp_disconn_logical_link {
- __le16 log_handle;
-} __packed;
-
-#define HCI_OP_LOGICAL_LINK_CANCEL 0x043b
-struct hci_cp_logical_link_cancel {
- __u8 phy_handle;
- __u8 flow_spec_id;
-} __packed;
-
#define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d
struct hci_coding_format {
__u8 id;
@@ -1615,46 +1541,6 @@ struct hci_rp_read_enc_key_size {
__u8 key_size;
} __packed;
-#define HCI_OP_READ_LOCAL_AMP_INFO 0x1409
-struct hci_rp_read_local_amp_info {
- __u8 status;
- __u8 amp_status;
- __le32 total_bw;
- __le32 max_bw;
- __le32 min_latency;
- __le32 max_pdu;
- __u8 amp_type;
- __le16 pal_cap;
- __le16 max_assoc_size;
- __le32 max_flush_to;
- __le32 be_flush_to;
-} __packed;
-
-#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
-struct hci_cp_read_local_amp_assoc {
- __u8 phy_handle;
- __le16 len_so_far;
- __le16 max_len;
-} __packed;
-struct hci_rp_read_local_amp_assoc {
- __u8 status;
- __u8 phy_handle;
- __le16 rem_len;
- __u8 frag[];
-} __packed;
-
-#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
-struct hci_cp_write_remote_amp_assoc {
- __u8 phy_handle;
- __le16 len_so_far;
- __le16 rem_len;
- __u8 frag[];
-} __packed;
-struct hci_rp_write_remote_amp_assoc {
- __u8 status;
- __u8 phy_handle;
-} __packed;
-
#define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c
#define HCI_OP_ENABLE_DUT_MODE 0x1803
@@ -2035,7 +1921,7 @@ struct hci_cp_le_set_ext_adv_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
@@ -2044,7 +1930,7 @@ struct hci_cp_le_set_ext_scan_rsp_data {
__u8 operation;
__u8 frag_pref;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
@@ -2070,7 +1956,7 @@ struct hci_cp_le_set_per_adv_data {
__u8 handle;
__u8 operation;
__u8 length;
- __u8 data[];
+ __u8 data[] __counted_by(length);
} __packed;
#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040
@@ -2171,7 +2057,7 @@ struct hci_cis {
struct hci_cp_le_create_cis {
__u8 num_cis;
- struct hci_cis cis[];
+ struct hci_cis cis[] __counted_by(num_cis);
} __packed;
#define HCI_OP_LE_REMOVE_CIG 0x2065
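
The __counted_by(length) annotations tie each flexible data[] array to its length member so fortified routines and UBSAN can bounds-check accesses. A standalone sketch of the layout; the attribute is stubbed out so the example builds even where the compiler lacks it, and the count is set before the array is filled, as the annotation expects:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The kernel maps __counted_by() to the compiler attribute when available;
 * here it is stubbed out so this sketch builds anywhere. */
#ifndef __counted_by
#define __counted_by(member)
#endif

struct adv_data_analog {
	unsigned char handle;
	unsigned char length;
	unsigned char data[] __counted_by(length);   /* bounds tied to ->length */
};

int main(void)
{
	unsigned char payload[] = { 0x02, 0x01, 0x06 };
	struct adv_data_analog *adv;

	adv = malloc(sizeof(*adv) + sizeof(payload));
	if (!adv)
		return 1;
	adv->handle = 0;
	adv->length = sizeof(payload);          /* set the count before filling */
	memcpy(adv->data, payload, adv->length);
	printf("adv length %u\n", adv->length);
	free(adv);
	return 0;
}
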
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b1c8489ff..17b2a7baa 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -126,7 +126,6 @@ enum suspended_state {
struct hci_conn_hash {
struct list_head list;
unsigned int acl_num;
- unsigned int amp_num;
unsigned int sco_num;
unsigned int iso_num;
unsigned int le_num;
@@ -247,6 +246,7 @@ struct adv_info {
bool periodic;
__u8 mesh;
__u8 instance;
+ __u8 handle;
__u32 flags;
__u16 timeout;
__u16 remaining_time;
@@ -341,14 +341,6 @@ struct adv_monitor {
/* Default authenticated payload timeout 30s */
#define DEFAULT_AUTH_PAYLOAD_TIMEOUT 0x0bb8
-struct amp_assoc {
- __u16 len;
- __u16 offset;
- __u16 rem_len;
- __u16 len_so_far;
- __u8 data[HCI_MAX_AMP_ASSOC_SIZE];
-};
-
#define HCI_MAX_PAGES 3
struct hci_dev {
@@ -361,7 +353,6 @@ struct hci_dev {
unsigned long flags;
__u16 id;
__u8 bus;
- __u8 dev_type;
bdaddr_t bdaddr;
bdaddr_t setup_addr;
bdaddr_t public_addr;
@@ -467,21 +458,6 @@ struct hci_dev {
__u16 sniff_min_interval;
__u16 sniff_max_interval;
- __u8 amp_status;
- __u32 amp_total_bw;
- __u32 amp_max_bw;
- __u32 amp_min_latency;
- __u32 amp_max_pdu;
- __u8 amp_type;
- __u16 amp_pal_cap;
- __u16 amp_assoc_size;
- __u32 amp_max_flush_to;
- __u32 amp_be_flush_to;
-
- struct amp_assoc loc_assoc;
-
- __u8 flow_ctl_mode;
-
unsigned int auto_accept_delay;
unsigned long quirks;
@@ -501,11 +477,6 @@ struct hci_dev {
unsigned int le_pkts;
unsigned int iso_pkts;
- __u16 block_len;
- __u16 block_mtu;
- __u16 num_blocks;
- __u16 block_cnt;
-
unsigned long acl_last_tx;
unsigned long sco_last_tx;
unsigned long le_last_tx;
@@ -778,7 +749,6 @@ struct hci_conn {
void *l2cap_data;
void *sco_data;
void *iso_data;
- struct amp_mgr *amp_mgr;
struct list_head link_list;
struct hci_conn *parent;
@@ -805,7 +775,6 @@ struct hci_chan {
struct sk_buff_head data_q;
unsigned int sent;
__u8 state;
- bool amp;
};
struct hci_conn_params {
@@ -1014,9 +983,6 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK:
h->acl_num++;
break;
- case AMP_LINK:
- h->amp_num++;
- break;
case LE_LINK:
h->le_num++;
if (c->role == HCI_ROLE_SLAVE)
@@ -1043,9 +1009,6 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
case ACL_LINK:
h->acl_num--;
break;
- case AMP_LINK:
- h->amp_num--;
- break;
case LE_LINK:
h->le_num--;
if (c->role == HCI_ROLE_SLAVE)
@@ -1067,8 +1030,6 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
switch (type) {
case ACL_LINK:
return h->acl_num;
- case AMP_LINK:
- return h->amp_num;
case LE_LINK:
return h->le_num;
case SCO_LINK:
@@ -1085,7 +1046,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
struct hci_conn_hash *c = &hdev->conn_hash;
- return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num;
+ return c->acl_num + c->sco_num + c->le_num + c->iso_num;
}
static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
@@ -1374,8 +1335,7 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
rcu_read_lock();
list_for_each_entry_rcu(c, &h->list, list) {
- if (c->type != ISO_LINK ||
- !test_bit(HCI_CONN_PA_SYNC, &c->flags))
+ if (c->type != ISO_LINK)
continue;
if (c->sync_handle == sync_handle) {
@@ -1611,10 +1571,6 @@ static inline void hci_conn_drop(struct hci_conn *conn)
}
break;
- case AMP_LINK:
- timeo = conn->disc_timeout;
- break;
-
default:
timeo = 0;
break;
@@ -2157,18 +2113,46 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
{
u16 max_latency;
- if (min > max || min < 6 || max > 3200)
+ if (min > max) {
+ BT_WARN("min %d > max %d", min, max);
+ return -EINVAL;
+ }
+
+ if (min < 6) {
+ BT_WARN("min %d < 6", min);
+ return -EINVAL;
+ }
+
+ if (max > 3200) {
+ BT_WARN("max %d > 3200", max);
+ return -EINVAL;
+ }
+
+ if (to_multiplier < 10) {
+ BT_WARN("to_multiplier %d < 10", to_multiplier);
return -EINVAL;
+ }
- if (to_multiplier < 10 || to_multiplier > 3200)
+ if (to_multiplier > 3200) {
+ BT_WARN("to_multiplier %d > 3200", to_multiplier);
return -EINVAL;
+ }
- if (max >= to_multiplier * 8)
+ if (max >= to_multiplier * 8) {
+ BT_WARN("max %d >= to_multiplier %d * 8", max, to_multiplier);
return -EINVAL;
+ }
max_latency = (to_multiplier * 4 / max) - 1;
- if (latency > 499 || latency > max_latency)
+ if (latency > 499) {
+ BT_WARN("latency %d > 499", latency);
return -EINVAL;
+ }
+
+ if (latency > max_latency) {
+ BT_WARN("latency %d > max_latency %d", latency, max_latency);
+ return -EINVAL;
+ }
return 0;
}
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index a4278aa61..3434cfc26 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -548,6 +548,9 @@ struct l2cap_chan {
__u16 tx_credits;
__u16 rx_credits;
+ /* estimated available receive buffer space or -1 if unknown */
+ ssize_t rx_avail;
+
__u8 tx_state;
__u8 rx_state;
@@ -682,10 +685,15 @@ struct l2cap_user {
/* ----- L2CAP socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
+struct l2cap_rx_busy {
+ struct list_head list;
+ struct sk_buff *skb;
+};
+
struct l2cap_pinfo {
struct bt_sock bt;
struct l2cap_chan *chan;
- struct sk_buff *rx_busy_skb;
+ struct list_head rx_busy;
};
enum {
@@ -943,6 +951,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
+void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail);
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
void l2cap_chan_set_defaults(struct l2cap_chan *chan);
int l2cap_ertm_init(struct l2cap_chan *chan);
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 6d1c85411..3a9001a04 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -24,7 +24,7 @@ struct dst_ops {
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev);
- struct dst_entry * (*negative_advice)(struct dst_entry *);
+ void (*negative_advice)(struct sock *sk, struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
diff --git a/include/net/ip.h b/include/net/ip.h
index 25cb688bd..6d735e00d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -423,7 +423,7 @@ int ip_decrease_ttl(struct iphdr *iph)
static inline int ip_mtu_locked(const struct dst_entry *dst)
{
- const struct rtable *rt = (const struct rtable *)dst;
+ const struct rtable *rt = dst_rtable(dst);
return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU);
}
@@ -461,7 +461,7 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
- const struct rtable *rt = container_of(dst, struct rtable, dst);
+ const struct rtable *rt = dst_rtable(dst);
struct net *net = dev_net(dst->dev);
unsigned int mtu;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 323c94f18..73524fa0c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -234,9 +234,11 @@ struct fib6_result {
for (rt = (w)->leaf; rt; \
rt = rcu_dereference_protected(rt->fib6_next, 1))
-static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
+#define dst_rt6_info(_ptr) container_of_const(_ptr, struct rt6_info, dst)
+
+static inline struct inet6_dev *ip6_dst_idev(const struct dst_entry *dst)
{
- return ((struct rt6_info *)dst)->rt6i_idev;
+ return dst_rt6_info(dst)->rt6i_idev;
}
static inline bool fib6_requires_src(const struct fib6_info *rt)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index a30c6aa9e..a18ed24fe 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -210,12 +210,11 @@ void rt6_uncached_list_del(struct rt6_info *rt);
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
- const struct rt6_info *rt6 = NULL;
if (dst)
- rt6 = container_of(dst, struct rt6_info, dst);
+ return dst_rt6_info(dst);
- return rt6;
+ return NULL;
}
/*
@@ -227,7 +226,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
{
struct ipv6_pinfo *np = inet6_sk(sk);
- np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
+ np->dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
sk_setup_caps(sk, dst);
np->daddr_cache = daddr;
#ifdef CONFIG_IPV6_SUBTREES
@@ -240,7 +239,7 @@ void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);
+ const struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
return rt->rt6i_flags & RTF_LOCAL;
}
@@ -248,7 +247,7 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
const struct in6_addr *daddr)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ const struct rt6_info *rt = dst_rt6_info(dst);
return rt->rt6i_flags & RTF_ANYCAST ||
(rt->rt6i_dst.plen < 127 &&
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index c286cc2e7..cc666da62 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -363,9 +363,10 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
/* Variant of pskb_inet_may_pull().
*/
-static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb,
+ bool inner_proto_inherit)
{
- int nhlen = 0, maclen = ETH_HLEN;
+ int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN;
__be16 type = skb->protocol;
/* Essentially this is skb_protocol(skb, true)
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 02bbdc577..a6a0bf4a2 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -15,6 +15,9 @@ struct netns_nf {
const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
+#ifdef CONFIG_LWTUNNEL
+ struct ctl_table_header *nf_lwtnl_dir_header;
+#endif
#endif
struct nf_hook_entries __rcu *hooks_ipv4[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu *hooks_ipv6[NF_INET_NUMHOOKS];
diff --git a/include/net/route.h b/include/net/route.h
index d4a014794..af55401aa 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -77,6 +77,17 @@ struct rtable {
rt_pmtu:31;
};
+#define dst_rtable(_ptr) container_of_const(_ptr, struct rtable, dst)
+
+/**
+ * skb_rtable - Returns the skb &rtable
+ * @skb: buffer
+ */
+static inline struct rtable *skb_rtable(const struct sk_buff *skb)
+{
+ return dst_rtable(skb_dst(skb));
+}
+
static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_is_input != 0;
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 3bfb80bad..b45d57b59 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -13,6 +13,7 @@ enum rtnl_link_flags {
RTNL_FLAG_DOIT_UNLOCKED = BIT(0),
RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1),
RTNL_FLAG_DUMP_UNLOCKED = BIT(2),
+ RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */
};
enum rtnl_kinds {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 41ca14e81..0014b9ee5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -128,6 +128,7 @@ struct Qdisc {
struct rcu_head rcu;
netdevice_tracker dev_tracker;
+ struct lock_class_key root_lock_key;
/* private data */
long privdata[] ____cacheline_aligned;
};
diff --git a/include/net/sock.h b/include/net/sock.h
index b4b553df7..944f71a8a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2134,17 +2134,10 @@ sk_dst_get(const struct sock *sk)
static inline void __dst_negative_advice(struct sock *sk)
{
- struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+ struct dst_entry *dst = __sk_dst_get(sk);
- if (dst && dst->ops->negative_advice) {
- ndst = dst->ops->negative_advice(dst);
-
- if (ndst != dst) {
- rcu_assign_pointer(sk->sk_dst_cache, ndst);
- sk_tx_queue_clear(sk);
- WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
- }
- }
+ if (dst && dst->ops->negative_advice)
+ dst->ops->negative_advice(sk, dst);
}
static inline void dst_negative_advice(struct sock *sk)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6ae35199d..2bcf30381 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1539,11 +1539,10 @@ static inline int tcp_space_from_win(const struct sock *sk, int win)
return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}
-/* Assume a conservative default of 1200 bytes of payload per 4K page.
+/* Assume a 50% default for skb->len/skb->truesize ratio.
* This may be adjusted later in tcp_measure_rcv_mss().
*/
-#define TCP_DEFAULT_SCALING_RATIO ((1200 << TCP_RMEM_TO_WIN_SCALE) / \
- SKB_TRUESIZE(4096))
+#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
static inline void tcp_scaling_ratio_init(struct sock *sk)
{
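A quick arithmetic check of the new default (illustrative only; it assumes TCP_RMEM_TO_WIN_SCALE is 8, i.e. scaling_ratio is expressed in 1/256ths of skb->truesize, which is not spelled out in this hunk):

#include <stdio.h>

/* Assumed value for illustration; not defined in this hunk. */
#define TCP_RMEM_TO_WIN_SCALE 8

#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

int main(void)
{
	/* 1 << 7 == 128 out of 256, i.e. half of skb->truesize counted as payload */
	printf("default scaling_ratio = %d/%d (%.0f%%)\n",
	       TCP_DEFAULT_SCALING_RATIO, 1 << TCP_RMEM_TO_WIN_SCALE,
	       100.0 * TCP_DEFAULT_SCALING_RATIO / (1 << TCP_RMEM_TO_WIN_SCALE));
	return 0;
}

Under that assumption the new default works out to 128/256, i.e. the 50% payload-to-truesize ratio the updated comment describes.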
diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h
index 471e17736..5d8e9ed2c 100644
--- a/include/net/tcp_ao.h
+++ b/include/net/tcp_ao.h
@@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
struct tcp_ao_info {
/* List of tcp_ao_key's */
struct hlist_head head;
- /* current_key and rnext_key aren't maintained on listen sockets.
+ /* current_key and rnext_key are maintained on sockets
+ * in TCP_AO_ESTABLISHED states.
* Their purpose is to cache keys on established connections,
* saving needless lookups. Never dereference any of them from
* listen sockets.
@@ -201,9 +202,9 @@ struct tcp6_ao_context {
};
struct tcp_sigpool;
+/* Established states are fast-path and there always is current_key/rnext_key */
#define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
- TCPF_CLOSE | TCPF_CLOSE_WAIT | \
- TCPF_LAST_ACK | TCPF_CLOSING)
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_ao_key *key, struct tcphdr *th,
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 6b548dc2c..1d79a3b53 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -69,8 +69,10 @@
#define BLIST_RETRY_ITF ((__force blist_flags_t)(1ULL << 32))
/* Always retry ABORTED_COMMAND with ASC 0xc1 */
#define BLIST_RETRY_ASC_C1 ((__force blist_flags_t)(1ULL << 33))
+/* Do not query the IO Advice Hints Grouping mode page */
+#define BLIST_SKIP_IO_HINTS ((__force blist_flags_t)(1ULL << 34))
-#define __BLIST_LAST_USED BLIST_RETRY_ASC_C1
+#define __BLIST_LAST_USED BLIST_SKIP_IO_HINTS
#define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \
(__force blist_flags_t) \
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index 0e75b9277..e3b6ce3cb 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -200,6 +200,8 @@ unsigned int sas_is_tlr_enabled(struct scsi_device *);
void sas_disable_tlr(struct scsi_device *);
void sas_enable_tlr(struct scsi_device *);
+bool sas_ata_ncq_prio_supported(struct scsi_device *sdev);
+
extern struct sas_rphy *sas_end_device_alloc(struct sas_port *);
extern struct sas_rphy *sas_expander_alloc(struct sas_port *, enum sas_device_type);
void sas_rphy_free(struct sas_rphy *);
diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h
index c8bb56e68..47a6cab75 100644
--- a/include/soc/qcom/cmd-db.h
+++ b/include/soc/qcom/cmd-db.h
@@ -1,5 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
#ifndef __QCOM_COMMAND_DB_H__
#define __QCOM_COMMAND_DB_H__
@@ -21,6 +24,8 @@ u32 cmd_db_read_addr(const char *resource_id);
const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
+bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
+
enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
int cmd_db_ready(void);
@@ -31,6 +36,9 @@ static inline u32 cmd_db_read_addr(const char *resource_id)
static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
{ return ERR_PTR(-ENODEV); }
+static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
+{ return false; }
+
static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
{ return -ENODEV; }
diff --git a/include/sound/tas2781-dsp.h b/include/sound/tas2781-dsp.h
index ea9af2726..7fba7ea26 100644
--- a/include/sound/tas2781-dsp.h
+++ b/include/sound/tas2781-dsp.h
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2781 driver implements a flexible and configurable
@@ -13,8 +13,8 @@
// Author: Kevin Lu <kevin-lu@ti.com>
//
-#ifndef __TASDEVICE_DSP_H__
-#define __TASDEVICE_DSP_H__
+#ifndef __TAS2781_DSP_H__
+#define __TAS2781_DSP_H__
#define MAIN_ALL_DEVICES 0x0d
#define MAIN_DEVICE_A 0x01
@@ -180,7 +180,6 @@ void tasdevice_calbin_remove(void *context);
int tasdevice_select_tuningprm_cfg(void *context, int prm,
int cfg_no, int rca_conf_no);
int tasdevice_prmg_load(void *context, int prm_no);
-int tasdevice_prmg_calibdata_load(void *context, int prm_no);
void tasdevice_tuning_switch(void *context, int state);
int tas2781_load_calibration(void *context, char *file_name,
unsigned short i);
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 4eed9028b..517015ef3 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -12,6 +12,8 @@
#define DAPM_DIRECT "(direct)"
#define DAPM_ARROW(dir) (((dir) == SND_SOC_DAPM_DIR_OUT) ? "->" : "<-")
+TRACE_DEFINE_ENUM(SND_SOC_DAPM_DIR_OUT);
+
struct snd_soc_jack;
struct snd_soc_card;
struct snd_soc_dapm_widget;
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index cf4b98b9a..7d931db02 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -33,6 +33,8 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_see_withdrawal,
cachefiles_obj_get_ondemand_fd,
cachefiles_obj_put_ondemand_fd,
+ cachefiles_obj_get_read_req,
+ cachefiles_obj_put_read_req,
};
enum fscache_why_object_killed {
@@ -127,7 +129,11 @@ enum cachefiles_error_trace {
EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
- E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+ EM(cachefiles_obj_see_withdrawal, "SEE withdrawal") \
+ EM(cachefiles_obj_get_ondemand_fd, "GET ondemand_fd") \
+ EM(cachefiles_obj_put_ondemand_fd, "PUT ondemand_fd") \
+ EM(cachefiles_obj_get_read_req, "GET read_req") \
+ E_(cachefiles_obj_put_read_req, "PUT read_req")
#define cachefiles_coherency_traces \
EM(cachefiles_coherency_check_aux, "BAD aux ") \
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index cd84227f1..5402f77ee 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -68,6 +68,13 @@ extern "C" {
*/
#define NOUVEAU_GETPARAM_VRAM_USED 19
+/*
+ * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
+ *
+ * Query whether tile mode and PTE kind are accepted with VM allocs or not.
+ */
+#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
+
struct drm_nouveau_getparam {
__u64 param;
__u64 value;
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c42b9f1b..bcd84985f 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -7150,7 +7150,7 @@ struct bpf_fib_lookup {
/* output: MTU value */
__u16 mtu_result;
- };
+ } __attribute__((packed, aligned(2)));
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
diff --git a/include/uapi/linux/virtio_bt.h b/include/uapi/linux/virtio_bt.h
index af798f4c9..3cc7d6334 100644
--- a/include/uapi/linux/virtio_bt.h
+++ b/include/uapi/linux/virtio_bt.h
@@ -13,7 +13,6 @@
enum virtio_bt_config_type {
VIRTIO_BT_CONFIG_TYPE_PRIMARY = 0,
- VIRTIO_BT_CONFIG_TYPE_AMP = 1,
};
enum virtio_bt_config_vendor {
diff --git a/init/Kconfig b/init/Kconfig
index 664bedb9a..459f44ef7 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -743,8 +743,8 @@ config LOG_CPU_MAX_BUF_SHIFT
int "CPU kernel log buffer size contribution (13 => 8 KB, 17 => 128KB)"
depends on SMP
range 0 21
- default 12 if !BASE_SMALL
- default 0 if BASE_SMALL
+ default 0 if BASE_SMALL != 0
+ default 12
depends on PRINTK
help
This option allows to increase the default ring buffer size
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
index 76b32e65c..b33995e00 100644
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -27,10 +27,10 @@ bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd);
static inline bool io_cancel_match_sequence(struct io_kiocb *req, int sequence)
{
- if ((req->flags & REQ_F_CANCEL_SEQ) && sequence == req->work.cancel_seq)
+ if (req->cancel_seq_set && sequence == req->work.cancel_seq)
return true;
- req->flags |= REQ_F_CANCEL_SEQ;
+ req->cancel_seq_set = true;
req->work.cancel_seq = sequence;
return false;
}
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 522196dfb..8a99aabca 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -25,10 +25,10 @@
#define WORKER_IDLE_TIMEOUT (5 * HZ)
enum {
- IO_WORKER_F_UP = 1, /* up and active */
- IO_WORKER_F_RUNNING = 2, /* account as running */
- IO_WORKER_F_FREE = 4, /* worker on free list */
- IO_WORKER_F_BOUND = 8, /* is doing bounded work */
+ IO_WORKER_F_UP = 0, /* up and active */
+ IO_WORKER_F_RUNNING = 1, /* account as running */
+ IO_WORKER_F_FREE = 2, /* worker on free list */
+ IO_WORKER_F_BOUND = 3, /* is doing bounded work */
};
enum {
@@ -44,7 +44,8 @@ enum {
*/
struct io_worker {
refcount_t ref;
- unsigned flags;
+ int create_index;
+ unsigned long flags;
struct hlist_nulls_node nulls_node;
struct list_head all_list;
struct task_struct *task;
@@ -58,7 +59,6 @@ struct io_worker {
unsigned long create_state;
struct callback_head create_work;
- int create_index;
union {
struct rcu_head rcu;
@@ -165,7 +165,7 @@ static inline struct io_wq_acct *io_work_get_acct(struct io_wq *wq,
static inline struct io_wq_acct *io_wq_get_acct(struct io_worker *worker)
{
- return io_get_acct(worker->wq, worker->flags & IO_WORKER_F_BOUND);
+ return io_get_acct(worker->wq, test_bit(IO_WORKER_F_BOUND, &worker->flags));
}
static void io_worker_ref_put(struct io_wq *wq)
@@ -225,7 +225,7 @@ static void io_worker_exit(struct io_worker *worker)
wait_for_completion(&worker->ref_done);
raw_spin_lock(&wq->lock);
- if (worker->flags & IO_WORKER_F_FREE)
+ if (test_bit(IO_WORKER_F_FREE, &worker->flags))
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
raw_spin_unlock(&wq->lock);
@@ -410,7 +410,7 @@ static void io_wq_dec_running(struct io_worker *worker)
struct io_wq_acct *acct = io_wq_get_acct(worker);
struct io_wq *wq = worker->wq;
- if (!(worker->flags & IO_WORKER_F_UP))
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
if (!atomic_dec_and_test(&acct->nr_running))
@@ -430,8 +430,8 @@ static void io_wq_dec_running(struct io_worker *worker)
*/
static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
{
- if (worker->flags & IO_WORKER_F_FREE) {
- worker->flags &= ~IO_WORKER_F_FREE;
+ if (test_bit(IO_WORKER_F_FREE, &worker->flags)) {
+ clear_bit(IO_WORKER_F_FREE, &worker->flags);
raw_spin_lock(&wq->lock);
hlist_nulls_del_init_rcu(&worker->nulls_node);
raw_spin_unlock(&wq->lock);
@@ -444,8 +444,8 @@ static void __io_worker_busy(struct io_wq *wq, struct io_worker *worker)
static void __io_worker_idle(struct io_wq *wq, struct io_worker *worker)
__must_hold(wq->lock)
{
- if (!(worker->flags & IO_WORKER_F_FREE)) {
- worker->flags |= IO_WORKER_F_FREE;
+ if (!test_bit(IO_WORKER_F_FREE, &worker->flags)) {
+ set_bit(IO_WORKER_F_FREE, &worker->flags);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
}
}
@@ -564,10 +564,7 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
* clear the stalled flag.
*/
work = io_get_next_work(acct, worker);
- raw_spin_unlock(&acct->lock);
if (work) {
- __io_worker_busy(wq, worker);
-
/*
* Make sure cancelation can find this, even before
* it becomes the active work. That avoids a window
@@ -578,9 +575,15 @@ static void io_worker_handle_work(struct io_wq_acct *acct,
raw_spin_lock(&worker->lock);
worker->next_work = work;
raw_spin_unlock(&worker->lock);
- } else {
- break;
}
+
+ raw_spin_unlock(&acct->lock);
+
+ if (!work)
+ break;
+
+ __io_worker_busy(wq, worker);
+
io_assign_current_work(worker, work);
__set_current_state(TASK_RUNNING);
@@ -631,7 +634,8 @@ static int io_wq_worker(void *data)
bool exit_mask = false, last_timeout = false;
char buf[TASK_COMM_LEN];
- worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
+ set_mask_bits(&worker->flags, 0,
+ BIT(IO_WORKER_F_UP) | BIT(IO_WORKER_F_RUNNING));
snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task->pid);
set_task_comm(current, buf);
@@ -695,11 +699,11 @@ void io_wq_worker_running(struct task_struct *tsk)
if (!worker)
return;
- if (!(worker->flags & IO_WORKER_F_UP))
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
- if (worker->flags & IO_WORKER_F_RUNNING)
+ if (test_bit(IO_WORKER_F_RUNNING, &worker->flags))
return;
- worker->flags |= IO_WORKER_F_RUNNING;
+ set_bit(IO_WORKER_F_RUNNING, &worker->flags);
io_wq_inc_running(worker);
}
@@ -713,12 +717,12 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
if (!worker)
return;
- if (!(worker->flags & IO_WORKER_F_UP))
+ if (!test_bit(IO_WORKER_F_UP, &worker->flags))
return;
- if (!(worker->flags & IO_WORKER_F_RUNNING))
+ if (!test_bit(IO_WORKER_F_RUNNING, &worker->flags))
return;
- worker->flags &= ~IO_WORKER_F_RUNNING;
+ clear_bit(IO_WORKER_F_RUNNING, &worker->flags);
io_wq_dec_running(worker);
}
@@ -732,7 +736,7 @@ static void io_init_new_worker(struct io_wq *wq, struct io_worker *worker,
raw_spin_lock(&wq->lock);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wq->free_list);
list_add_tail_rcu(&worker->all_list, &wq->all_list);
- worker->flags |= IO_WORKER_F_FREE;
+ set_bit(IO_WORKER_F_FREE, &worker->flags);
raw_spin_unlock(&wq->lock);
wake_up_new_task(tsk);
}
@@ -838,7 +842,7 @@ fail:
init_completion(&worker->ref_done);
if (index == IO_WQ_ACCT_BOUND)
- worker->flags |= IO_WORKER_F_BOUND;
+ set_bit(IO_WORKER_F_BOUND, &worker->flags);
tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE);
if (!IS_ERR(tsk)) {
@@ -924,8 +928,12 @@ static bool io_wq_work_match_item(struct io_wq_work *work, void *data)
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
struct io_wq_acct *acct = io_work_get_acct(wq, work);
- struct io_cb_cancel_data match;
- unsigned work_flags = work->flags;
+ unsigned long work_flags = work->flags;
+ struct io_cb_cancel_data match = {
+ .fn = io_wq_work_match_item,
+ .data = work,
+ .cancel_all = false,
+ };
bool do_create;
/*
@@ -963,10 +971,6 @@ void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
raw_spin_unlock(&wq->lock);
/* fatal condition, failed to create the first worker */
- match.fn = io_wq_work_match_item,
- match.data = work,
- match.cancel_all = false,
-
io_acct_cancel_pending_work(wq, acct, &match);
}
}
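The io-wq hunks above turn the IO_WORKER_F_* constants from OR-able masks (1, 2, 4, 8) into bit numbers (0, 1, 2, 3) so the flags word can be manipulated with the atomic bitops set_bit()/clear_bit()/test_bit(), which take a bit number rather than a mask (the two-bit startup initialisation goes through set_mask_bits() with BIT()). A rough userspace analogue of the same pattern with C11 atomics, purely as a sketch and not kernel code:

#include <stdatomic.h>
#include <stdio.h>

/* Bit numbers, mirroring the new IO_WORKER_F_* style (names are illustrative). */
enum { WORKER_F_UP = 0, WORKER_F_RUNNING = 1, WORKER_F_FREE = 2 };

static atomic_ulong flags;

int main(void)
{
	atomic_fetch_or(&flags, 1UL << WORKER_F_UP);        /* ~ set_bit()   */
	atomic_fetch_or(&flags, 1UL << WORKER_F_FREE);
	atomic_fetch_and(&flags, ~(1UL << WORKER_F_FREE));  /* ~ clear_bit() */

	/* ~ test_bit() */
	printf("up=%d running=%d free=%d\n",
	       !!(atomic_load(&flags) & (1UL << WORKER_F_UP)),
	       !!(atomic_load(&flags) & (1UL << WORKER_F_RUNNING)),
	       !!(atomic_load(&flags) & (1UL << WORKER_F_FREE)));
	return 0;
}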
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index c170a2b8d..8a216b1d6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2211,6 +2211,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->file = NULL;
req->rsrc_node = NULL;
req->task = current;
+ req->cancel_seq_set = false;
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 6426ee382..bb6492b66 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -340,7 +340,7 @@ static inline int io_run_task_work(void)
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
- return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
+ return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}
static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
@@ -442,7 +442,7 @@ static inline bool io_file_can_poll(struct io_kiocb *req)
{
if (req->flags & REQ_F_CAN_POLL)
return true;
- if (file_can_poll(req->file)) {
+ if (req->file && file_can_poll(req->file)) {
req->flags |= REQ_F_CAN_POLL;
return true;
}
diff --git a/io_uring/napi.c b/io_uring/napi.c
index 883a1a665..8c18ede59 100644
--- a/io_uring/napi.c
+++ b/io_uring/napi.c
@@ -261,12 +261,14 @@ int io_unregister_napi(struct io_ring_ctx *ctx, void __user *arg)
}
/*
- * __io_napi_adjust_timeout() - Add napi id to the busy poll list
+ * __io_napi_adjust_timeout() - adjust busy loop timeout
* @ctx: pointer to io-uring context structure
* @iowq: pointer to io wait queue
* @ts: pointer to timespec or NULL
*
* Adjust the busy loop timeout according to timespec and busy poll timeout.
+ * If the specified NAPI timeout is bigger than the wait timeout, then adjust
+ * the NAPI timeout accordingly.
*/
void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iowq,
struct timespec64 *ts)
@@ -274,16 +276,16 @@ void __io_napi_adjust_timeout(struct io_ring_ctx *ctx, struct io_wait_queue *iow
unsigned int poll_to = READ_ONCE(ctx->napi_busy_poll_to);
if (ts) {
- struct timespec64 poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
-
- if (timespec64_compare(ts, &poll_to_ts) > 0) {
- *ts = timespec64_sub(*ts, poll_to_ts);
- } else {
- u64 to = timespec64_to_ns(ts);
-
- do_div(to, 1000);
- ts->tv_sec = 0;
- ts->tv_nsec = 0;
+ struct timespec64 poll_to_ts;
+
+ poll_to_ts = ns_to_timespec64(1000 * (s64)poll_to);
+ if (timespec64_compare(ts, &poll_to_ts) < 0) {
+ s64 poll_to_ns = timespec64_to_ns(ts);
+ if (poll_to_ns > 0) {
+ u64 val = poll_to_ns + 999;
+ do_div(val, (s64) 1000);
+ poll_to = val;
+ }
}
}
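Put concretely (a worked example, not taken from the patch): with napi_busy_poll_to set to 100 µs and a caller wait timeout of 50,500 ns, the new comparison sees 50.5 µs < 100 µs and clamps the busy-poll budget to (50500 + 999) / 1000 = 51 µs, so the NAPI timeout is rounded up to the wait timeout rather than exceeding it.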
diff --git a/io_uring/net.c b/io_uring/net.c
index 4afb475d4..3896e6220 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1041,6 +1041,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_kiocb *notif;
zc->done_io = 0;
+ req->flags |= REQ_F_POLL_NO_LAZY;
if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
return -EINVAL;
diff --git a/io_uring/nop.c b/io_uring/nop.c
index d956599a3..1a4e312df 100644
--- a/io_uring/nop.c
+++ b/io_uring/nop.c
@@ -12,6 +12,8 @@
int io_nop_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
+ if (READ_ONCE(sqe->rw_flags))
+ return -EINVAL;
return 0;
}
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 4818b7923..956e2c715 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -250,6 +250,7 @@ __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
ret = io_run_task_work_sig(ctx);
if (ret < 0) {
+ __set_current_state(TASK_RUNNING);
mutex_lock(&ctx->uring_lock);
if (list_empty(&ctx->rsrc_ref_list))
ret = 0;
@@ -1104,7 +1105,6 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
* branch doesn't expect non PAGE_SIZE'd chunks.
*/
iter->bvec = bvec;
- iter->nr_segs = bvec->bv_len;
iter->count -= offset;
iter->iov_offset = offset;
} else {
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index 3983708ce..b3722e527 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -238,11 +238,13 @@ static unsigned int io_sq_tw(struct llist_node **retry_list, int max_entries)
if (*retry_list) {
*retry_list = io_handle_tw_list(*retry_list, &count, max_entries);
if (count >= max_entries)
- return count;
+ goto out;
max_entries -= count;
}
-
*retry_list = tctx_task_work_run(tctx, max_entries, &count);
+out:
+ if (task_work_pending(current))
+ task_work_run();
return count;
}
@@ -291,6 +293,14 @@ static int io_sq_thread(void *data)
sqd->sq_cpu = raw_smp_processor_id();
}
+ /*
+ * Force audit context to get setup, in case we do prep side async
+ * operations that would trigger an audit call before any issue side
+ * audit has been done.
+ */
+ audit_uring_entry(IORING_OP_NOP);
+ audit_uring_exit(true, 0);
+
mutex_lock(&sqd->lock);
while (1) {
bool cap_entries, sqt_spin = false;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index be8c68012..d6ef4f4f9 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -529,7 +529,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
entry->rule.buflen += f_val;
f->lsm_str = str;
err = security_audit_rule_init(f->type, f->op, str,
- (void **)&f->lsm_rule);
+ (void **)&f->lsm_rule,
+ GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (err == -EINVAL) {
@@ -799,7 +800,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
/* our own (refreshed) copy of lsm_rule */
ret = security_audit_rule_init(df->type, df->op, df->lsm_str,
- (void **)&df->lsm_rule);
+ (void **)&df->lsm_rule, GFP_KERNEL);
/* Keep currently invalid fields around in case they
* become valid after a policy reload. */
if (ret == -EINVAL) {
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 050fe1ebf..d0febf070 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -308,6 +308,7 @@ static long trie_update_elem(struct bpf_map *map,
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
+ struct lpm_trie_node *free_node = NULL;
struct lpm_trie_node __rcu **slot;
struct bpf_lpm_trie_key_u8 *key = _key;
unsigned long irq_flags;
@@ -382,7 +383,7 @@ static long trie_update_elem(struct bpf_map *map,
trie->n_entries--;
rcu_assign_pointer(*slot, new_node);
- kfree_rcu(node, rcu);
+ free_node = node;
goto out;
}
@@ -429,6 +430,7 @@ out:
}
spin_unlock_irqrestore(&trie->lock, irq_flags);
+ kfree_rcu(free_node, rcu);
return ret;
}
@@ -437,6 +439,7 @@ out:
static long trie_delete_elem(struct bpf_map *map, void *_key)
{
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node *free_node = NULL, *free_parent = NULL;
struct bpf_lpm_trie_key_u8 *key = _key;
struct lpm_trie_node __rcu **trim, **trim2;
struct lpm_trie_node *node, *parent;
@@ -506,8 +509,8 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
else
rcu_assign_pointer(
*trim2, rcu_access_pointer(parent->child[0]));
- kfree_rcu(parent, rcu);
- kfree_rcu(node, rcu);
+ free_parent = parent;
+ free_node = node;
goto out;
}
@@ -521,10 +524,12 @@ static long trie_delete_elem(struct bpf_map *map, void *_key)
rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
else
RCU_INIT_POINTER(*trim, NULL);
- kfree_rcu(node, rcu);
+ free_node = node;
out:
spin_unlock_irqrestore(&trie->lock, irq_flags);
+ kfree_rcu(free_parent, rcu);
+ kfree_rcu(free_node, rcu);
return ret;
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c28792547..52ffe3335 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2985,6 +2985,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
+ WARN_ON(ops->dealloc && ops->dealloc_deferred);
atomic64_set(&link->refcnt, 1);
link->type = type;
link->id = 0;
@@ -3043,16 +3044,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
+ const struct bpf_link_ops *ops = link->ops;
bool sleepable = false;
bpf_link_free_id(link->id);
if (link->prog) {
sleepable = link->prog->sleepable;
/* detach BPF program, clean up used resources */
- link->ops->release(link);
+ ops->release(link);
bpf_prog_put(link->prog);
}
- if (link->ops->dealloc_deferred) {
+ if (ops->dealloc_deferred) {
/* schedule BPF link deallocation; if underlying BPF program
* is sleepable, we need to first wait for RCU tasks trace
* sync, then go through "classic" RCU grace period
@@ -3061,9 +3063,8 @@ static void bpf_link_free(struct bpf_link *link)
call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
else
call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
- }
- if (link->ops->dealloc)
- link->ops->dealloc(link);
+ } else if (ops->dealloc)
+ ops->dealloc(link);
}
static void bpf_link_put_deferred(struct work_struct *work)
@@ -3985,6 +3986,11 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
* check permissions at attach time.
*/
return -EPERM;
+
+ ptype = attach_type_to_prog_type(attach_type);
+ if (prog->type != ptype)
+ return -EINVAL;
+
return prog->enforce_expected_attach_type &&
prog->expected_attach_type != attach_type ?
-EINVAL : 0;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cb7ad1f79..0ef18ae40 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2359,6 +2359,8 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
regs[regno].type = PTR_TO_BTF_ID | flag;
regs[regno].btf = btf;
regs[regno].btf_id = btf_id;
+ if (type_may_be_null(flag))
+ regs[regno].id = ++env->id_gen;
}
#define DEF_NOT_SUBREG (0)
@@ -3615,7 +3617,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* sreg needs precision before this insn
*/
bt_clear_reg(bt, dreg);
- bt_set_reg(bt, sreg);
+ if (sreg != BPF_REG_FP)
+ bt_set_reg(bt, sreg);
} else {
/* dreg = K
* dreg needs precision after this insn.
@@ -3631,7 +3634,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
* both dreg and sreg need precision
* before this insn
*/
- bt_set_reg(bt, sreg);
+ if (sreg != BPF_REG_FP)
+ bt_set_reg(bt, sreg);
} /* else dreg += K
* dreg still needs precision before this insn
*/
@@ -5386,8 +5390,6 @@ static int check_map_kptr_access(struct bpf_verifier_env *env, u32 regno,
*/
mark_btf_ld_reg(env, cur_regs(env), value_regno, PTR_TO_BTF_ID, kptr_field->kptr.btf,
kptr_field->kptr.btf_id, btf_ld_kptr_type(env, kptr_field));
- /* For mark_ptr_or_null_reg */
- val_reg->id = ++env->id_gen;
} else if (class == BPF_STX) {
val_reg = reg_state(env, value_regno);
if (!register_is_null(val_reg) &&
@@ -5705,7 +5707,8 @@ static bool is_trusted_reg(const struct bpf_reg_state *reg)
return true;
/* Types listed in the reg2btf_ids are always trusted */
- if (reg2btf_ids[base_type(reg->type)])
+ if (reg2btf_ids[base_type(reg->type)] &&
+ !bpf_type_has_unsafe_modifiers(reg->type))
return true;
/* If a register is not referenced, it is trusted if it has the
@@ -6325,6 +6328,7 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val,
#define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu)
#define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null)
#define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted)
+#define BTF_TYPE_SAFE_TRUSTED_OR_NULL(__type) __PASTE(__type, __safe_trusted_or_null)
/*
* Allow list few fields as RCU trusted or full trusted.
@@ -6388,7 +6392,7 @@ BTF_TYPE_SAFE_TRUSTED(struct dentry) {
struct inode *d_inode;
};
-BTF_TYPE_SAFE_TRUSTED(struct socket) {
+BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket) {
struct sock *sk;
};
@@ -6423,11 +6427,20 @@ static bool type_is_trusted(struct bpf_verifier_env *env,
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct linux_binprm));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct file));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
- BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
}
+static bool type_is_trusted_or_null(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg,
+ const char *field_name, u32 btf_id)
+{
+ BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED_OR_NULL(struct socket));
+
+ return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id,
+ "__safe_trusted_or_null");
+}
+
static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
struct bpf_reg_state *regs,
int regno, int off, int size,
@@ -6536,6 +6549,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
*/
if (type_is_trusted(env, reg, field_name, btf_id)) {
flag |= PTR_TRUSTED;
+ } else if (type_is_trusted_or_null(env, reg, field_name, btf_id)) {
+ flag |= PTR_TRUSTED | PTR_MAYBE_NULL;
} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
if (type_is_rcu(env, reg, field_name, btf_id)) {
/* ignore __rcu tag and mark it MEM_RCU */
@@ -8830,7 +8845,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
enum bpf_attach_type eatype = env->prog->expected_attach_type;
enum bpf_prog_type type = resolve_prog_type(env->prog);
- if (func_id != BPF_FUNC_map_update_elem)
+ if (func_id != BPF_FUNC_map_update_elem &&
+ func_id != BPF_FUNC_map_delete_elem)
return false;
/* It's not possible to get access to a locked struct sock in these
@@ -8841,6 +8857,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
if (eatype == BPF_TRACE_ITER)
return true;
break;
+ case BPF_PROG_TYPE_SOCK_OPS:
+ /* map_update allowed only via dedicated helpers with event type checks */
+ if (func_id == BPF_FUNC_map_delete_elem)
+ return true;
+ break;
case BPF_PROG_TYPE_SOCKET_FILTER:
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
@@ -8936,7 +8957,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKMAP:
if (func_id != BPF_FUNC_sk_redirect_map &&
func_id != BPF_FUNC_sock_map_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -8946,7 +8966,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_SOCKHASH:
if (func_id != BPF_FUNC_sk_redirect_hash &&
func_id != BPF_FUNC_sock_hash_update &&
- func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem &&
@@ -14954,7 +14973,6 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs;
struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL;
struct bpf_reg_state *eq_branch_regs;
- struct bpf_reg_state fake_reg = {};
u8 opcode = BPF_OP(insn->code);
bool is_jmp32;
int pred = -1;
@@ -15020,7 +15038,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
verbose(env, "BPF_JMP/JMP32 uses reserved fields\n");
return -EINVAL;
}
- src_reg = &fake_reg;
+ src_reg = &env->fake_reg[0];
+ memset(src_reg, 0, sizeof(*src_reg));
src_reg->type = SCALAR_VALUE;
__mark_reg_known(src_reg, insn->imm);
}
@@ -15080,10 +15099,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
&other_branch_regs[insn->src_reg],
dst_reg, src_reg, opcode, is_jmp32);
} else /* BPF_SRC(insn->code) == BPF_K */ {
+ /* reg_set_min_max() can mangle the fake_reg. Make a copy
+ * so that these are two different memory locations. The
+ * src_reg is not used beyond here in context of K.
+ */
+ memcpy(&env->fake_reg[1], &env->fake_reg[0],
+ sizeof(env->fake_reg[0]));
err = reg_set_min_max(env,
&other_branch_regs[insn->dst_reg],
- src_reg /* fake one */,
- dst_reg, src_reg /* same fake one */,
+ &env->fake_reg[0],
+ dst_reg, &env->fake_reg[1],
opcode, is_jmp32);
}
if (err)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 4237c8748..73ef0dabc 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -202,6 +202,14 @@ struct cpuset {
};
/*
+ * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchronously
+ */
+struct cpuset_remove_tasks_struct {
+ struct work_struct work;
+ struct cpuset *cs;
+};
+
+/*
* Exclusive CPUs distributed out to sub-partitions of top_cpuset
*/
static cpumask_var_t subpartitions_cpus;
@@ -449,12 +457,6 @@ static DEFINE_SPINLOCK(callback_lock);
static struct workqueue_struct *cpuset_migrate_mm_wq;
-/*
- * CPU / memory hotplug is handled asynchronously.
- */
-static void cpuset_hotplug_workfn(struct work_struct *work);
-static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
-
static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
static inline void check_insane_mems_config(nodemask_t *nodes)
@@ -540,22 +542,10 @@ static void guarantee_online_cpus(struct task_struct *tsk,
rcu_read_lock();
cs = task_cs(tsk);
- while (!cpumask_intersects(cs->effective_cpus, pmask)) {
+ while (!cpumask_intersects(cs->effective_cpus, pmask))
cs = parent_cs(cs);
- if (unlikely(!cs)) {
- /*
- * The top cpuset doesn't have any online cpu as a
- * consequence of a race between cpuset_hotplug_work
- * and cpu hotplug notifier. But we know the top
- * cpuset's effective_cpus is on its way to be
- * identical to cpu_online_mask.
- */
- goto out_unlock;
- }
- }
- cpumask_and(pmask, pmask, cs->effective_cpus);
-out_unlock:
+ cpumask_and(pmask, pmask, cs->effective_cpus);
rcu_read_unlock();
}
@@ -1217,7 +1207,7 @@ static void rebuild_sched_domains_locked(void)
/*
* If we have raced with CPU hotplug, return early to avoid
* passing doms with offlined cpu to partition_sched_domains().
- * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
+ * Anyways, cpuset_handle_hotplug() will rebuild sched domains.
*
* With no CPUs in any subpartitions, top_cpuset's effective CPUs
* should be the same as the active CPUs, so checking only top_cpuset
@@ -1260,12 +1250,17 @@ static void rebuild_sched_domains_locked(void)
}
#endif /* CONFIG_SMP */
-void rebuild_sched_domains(void)
+static void rebuild_sched_domains_cpuslocked(void)
{
- cpus_read_lock();
mutex_lock(&cpuset_mutex);
rebuild_sched_domains_locked();
mutex_unlock(&cpuset_mutex);
+}
+
+void rebuild_sched_domains(void)
+{
+ cpus_read_lock();
+ rebuild_sched_domains_cpuslocked();
cpus_read_unlock();
}
@@ -2079,14 +2074,11 @@ write_error:
/*
* For partcmd_update without newmask, it is being called from
- * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
- * Update the load balance flag and scheduling domain if
- * cpus_read_trylock() is successful.
+ * cpuset_handle_hotplug(). Update the load balance flag and
+ * scheduling domain accordingly.
*/
- if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
+ if ((cmd == partcmd_update) && !newmask)
update_partition_sd_lb(cs, old_prs);
- cpus_read_unlock();
- }
notify_partition_change(cs, old_prs);
return 0;
@@ -2948,7 +2940,7 @@ bool current_cpuset_is_being_rebound(void)
static int update_relax_domain_level(struct cpuset *cs, s64 val)
{
#ifdef CONFIG_SMP
- if (val < -1 || val >= sched_domain_level_max)
+ if (val < -1 || val > sched_domain_level_max + 1)
return -EINVAL;
#endif
@@ -3599,8 +3591,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
* proceeding, so that we don't end up keep removing tasks added
* after execution capability is restored.
*
- * cpuset_hotplug_work calls back into cgroup core via
- * cgroup_transfer_tasks() and waiting for it from a cgroupfs
+ * cpuset_handle_hotplug may call back into cgroup core asynchronously
+ * via cgroup_transfer_tasks() and waiting for it from a cgroupfs
* operation like this one can lead to a deadlock through kernfs
* active_ref protection. Let's break the protection. Losing the
* protection is okay as we check whether @cs is online after
@@ -3609,7 +3601,6 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
*/
css_get(&cs->css);
kernfs_break_active_protection(of->kn);
- flush_work(&cpuset_hotplug_work);
cpus_read_lock();
mutex_lock(&cpuset_mutex);
@@ -4354,6 +4345,16 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
}
}
+static void cpuset_migrate_tasks_workfn(struct work_struct *work)
+{
+ struct cpuset_remove_tasks_struct *s;
+
+ s = container_of(work, struct cpuset_remove_tasks_struct, work);
+ remove_tasks_in_empty_cpuset(s->cs);
+ css_put(&s->cs->css);
+ kfree(s);
+}
+
static void
hotplug_update_tasks_legacy(struct cpuset *cs,
struct cpumask *new_cpus, nodemask_t *new_mems,
@@ -4383,12 +4384,21 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
/*
* Move tasks to the nearest ancestor with execution resources,
* This is full cgroup operation which will also call back into
- * cpuset. Should be done outside any lock.
+ * cpuset. Execute it asynchronously using workqueue.
*/
- if (is_empty) {
- mutex_unlock(&cpuset_mutex);
- remove_tasks_in_empty_cpuset(cs);
- mutex_lock(&cpuset_mutex);
+ if (is_empty && cs->css.cgroup->nr_populated_csets &&
+ css_tryget_online(&cs->css)) {
+ struct cpuset_remove_tasks_struct *s;
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ if (WARN_ON_ONCE(!s)) {
+ css_put(&cs->css);
+ return;
+ }
+
+ s->cs = cs;
+ INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
+ schedule_work(&s->work);
}
}
@@ -4421,30 +4431,6 @@ void cpuset_force_rebuild(void)
force_rebuild = true;
}
-/*
- * Attempt to acquire a cpus_read_lock while a hotplug operation may be in
- * progress.
- * Return: true if successful, false otherwise
- *
- * To avoid circular lock dependency between cpuset_mutex and cpus_read_lock,
- * cpus_read_trylock() is used here to acquire the lock.
- */
-static bool cpuset_hotplug_cpus_read_trylock(void)
-{
- int retries = 0;
-
- while (!cpus_read_trylock()) {
- /*
- * CPU hotplug still in progress. Retry 5 times
- * with a 10ms wait before bailing out.
- */
- if (++retries > 5)
- return false;
- msleep(10);
- }
- return true;
-}
-
/**
* cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
* @cs: cpuset in interest
@@ -4493,13 +4479,11 @@ retry:
compute_partition_effective_cpumask(cs, &new_cpus);
if (remote && cpumask_empty(&new_cpus) &&
- partition_is_populated(cs, NULL) &&
- cpuset_hotplug_cpus_read_trylock()) {
+ partition_is_populated(cs, NULL)) {
remote_partition_disable(cs, tmp);
compute_effective_cpumask(&new_cpus, cs, parent);
remote = false;
cpuset_force_rebuild();
- cpus_read_unlock();
}
/*
@@ -4519,18 +4503,8 @@ retry:
else if (is_partition_valid(parent) && is_partition_invalid(cs))
partcmd = partcmd_update;
- /*
- * cpus_read_lock needs to be held before calling
- * update_parent_effective_cpumask(). To avoid circular lock
- * dependency between cpuset_mutex and cpus_read_lock,
- * cpus_read_trylock() is used here to acquire the lock.
- */
if (partcmd >= 0) {
- if (!cpuset_hotplug_cpus_read_trylock())
- goto update_tasks;
-
update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
- cpus_read_unlock();
if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
compute_partition_effective_cpumask(cs, &new_cpus);
cpuset_force_rebuild();
@@ -4558,8 +4532,7 @@ unlock:
}
/**
- * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
- * @work: unused
+ * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset
*
* This function is called after either CPU or memory configuration has
* changed and updates cpuset accordingly. The top_cpuset is always
@@ -4573,8 +4546,10 @@ unlock:
*
* Note that CPU offlining during suspend is ignored. We don't modify
* cpusets across suspend/resume cycles at all.
+ *
+ * CPU / memory hotplug is handled synchronously.
*/
-static void cpuset_hotplug_workfn(struct work_struct *work)
+static void cpuset_handle_hotplug(void)
{
static cpumask_t new_cpus;
static nodemask_t new_mems;
@@ -4585,6 +4560,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
if (on_dfl && !alloc_cpumasks(NULL, &tmp))
ptmp = &tmp;
+ lockdep_assert_cpus_held();
mutex_lock(&cpuset_mutex);
/* fetch the available cpus/mems and find out which changed how */
@@ -4666,7 +4642,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* rebuild sched domains if cpus_allowed has changed */
if (cpus_updated || force_rebuild) {
force_rebuild = false;
- rebuild_sched_domains();
+ rebuild_sched_domains_cpuslocked();
}
free_cpumasks(NULL, ptmp);
@@ -4679,12 +4655,7 @@ void cpuset_update_active_cpus(void)
* inside cgroup synchronization. Bounce actual hotplug processing
* to a work item to avoid reverse locking order.
*/
- schedule_work(&cpuset_hotplug_work);
-}
-
-void cpuset_wait_for_hotplug(void)
-{
- flush_work(&cpuset_hotplug_work);
+ cpuset_handle_hotplug();
}
/*
@@ -4695,7 +4666,7 @@ void cpuset_wait_for_hotplug(void)
static int cpuset_track_online_nodes(struct notifier_block *self,
unsigned long action, void *arg)
{
- schedule_work(&cpuset_hotplug_work);
+ cpuset_handle_hotplug();
return NOTIFY_OK;
}
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 63447eb85..563877d6c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1208,52 +1208,6 @@ void __init cpuhp_threads_init(void)
kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
-/*
- *
- * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock
- * protected region.
- *
- * The operation is still serialized against concurrent CPU hotplug via
- * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_
- * serialized against other hotplug related activity like adding or
- * removing of state callbacks and state instances, which invoke either the
- * startup or the teardown callback of the affected state.
- *
- * This is required for subsystems which are unfixable vs. CPU hotplug and
- * evade lock inversion problems by scheduling work which has to be
- * completed _before_ cpu_up()/_cpu_down() returns.
- *
- * Don't even think about adding anything to this for any new code or even
- * drivers. It's only purpose is to keep existing lock order trainwrecks
- * working.
- *
- * For cpu_down() there might be valid reasons to finish cleanups which are
- * not required to be done under cpu_hotplug_lock, but that's a different
- * story and would be not invoked via this.
- */
-static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
-{
- /*
- * cpusets delegate hotplug operations to a worker to "solve" the
- * lock order problems. Wait for the worker, but only if tasks are
- * _not_ frozen (suspend, hibernate) as that would wait forever.
- *
- * The wait is required because otherwise the hotplug operation
- * returns with inconsistent state, which could even be observed in
- * user space when a new CPU is brought up. The CPU plug uevent
- * would be delivered and user space reacting on it would fail to
- * move tasks to the newly plugged CPU up to the point where the
- * work has finished because up to that point the newly plugged CPU
- * is not assignable in cpusets/cgroups. On unplug that's not
- * necessarily a visible issue, but it is still inconsistent state,
- * which is the real problem which needs to be "fixed". This can't
- * prevent the transient state between scheduling the work and
- * returning from waiting for it.
- */
- if (!tasks_frozen)
- cpuset_wait_for_hotplug();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
@@ -1494,7 +1448,6 @@ out:
*/
lockup_detector_cleanup();
arch_smt_update();
- cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
@@ -1728,7 +1681,6 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
out:
cpus_write_unlock();
arch_smt_update();
- cpu_up_down_serialize_trainwrecks(tasks_frozen);
return ret;
}
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 9443bc63c..2aeaf9765 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -184,6 +184,33 @@ char kdb_getchar(void)
unreachable();
}
+/**
+ * kdb_position_cursor() - Place cursor in the correct horizontal position
+ * @prompt: Nil-terminated string containing the prompt string
+ * @buffer: Nil-terminated string containing the entire command line
+ * @cp: Cursor position, pointer the character in buffer where the cursor
+ * should be positioned.
+ *
+ * The cursor is positioned by sending a carriage-return and then printing
+ * the content of the line until we reach the correct cursor position.
+ *
+ * There is some additional fine detail here.
+ *
+ * Firstly, even though kdb_printf() will correctly format zero-width fields
+ * we want the second call to kdb_printf() to be conditional. That keeps things
+ * a little cleaner when LOGGING=1.
+ *
+ * Secondly, we can't combine everything into one call to kdb_printf() since
+ * that renders into a fixed length buffer and the combined print could result
+ * in unwanted truncation.
+ */
+static void kdb_position_cursor(char *prompt, char *buffer, char *cp)
+{
+ kdb_printf("\r%s", kdb_prompt_str);
+ if (cp > buffer)
+ kdb_printf("%.*s", (int)(cp - buffer), buffer);
+}
+
/*
* kdb_read
*
@@ -212,7 +239,6 @@ static char *kdb_read(char *buffer, size_t bufsize)
* and null byte */
char *lastchar;
char *p_tmp;
- char tmp;
static char tmpbuffer[CMD_BUFLEN];
int len = strlen(buffer);
int len_tmp;
@@ -249,12 +275,8 @@ poll_again:
}
*(--lastchar) = '\0';
--cp;
- kdb_printf("\b%s \r", cp);
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_printf("\b%s ", cp);
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 10: /* linefeed */
@@ -272,19 +294,14 @@ poll_again:
memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
memcpy(cp, tmpbuffer, lastchar - cp - 1);
*(--lastchar) = '\0';
- kdb_printf("%s \r", cp);
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_printf("%s ", cp);
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 1: /* Home */
if (cp > buffer) {
- kdb_printf("\r");
- kdb_printf(kdb_prompt_str);
cp = buffer;
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
}
break;
case 5: /* End */
@@ -300,11 +317,10 @@ poll_again:
}
break;
case 14: /* Down */
- memset(tmpbuffer, ' ',
- strlen(kdb_prompt_str) + (lastchar-buffer));
- *(tmpbuffer+strlen(kdb_prompt_str) +
- (lastchar-buffer)) = '\0';
- kdb_printf("\r%s\r", tmpbuffer);
+ case 16: /* Up */
+ kdb_printf("\r%*c\r",
+ (int)(strlen(kdb_prompt_str) + (lastchar - buffer)),
+ ' ');
*lastchar = (char)key;
*(lastchar+1) = '\0';
return lastchar;
@@ -314,15 +330,6 @@ poll_again:
++cp;
}
break;
- case 16: /* Up */
- memset(tmpbuffer, ' ',
- strlen(kdb_prompt_str) + (lastchar-buffer));
- *(tmpbuffer+strlen(kdb_prompt_str) +
- (lastchar-buffer)) = '\0';
- kdb_printf("\r%s\r", tmpbuffer);
- *lastchar = (char)key;
- *(lastchar+1) = '\0';
- return lastchar;
case 9: /* Tab */
if (tab < 2)
++tab;
@@ -366,15 +373,25 @@ poll_again:
kdb_printf("\n");
kdb_printf(kdb_prompt_str);
kdb_printf("%s", buffer);
+ if (cp != lastchar)
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
} else if (tab != 2 && count > 0) {
- len_tmp = strlen(p_tmp);
- strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
- len_tmp = strlen(p_tmp);
- strncpy(cp, p_tmp+len, len_tmp-len + 1);
- len = len_tmp - len;
- kdb_printf("%s", cp);
- cp += len;
- lastchar += len;
+ /* How many new characters do we want from tmpbuffer? */
+ len_tmp = strlen(p_tmp) - len;
+ if (lastchar + len_tmp >= bufend)
+ len_tmp = bufend - lastchar;
+
+ if (len_tmp) {
+ /* + 1 ensures the '\0' is memmove'd */
+ memmove(cp+len_tmp, cp, (lastchar-cp) + 1);
+ memcpy(cp, p_tmp+len, len_tmp);
+ kdb_printf("%s", cp);
+ cp += len_tmp;
+ lastchar += len_tmp;
+ if (cp != lastchar)
+ kdb_position_cursor(kdb_prompt_str,
+ buffer, cp);
+ }
}
kdb_nextline = 1; /* reset output line number */
break;
@@ -385,13 +402,9 @@ poll_again:
memcpy(cp+1, tmpbuffer, lastchar - cp);
*++lastchar = '\0';
*cp = key;
- kdb_printf("%s\r", cp);
+ kdb_printf("%s", cp);
++cp;
- tmp = *cp;
- *cp = '\0';
- kdb_printf(kdb_prompt_str);
- kdb_printf("%s", buffer);
- *cp = tmp;
+ kdb_position_cursor(kdb_prompt_str, buffer, cp);
} else {
*++lastchar = '\0';
*cp++ = key;
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 02205ab53..f7f3d14fa 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
struct task_struct **tsk;
int threads = map->bparam.threads;
int node = map->bparam.node;
- const cpumask_t *cpu_mask = cpumask_of_node(node);
u64 loops;
int ret = 0;
int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
if (IS_ERR(tsk[i])) {
pr_err("create dma_map thread failed\n");
ret = PTR_ERR(tsk[i]);
+ while (--i >= 0)
+ kthread_stop(tsk[i]);
goto out;
}
if (node != NUMA_NO_NODE)
- kthread_bind_mask(tsk[i], cpu_mask);
+ kthread_bind_mask(tsk[i], cpumask_of_node(node));
}
/* clear the old value in the previous benchmark */
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
msleep_interruptible(map->bparam.seconds * 1000);
- /* wait for the completion of benchmark threads */
+ /* wait for the completion of all started benchmark threads */
for (i = 0; i < threads; i++) {
- ret = kthread_stop(tsk[i]);
- if (ret)
- goto out;
+ int kthread_ret = kthread_stop_put(tsk[i]);
+
+ if (kthread_ret)
+ ret = kthread_ret;
}
+ if (ret)
+ goto out;
+
loops = atomic64_read(&map->loops);
if (likely(loops > 0)) {
u64 map_variance, unmap_variance;
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
}
out:
- for (i = 0; i < threads; i++)
- put_task_struct(tsk[i]);
put_device(map->dev);
kfree(tsk);
return ret;
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
}
if (map->bparam.node != NUMA_NO_NODE &&
- !node_possible(map->bparam.node)) {
+ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+ !node_possible(map->bparam.node))) {
pr_err("invalid numa node\n");
return -EINVAL;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 724e6d7e1..4082d0161 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5365,6 +5365,7 @@ int perf_event_release_kernel(struct perf_event *event)
again:
mutex_lock(&event->child_mutex);
list_for_each_entry(child, &event->child_list, child_list) {
+ void *var = NULL;
/*
* Cannot change, child events are not migrated, see the
@@ -5405,11 +5406,23 @@ again:
* this can't be the last reference.
*/
put_event(event);
+ } else {
+ var = &ctx->refcount;
}
mutex_unlock(&event->child_mutex);
mutex_unlock(&ctx->mutex);
put_ctx(ctx);
+
+ if (var) {
+ /*
+ * If perf_event_free_task() has deleted all events from the
+ * ctx while the child_mutex got released above, make sure to
+ * notify about the preceding put_ctx().
+ */
+ smp_mb(); /* pairs with wait_var_event() */
+ wake_up_var(var);
+ }
goto again;
}
mutex_unlock(&event->child_mutex);
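The smp_mb()/wake_up_var() pair added above only does anything if some other path sleeps on the same address with wait_var_event(). A minimal sketch of that generic pairing, using a placeholder variable that is not part of this patch:

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static atomic_t demo_done;

static void demo_waiter(void)
{
        /* Sleeps until a wake_up_var(&demo_done) finds the condition true. */
        wait_var_event(&demo_done, atomic_read(&demo_done) == 1);
}

static void demo_waker(void)
{
        atomic_set(&demo_done, 1);
        smp_mb();                 /* order the store before the waiter's check */
        wake_up_var(&demo_done);  /* must target the same address as the waiter */
}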
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 74a4ef1da..fd75b4a48 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/mm.h>
#include "gcov.h"
-#if (__GNUC__ >= 10)
+#if (__GNUC__ >= 14)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ >= 10)
#define GCOV_COUNTERS 8
#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 6d443ea22..383fd43ac 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -14,7 +14,12 @@ include/
arch/$SRCARCH/include/
"
-type cpio > /dev/null
+if ! command -v cpio >/dev/null; then
+ echo >&2 "***"
+ echo >&2 "*** 'cpio' could not be found."
+ echo >&2 "***"
+ exit 1
+fi
# Support incremental builds by skipping archive generation
# if timestamps of files being archived are not changed.
@@ -84,7 +89,7 @@ find $cpio_dir -type f -print0 |
# Create archive and try to normalize metadata for reproducibility.
tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
- --owner=0 --group=0 --sort=name --numeric-owner \
+ --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \
-I $XZ -cf $tarfile -C $cpio_dir/ . > /dev/null
echo $headers_md5 > kernel/kheaders.md5
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 1ed2b1739..5ecd072a3 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -70,6 +70,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
}
/*
+ * Complete an eventually pending irq move cleanup. If this
+ * interrupt was moved in hard irq context, then the vectors need
+ * to be cleaned up. It can't wait until this interrupt actually
+ * happens and this CPU was involved.
+ */
+ irq_force_complete_move(desc);
+
+ /*
* No move required, if:
* - Interrupt is per cpu
* - Interrupt is not started
@@ -88,14 +96,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
}
/*
- * Complete an eventually pending irq move cleanup. If this
- * interrupt was moved in hard irq context, then the vectors need
- * to be cleaned up. It can't wait until this interrupt actually
- * happens and this CPU was involved.
- */
- irq_force_complete_move(desc);
-
- /*
* If there is a setaffinity pending, then try to reuse the pending
* mask, so the last change of the affinity does not get lost. If
* there is no move pending or the pending mask does not contain
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 4c6b32318..7bf9f66ca 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -160,7 +160,10 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
static unsigned int irq_find_at_or_after(unsigned int offset)
{
unsigned long index = offset;
- struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
+ struct irq_desc *desc;
+
+ guard(rcu)();
+ desc = mt_find(&sparse_irqs, &index, nr_irqs);
return desc ? irq_desc_get_irq(desc) : nr_irqs;
}
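guard(rcu)() is the scope-based form of rcu_read_lock(): the unlock runs automatically when the scope is left, on every return path. A small sketch of the two equivalent shapes, with the lookup body elided:

#include <linux/cleanup.h>
#include <linux/rcupdate.h>

static int demo_lookup_explicit(void)
{
        int ret;

        rcu_read_lock();
        ret = 0;                  /* ... mt_find()-style walk here ... */
        rcu_read_unlock();
        return ret;
}

static int demo_lookup_guarded(void)
{
        guard(rcu)();             /* rcu_read_unlock() runs at every return */
        return 0;                 /* ... same walk here ... */
}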
diff --git a/kernel/kcov.c b/kernel/kcov.c
index f9ac2e9e4..9f4affae4 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -631,6 +631,7 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
return -EINVAL;
kcov->mode = mode;
t->kcov = kcov;
+ t->kcov_mode = KCOV_MODE_REMOTE;
kcov->t = t;
kcov->remote = true;
kcov->remote_size = remote_arg->area_size;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 65adc815f..4f917bdad 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1068,6 +1068,7 @@ static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;
+bool kprobe_ftrace_disabled;
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int *cnt)
@@ -1136,6 +1137,11 @@ static int disarm_kprobe_ftrace(struct kprobe *p)
ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
+
+void kprobe_ftrace_kill(void)
+{
+ kprobe_ftrace_disabled = true;
+}
#else /* !CONFIG_KPROBES_ON_FTRACE */
static inline int arm_kprobe_ftrace(struct kprobe *p)
{
diff --git a/kernel/padata.c b/kernel/padata.c
index e3f639ff1..53f4bc912 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -106,7 +106,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
{
int i;
- spin_lock(&padata_works_lock);
+ spin_lock_bh(&padata_works_lock);
/* Start at 1 because the current task participates in the job. */
for (i = 1; i < nworks; ++i) {
struct padata_work *pw = padata_work_alloc();
@@ -116,7 +116,7 @@ static int __init padata_work_alloc_mt(int nworks, void *data,
padata_work_init(pw, padata_mt_helper, data, 0);
list_add(&pw->pw_list, head);
}
- spin_unlock(&padata_works_lock);
+ spin_unlock_bh(&padata_works_lock);
return i;
}
@@ -134,12 +134,12 @@ static void __init padata_works_free(struct list_head *works)
if (list_empty(works))
return;
- spin_lock(&padata_works_lock);
+ spin_lock_bh(&padata_works_lock);
list_for_each_entry_safe(cur, next, works, pw_list) {
list_del(&cur->pw_list);
padata_work_free(cur);
}
- spin_unlock(&padata_works_lock);
+ spin_unlock_bh(&padata_works_lock);
}
static void padata_parallel_worker(struct work_struct *parallel_work)
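Switching to the _bh variants follows the usual rule for a lock that can also be taken from softirq context on the same CPU; the specific padata contexts are not spelled out in this hunk, so the sketch below only illustrates the general rule with demo names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_from_softirq(void)        /* e.g. a tasklet or timer callback */
{
        spin_lock(&demo_lock);
        /* ... */
        spin_unlock(&demo_lock);
}

static void demo_from_process_context(void)
{
        spin_lock_bh(&demo_lock);           /* disable local BHs first, or a
                                             * softirq on this CPU could spin
                                             * forever on the lock we hold */
        /* ... */
        spin_unlock_bh(&demo_lock);
}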
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 7ade20e95..415201ca0 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -218,6 +218,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
*/
do {
clear_thread_flag(TIF_SIGPENDING);
+ clear_thread_flag(TIF_NOTIFY_SIGNAL);
rc = kernel_wait4(-1, NULL, __WALL, NULL);
} while (rc != -ECHILD);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index cae81a87c..66ac067d9 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -194,8 +194,6 @@ void thaw_processes(void)
__usermodehelper_set_disable_depth(UMH_FREEZING);
thaw_workqueues();
- cpuset_wait_for_hotplug();
-
read_lock(&tasklist_lock);
for_each_process_thread(g, p) {
/* No other threads should have PF_SUSPEND_TASK set */
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 45d6b4c3d..cf2e90753 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -1997,7 +1997,8 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
preempt_disable();
pipe_count = READ_ONCE(p->rtort_pipe_count);
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
- /* Should not happen, but... */
+ // Should not happen in a correct RCU implementation,
+ // happens quite often for torture_type=busted.
pipe_count = RCU_TORTURE_PIPE_LEN;
}
completed = cur_ops->get_gp_seq();
@@ -2486,8 +2487,8 @@ static int rcu_torture_stall(void *args)
preempt_disable();
pr_alert("%s start on CPU %d.\n",
__func__, raw_smp_processor_id());
- while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
- stop_at))
+ while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
+ !kthread_should_stop())
if (stall_cpu_block) {
#ifdef CONFIG_PREEMPTION
preempt_schedule();
@@ -3040,11 +3041,12 @@ static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
}
/* IPI handler to get callback posted on desired CPU, if online. */
-static void rcu_torture_barrier1cb(void *rcu_void)
+static int rcu_torture_barrier1cb(void *rcu_void)
{
struct rcu_head *rhp = rcu_void;
cur_ops->call(rhp, rcu_torture_barrier_cbf);
+ return 0;
}
/* kthread function to register callbacks used to test RCU barriers. */
@@ -3070,11 +3072,9 @@ static int rcu_torture_barrier_cbs(void *arg)
* The above smp_load_acquire() ensures barrier_phase load
* is ordered before the following ->call().
*/
- if (smp_call_function_single(myid, rcu_torture_barrier1cb,
- &rcu, 1)) {
- // IPI failed, so use direct call from current CPU.
+ if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
cur_ops->call(&rcu, rcu_torture_barrier_cbf);
- }
+
if (atomic_dec_and_test(&barrier_cbs_count))
wake_up(&barrier_wq);
} while (!torture_must_stop());
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 147b5945d..2a453de9f 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -1994,7 +1994,7 @@ void show_rcu_tasks_trace_gp_kthread(void)
{
char buf[64];
- sprintf(buf, "N%lu h:%lu/%lu/%lu",
+ snprintf(buf, sizeof(buf), "N%lu h:%lu/%lu/%lu",
data_race(n_trc_holdouts),
data_race(n_heavy_reader_ofl_updates),
data_race(n_heavy_reader_updates),
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index 5d6664285..b5ec62b2d 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -504,7 +504,8 @@ static void print_cpu_stall_info(int cpu)
rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
if (rcuc_starved)
- sprintf(buf, " rcuc=%ld jiffies(starved)", j);
+ // Print signed value, as negative values indicate a probable bug.
+ snprintf(buf, sizeof(buf), " rcuc=%ld jiffies(starved)", j);
pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
cpu,
"O."[!!cpu_online(cpu)],
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a4045..d211d40a2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11402,7 +11402,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of,
{
struct task_group *tg = css_tg(of_css(of));
u64 period = tg_get_cfs_period(tg);
- u64 burst = tg_get_cfs_burst(tg);
+ u64 burst = tg->cfs_bandwidth.burst;
u64 quota;
int ret;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c62805dbd..213c94d02 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6675,22 +6675,42 @@ static inline void hrtick_update(struct rq *rq)
#ifdef CONFIG_SMP
static inline bool cpu_overutilized(int cpu)
{
- unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
- unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+ unsigned long rq_util_min, rq_util_max;
+
+ if (!sched_energy_enabled())
+ return false;
+
+ rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+ rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
/* Return true only if the utilization doesn't fit CPU's capacity */
return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
}
-static inline void update_overutilized_status(struct rq *rq)
+static inline void set_rd_overutilized_status(struct root_domain *rd,
+ unsigned int status)
{
- if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu)) {
- WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rq->rd, SG_OVERUTILIZED);
- }
+ if (!sched_energy_enabled())
+ return;
+
+ WRITE_ONCE(rd->overutilized, status);
+ trace_sched_overutilized_tp(rd, !!status);
+}
+
+static inline void check_update_overutilized_status(struct rq *rq)
+{
+ /*
+ * overutilized field is used for load balancing decisions only
+ * if energy aware scheduler is being used
+ */
+ if (!sched_energy_enabled())
+ return;
+
+ if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
+ set_rd_overutilized_status(rq->rd, SG_OVERUTILIZED);
}
#else
-static inline void update_overutilized_status(struct rq *rq) { }
+static inline void check_update_overutilized_status(struct rq *rq) { }
#endif
/* Runqueue only has SCHED_IDLE tasks enqueued */
@@ -6791,7 +6811,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* and the following generally works well enough in practice.
*/
if (!task_new)
- update_overutilized_status(rq);
+ check_update_overutilized_status(rq);
enqueue_throttle:
assert_list_leaf_cfs_rq(rq);
@@ -10608,19 +10628,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
if (!env->sd->parent) {
- struct root_domain *rd = env->dst_rq->rd;
-
/* update overload indicator if we are at root domain */
- WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
+ WRITE_ONCE(env->dst_rq->rd->overload, sg_status & SG_OVERLOAD);
/* Update over-utilization (tipping point, U >= 0) indicator */
- WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, sg_status & SG_OVERUTILIZED);
+ set_rd_overutilized_status(env->dst_rq->rd,
+ sg_status & SG_OVERUTILIZED);
} else if (sg_status & SG_OVERUTILIZED) {
- struct root_domain *rd = env->dst_rq->rd;
-
- WRITE_ONCE(rd->overutilized, SG_OVERUTILIZED);
- trace_sched_overutilized_tp(rd, SG_OVERUTILIZED);
+ set_rd_overutilized_status(env->dst_rq->rd, SG_OVERUTILIZED);
}
update_idle_cpu_scan(env, sum_util);
@@ -12621,7 +12636,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
task_tick_numa(rq, curr);
update_misfit_status(curr, rq);
- update_overutilized_status(task_rq(curr));
+ check_update_overutilized_status(task_rq(curr));
task_tick_core(rq, curr);
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 99ea59860..3127c9b30 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1468,7 +1468,7 @@ static void set_domain_attribute(struct sched_domain *sd,
} else
request = attr->relax_domain_level;
- if (sd->level > request) {
+ if (sd->level >= request) {
/* Turn off idle balance on this domain: */
sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e5b260aa0..4d50d53ac 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -20,6 +20,16 @@
#include "tick-internal.h"
#include "timekeeping_internal.h"
+static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
+{
+ u64 delta = clocksource_delta(end, start, cs->mask);
+
+ if (likely(delta < cs->max_cycles))
+ return clocksource_cyc2ns(delta, cs->mult, cs->shift);
+
+ return mul_u64_u32_shr(delta, cs->mult, cs->shift);
+}
+
/**
* clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
* @mult: pointer to mult variable
@@ -222,8 +232,8 @@ enum wd_read_status {
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
unsigned int nretries, max_retries;
- u64 wd_end, wd_end2, wd_delta;
int64_t wd_delay, wd_seq_delay;
+ u64 wd_end, wd_end2;
max_retries = clocksource_get_max_watchdog_retry();
for (nretries = 0; nretries <= max_retries; nretries++) {
@@ -234,9 +244,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
wd_end2 = watchdog->read(watchdog);
local_irq_enable();
- wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
- wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
- watchdog->shift);
+ wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
if (wd_delay <= WATCHDOG_MAX_SKEW) {
if (nretries > 1 || nretries >= max_retries) {
pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
@@ -254,8 +262,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
* report system busy, reinit the watchdog and skip the current
* watchdog test.
*/
- wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
- wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
+ wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
goto skip_test;
}
@@ -366,8 +373,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
delta = (csnow_end - csnow_mid) & cs->mask;
if (delta < 0)
cpumask_set_cpu(cpu, &cpus_ahead);
- delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+ cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
if (cs_nsec > cs_nsec_max)
cs_nsec_max = cs_nsec;
if (cs_nsec < cs_nsec_min)
@@ -398,8 +404,8 @@ static inline void clocksource_reset_watchdog(void)
static void clocksource_watchdog(struct timer_list *unused)
{
- u64 csnow, wdnow, cslast, wdlast, delta;
int64_t wd_nsec, cs_nsec, interval;
+ u64 csnow, wdnow, cslast, wdlast;
int next_cpu, reset_pending;
struct clocksource *cs;
enum wd_read_status read_ret;
@@ -456,12 +462,8 @@ static void clocksource_watchdog(struct timer_list *unused)
continue;
}
- delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
- wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
- watchdog->shift);
-
- delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
- cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+ wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
+ cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
wdlast = cs->wd_last; /* save these in case we print them */
cslast = cs->cs_last;
cs->cs_last = csnow;
@@ -832,7 +834,7 @@ void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
*/
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
- u64 now, delta, nsec = 0;
+ u64 now, nsec = 0;
if (!suspend_clocksource)
return 0;
@@ -847,12 +849,8 @@ u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
else
now = suspend_clocksource->read(suspend_clocksource);
- if (now > suspend_start) {
- delta = clocksource_delta(now, suspend_start,
- suspend_clocksource->mask);
- nsec = mul_u64_u32_shr(delta, suspend_clocksource->mult,
- suspend_clocksource->shift);
- }
+ if (now > suspend_start)
+ nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);
/*
* Disable the suspend timer to save power if current clocksource is
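cycles_to_nsec_safe() exists because the plain (delta * mult) >> shift conversion wraps once delta * mult no longer fits in 64 bits, which can happen across long suspends or stalls; the wide-multiply fallback avoids that. A runnable userspace sketch of the difference, with illustrative constants rather than real clocksource values:

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc2ns_narrow(uint64_t delta, uint32_t mult, uint32_t shift)
{
        return (delta * mult) >> shift;            /* 64-bit product can wrap */
}

static uint64_t cyc2ns_wide(uint64_t delta, uint32_t mult, uint32_t shift)
{
        unsigned __int128 p = (unsigned __int128)delta * mult;

        return (uint64_t)(p >> shift);             /* mul_u64_u32_shr()-style */
}

int main(void)
{
        uint64_t delta = 1ULL << 40;               /* a very long interval */
        uint32_t mult = 1U << 26, shift = 24;

        printf("narrow: %llu\nwide:   %llu\n",
               (unsigned long long)cyc2ns_narrow(delta, mult, shift),
               (unsigned long long)cyc2ns_wide(delta, mult, shift));
        return 0;
}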
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d88b13076..a47bcf71d 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -178,26 +178,6 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
}
}
-#ifdef CONFIG_NO_HZ_FULL
-static void giveup_do_timer(void *info)
-{
- int cpu = *(unsigned int *)info;
-
- WARN_ON(tick_do_timer_cpu != smp_processor_id());
-
- tick_do_timer_cpu = cpu;
-}
-
-static void tick_take_do_timer_from_boot(void)
-{
- int cpu = smp_processor_id();
- int from = tick_do_timer_boot_cpu;
-
- if (from >= 0 && from != cpu)
- smp_call_function_single(from, giveup_do_timer, &cpu, 1);
-}
-#endif
-
/*
* Setup the tick device
*/
@@ -221,19 +201,25 @@ static void tick_setup_device(struct tick_device *td,
tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
/*
- * The boot CPU may be nohz_full, in which case set
- * tick_do_timer_boot_cpu so the first housekeeping
- * secondary that comes up will take do_timer from
- * us.
+ * The boot CPU may be nohz_full, in which case the
+ * first housekeeping secondary will take do_timer()
+ * from it.
*/
if (tick_nohz_full_cpu(cpu))
tick_do_timer_boot_cpu = cpu;
- } else if (tick_do_timer_boot_cpu != -1 &&
- !tick_nohz_full_cpu(cpu)) {
- tick_take_do_timer_from_boot();
+ } else if (tick_do_timer_boot_cpu != -1 && !tick_nohz_full_cpu(cpu)) {
tick_do_timer_boot_cpu = -1;
- WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
+ /*
+ * The boot CPU will stay in periodic (NOHZ disabled)
+ * mode until clocksource_done_booting() is called after
+ * smp_init() selects a high resolution clocksource and
+ * timekeeping_notify() kicks the NOHZ stuff alive.
+ *
+ * So this WRITE_ONCE can only race with the READ_ONCE
+ * check in tick_periodic() but this race is harmless.
+ */
+ WRITE_ONCE(tick_do_timer_cpu, cpu);
#endif
}
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 47345bf1d..34804c715 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1123,7 +1123,7 @@ config PREEMPTIRQ_DELAY_TEST
config SYNTH_EVENT_GEN_TEST
tristate "Test module for in-kernel synthetic event generation"
- depends on SYNTH_EVENTS
+ depends on SYNTH_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel synthetic event definition and
@@ -1136,7 +1136,7 @@ config SYNTH_EVENT_GEN_TEST
config KPROBE_EVENT_GEN_TEST
tristate "Test module for in-kernel kprobe event generation"
- depends on KPROBE_EVENTS
+ depends on KPROBE_EVENTS && m
help
This option creates a test module to check the base
functionality of in-kernel kprobe event definition.
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9dc605f08..5d8f918c9 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -3260,7 +3260,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
struct bpf_run_ctx *old_run_ctx;
int err = 0;
- if (link->task && current != link->task)
+ if (link->task && current->mm != link->task->mm)
return 0;
if (sleepable)
@@ -3361,8 +3361,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
upath = u64_to_user_ptr(attr->link_create.uprobe_multi.path);
uoffsets = u64_to_user_ptr(attr->link_create.uprobe_multi.offsets);
cnt = attr->link_create.uprobe_multi.cnt;
+ pid = attr->link_create.uprobe_multi.pid;
- if (!upath || !uoffsets || !cnt)
+ if (!upath || !uoffsets || !cnt || pid < 0)
return -EINVAL;
if (cnt > MAX_UPROBE_MULTI_CNT)
return -E2BIG;
@@ -3386,10 +3387,9 @@ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
goto error_path_put;
}
- pid = attr->link_create.uprobe_multi.pid;
if (pid) {
rcu_read_lock();
- task = get_pid_task(find_vpid(pid), PIDTYPE_PID);
+ task = get_pid_task(find_vpid(pid), PIDTYPE_TGID);
rcu_read_unlock();
if (!task) {
err = -ESRCH;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index da1710499..2e1123672 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1595,12 +1595,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
struct dyn_ftrace *rec;
+ unsigned long ip = 0;
+ rcu_read_lock();
rec = lookup_rec(start, end);
if (rec)
- return rec->ip;
+ ip = rec->ip;
+ rcu_read_unlock();
- return 0;
+ return ip;
}
/**
@@ -1614,25 +1617,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
*/
unsigned long ftrace_location(unsigned long ip)
{
- struct dyn_ftrace *rec;
+ unsigned long loc;
unsigned long offset;
unsigned long size;
- rec = lookup_rec(ip, ip);
- if (!rec) {
+ loc = ftrace_location_range(ip, ip);
+ if (!loc) {
if (!kallsyms_lookup_size_offset(ip, &size, &offset))
goto out;
/* map sym+0 to __fentry__ */
if (!offset)
- rec = lookup_rec(ip, ip + size - 1);
+ loc = ftrace_location_range(ip, ip + size - 1);
}
- if (rec)
- return rec->ip;
-
out:
- return 0;
+ return loc;
}
/**
@@ -6596,6 +6596,8 @@ static int ftrace_process_locs(struct module *mod,
/* We should have used all pages unless we skipped some */
if (pg_unuse) {
WARN_ON(!skipped);
+ /* Need to synchronize with ftrace_location_range() */
+ synchronize_rcu();
ftrace_free_pages(pg_unuse);
}
return ret;
@@ -6809,6 +6811,9 @@ void ftrace_release_mod(struct module *mod)
out_unlock:
mutex_unlock(&ftrace_lock);
+ /* Need to synchronize with ftrace_location_range() */
+ if (tmp_page)
+ synchronize_rcu();
for (pg = tmp_page; pg; pg = tmp_page) {
/* Needs to be called outside of ftrace_lock */
@@ -7142,6 +7147,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
unsigned long start = (unsigned long)(start_ptr);
unsigned long end = (unsigned long)(end_ptr);
struct ftrace_page **last_pg = &ftrace_pages_start;
+ struct ftrace_page *tmp_page = NULL;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
struct dyn_ftrace key;
@@ -7183,12 +7189,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
ftrace_update_tot_cnt--;
if (!pg->index) {
*last_pg = pg->next;
- if (pg->records) {
- free_pages((unsigned long)pg->records, pg->order);
- ftrace_number_of_pages -= 1 << pg->order;
- }
- ftrace_number_of_groups--;
- kfree(pg);
+ pg->next = tmp_page;
+ tmp_page = pg;
pg = container_of(last_pg, struct ftrace_page, next);
if (!(*last_pg))
ftrace_pages = pg;
@@ -7205,6 +7207,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
clear_func_from_hashes(func);
kfree(func);
}
+ /* Need to synchronize with ftrace_location_range() */
+ if (tmp_page) {
+ synchronize_rcu();
+ ftrace_free_pages(tmp_page);
+ }
}
void __init ftrace_free_init_mem(void)
@@ -7895,6 +7902,7 @@ void ftrace_kill(void)
ftrace_disabled = 1;
ftrace_enabled = 0;
ftrace_trace_function = ftrace_stub;
+ kprobe_ftrace_kill();
}
/**
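The synchronize_rcu() calls added above all implement the same unlink-wait-free pattern for the ftrace page lists that ftrace_location_range() now walks under RCU. A minimal sketch of the pattern with illustrative types, not the actual ftrace structures:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_page {
        struct demo_page *next;
        /* ... records ... */
};

static void demo_release_pages(struct demo_page *unlinked)
{
        struct demo_page *pg;

        if (!unlinked)
                return;

        /* The pages were already unlinked; wait until no RCU reader can
         * still be traversing them, then free. */
        synchronize_rcu();

        while ((pg = unlinked)) {
                unlinked = pg->next;
                kfree(pg);
        }
}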
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index 8c4ffd076..cb0871fbd 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -215,4 +215,5 @@ static void __exit preemptirq_delay_exit(void)
module_init(preemptirq_delay_init)
module_exit(preemptirq_delay_exit)
+MODULE_DESCRIPTION("Preempt / IRQ disable delay thread to test latency tracers");
MODULE_LICENSE("GPL v2");
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6511dc3a0..54887f4c3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1449,6 +1449,11 @@ static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
*
* As a safety measure we check to make sure the data pages have not
* been corrupted.
+ *
+ * Callers of this function need to guarantee that the list of pages doesn't get
+ * modified during the check. In particular, if it's possible that the function
+ * is invoked with concurrent readers which can swap in a new reader page then
+ * the caller should take cpu_buffer->reader_lock.
*/
static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
@@ -2200,8 +2205,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
*/
synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) {
+ unsigned long flags;
+
cpu_buffer = buffer->buffers[cpu];
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
rb_check_pages(cpu_buffer);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
atomic_dec(&buffer->record_disabled);
}
diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c
index 2f68e93ff..df0745a42 100644
--- a/kernel/trace/rv/rv.c
+++ b/kernel/trace/rv/rv.c
@@ -245,6 +245,7 @@ static int __rv_disable_monitor(struct rv_monitor_def *mdef, bool sync)
/**
* rv_disable_monitor - disable a given runtime monitor
+ * @mdef: Pointer to the monitor definition structure.
*
* Returns 0 on success.
*/
@@ -256,6 +257,7 @@ int rv_disable_monitor(struct rv_monitor_def *mdef)
/**
* rv_enable_monitor - enable a given runtime monitor
+ * @mdef: Pointer to the monitor definition structure.
*
* Returns 0 on success, error otherwise.
*/
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 70d428c39..82b191f33 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -1990,6 +1990,80 @@ static int user_event_set_tp_name(struct user_event *user)
}
/*
+ * Counts how many ';' without a trailing space are in the args.
+ */
+static int count_semis_no_space(char *args)
+{
+ int count = 0;
+
+ while ((args = strchr(args, ';'))) {
+ args++;
+
+ if (!isspace(*args))
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Copies the arguments while ensuring all ';' have a trailing space.
+ */
+static char *insert_space_after_semis(char *args, int count)
+{
+ char *fixed, *pos;
+ int len;
+
+ len = strlen(args) + count;
+ fixed = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!fixed)
+ return NULL;
+
+ pos = fixed;
+
+ /* Insert a space after ';' if there is no trailing space. */
+ while (*args) {
+ *pos = *args++;
+
+ if (*pos++ == ';' && !isspace(*args))
+ *pos++ = ' ';
+ }
+
+ *pos = '\0';
+
+ return fixed;
+}
+
+static char **user_event_argv_split(char *args, int *argc)
+{
+ char **split;
+ char *fixed;
+ int count;
+
+ /* Count how many ';' without a trailing space */
+ count = count_semis_no_space(args);
+
+ /* No fixup is required */
+ if (!count)
+ return argv_split(GFP_KERNEL, args, argc);
+
+ /* We must fixup 'field;field' to 'field; field' */
+ fixed = insert_space_after_semis(args, count);
+
+ if (!fixed)
+ return NULL;
+
+ /* We do a normal split afterwards */
+ split = argv_split(GFP_KERNEL, fixed, argc);
+
+ /* We can free since argv_split makes a copy */
+ kfree(fixed);
+
+ return split;
+}
+
+/*
* Parses the event name, arguments and flags then registers if successful.
* The name buffer lifetime is owned by this method for success cases only.
* Upon success the returned user_event has its ref count increased by 1.
@@ -2012,7 +2086,7 @@ static int user_event_parse(struct user_event_group *group, char *name,
return -EPERM;
if (args) {
- argv = argv_split(GFP_KERNEL, args, &argc);
+ argv = user_event_argv_split(args, &argc);
if (!argv)
return -ENOMEM;
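A quick way to see what user_event_argv_split() ends up feeding to argv_split() is to run the same fixup in userspace; malloc stands in for kmalloc and the input string is just an example:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *fix_semis(const char *args)
{
        size_t extra = 0;
        const char *s;
        char *out, *pos;

        /* Count ';' characters that are not followed by whitespace. */
        for (s = args; (s = strchr(s, ';')) != NULL; ) {
                s++;
                if (!isspace((unsigned char)*s))
                        extra++;
        }

        out = malloc(strlen(args) + extra + 1);
        if (!out)
                return NULL;

        /* Copy, inserting a space after each ';' that lacks one. */
        for (pos = out; *args; ) {
                *pos = *args++;
                if (*pos++ == ';' && !isspace((unsigned char)*args))
                        *pos++ = ' ';
        }
        *pos = '\0';
        return out;
}

int main(void)
{
        char *fixed = fix_semis("u32 myint;char mychar");

        printf("%s\n", fixed);    /* "u32 myint; char mychar" */
        free(fixed);
        return 0;
}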
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index 42bc0f362..1a7e7cf94 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -553,6 +553,10 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
anon_offs = 0;
field = btf_find_struct_member(ctx->btf, type, fieldname,
&anon_offs);
+ if (IS_ERR(field)) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return PTR_ERR(field);
+ }
if (!field) {
trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
return -ENOENT;
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index e81e1ac4a..bdda600f8 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -4,6 +4,7 @@ config ARCH_HAS_UBSAN
menuconfig UBSAN
bool "Undefined behaviour sanity checker"
+ depends on ARCH_HAS_UBSAN
help
This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 493ec02dd..fdba0eaf1 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -267,28 +267,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
\
checker((expected_pages) * PAGE_SIZE, \
kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
\
prev_size = (expected_pages) * PAGE_SIZE; \
orig = kvmalloc(prev_size, gfp); \
@@ -917,19 +917,19 @@ static void kmemdup_test(struct kunit *test)
/* Out of bounds by 1 byte. */
copy = kmemdup(src, len + 1, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
kfree(copy);
/* Way out of bounds. */
copy = kmemdup(src, len * 2, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
kfree(copy);
/* Starting offset causing out of bounds. */
copy = kmemdup(src + 1, len, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
kfree(copy);
}
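The vfree() to kvfree() swaps above follow from kvmalloc()'s contract: the allocation may be backed by either kmalloc or vmalloc, and only kvfree() handles both. A minimal sketch of the pairing, with a demo function name:

#include <linux/mm.h>
#include <linux/slab.h>

static int demo_kv_alloc(size_t sz)
{
        void *p = kvmalloc(sz, GFP_KERNEL);

        if (!p)
                return -ENOMEM;
        /* ... use p ... */
        kvfree(p);              /* correct for both backing allocators */
        return 0;
}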
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index abc603730..25c81ed46 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -51,7 +51,7 @@ int kunit_bus_init(void)
error = bus_register(&kunit_bus_type);
if (error)
- bus_unregister(&kunit_bus_type);
+ root_device_unregister(kunit_bus_device);
return error;
}
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 1d1475578..b8514dbb3 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
{
unsigned int i;
+ if (num_suites == 0)
+ return 0;
+
if (!kunit_enabled() && num_suites > 0) {
pr_info("kunit: disabled\n");
return 0;
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index f7825991d..d9d1df28c 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -11,6 +11,7 @@
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/sched/task.h>
#include "try-catch-impl.h"
@@ -65,13 +66,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
try_catch->context = context;
try_catch->try_completion = &try_completion;
try_catch->try_result = 0;
- task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
- try_catch,
- "kunit_try_catch_thread");
+ task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
+ try_catch, "kunit_try_catch_thread");
if (IS_ERR(task_struct)) {
try_catch->catch(try_catch->context);
return;
}
+ get_task_struct(task_struct);
+ wake_up_process(task_struct);
time_remaining = wait_for_completion_timeout(&try_completion,
kunit_test_timeout());
@@ -81,6 +83,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
kthread_stop(task_struct);
}
+ put_task_struct(task_struct);
exit_code = try_catch->try_result;
if (!exit_code)
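The kthread_create()/get_task_struct()/wake_up_process() sequence pins the task_struct, so the later kthread_stop() and put_task_struct() stay valid even if the thread has already exited on its own. A condensed sketch of that pattern; the names and the waiting step are illustrative, not the kunit code:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>

static int demo_run_with_ref(int (*fn)(void *), void *data)
{
        struct task_struct *t;
        int ret;

        t = kthread_create(fn, data, "demo_thread");
        if (IS_ERR(t))
                return PTR_ERR(t);

        get_task_struct(t);       /* keep task_struct alive past thread exit */
        wake_up_process(t);

        /* ... wait for a completion or a timeout here ... */

        ret = kthread_stop(t);    /* safe: we still hold a reference */
        put_task_struct(t);
        return ret;
}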
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b0..4ce960438 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
ptr_addr = (unsigned long *)(p + s->offset);
tmp = *ptr_addr;
- p[s->offset] = 0x12;
+ p[s->offset] = ~p[s->offset];
/*
* Expecting three errors.
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
index e21be9551..ca09f7f0e 100644
--- a/lib/strcat_kunit.c
+++ b/lib/strcat_kunit.c
@@ -10,7 +10,7 @@
static volatile int unconst;
-static void strcat_test(struct kunit *test)
+static void test_strcat(struct kunit *test)
{
char dest[8];
@@ -29,7 +29,7 @@ static void strcat_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, dest, "fourAB");
}
-static void strncat_test(struct kunit *test)
+static void test_strncat(struct kunit *test)
{
char dest[8];
@@ -56,7 +56,7 @@ static void strncat_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, dest, "fourAB");
}
-static void strlcat_test(struct kunit *test)
+static void test_strlcat(struct kunit *test)
{
char dest[8] = "";
int len = sizeof(dest) + unconst;
@@ -88,9 +88,9 @@ static void strlcat_test(struct kunit *test)
}
static struct kunit_case strcat_test_cases[] = {
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
+ KUNIT_CASE(test_strcat),
+ KUNIT_CASE(test_strncat),
+ KUNIT_CASE(test_strlcat),
{}
};
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index eabf025cf..dd19bd774 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -11,6 +11,12 @@
#include <linux/slab.h>
#include <linux/string.h>
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
static void test_memset16(struct kunit *test)
{
unsigned i, j, k;
@@ -179,6 +185,147 @@ static void test_strspn(struct kunit *test)
}
}
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
+
+static void test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+ /* Compare two strings which have the same prefix */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+ * If one string is a prefix of another, and we only check the first
+ * prefix-length chars, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+ /* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' here */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
static struct kunit_case string_test_cases[] = {
KUNIT_CASE(test_memset16),
KUNIT_CASE(test_memset32),
@@ -186,6 +333,14 @@ static struct kunit_case string_test_cases[] = {
KUNIT_CASE(test_strchr),
KUNIT_CASE(test_strnchr),
KUNIT_CASE(test_strspn),
+ KUNIT_CASE(test_strcmp),
+ KUNIT_CASE(test_strcmp_long_strings),
+ KUNIT_CASE(test_strncmp),
+ KUNIT_CASE(test_strncmp_long_strings),
+ KUNIT_CASE(test_strcasecmp),
+ KUNIT_CASE(test_strcasecmp_long_strings),
+ KUNIT_CASE(test_strncasecmp),
+ KUNIT_CASE(test_strncasecmp_long_strings),
{}
};
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
index a6b634435..b6d1d93a8 100644
--- a/lib/strscpy_kunit.c
+++ b/lib/strscpy_kunit.c
@@ -8,22 +8,23 @@
#include <kunit/test.h>
#include <linux/string.h>
-/*
- * tc() - Run a specific test case.
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
* @src: Source string, argument to strscpy_pad()
* @count: Size of destination buffer, argument to strscpy_pad()
* @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
* @chars: Number of characters from the src string expected to be
* written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
* @pad: Number of pad characters expected (in the tail of dst buffer).
* (@pad does not include the null terminator byte.)
*
* Calls strscpy_pad() and verifies the return value and state of the
* destination buffer after the call returns.
*/
-static void tc(struct kunit *test, char *src, int count, int expected,
- int chars, int terminator, int pad)
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
{
int nr_bytes_poison;
int max_expected;
@@ -79,12 +80,12 @@ static void tc(struct kunit *test, char *src, int count, int expected,
}
}
-static void strscpy_test(struct kunit *test)
+static void test_strscpy(struct kunit *test)
{
char dest[8];
/*
- * tc() uses a destination buffer of size 6 and needs at
+ * strscpy_check() uses a destination buffer of size 6 and needs at
* least 2 characters spare (one for null and one to check for
* overflow). This means we should only call tc() with
* strings up to a maximum of 4 characters long and 'count'
@@ -92,27 +93,27 @@ static void strscpy_test(struct kunit *test)
* the buffer size in tc().
*/
- /* tc(test, src, count, expected, chars, terminator, pad) */
- tc(test, "a", 0, -E2BIG, 0, 0, 0);
- tc(test, "", 0, -E2BIG, 0, 0, 0);
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
- tc(test, "a", 1, -E2BIG, 0, 1, 0);
- tc(test, "", 1, 0, 0, 1, 0);
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
- tc(test, "ab", 2, -E2BIG, 1, 1, 0);
- tc(test, "a", 2, 1, 1, 1, 0);
- tc(test, "", 2, 0, 0, 1, 1);
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
- tc(test, "abc", 3, -E2BIG, 2, 1, 0);
- tc(test, "ab", 3, 2, 2, 1, 0);
- tc(test, "a", 3, 1, 1, 1, 1);
- tc(test, "", 3, 0, 0, 1, 2);
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
- tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
- tc(test, "abc", 4, 3, 3, 1, 0);
- tc(test, "ab", 4, 2, 2, 1, 1);
- tc(test, "a", 4, 1, 1, 1, 2);
- tc(test, "", 4, 0, 0, 1, 3);
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
/* Compile-time-known source strings. */
KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
@@ -127,7 +128,7 @@ static void strscpy_test(struct kunit *test)
}
static struct kunit_case strscpy_test_cases[] = {
- KUNIT_CASE(strscpy_test),
+ KUNIT_CASE(test_strscpy),
{}
};
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 717dcb830..b823ba7cb 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
unsigned long *src_pfns;
unsigned long *dst_pfns;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, start_pfn, npages);
for (i = 0; i < npages; i++) {
@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
}
migrate_device_pages(src_pfns, dst_pfns, npages);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
}
/* Removes free pages from the free list so they can't be re-allocated */
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 0abbbac87..0982578fb 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -124,19 +124,32 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
-void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_negate_overflow(void *_data, void *old_val);
-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
-void __ubsan_handle_out_of_bounds(void *_data, void *index);
-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_builtin_unreachable(void *_data);
-void __ubsan_handle_load_invalid_value(void *_data, void *val);
-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
- unsigned long align,
- unsigned long offset);
+/*
+ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
+ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
+ * Fix this for earlier Clang versions by forcing the calling convention
+ * to use non-register arguments.
+ */
+#if defined(CONFIG_X86_32) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
+# define ubsan_linkage asmlinkage
+#else
+# define ubsan_linkage
+#endif
+
+void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
+void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
+void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
+void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
+void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
#endif
diff --git a/mm/cma.c b/mm/cma.c
index 01f5a8f71..3e9724716 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -182,10 +182,6 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
if (!size || !memblock_is_region_reserved(base, size))
return -EINVAL;
- /* alignment should be aligned with order_per_bit */
- if (!IS_ALIGNED(CMA_MIN_ALIGNMENT_PAGES, 1 << order_per_bit))
- return -EINVAL;
-
/* ensure minimal alignment required by mm core */
if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES))
return -EINVAL;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 89f58c760..769e8a125 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2493,32 +2493,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
return __split_huge_zero_page_pmd(vma, haddr, pmd);
}
- /*
- * Up to this point the pmd is present and huge and userland has the
- * whole access to the hugepage during the split (which happens in
- * place). If we overwrite the pmd with the not-huge version pointing
- * to the pte here (which of course we could if all CPUs were bug
- * free), userland could trigger a small page size TLB miss on the
- * small sized TLB while the hugepage TLB entry is still established in
- * the huge TLB. Some CPU doesn't like that.
- * See http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
- * 383 on page 105. Intel should be safe but is also warns that it's
- * only safe if the permission and cache attributes of the two entries
- * loaded in the two TLB is identical (which should be the case here).
- * But it is generally safer to never allow small and huge TLB entries
- * for the same virtual address to be loaded simultaneously. So instead
- * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the
- * current pmd notpresent (atomically because here the pmd_trans_huge
- * must remain set at all times on the pmd until the split is complete
- * for this pmd), then we flush the SMP TLB and finally we write the
- * non-huge version of the pmd entry with pmd_populate.
- */
- old_pmd = pmdp_invalidate(vma, haddr, pmd);
-
- pmd_migration = is_pmd_migration_entry(old_pmd);
+ pmd_migration = is_pmd_migration_entry(*pmd);
if (unlikely(pmd_migration)) {
swp_entry_t entry;
+ old_pmd = *pmd;
entry = pmd_to_swp_entry(old_pmd);
page = pfn_swap_entry_to_page(entry);
write = is_writable_migration_entry(entry);
@@ -2529,6 +2508,30 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
soft_dirty = pmd_swp_soft_dirty(old_pmd);
uffd_wp = pmd_swp_uffd_wp(old_pmd);
} else {
+ /*
+ * Up to this point the pmd is present and huge and userland has
+ * the whole access to the hugepage during the split (which
+ * happens in place). If we overwrite the pmd with the not-huge
+ * version pointing to the pte here (which of course we could if
+ * all CPUs were bug free), userland could trigger a small page
+ * size TLB miss on the small sized TLB while the hugepage TLB
+ * entry is still established in the huge TLB. Some CPU doesn't
+ * like that. See
+ * http://support.amd.com/TechDocs/41322_10h_Rev_Gd.pdf, Erratum
+ * 383 on page 105. Intel should be safe but it also warns that
+ * it's only safe if the permission and cache attributes of the
+ * two entries loaded in the two TLB is identical (which should
+ * be the case here). But it is generally safer to never allow
+ * small and huge TLB entries for the same virtual address to be
+ * loaded simultaneously. So instead of doing "pmd_populate();
+ * flush_pmd_tlb_range();" we first mark the current pmd
+ * notpresent (atomically because here the pmd_trans_huge must
+ * remain set at all times on the pmd until the split is
+ * complete for this pmd), then we flush the SMP TLB and finally
+ * we write the non-huge version of the pmd entry with
+ * pmd_populate.
+ */
+ old_pmd = pmdp_invalidate(vma, haddr, pmd);
page = pmd_page(old_pmd);
folio = page_folio(page);
if (pmd_dirty(old_pmd)) {
@@ -3055,30 +3058,36 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
if (new_order >= folio_order(folio))
return -EINVAL;
- /* Cannot split anonymous THP to order-1 */
- if (new_order == 1 && folio_test_anon(folio)) {
- VM_WARN_ONCE(1, "Cannot split to order-1 folio");
- return -EINVAL;
- }
-
- if (new_order) {
- /* Only swapping a whole PMD-mapped folio is supported */
- if (folio_test_swapcache(folio))
+ if (folio_test_anon(folio)) {
+ /* order-1 is not supported for anonymous THP. */
+ if (new_order == 1) {
+ VM_WARN_ONCE(1, "Cannot split to order-1 folio");
return -EINVAL;
+ }
+ } else if (new_order) {
/* Split shmem folio to non-zero order not supported */
if (shmem_mapping(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split shmem folio to non-0 order");
return -EINVAL;
}
- /* No split if the file system does not support large folio */
- if (!mapping_large_folio_support(folio->mapping)) {
+ /*
+ * No split if the file system does not support large folio.
+ * Note that we might still have THPs in such mappings due to
+ * CONFIG_READ_ONLY_THP_FOR_FS. But in that case, the mapping
+ * does not actually support large folios properly.
+ */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ !mapping_large_folio_support(folio->mapping)) {
VM_WARN_ONCE(1,
"Cannot split file folio to non-0 order");
return -EINVAL;
}
}
+ /* Only swapping a whole PMD-mapped folio is supported */
+ if (folio_test_swapcache(folio) && new_order)
+ return -EINVAL;
is_hzp = is_huge_zero_page(&folio->page);
if (is_hzp) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ce7be5c24..c445e6fd8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5774,8 +5774,20 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
* do_exit() will not see it, and will keep the reservation
* forever.
*/
- if (adjust_reservation && vma_needs_reservation(h, vma, address))
- vma_add_reservation(h, vma, address);
+ if (adjust_reservation) {
+ int rc = vma_needs_reservation(h, vma, address);
+
+ if (rc < 0)
+ /* Presumably allocate_file_region_entries failed
+ * to allocate a file_region struct. Clear
+ * hugetlb_restore_reserve so that global reserve
+ * count will not be incremented by free_huge_folio.
+ * Act as if we consumed the reservation.
+ */
+ folio_clear_hugetlb_restore_reserve(page_folio(page));
+ else if (rc)
+ vma_add_reservation(h, vma, address);
+ }
tlb_remove_page_size(tlb, page, huge_page_size(h));
/*
@@ -7867,9 +7879,9 @@ void __init hugetlb_cma_reserve(int order)
* huge page demotion.
*/
res = cma_declare_contiguous_nid(0, size, 0,
- PAGE_SIZE << HUGETLB_PAGE_ORDER,
- 0, false, name,
- &hugetlb_cma[nid], nid);
+ PAGE_SIZE << HUGETLB_PAGE_ORDER,
+ HUGETLB_PAGE_ORDER, false, name,
+ &hugetlb_cma[nid], nid);
if (res) {
pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
res, nid);
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index cf2d70e9c..95f859e38 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -196,8 +196,7 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
u32 origin, bool checked)
{
u64 address = (u64)addr;
- void *shadow_start;
- u32 *origin_start;
+ u32 *shadow_start, *origin_start;
size_t pad = 0;
KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
@@ -225,8 +224,16 @@ void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
origin_start =
(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);
- for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
- origin_start[i] = origin;
+ /*
+ * If the new origin is non-zero, assume that the shadow byte is also non-zero,
+ * and unconditionally overwrite the old origin slot.
+ * If the new origin is zero, overwrite the old origin slot iff the
+ * corresponding shadow slot is zero.
+ */
+ for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++) {
+ if (origin || !shadow_start[i])
+ origin_start[i] = origin;
+ }
}
struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
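
The new loop only writes an origin slot when either the origin being stored is non-zero or the matching shadow slot is still fully initialized, so storing a zero (initialized) origin can no longer clobber the origin of poisoned neighbouring bytes sharing the same 4-byte slot. A standalone sketch of the rule over plain arrays (the 4-byte origin granularity is assumed from KMSAN_ORIGIN_SIZE):

    #include <stdint.h>
    #include <stddef.h>

    /* Apply the KMSAN-style rule over per-slot shadow/origin arrays. */
    static void set_shadow_origin(const uint32_t *shadow, uint32_t *origin,
                                  size_t nslots, uint32_t new_origin)
    {
            for (size_t i = 0; i < nslots; i++) {
                    /*
                     * A non-zero origin implies the slot is (at least partly)
                     * poisoned, so overwrite unconditionally.  A zero origin
                     * only replaces the old one when the shadow slot shows
                     * the whole slot as initialized.
                     */
                    if (new_origin || !shadow[i])
                            origin[i] = new_origin;
            }
    }
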
diff --git a/mm/ksm.c b/mm/ksm.c
index 8c001819c..ddd482c71 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
static bool ksm_smart_scan = true;
/* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
@@ -1428,8 +1428,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* the dirty bit in zero page's PTE is set.
*/
newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
- ksm_zero_pages++;
- mm->ksm_zero_pages++;
+ ksm_map_zero_page(mm);
/*
* We're replacing an anonymous page with a zero page, which is
* not anonymous. We need to do proper accounting otherwise we
@@ -2747,18 +2746,16 @@ static void ksm_do_scan(unsigned int scan_npages)
{
struct ksm_rmap_item *rmap_item;
struct page *page;
- unsigned int npages = scan_npages;
- while (npages-- && likely(!freezing(current))) {
+ while (scan_npages-- && likely(!freezing(current))) {
cond_resched();
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
cmp_and_merge_page(page, rmap_item);
put_page(page);
+ ksm_pages_scanned++;
}
-
- ksm_pages_scanned += scan_npages - npages;
}
static int ksmd_should_run(void)
@@ -3370,7 +3367,7 @@ static void wait_while_offlining(void)
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
- return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+ return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
@@ -3659,7 +3656,7 @@ KSM_ATTR_RO(pages_skipped);
static ssize_t ksm_zero_pages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
}
KSM_ATTR_RO(ksm_zero_pages);
@@ -3668,7 +3665,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
{
long general_profit;
- general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+ general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
ksm_rmap_items * sizeof(struct ksm_rmap_item);
return sysfs_emit(buf, "%ld\n", general_profit);
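
The ksm_zero_pages counter becomes an atomic_long_t and the per-mm bump moves into a helper. The helpers used above, ksm_map_zero_page() and mm_ksm_zero_pages(), are defined outside this hunk; presumably they look roughly like the kernel-style sketch below (the mm_struct field is assumed to have been converted to atomic_long_t as well):

    /* Hedged sketch of the helpers referenced by the ksm.c hunk above. */
    static inline void ksm_map_zero_page(struct mm_struct *mm)
    {
            atomic_long_inc(&ksm_zero_pages);
            atomic_long_inc(&mm->ksm_zero_pages);
    }

    static inline long mm_ksm_zero_pages(struct mm_struct *mm)
    {
            return atomic_long_read(&mm->ksm_zero_pages);
    }
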
diff --git a/mm/memblock.c b/mm/memblock.c
index d09136e04..08e9806b1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1339,6 +1339,10 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
int start_rgn, end_rgn;
int i, ret;
+ if (WARN_ONCE(nid == MAX_NUMNODES,
+ "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
+ nid = NUMA_NO_NODE;
+
ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
if (ret)
return ret;
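
With this change, passing MAX_NUMNODES to memblock_set_node() warns once and is remapped to NUMA_NO_NODE. Caller-side, the preferred usage looks like the sketch below (base and size are assumed to be provided by the caller):

    /* Preferred: explicitly mark the range as having no NUMA affinity. */
    memblock_set_node(base, size, &memblock.memory, NUMA_NO_NODE);

    /* Deprecated: triggers the WARN_ONCE above, then treated as NUMA_NO_NODE. */
    memblock_set_node(base, size, &memblock.memory, MAX_NUMNODES);
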
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index fabce2b50..612558f30 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -7531,8 +7531,7 @@ void __mem_cgroup_uncharge_folios(struct folio_batch *folios)
* @new: Replacement folio.
*
* Charge @new as a replacement folio for @old. @old will
- * be uncharged upon free. This is only used by the page cache
- * (in replace_page_cache_folio()).
+ * be uncharged upon free.
*
* Both folios must be locked, @new->mapping must be set up.
*/
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9e62a00b4..7751bd78f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1218,7 +1218,7 @@ static int me_huge_page(struct page_state *ps, struct page *p)
* subpages.
*/
folio_put(folio);
- if (__page_handle_poison(p) >= 0) {
+ if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
@@ -2097,7 +2097,7 @@ retry:
*/
if (res == 0) {
folio_unlock(folio);
- if (__page_handle_poison(p) >= 0) {
+ if (__page_handle_poison(p) > 0) {
page_ref_inc(p);
res = MF_RECOVERED;
} else {
@@ -2550,6 +2550,13 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
+ if (is_huge_zero_page(&folio->page)) {
+ unpoison_pr_info("Unpoison: huge zero page is not supported %#lx\n",
+ pfn, &unpoison_rs);
+ ret = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
if (!PageHWPoison(p)) {
unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
pfn, &unpoison_rs);
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index af69c3c8f..6363f93a4 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -71,6 +71,9 @@ static void page_table_check_clear(unsigned long pfn, unsigned long pgcnt)
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -108,6 +111,9 @@ static void page_table_check_set(unsigned long pfn, unsigned long pgcnt,
page = pfn_to_page(pfn);
page_ext = page_ext_get(page);
+ if (!page_ext)
+ return;
+
BUG_ON(PageSlab(page));
anon = PageAnon(page);
@@ -138,7 +144,10 @@ void __page_table_check_zero(struct page *page, unsigned int order)
BUG_ON(PageSlab(page));
page_ext = page_ext_get(page);
- BUG_ON(!page_ext);
+
+ if (!page_ext)
+ return;
+
for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 4fcd959dc..a78a4adf7 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -198,6 +198,7 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return old;
@@ -208,6 +209,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ VM_WARN_ON_ONCE(!pmd_present(*pmdp));
return pmdp_invalidate(vma, address, pmdp);
}
#endif
diff --git a/mm/shmem.c b/mm/shmem.c
index 94ab99b6b..b66e99e2c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1786,7 +1786,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
xa_lock_irq(&swap_mapping->i_pages);
error = shmem_replace_entry(swap_mapping, swap_index, old, new);
if (!error) {
- mem_cgroup_migrate(old, new);
+ mem_cgroup_replace_folio(old, new);
__lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1);
__lruvec_stat_mod_folio(new, NR_SHMEM, 1);
__lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1);
@@ -3467,8 +3467,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
return error;
}
- simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry);
- error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry);
+ error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
if (error)
return error;
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 3c3539c57..829f7b108 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -316,6 +316,38 @@ out_release:
goto out;
}
+static int mfill_atomic_pte_zeroed_folio(pmd_t *dst_pmd,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr)
+{
+ struct folio *folio;
+ int ret = -ENOMEM;
+
+ folio = vma_alloc_zeroed_movable_folio(dst_vma, dst_addr);
+ if (!folio)
+ return ret;
+
+ if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
+ goto out_put;
+
+ /*
+ * The memory barrier inside __folio_mark_uptodate makes sure that
+ * zeroing out the folio becomes visible before mapping the page
+ * using set_pte_at(). See do_anonymous_page().
+ */
+ __folio_mark_uptodate(folio);
+
+ ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
+ &folio->page, true, 0);
+ if (ret)
+ goto out_put;
+
+ return 0;
+out_put:
+ folio_put(folio);
+ return ret;
+}
+
static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
struct vm_area_struct *dst_vma,
unsigned long dst_addr)
@@ -324,6 +356,9 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd,
spinlock_t *ptl;
int ret;
+ if (mm_forbids_zeropage(dst_vma->vm_mm))
+ return mfill_atomic_pte_zeroed_folio(dst_pmd, dst_vma, dst_addr);
+
_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
dst_vma->vm_page_prot));
ret = -EAGAIN;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 125427cbd..109272b8e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3492,7 +3492,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
{
unsigned int nr_allocated = 0;
gfp_t alloc_gfp = gfp;
- bool nofail = false;
+ bool nofail = gfp & __GFP_NOFAIL;
struct page *page;
int i;
@@ -3549,12 +3549,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
* and compaction etc.
*/
alloc_gfp &= ~__GFP_NOFAIL;
- nofail = true;
}
/* High-order pages or fallback path if "bulk" fails. */
while (nr_allocated < nr_pages) {
- if (fatal_signal_pending(current))
+ if (!nofail && fatal_signal_pending(current))
break;
if (nid == NUMA_NO_NODE)
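
vm_area_alloc_pages() now derives nofail from the caller's gfp mask up front instead of setting it only inside the high-order branch, so the fatal-signal bailout in the fallback loop is also skipped for order-0 __GFP_NOFAIL allocations. A minimal sketch of the flag handling (stand-in allocator and flag value, not the kernel's):

    #include <stdbool.h>

    #define GFP_NOFAIL_BIT (1u << 0)        /* illustrative, not the real value */

    static bool fatal_signal_stub(void) { return true; }   /* stand-in */

    static unsigned int alloc_pages_loop(unsigned int gfp, unsigned int nr_pages)
    {
            bool nofail = gfp & GFP_NOFAIL_BIT;
            unsigned int nr_allocated = 0;

            while (nr_allocated < nr_pages) {
                    /* nofail callers must not bail out on fatal signals */
                    if (!nofail && fatal_signal_stub())
                            break;
                    nr_allocated++; /* pretend one page was allocated */
            }
            return nr_allocated;
    }
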
diff --git a/net/9p/client.c b/net/9p/client.c
index f7e90b476..b05f73c29 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -235,6 +235,8 @@ static int p9_fcall_init(struct p9_client *c, struct p9_fcall *fc,
if (!fc->sdata)
return -ENOMEM;
fc->capacity = alloc_msize;
+ fc->id = 0;
+ fc->tag = P9_NOTAG;
return 0;
}
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 294cb9efe..015fb679b 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -345,7 +345,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- rt = (struct rtable *) dst;
+ rt = dst_rtable(dst);
if (rt->rt_gw_family == AF_INET)
daddr = &rt->rt_gw4;
else
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 9169efb2f..5fff5930e 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
{
struct sk_buff *skb;
struct sock *newsk;
+ ax25_dev *ax25_dev;
DEFINE_WAIT(wait);
struct sock *sk;
+ ax25_cb *ax25;
int err = 0;
if (sock->state != SS_UNCONNECTED)
@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags,
kfree_skb(skb);
sk_acceptq_removed(sk);
newsock->state = SS_CONNECTED;
+ ax25 = sk_to_ax25(newsk);
+ ax25_dev = ax25->ax25_dev;
+ netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+ ax25_dev_hold(ax25_dev);
out:
release_sock(sk);
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 282ec581c..67ae6b8c5 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -22,11 +22,12 @@
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
-ax25_dev *ax25_dev_list;
+static LIST_HEAD(ax25_dev_list);
DEFINE_SPINLOCK(ax25_dev_lock);
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
@@ -34,10 +35,11 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
ax25_dev *ax25_dev, *res = NULL;
spin_lock_bh(&ax25_dev_lock);
- for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
+ list_for_each_entry(ax25_dev, &ax25_dev_list, list)
if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
res = ax25_dev;
ax25_dev_hold(ax25_dev);
+ break;
}
spin_unlock_bh(&ax25_dev_lock);
@@ -59,7 +61,6 @@ void ax25_dev_device_up(struct net_device *dev)
}
refcount_set(&ax25_dev->refcount, 1);
- dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
ax25_dev->forward = NULL;
@@ -85,10 +86,9 @@ void ax25_dev_device_up(struct net_device *dev)
#endif
spin_lock_bh(&ax25_dev_lock);
- ax25_dev->next = ax25_dev_list;
- ax25_dev_list = ax25_dev;
+ list_add(&ax25_dev->list, &ax25_dev_list);
+ dev->ax25_ptr = ax25_dev;
spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_hold(ax25_dev);
ax25_register_dev_sysctl(ax25_dev);
}
@@ -111,32 +111,19 @@ void ax25_dev_device_down(struct net_device *dev)
/*
* Remove any packet forwarding that points to this device.
*/
- for (s = ax25_dev_list; s != NULL; s = s->next)
+ list_for_each_entry(s, &ax25_dev_list, list)
if (s->forward == dev)
s->forward = NULL;
- if ((s = ax25_dev_list) == ax25_dev) {
- ax25_dev_list = s->next;
- goto unlock_put;
- }
-
- while (s != NULL && s->next != NULL) {
- if (s->next == ax25_dev) {
- s->next = ax25_dev->next;
- goto unlock_put;
+ list_for_each_entry(s, &ax25_dev_list, list) {
+ if (s == ax25_dev) {
+ list_del(&s->list);
+ break;
}
-
- s = s->next;
}
- spin_unlock_bh(&ax25_dev_lock);
- dev->ax25_ptr = NULL;
- ax25_dev_put(ax25_dev);
- return;
-unlock_put:
- spin_unlock_bh(&ax25_dev_lock);
- ax25_dev_put(ax25_dev);
dev->ax25_ptr = NULL;
+ spin_unlock_bh(&ax25_dev_lock);
netdev_put(dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
}
@@ -200,16 +187,13 @@ struct net_device *ax25_fwd_dev(struct net_device *dev)
*/
void __exit ax25_dev_free(void)
{
- ax25_dev *s, *ax25_dev;
+ ax25_dev *s, *n;
spin_lock_bh(&ax25_dev_lock);
- ax25_dev = ax25_dev_list;
- while (ax25_dev != NULL) {
- s = ax25_dev;
- netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
- ax25_dev = ax25_dev->next;
- kfree(s);
+ list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+ netdev_put(s->dev, &s->dev_tracker);
+ list_del(&s->list);
+ ax25_dev_put(s);
}
- ax25_dev_list = NULL;
spin_unlock_bh(&ax25_dev_lock);
}
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 71c143d4b..ac74f6ead 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1266,6 +1266,8 @@ void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
/* for all origins... */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
+ if (hlist_empty(head))
+ continue;
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 27520a8a4..50cfec8cc 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -133,7 +133,7 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
struct in6_addr *daddr,
struct sk_buff *skb)
{
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
int count = atomic_read(&dev->peer_count);
const struct in6_addr *nexthop;
struct lowpan_peer *peer;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6ab404dda..08ae30fd3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1173,8 +1173,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
list_for_each_entry(d, &hci_dev_list, list) {
if (!test_bit(HCI_UP, &d->flags) ||
- hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
- d->dev_type != HCI_PRIMARY)
+ hci_dev_test_flag(d, HCI_USER_CHANNEL))
continue;
/* Simple routing:
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index bc5086423..24f6b6a5c 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -395,11 +395,6 @@ int hci_inquiry(void __user *arg)
goto done;
}
- if (hdev->dev_type != HCI_PRIMARY) {
- err = -EOPNOTSUPP;
- goto done;
- }
-
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
@@ -752,11 +747,6 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
goto done;
}
- if (hdev->dev_type != HCI_PRIMARY) {
- err = -EOPNOTSUPP;
- goto done;
- }
-
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
err = -EOPNOTSUPP;
goto done;
@@ -910,7 +900,7 @@ int hci_get_dev_info(void __user *arg)
strscpy(di.name, hdev->name, sizeof(di.name));
di.bdaddr = hdev->bdaddr;
- di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
+ di.type = (hdev->bus & 0x0f);
di.flags = flags;
di.pkt_type = hdev->pkt_type;
if (lmp_bredr_capable(hdev)) {
@@ -1026,8 +1016,7 @@ static void hci_power_on(struct work_struct *work)
*/
if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
- (hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_do_close(hdev);
@@ -1769,6 +1758,15 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
adv->pending = true;
adv->instance = instance;
+
+ /* If the controller supports only one set and the instance is set
+ * to 1, then there is no option other than using handle 0x00.
+ */
+ if (hdev->le_num_of_adv_sets == 1 && instance == 1)
+ adv->handle = 0x00;
+ else
+ adv->handle = instance;
+
list_add(&adv->list, &hdev->adv_instances);
hdev->adv_instance_cnt++;
}
@@ -2635,21 +2633,7 @@ int hci_register_dev(struct hci_dev *hdev)
if (!hdev->open || !hdev->close || !hdev->send)
return -EINVAL;
- /* Do not allow HCI_AMP devices to register at index 0,
- * so the index can be used as the AMP controller ID.
- */
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
- break;
- case HCI_AMP:
- id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
- GFP_KERNEL);
- break;
- default:
- return -EINVAL;
- }
-
+ id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
if (id < 0)
return id;
@@ -2701,12 +2685,10 @@ int hci_register_dev(struct hci_dev *hdev)
hci_dev_set_flag(hdev, HCI_SETUP);
hci_dev_set_flag(hdev, HCI_AUTO_OFF);
- if (hdev->dev_type == HCI_PRIMARY) {
- /* Assume BR/EDR support until proven otherwise (such as
- * through reading supported features during init.
- */
- hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
- }
+ /* Assume BR/EDR support until proven otherwise (such as
+ * through reading supported features during init.
+ */
+ hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
write_lock(&hci_dev_list_lock);
list_add(&hdev->list, &hci_dev_list);
@@ -3242,17 +3224,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- hci_add_acl_hdr(skb, conn->handle, flags);
- break;
- case HCI_AMP:
- hci_add_acl_hdr(skb, chan->handle, flags);
- break;
- default:
- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
- return;
- }
+ hci_add_acl_hdr(skb, conn->handle, flags);
list = skb_shinfo(skb)->frag_list;
if (!list) {
@@ -3412,9 +3384,6 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
case ACL_LINK:
cnt = hdev->acl_cnt;
break;
- case AMP_LINK:
- cnt = hdev->block_cnt;
- break;
case SCO_LINK:
case ESCO_LINK:
cnt = hdev->sco_cnt;
@@ -3612,12 +3581,6 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
}
-static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
-{
- /* Calculate count of blocks used by this packet */
- return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
-}
-
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
unsigned long last_tx;
@@ -3731,81 +3694,15 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static void hci_sched_acl_blk(struct hci_dev *hdev)
-{
- unsigned int cnt = hdev->block_cnt;
- struct hci_chan *chan;
- struct sk_buff *skb;
- int quote;
- u8 type;
-
- BT_DBG("%s", hdev->name);
-
- if (hdev->dev_type == HCI_AMP)
- type = AMP_LINK;
- else
- type = ACL_LINK;
-
- __check_timeout(hdev, cnt, type);
-
- while (hdev->block_cnt > 0 &&
- (chan = hci_chan_sent(hdev, type, &quote))) {
- u32 priority = (skb_peek(&chan->data_q))->priority;
- while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
- int blocks;
-
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
-
- /* Stop if priority has changed */
- if (skb->priority < priority)
- break;
-
- skb = skb_dequeue(&chan->data_q);
-
- blocks = __get_blocks(hdev, skb);
- if (blocks > hdev->block_cnt)
- return;
-
- hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
-
- hci_send_frame(hdev, skb);
- hdev->acl_last_tx = jiffies;
-
- hdev->block_cnt -= blocks;
- quote -= blocks;
-
- chan->sent += blocks;
- chan->conn->sent += blocks;
- }
- }
-
- if (cnt != hdev->block_cnt)
- hci_prio_recalculate(hdev, type);
-}
-
static void hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
/* No ACL link over BR/EDR controller */
- if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
- return;
-
- /* No AMP link over AMP controller */
- if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
+ if (!hci_conn_num(hdev, ACL_LINK))
return;
- switch (hdev->flow_ctl_mode) {
- case HCI_FLOW_CTL_MODE_PACKET_BASED:
- hci_sched_acl_pkt(hdev);
- break;
-
- case HCI_FLOW_CTL_MODE_BLOCK_BASED:
- hci_sched_acl_blk(hdev);
- break;
- }
+ hci_sched_acl_pkt(hdev);
}
static void hci_sched_le(struct hci_dev *hdev)
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4de8f0dc1..1ed734a7f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,7 +1,7 @@
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
- Copyright 2023 NXP
+ Copyright 2023-2024 NXP
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
@@ -913,21 +913,6 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
return rp->status;
}
-static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_flow_control_mode *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->flow_ctl_mode = rp->mode;
-
- return rp->status;
-}
-
static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -1071,28 +1056,6 @@ static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
return rp->status;
}
-static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_data_block_size *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
- hdev->block_len = __le16_to_cpu(rp->block_len);
- hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
-
- hdev->block_cnt = hdev->num_blocks;
-
- BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
- hdev->block_cnt, hdev->block_len);
-
- return rp->status;
-}
-
static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -1127,30 +1090,6 @@ unlock:
return rp->status;
}
-static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_rp_read_local_amp_info *rp = data;
-
- bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
-
- if (rp->status)
- return rp->status;
-
- hdev->amp_status = rp->amp_status;
- hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
- hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
- hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
- hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
- hdev->amp_type = rp->amp_type;
- hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
- hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
- hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
- hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
- return rp->status;
-}
-
static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -4121,12 +4060,6 @@ static const struct hci_cc {
HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
sizeof(struct hci_rp_read_page_scan_type)),
HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
- HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
- sizeof(struct hci_rp_read_data_block_size)),
- HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
- sizeof(struct hci_rp_read_flow_control_mode)),
- HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
- sizeof(struct hci_rp_read_local_amp_info)),
HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
sizeof(struct hci_rp_read_clock)),
HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
@@ -4317,7 +4250,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
/* Remove connection if command failed */
- for (i = 0; cp->num_cis; cp->num_cis--, i++) {
+ for (i = 0; i < cp->num_cis; i++) {
struct hci_conn *conn;
u16 handle;
@@ -4333,6 +4266,7 @@ static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
hci_conn_del(conn);
}
}
+ cp->num_cis = 0;
if (pending)
hci_le_create_cis_pending(hdev);
@@ -4461,11 +4395,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
flex_array_size(ev, handles, ev->num)))
return;
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
- return;
- }
-
bt_dev_dbg(hdev, "num %d", ev->num);
for (i = 0; i < ev->num; i++) {
@@ -4533,78 +4462,6 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
- __u16 handle)
-{
- struct hci_chan *chan;
-
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- return hci_conn_hash_lookup_handle(hdev, handle);
- case HCI_AMP:
- chan = hci_chan_lookup_handle(hdev, handle);
- if (chan)
- return chan->conn;
- break;
- default:
- bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
- break;
- }
-
- return NULL;
-}
-
-static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
- struct sk_buff *skb)
-{
- struct hci_ev_num_comp_blocks *ev = data;
- int i;
-
- if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
- flex_array_size(ev, handles, ev->num_hndl)))
- return;
-
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d",
- hdev->flow_ctl_mode);
- return;
- }
-
- bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
- ev->num_hndl);
-
- for (i = 0; i < ev->num_hndl; i++) {
- struct hci_comp_blocks_info *info = &ev->handles[i];
- struct hci_conn *conn = NULL;
- __u16 handle, block_count;
-
- handle = __le16_to_cpu(info->handle);
- block_count = __le16_to_cpu(info->blocks);
-
- conn = __hci_conn_lookup_handle(hdev, handle);
- if (!conn)
- continue;
-
- conn->sent -= block_count;
-
- switch (conn->type) {
- case ACL_LINK:
- case AMP_LINK:
- hdev->block_cnt += block_count;
- if (hdev->block_cnt > hdev->num_blocks)
- hdev->block_cnt = hdev->num_blocks;
- break;
-
- default:
- bt_dev_err(hdev, "unknown type %d conn %p",
- conn->type, conn);
- break;
- }
- }
-
- queue_work(hdev->workqueue, &hdev->tx_work);
-}
-
static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -6502,14 +6359,16 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
- if (ev->status) {
- /* Add connection to indicate the failed PA sync event */
- pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
+ /* Add connection to indicate PA sync event */
+ pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
- if (!pa_sync)
- goto unlock;
+ if (IS_ERR(pa_sync))
+ goto unlock;
+
+ pa_sync->sync_handle = le16_to_cpu(ev->handle);
+ if (ev->status) {
set_bit(HCI_CONN_PA_SYNC_FAILED, &pa_sync->flags);
/* Notify iso layer */
@@ -6526,6 +6385,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
struct hci_ev_le_per_adv_report *ev = data;
int mask = hdev->link_mode;
__u8 flags = 0;
+ struct hci_conn *pa_sync;
bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
@@ -6533,8 +6393,28 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
if (!(mask & HCI_LM_ACCEPT))
- hci_le_pa_term_sync(hdev, ev->sync_handle);
+ goto unlock;
+
+ if (!(flags & HCI_PROTO_DEFER))
+ goto unlock;
+ pa_sync = hci_conn_hash_lookup_pa_sync_handle
+ (hdev,
+ le16_to_cpu(ev->sync_handle));
+
+ if (!pa_sync)
+ goto unlock;
+
+ if (ev->data_status == LE_PA_DATA_COMPLETE &&
+ !test_and_set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags)) {
+ /* Notify iso layer */
+ hci_connect_cfm(pa_sync, 0);
+
+ /* Notify MGMT layer */
+ mgmt_device_connected(hdev, pa_sync, NULL, 0);
+ }
+
+unlock:
hci_dev_unlock(hdev);
}
@@ -7069,10 +6949,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
- if (!(mask & HCI_LM_ACCEPT)) {
- hci_le_pa_term_sync(hdev, ev->sync_handle);
+ if (!(mask & HCI_LM_ACCEPT))
goto unlock;
- }
if (!(flags & HCI_PROTO_DEFER))
goto unlock;
@@ -7081,24 +6959,11 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
(hdev,
le16_to_cpu(ev->sync_handle));
- if (pa_sync)
- goto unlock;
-
- /* Add connection to indicate the PA sync event */
- pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY,
- HCI_ROLE_SLAVE);
-
if (IS_ERR(pa_sync))
goto unlock;
- pa_sync->sync_handle = le16_to_cpu(ev->sync_handle);
- set_bit(HCI_CONN_PA_SYNC, &pa_sync->flags);
-
/* Notify iso layer */
- hci_connect_cfm(pa_sync, 0x00);
-
- /* Notify MGMT layer */
- mgmt_device_connected(hdev, pa_sync, NULL, 0);
+ hci_connect_cfm(pa_sync, 0);
unlock:
hci_dev_unlock(hdev);
@@ -7512,9 +7377,6 @@ static const struct hci_ev {
/* [0x3e = HCI_EV_LE_META] */
HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
- /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
- HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
- sizeof(struct hci_ev_num_comp_blocks)),
/* [0xff = HCI_EV_VENDOR] */
HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
};
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 703b84bd4..69c2ba1e8 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -485,7 +485,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
return NULL;
ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
- ni->type = hdev->dev_type;
+ ni->type = 0x00; /* Old hdev->dev_type */
ni->bus = hdev->bus;
bacpy(&ni->bdaddr, &hdev->bdaddr);
memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
@@ -1007,9 +1007,6 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
return -EOPNOTSUPP;
- if (hdev->dev_type != HCI_PRIMARY)
- return -EOPNOTSUPP;
-
switch (cmd) {
case HCISETRAW:
if (!capable(CAP_NET_ADMIN))
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 4c707eb64..7bfa6b59b 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -1043,11 +1043,10 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
struct hci_cp_ext_adv_set *set;
u8 data[sizeof(*cp) + sizeof(*set) * 1];
u8 size;
+ struct adv_info *adv = NULL;
/* If request specifies an instance that doesn't exist, fail */
if (instance > 0) {
- struct adv_info *adv;
-
adv = hci_find_adv_instance(hdev, instance);
if (!adv)
return -EINVAL;
@@ -1066,7 +1065,7 @@ static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp->num_of_sets = !!instance;
cp->enable = 0x00;
- set->handle = instance;
+ set->handle = adv ? adv->handle : instance;
size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
@@ -1195,7 +1194,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.own_addr_type = own_addr_type;
cp.channel_map = hdev->le_adv_channel_map;
- cp.handle = instance;
+ cp.handle = adv ? adv->handle : instance;
if (flags & MGMT_ADV_FLAG_SEC_2M) {
cp.primary_phy = HCI_ADV_PHY_1M;
@@ -1235,31 +1234,27 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_ext_scan_rsp_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
+ HCI_MAX_EXT_AD_LENGTH);
u8 len;
struct adv_info *adv = NULL;
int err;
- memset(&pdu, 0, sizeof(pdu));
-
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->scan_rsp_changed)
return 0;
}
- len = eir_create_scan_rsp(hdev, instance, pdu.data);
+ len = eir_create_scan_rsp(hdev, instance, pdu->data);
- pdu.cp.handle = instance;
- pdu.cp.length = len;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->length = len;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
- sizeof(pdu.cp) + len, &pdu.cp,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
if (err)
return err;
@@ -1267,7 +1262,7 @@ static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
if (adv) {
adv->scan_rsp_changed = false;
} else {
- memcpy(hdev->scan_rsp_data, pdu.data, len);
+ memcpy(hdev->scan_rsp_data, pdu->data, len);
hdev->scan_rsp_data_len = len;
}
@@ -1335,7 +1330,7 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
memset(set, 0, sizeof(*set));
- set->handle = instance;
+ set->handle = adv ? adv->handle : instance;
/* Set duration per instance since controller is responsible for
* scheduling it.
@@ -1411,29 +1406,25 @@ static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_per_adv_data cp;
- u8 data[HCI_MAX_PER_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
+ HCI_MAX_PER_AD_LENGTH);
u8 len;
-
- memset(&pdu, 0, sizeof(pdu));
+ struct adv_info *adv = NULL;
if (instance) {
- struct adv_info *adv = hci_find_adv_instance(hdev, instance);
-
+ adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->periodic)
return 0;
}
- len = eir_create_per_adv_data(hdev, instance, pdu.data);
+ len = eir_create_per_adv_data(hdev, instance, pdu->data);
- pdu.cp.length = len;
- pdu.cp.handle = instance;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->length = len;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
- sizeof(pdu.cp) + len, &pdu,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
}
@@ -1727,31 +1718,27 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
- struct {
- struct hci_cp_le_set_ext_adv_data cp;
- u8 data[HCI_MAX_EXT_AD_LENGTH];
- } pdu;
+ DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
+ HCI_MAX_EXT_AD_LENGTH);
u8 len;
struct adv_info *adv = NULL;
int err;
- memset(&pdu, 0, sizeof(pdu));
-
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->adv_data_changed)
return 0;
}
- len = eir_create_adv_data(hdev, instance, pdu.data);
+ len = eir_create_adv_data(hdev, instance, pdu->data);
- pdu.cp.length = len;
- pdu.cp.handle = instance;
- pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
- pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+ pdu->length = len;
+ pdu->handle = adv ? adv->handle : instance;
+ pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
- sizeof(pdu.cp) + len, &pdu.cp,
+ struct_size(pdu, data, len), pdu,
HCI_CMD_TIMEOUT);
if (err)
return err;
@@ -1760,7 +1747,7 @@ static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
if (adv) {
adv->adv_data_changed = false;
} else {
- memcpy(hdev->adv_data, pdu.data, len);
+ memcpy(hdev->adv_data, pdu->data, len);
hdev->adv_data_len = len;
}
@@ -3523,10 +3510,6 @@ static int hci_unconf_init_sync(struct hci_dev *hdev)
/* Read Local Supported Features. */
static int hci_read_local_features_sync(struct hci_dev *hdev)
{
- /* Not all AMP controllers support this command */
- if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
- return 0;
-
return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
0, NULL, HCI_CMD_TIMEOUT);
}
@@ -3561,51 +3544,6 @@ static int hci_read_local_cmds_sync(struct hci_dev *hdev)
return 0;
}
-/* Read Local AMP Info */
-static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Data Blk size */
-static int hci_read_data_block_size_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Flow Control Mode */
-static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* Read Location Data */
-static int hci_read_location_data_sync(struct hci_dev *hdev)
-{
- return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
- 0, NULL, HCI_CMD_TIMEOUT);
-}
-
-/* AMP Controller init stage 1 command sequence */
-static const struct hci_init_stage amp_init1[] = {
- /* HCI_OP_READ_LOCAL_VERSION */
- HCI_INIT(hci_read_local_version_sync),
- /* HCI_OP_READ_LOCAL_COMMANDS */
- HCI_INIT(hci_read_local_cmds_sync),
- /* HCI_OP_READ_LOCAL_AMP_INFO */
- HCI_INIT(hci_read_local_amp_info_sync),
- /* HCI_OP_READ_DATA_BLOCK_SIZE */
- HCI_INIT(hci_read_data_block_size_sync),
- /* HCI_OP_READ_FLOW_CONTROL_MODE */
- HCI_INIT(hci_read_flow_control_mode_sync),
- /* HCI_OP_READ_LOCATION_DATA */
- HCI_INIT(hci_read_location_data_sync),
- {}
-};
-
static int hci_init1_sync(struct hci_dev *hdev)
{
int err;
@@ -3619,28 +3557,9 @@ static int hci_init1_sync(struct hci_dev *hdev)
return err;
}
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
- return hci_init_stage_sync(hdev, br_init1);
- case HCI_AMP:
- hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
- return hci_init_stage_sync(hdev, amp_init1);
- default:
- bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
- break;
- }
-
- return 0;
+ return hci_init_stage_sync(hdev, br_init1);
}
-/* AMP Controller init stage 2 command sequence */
-static const struct hci_init_stage amp_init2[] = {
- /* HCI_OP_READ_LOCAL_FEATURES */
- HCI_INIT(hci_read_local_features_sync),
- {}
-};
-
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
@@ -3898,9 +3817,6 @@ static int hci_init2_sync(struct hci_dev *hdev)
bt_dev_dbg(hdev, "");
- if (hdev->dev_type == HCI_AMP)
- return hci_init_stage_sync(hdev, amp_init2);
-
err = hci_init_stage_sync(hdev, hci_init2);
if (err)
return err;
@@ -4728,13 +4644,6 @@ static int hci_init_sync(struct hci_dev *hdev)
if (err < 0)
return err;
- /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
- * BR/EDR/LE type controllers. AMP controllers only need the
- * first two stages of init.
- */
- if (hdev->dev_type != HCI_PRIMARY)
- return 0;
-
err = hci_init3_sync(hdev);
if (err < 0)
return err;
@@ -4963,12 +4872,8 @@ int hci_dev_open_sync(struct hci_dev *hdev)
* In case of user channel usage, it is not important
* if a public address or static random address is
* available.
- *
- * This check is only valid for BR/EDR controllers
- * since AMP controllers do not have an address.
*/
if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hdev->dev_type == HCI_PRIMARY &&
!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY)) {
ret = -EADDRNOTAVAIL;
@@ -5003,8 +4908,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_CONFIG) &&
!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT) &&
- hdev->dev_type == HCI_PRIMARY) {
+ hci_dev_test_flag(hdev, HCI_MGMT)) {
ret = hci_powered_update_sync(hdev);
mgmt_power_on(hdev, ret);
}
@@ -5149,8 +5053,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
- if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
hci_dev_test_flag(hdev, HCI_MGMT))
__mgmt_power_off(hdev);
@@ -5212,9 +5115,6 @@ int hci_dev_close_sync(struct hci_dev *hdev)
hdev->flags &= BIT(HCI_RAW);
hci_dev_clear_volatile_flags(hdev);
- /* Controller radio is available but is currently powered down */
- hdev->amp_status = AMP_STATUS_POWERED_DOWN;
-
memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY);
@@ -5251,8 +5151,7 @@ static int hci_power_on_sync(struct hci_dev *hdev)
*/
if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
- (hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
!bacmp(&hdev->static_addr, BDADDR_ANY))) {
hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
hci_dev_close_sync(hdev);
@@ -5354,27 +5253,11 @@ int hci_stop_discovery_sync(struct hci_dev *hdev)
return 0;
}
-static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
- u8 reason)
-{
- struct hci_cp_disconn_phy_link cp;
-
- memset(&cp, 0, sizeof(cp));
- cp.phy_handle = HCI_PHY_HANDLE(handle);
- cp.reason = reason;
-
- return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
- sizeof(cp), &cp, HCI_CMD_TIMEOUT);
-}
-
static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
u8 reason)
{
struct hci_cp_disconnect cp;
- if (conn->type == AMP_LINK)
- return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
-
if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
/* This is a BIS connection, hci_conn_del will
* do the necessary cleanup.
@@ -6493,10 +6376,8 @@ done:
int hci_le_create_cis_sync(struct hci_dev *hdev)
{
- struct {
- struct hci_cp_le_create_cis cp;
- struct hci_cis cis[0x1f];
- } cmd;
+ DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
+ size_t aux_num_cis = 0;
struct hci_conn *conn;
u8 cig = BT_ISO_QOS_CIG_UNSET;
@@ -6523,8 +6404,6 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
* remains pending.
*/
- memset(&cmd, 0, sizeof(cmd));
-
hci_dev_lock(hdev);
rcu_read_lock();
@@ -6561,7 +6440,7 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
goto done;
list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
- struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis];
+ struct hci_cis *cis = &cmd->cis[aux_num_cis];
if (hci_conn_check_create_cis(conn) ||
conn->iso_qos.ucast.cig != cig)
@@ -6570,25 +6449,25 @@ int hci_le_create_cis_sync(struct hci_dev *hdev)
set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
cis->acl_handle = cpu_to_le16(conn->parent->handle);
cis->cis_handle = cpu_to_le16(conn->handle);
- cmd.cp.num_cis++;
+ aux_num_cis++;
- if (cmd.cp.num_cis >= ARRAY_SIZE(cmd.cis))
+ if (aux_num_cis >= 0x1f)
break;
}
+ cmd->num_cis = aux_num_cis;
done:
rcu_read_unlock();
hci_dev_unlock(hdev);
- if (!cmd.cp.num_cis)
+ if (!aux_num_cis)
return 0;
/* Wait for HCI_LE_CIS_Established */
return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
- sizeof(cmd.cp) + sizeof(cmd.cis[0]) *
- cmd.cp.num_cis, &cmd,
- HCI_EVT_LE_CIS_ESTABLISHED,
+ struct_size(cmd, cis, cmd->num_cis),
+ cmd, HCI_EVT_LE_CIS_ESTABLISHED,
conn->conn_timeout, NULL);
}
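
Several of the hci_sync.c conversions above replace the old on-stack "struct cp plus trailing u8 data[MAX]" wrappers with DEFINE_FLEX(), which declares zero-initialized on-stack storage sized for a fixed maximum of flexible-array elements and pre-initializes the named counter member to that maximum. A hedged, kernel-style sketch of the usage pattern (struct layout and the fill/send helpers are illustrative, not Bluetooth API):

    struct example_cmd {
            u8 handle;
            u8 length;
            u8 data[];      /* flexible array, counted by @length */
    };

    static void example_send(void)
    {
            /* pdu points at zeroed stack storage with room for 8 data bytes;
             * pdu->length starts out as 8 until overwritten below.
             */
            DEFINE_FLEX(struct example_cmd, pdu, data, length, 8);

            pdu->length = fill_data(pdu->data);                   /* assumed helper */
            send_cmd(struct_size(pdu, data, pdu->length), pdu);   /* assumed helper */
    }
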
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 6bed4aa82..00c0d8413 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -54,7 +54,6 @@ static void iso_sock_kill(struct sock *sk);
enum {
BT_SK_BIG_SYNC,
BT_SK_PA_SYNC,
- BT_SK_PA_SYNC_TERM,
};
struct iso_pinfo {
@@ -81,12 +80,14 @@ static bool check_ucast_qos(struct bt_iso_qos *qos);
static bool check_bcast_qos(struct bt_iso_qos *qos);
static bool iso_match_sid(struct sock *sk, void *data);
static bool iso_match_sync_handle(struct sock *sk, void *data);
+static bool iso_match_sync_handle_pa_report(struct sock *sk, void *data);
static void iso_sock_disconn(struct sock *sk);
typedef bool (*iso_sock_match_t)(struct sock *sk, void *data);
-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
- iso_sock_match_t match, void *data);
+static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
+ enum bt_sock_state state,
+ iso_sock_match_t match, void *data);
/* ---- ISO timers ---- */
#define ISO_CONN_TIMEOUT (HZ * 40)
@@ -196,21 +197,10 @@ static void iso_chan_del(struct sock *sk, int err)
sock_set_flag(sk, SOCK_ZAPPED);
}
-static bool iso_match_conn_sync_handle(struct sock *sk, void *data)
-{
- struct hci_conn *hcon = data;
-
- if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags))
- return false;
-
- return hcon->sync_handle == iso_pi(sk)->sync_handle;
-}
-
static void iso_conn_del(struct hci_conn *hcon, int err)
{
struct iso_conn *conn = hcon->iso_data;
struct sock *sk;
- struct sock *parent;
if (!conn)
return;
@@ -226,25 +216,6 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
if (sk) {
lock_sock(sk);
-
- /* While a PA sync hcon is in the process of closing,
- * mark parent socket with a flag, so that any residual
- * BIGInfo adv reports that arrive before PA sync is
- * terminated are not processed anymore.
- */
- if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_conn_sync_handle,
- hcon);
-
- if (parent) {
- set_bit(BT_SK_PA_SYNC_TERM,
- &iso_pi(parent)->flags);
- sock_put(parent);
- }
- }
-
iso_sock_clear_timer(sk);
iso_chan_del(sk, err);
release_sock(sk);
@@ -581,22 +552,23 @@ static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc,
return NULL;
}
-/* Find socket listening:
+/* Find socket in given state:
* source bdaddr (Unicast)
* destination bdaddr (Broadcast only)
* match func - pass NULL to ignore
* match func data - pass -1 to ignore
* Returns closest match.
*/
-static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
- iso_sock_match_t match, void *data)
+static struct sock *iso_get_sock(bdaddr_t *src, bdaddr_t *dst,
+ enum bt_sock_state state,
+ iso_sock_match_t match, void *data)
{
struct sock *sk = NULL, *sk1 = NULL;
read_lock(&iso_sk_list.lock);
sk_for_each(sk, &iso_sk_list.head) {
- if (sk->sk_state != BT_LISTEN)
+ if (sk->sk_state != state)
continue;
/* Match Broadcast destination */
@@ -857,6 +829,7 @@ static struct sock *iso_sock_alloc(struct net *net, struct socket *sock,
iso_pi(sk)->src_type = BDADDR_LE_PUBLIC;
iso_pi(sk)->qos = default_qos;
+ iso_pi(sk)->sync_handle = -1;
bt_sock_link(&iso_sk_list, sk);
return sk;
@@ -904,7 +877,6 @@ static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type;
- iso_pi(sk)->sync_handle = -1;
if (sa->iso_bc->bc_sid > 0x0f)
return -EINVAL;
@@ -981,7 +953,8 @@ static int iso_sock_bind(struct socket *sock, struct sockaddr *addr,
/* Allow the user to bind a PA sync socket to a number
* of BISes to sync to.
*/
- if (sk->sk_state == BT_CONNECT2 &&
+ if ((sk->sk_state == BT_CONNECT2 ||
+ sk->sk_state == BT_CONNECTED) &&
test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
err = iso_sock_bind_pa_sk(sk, sa, addr_len);
goto done;
@@ -1393,6 +1366,16 @@ static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg,
}
release_sock(sk);
return 0;
+ case BT_CONNECTED:
+ if (test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags)) {
+ iso_conn_big_sync(sk);
+ sk->sk_state = BT_LISTEN;
+ release_sock(sk);
+ return 0;
+ }
+
+ release_sock(sk);
+ break;
case BT_CONNECT:
release_sock(sk);
return iso_connect_cis(sk);
@@ -1538,7 +1521,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname,
case BT_ISO_QOS:
if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
- sk->sk_state != BT_CONNECT2) {
+ sk->sk_state != BT_CONNECT2 &&
+ (!test_bit(BT_SK_PA_SYNC, &iso_pi(sk)->flags) ||
+ sk->sk_state != BT_CONNECTED)) {
err = -EINVAL;
break;
}
@@ -1759,7 +1744,7 @@ static void iso_conn_ready(struct iso_conn *conn)
struct sock *sk = conn->sk;
struct hci_ev_le_big_sync_estabilished *ev = NULL;
struct hci_ev_le_pa_sync_established *ev2 = NULL;
- struct hci_evt_le_big_info_adv_report *ev3 = NULL;
+ struct hci_ev_le_per_adv_report *ev3 = NULL;
struct hci_conn *hcon;
BT_DBG("conn %p", conn);
@@ -1777,32 +1762,37 @@ static void iso_conn_ready(struct iso_conn *conn)
HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
/* Get reference to PA sync parent socket, if it exists */
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_pa_sync_flag, NULL);
+ parent = iso_get_sock(&hcon->src, &hcon->dst,
+ BT_LISTEN,
+ iso_match_pa_sync_flag,
+ NULL);
if (!parent && ev)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_big, ev);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_big, ev);
} else if (test_bit(HCI_CONN_PA_SYNC_FAILED, &hcon->flags)) {
ev2 = hci_recv_event_data(hcon->hdev,
HCI_EV_LE_PA_SYNC_ESTABLISHED);
if (ev2)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_sid, ev2);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_sid, ev2);
} else if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
ev3 = hci_recv_event_data(hcon->hdev,
- HCI_EVT_LE_BIG_INFO_ADV_REPORT);
+ HCI_EV_LE_PER_ADV_REPORT);
if (ev3)
- parent = iso_get_sock_listen(&hcon->src,
- &hcon->dst,
- iso_match_sync_handle, ev3);
+ parent = iso_get_sock(&hcon->src,
+ &hcon->dst,
+ BT_LISTEN,
+ iso_match_sync_handle_pa_report,
+ ev3);
}
if (!parent)
- parent = iso_get_sock_listen(&hcon->src,
- BDADDR_ANY, NULL, NULL);
+ parent = iso_get_sock(&hcon->src, BDADDR_ANY,
+ BT_LISTEN, NULL, NULL);
if (!parent)
return;
@@ -1839,7 +1829,6 @@ static void iso_conn_ready(struct iso_conn *conn)
if (ev3) {
iso_pi(sk)->qos = iso_pi(parent)->qos;
- iso_pi(sk)->qos.bcast.encryption = ev3->encryption;
hcon->iso_qos = iso_pi(sk)->qos;
iso_pi(sk)->bc_num_bis = iso_pi(parent)->bc_num_bis;
memcpy(iso_pi(sk)->bc_bis, iso_pi(parent)->bc_bis, ISO_MAX_NUM_BIS);
@@ -1923,8 +1912,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
*/
ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED);
if (ev1) {
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid,
- ev1);
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+ iso_match_sid, ev1);
if (sk && !ev1->status)
iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle);
@@ -1933,26 +1922,29 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT);
if (ev2) {
- /* Try to get PA sync listening socket, if it exists */
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_pa_sync_flag, NULL);
-
- if (!sk) {
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_sync_handle, ev2);
-
- /* If PA Sync is in process of terminating,
- * do not handle any more BIGInfo adv reports.
- */
-
- if (sk && test_bit(BT_SK_PA_SYNC_TERM,
- &iso_pi(sk)->flags))
- return 0;
+ /* Check if BIGInfo report has already been handled */
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECTED,
+ iso_match_sync_handle, ev2);
+ if (sk) {
+ sock_put(sk);
+ sk = NULL;
+ goto done;
}
+ /* Try to get PA sync socket, if it exists */
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_CONNECT2,
+ iso_match_sync_handle, ev2);
+ if (!sk)
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr,
+ BT_LISTEN,
+ iso_match_sync_handle,
+ ev2);
+
if (sk) {
int err;
+ iso_pi(sk)->qos.bcast.encryption = ev2->encryption;
+
if (ev2->num_bis < iso_pi(sk)->bc_num_bis)
iso_pi(sk)->bc_num_bis = ev2->num_bis;
@@ -1971,6 +1963,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
}
}
}
+
+ goto done;
}
ev3 = hci_recv_event_data(hdev, HCI_EV_LE_PER_ADV_REPORT);
@@ -1979,8 +1973,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
u8 *base;
struct hci_conn *hcon;
- sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr,
- iso_match_sync_handle_pa_report, ev3);
+ sk = iso_get_sock(&hdev->bdaddr, bdaddr, BT_LISTEN,
+ iso_match_sync_handle_pa_report, ev3);
if (!sk)
goto done;
@@ -2029,7 +2023,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
hcon->le_per_adv_data_len = 0;
}
} else {
- sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL);
+ sk = iso_get_sock(&hdev->bdaddr, BDADDR_ANY,
+ BT_LISTEN, NULL, NULL);
}
done:
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 3f7a82f10..9394a158d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -457,6 +457,9 @@ struct l2cap_chan *l2cap_chan_create(void)
/* Set default lock nesting level */
atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
+ /* Available receive buffer space is initially unknown */
+ chan->rx_avail = -1;
+
write_lock(&chan_list_lock);
list_add(&chan->global_l, &chan_list);
write_unlock(&chan_list_lock);
@@ -538,6 +541,28 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
+static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
+{
+ size_t sdu_len = chan->sdu ? chan->sdu->len : 0;
+
+ if (chan->mps == 0)
+ return 0;
+
+ /* If we don't know the available space in the receive buffer, give
+ * enough credits for a full packet.
+ */
+ if (chan->rx_avail == -1)
+ return (chan->imtu / chan->mps) + 1;
+
+ /* If we know how much space is available in the receive buffer, give
+ * out as many credits as would fill the buffer.
+ */
+ if (chan->rx_avail <= sdu_len)
+ return 0;
+
+ return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
+}
+
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
chan->sdu = NULL;
@@ -546,8 +571,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
chan->tx_credits = tx_credits;
/* Derive MPS from connection MTU to stop HCI fragmentation */
chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
- /* Give enough credits for a full packet */
- chan->rx_credits = (chan->imtu / chan->mps) + 1;
+ chan->rx_credits = l2cap_le_rx_credits(chan);
skb_queue_head_init(&chan->tx_q);
}
@@ -559,7 +583,7 @@ static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
/* L2CAP implementations shall support a minimum MPS of 64 octets */
if (chan->mps < L2CAP_ECRED_MIN_MPS) {
chan->mps = L2CAP_ECRED_MIN_MPS;
- chan->rx_credits = (chan->imtu / chan->mps) + 1;
+ chan->rx_credits = l2cap_le_rx_credits(chan);
}
}
@@ -3906,7 +3930,7 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn,
}
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
- u8 *data, u8 rsp_code, u8 amp_id)
+ u8 *data, u8 rsp_code)
{
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
@@ -3985,17 +4009,8 @@ static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
status = L2CAP_CS_AUTHOR_PEND;
chan->ops->defer(chan);
} else {
- /* Force pending result for AMP controllers.
- * The connection will succeed after the
- * physical link is up.
- */
- if (amp_id == AMP_ID_BREDR) {
- l2cap_state_change(chan, BT_CONFIG);
- result = L2CAP_CR_SUCCESS;
- } else {
- l2cap_state_change(chan, BT_CONNECT2);
- result = L2CAP_CR_PEND;
- }
+ l2cap_state_change(chan, BT_CONFIG);
+ result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
}
} else {
@@ -4060,7 +4075,7 @@ static int l2cap_connect_req(struct l2cap_conn *conn,
mgmt_device_connected(hdev, hcon, NULL, 0);
hci_dev_unlock(hdev);
- l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
+ l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
return 0;
}
@@ -4630,13 +4645,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
memset(&rsp, 0, sizeof(rsp));
- if (max > hcon->le_conn_max_interval) {
- BT_DBG("requested connection interval exceeds current bounds.");
- err = -EINVAL;
- } else {
- err = hci_check_conn_params(min, max, latency, to_multiplier);
- }
-
+ err = hci_check_conn_params(min, max, latency, to_multiplier);
if (err)
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
@@ -6513,9 +6522,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
struct l2cap_le_credits pkt;
- u16 return_credits;
-
- return_credits = (chan->imtu / chan->mps) + 1;
+ u16 return_credits = l2cap_le_rx_credits(chan);
if (chan->rx_credits >= return_credits)
return;
@@ -6534,6 +6541,19 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
+void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
+{
+ if (chan->rx_avail == rx_avail)
+ return;
+
+ BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);
+
+ chan->rx_avail = rx_avail;
+
+ if (chan->state == BT_CONNECTED)
+ l2cap_chan_le_send_credits(chan);
+}
+
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
@@ -6543,6 +6563,12 @@ static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
/* Wait for recv to confirm reception before updating the credits */
err = chan->ops->recv(chan, skb);
+ if (err < 0 && chan->rx_avail != -1) {
+ BT_ERR("Queueing received LE L2CAP data failed");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return err;
+ }
+
/* Update credits whenever an SDU is received */
l2cap_chan_le_send_credits(chan);
@@ -6565,7 +6591,8 @@ static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
}
chan->rx_credits--;
- BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
+ BT_DBG("chan %p: rx_credits %u -> %u",
+ chan, chan->rx_credits + 1, chan->rx_credits);
/* Update if the remote had run out of credits; this should only happen
* if the remote is not using the entire MPS.
@@ -7453,10 +7480,6 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
struct l2cap_conn *conn = hcon->l2cap_data;
int len;
- /* For AMP controller do not create l2cap conn */
- if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
- goto drop;
-
if (!conn)
conn = l2cap_conn_add(hcon);
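
For reference, the credit arithmetic introduced by l2cap_le_rx_credits() above can be checked with a small standalone sketch. This is not part of the patch: DIV_ROUND_UP mirrors the kernel macro, the imtu/mps/rx_avail numbers are invented, and sdu_len stands for the length of any partially reassembled SDU.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* Userspace mirror of the new LE/ECRED credit policy. */
    static unsigned int le_rx_credits(long rx_avail, unsigned int imtu,
                                      unsigned int mps, long sdu_len)
    {
        if (rx_avail == -1)                 /* receive space unknown       */
            return (imtu / mps) + 1;        /* enough for one full packet  */

        if (rx_avail <= sdu_len)            /* buffer effectively full     */
            return 0;

        return DIV_ROUND_UP(rx_avail - sdu_len, mps);
    }

    int main(void)
    {
        printf("%u\n", le_rx_credits(-1, 672, 247, 0));     /* 3   */
        printf("%u\n", le_rx_credits(65536, 672, 247, 0));  /* 266 */
        printf("%u\n", le_rx_credits(100, 672, 247, 676));  /* 0   */
        return 0;
    }
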
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 5cc83f906..8645461d4 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1131,6 +1131,34 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
return err;
}
+static void l2cap_publish_rx_avail(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ ssize_t avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc);
+ int expected_skbs, skb_overhead;
+
+ if (avail <= 0) {
+ l2cap_chan_rx_avail(chan, 0);
+ return;
+ }
+
+ if (!chan->mps) {
+ l2cap_chan_rx_avail(chan, -1);
+ return;
+ }
+
+ /* Correct the available memory for the estimated sk_buff overhead.
+ * This is significant because transfer sizes tend to be small. However,
+ * accept at least one full packet if receive space is non-zero.
+ */
+ expected_skbs = DIV_ROUND_UP(avail, chan->mps);
+ skb_overhead = expected_skbs * sizeof(struct sk_buff);
+ if (skb_overhead < avail)
+ l2cap_chan_rx_avail(chan, avail - skb_overhead);
+ else
+ l2cap_chan_rx_avail(chan, -1);
+}
+
static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
@@ -1167,28 +1195,33 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,
else
err = bt_sock_recvmsg(sock, msg, len, flags);
- if (pi->chan->mode != L2CAP_MODE_ERTM)
+ if (pi->chan->mode != L2CAP_MODE_ERTM &&
+ pi->chan->mode != L2CAP_MODE_LE_FLOWCTL &&
+ pi->chan->mode != L2CAP_MODE_EXT_FLOWCTL)
return err;
- /* Attempt to put pending rx data in the socket buffer */
-
lock_sock(sk);
- if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
- goto done;
+ l2cap_publish_rx_avail(pi->chan);
- if (pi->rx_busy_skb) {
- if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
- pi->rx_busy_skb = NULL;
- else
+ /* Attempt to put pending rx data in the socket buffer */
+ while (!list_empty(&pi->rx_busy)) {
+ struct l2cap_rx_busy *rx_busy =
+ list_first_entry(&pi->rx_busy,
+ struct l2cap_rx_busy,
+ list);
+ if (__sock_queue_rcv_skb(sk, rx_busy->skb) < 0)
goto done;
+ list_del(&rx_busy->list);
+ kfree(rx_busy);
}
/* Restore data flow when half of the receive buffer is
* available. This avoids resending large numbers of
* frames.
*/
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+ if (test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state) &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
l2cap_chan_busy(pi->chan, 0);
done:
@@ -1449,17 +1482,20 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
struct sock *sk = chan->data;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
int err;
lock_sock(sk);
- if (l2cap_pi(sk)->rx_busy_skb) {
+ if (chan->mode == L2CAP_MODE_ERTM && !list_empty(&pi->rx_busy)) {
err = -ENOMEM;
goto done;
}
if (chan->mode != L2CAP_MODE_ERTM &&
- chan->mode != L2CAP_MODE_STREAMING) {
+ chan->mode != L2CAP_MODE_STREAMING &&
+ chan->mode != L2CAP_MODE_LE_FLOWCTL &&
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL) {
/* Even if no filter is attached, we could potentially
* get errors from security modules, etc.
*/
@@ -1470,7 +1506,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
err = __sock_queue_rcv_skb(sk, skb);
- /* For ERTM, handle one skb that doesn't fit into the recv
+ l2cap_publish_rx_avail(chan);
+
+ /* For ERTM and LE, handle a skb that doesn't fit into the recv
* buffer. This is important to do because the data frames
* have already been acked, so the skb cannot be discarded.
*
@@ -1479,8 +1517,18 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
* acked and reassembled until there is buffer space
* available.
*/
- if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
- l2cap_pi(sk)->rx_busy_skb = skb;
+ if (err < 0 &&
+ (chan->mode == L2CAP_MODE_ERTM ||
+ chan->mode == L2CAP_MODE_LE_FLOWCTL ||
+ chan->mode == L2CAP_MODE_EXT_FLOWCTL)) {
+ struct l2cap_rx_busy *rx_busy =
+ kmalloc(sizeof(*rx_busy), GFP_KERNEL);
+ if (!rx_busy) {
+ err = -ENOMEM;
+ goto done;
+ }
+ rx_busy->skb = skb;
+ list_add_tail(&rx_busy->list, &pi->rx_busy);
l2cap_chan_busy(chan, 1);
err = 0;
}
@@ -1706,6 +1754,8 @@ static const struct l2cap_ops l2cap_chan_ops = {
static void l2cap_sock_destruct(struct sock *sk)
{
+ struct l2cap_rx_busy *rx_busy, *next;
+
BT_DBG("sk %p", sk);
if (l2cap_pi(sk)->chan) {
@@ -1713,9 +1763,10 @@ static void l2cap_sock_destruct(struct sock *sk)
l2cap_chan_put(l2cap_pi(sk)->chan);
}
- if (l2cap_pi(sk)->rx_busy_skb) {
- kfree_skb(l2cap_pi(sk)->rx_busy_skb);
- l2cap_pi(sk)->rx_busy_skb = NULL;
+ list_for_each_entry_safe(rx_busy, next, &l2cap_pi(sk)->rx_busy, list) {
+ kfree_skb(rx_busy->skb);
+ list_del(&rx_busy->list);
+ kfree(rx_busy);
}
skb_queue_purge(&sk->sk_receive_queue);
@@ -1799,6 +1850,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
chan->data = sk;
chan->ops = &l2cap_chan_ops;
+
+ l2cap_publish_rx_avail(chan);
}
static struct proto l2cap_proto = {
@@ -1820,6 +1873,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
sk->sk_destruct = l2cap_sock_destruct;
sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
+ INIT_LIST_HEAD(&l2cap_pi(sk)->rx_busy);
+
chan = l2cap_chan_create();
if (!chan) {
sk_free(sk);
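
The sk_buff overhead correction in l2cap_publish_rx_avail() is easiest to see with numbers. The sketch below is illustrative only and not part of the patch; SKB_OVERHEAD stands in for sizeof(struct sk_buff), whose real value depends on kernel configuration.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))
    #define SKB_OVERHEAD        232L    /* assumed sizeof(struct sk_buff) */

    int main(void)
    {
        long rcvbuf = 65536, rmem_alloc = 4096, mps = 247;
        long avail = rcvbuf - rmem_alloc;
        long expected_skbs = DIV_ROUND_UP(avail, mps);
        long overhead = expected_skbs * SKB_OVERHEAD;

        /* With small MPS values the per-skb overhead consumes a large
         * share of the socket buffer, hence the correction before
         * credits are handed out.
         */
        if (overhead < avail)
            printf("publish rx_avail = %ld\n", avail - overhead);
        else
            printf("publish rx_avail = -1 (fall back to one full packet)\n");
        return 0;
    }
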
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 965f621ef..80f220b7e 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -443,8 +443,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -468,8 +467,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id);
}
@@ -503,8 +501,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
count = 0;
list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY &&
- hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
count++;
}
@@ -528,8 +525,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY &&
- hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
rp->index[count++] = cpu_to_le16(d->id);
bt_dev_dbg(hdev, "Added hci%u", d->id);
}
@@ -561,10 +557,8 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
read_lock(&hci_dev_list_lock);
count = 0;
- list_for_each_entry(d, &hci_dev_list, list) {
- if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
- count++;
- }
+ list_for_each_entry(d, &hci_dev_list, list)
+ count++;
rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
if (!rp) {
@@ -585,16 +579,10 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
continue;
- if (d->dev_type == HCI_PRIMARY) {
- if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
- rp->entry[count].type = 0x01;
- else
- rp->entry[count].type = 0x00;
- } else if (d->dev_type == HCI_AMP) {
- rp->entry[count].type = 0x02;
- } else {
- continue;
- }
+ if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
+ rp->entry[count].type = 0x01;
+ else
+ rp->entry[count].type = 0x00;
rp->entry[count].bus = d->bus;
rp->entry[count++].index = cpu_to_le16(d->id);
@@ -9331,23 +9319,14 @@ void mgmt_index_added(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
- ev.type = 0x01;
- } else {
- mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
- HCI_MGMT_INDEX_EVENTS);
- ev.type = 0x00;
- }
- break;
- case HCI_AMP:
- ev.type = 0x02;
- break;
- default:
- return;
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
+ HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
}
ev.bus = hdev->bus;
@@ -9364,25 +9343,16 @@ void mgmt_index_removed(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
return;
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
+ mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
- NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
- ev.type = 0x01;
- } else {
- mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
- HCI_MGMT_INDEX_EVENTS);
- ev.type = 0x00;
- }
- break;
- case HCI_AMP:
- ev.type = 0x02;
- break;
- default:
- return;
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
+ HCI_MGMT_UNCONF_INDEX_EVENTS);
+ ev.type = 0x01;
+ } else {
+ mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
+ HCI_MGMT_INDEX_EVENTS);
+ ev.type = 0x00;
}
ev.bus = hdev->bus;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 61efeadaf..4cd29fb49 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -719,10 +719,16 @@ static void
__bpf_prog_test_run_raw_tp(void *data)
{
struct bpf_raw_tp_test_run_info *info = data;
+ struct bpf_trace_run_ctx run_ctx = {};
+ struct bpf_run_ctx *old_run_ctx;
+
+ old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
rcu_read_lock();
info->retval = bpf_prog_run(info->prog, info->ctx);
rcu_read_unlock();
+
+ bpf_reset_run_ctx(old_run_ctx);
}
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c366ccc8b..ecac78869 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -27,6 +27,7 @@ EXPORT_SYMBOL_GPL(nf_br_ops);
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ enum skb_drop_reason reason = pskb_may_pull_reason(skb, ETH_HLEN);
struct net_bridge_mcast_port *pmctx_null = NULL;
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mcast *brmctx = &br->multicast_ctx;
@@ -38,6 +39,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const unsigned char *dest;
u16 vid = 0;
+ if (unlikely(reason != SKB_NOT_DROPPED_YET)) {
+ kfree_skb_reason(skb, reason);
+ return NETDEV_TX_OK;
+ }
+
memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
br_tc_skb_miss_set(skb, false);
diff --git a/net/bridge/br_mst.c b/net/bridge/br_mst.c
index ee680adce..1820f09ff 100644
--- a/net/bridge/br_mst.c
+++ b/net/bridge/br_mst.c
@@ -73,12 +73,11 @@ int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state)
}
EXPORT_SYMBOL_GPL(br_mst_get_state);
-static void br_mst_vlan_set_state(struct net_bridge_port *p, struct net_bridge_vlan *v,
+static void br_mst_vlan_set_state(struct net_bridge_vlan_group *vg,
+ struct net_bridge_vlan *v,
u8 state)
{
- struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
-
- if (v->state == state)
+ if (br_vlan_get_state(v) == state)
return;
br_vlan_set_state(v, state);
@@ -100,11 +99,12 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
};
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
- int err;
+ int err = 0;
- vg = nbp_vlan_group(p);
+ rcu_read_lock();
+ vg = nbp_vlan_group_rcu(p);
if (!vg)
- return 0;
+ goto out;
/* MSTI 0 (CST) state changes are notified via the regular
* SWITCHDEV_ATTR_ID_PORT_STP_STATE.
@@ -112,17 +112,20 @@ int br_mst_set_state(struct net_bridge_port *p, u16 msti, u8 state,
if (msti) {
err = switchdev_port_attr_set(p->dev, &attr, extack);
if (err && err != -EOPNOTSUPP)
- return err;
+ goto out;
}
- list_for_each_entry(v, &vg->vlan_list, vlist) {
+ err = 0;
+ list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
if (v->brvlan->msti != msti)
continue;
- br_mst_vlan_set_state(p, v, state);
+ br_mst_vlan_set_state(vg, v, state);
}
- return 0;
+out:
+ rcu_read_unlock();
+ return err;
}
static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
@@ -136,13 +139,13 @@ static void br_mst_vlan_sync_state(struct net_bridge_vlan *pv, u16 msti)
* it.
*/
if (v != pv && v->brvlan->msti == msti) {
- br_mst_vlan_set_state(pv->port, pv, v->state);
+ br_mst_vlan_set_state(vg, pv, v->state);
return;
}
}
/* Otherwise, start out in a new MSTI with all ports disabled. */
- return br_mst_vlan_set_state(pv->port, pv, BR_STATE_DISABLED);
+ return br_mst_vlan_set_state(vg, pv, BR_STATE_DISABLED);
}
int br_mst_vlan_set_msti(struct net_bridge_vlan *mv, u16 msti)
diff --git a/net/core/dev.c b/net/core/dev.c
index 331848eca..e8fb4ef8a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -10488,8 +10488,9 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
rebroadcast_time = jiffies;
}
+ rcu_barrier();
+
if (!wait) {
- rcu_barrier();
wait = WAIT_REFS_MIN_MSECS;
} else {
msleep(wait);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b0f221d65..430ed18f8 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -74,7 +74,7 @@ struct net_dm_hw_entries {
};
struct per_cpu_dm_data {
- spinlock_t lock; /* Protects 'skb', 'hw_entries' and
+ raw_spinlock_t lock; /* Protects 'skb', 'hw_entries' and
* 'send_timer'
*/
union {
@@ -168,9 +168,9 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
err:
mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
- spin_lock_irqsave(&data->lock, flags);
+ raw_spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
- spin_unlock_irqrestore(&data->lock, flags);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
if (skb) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
@@ -225,7 +225,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
local_irq_save(flags);
data = this_cpu_ptr(&dm_cpu_data);
- spin_lock(&data->lock);
+ raw_spin_lock(&data->lock);
dskb = data->skb;
if (!dskb)
@@ -259,7 +259,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
}
out:
- spin_unlock_irqrestore(&data->lock, flags);
+ raw_spin_unlock_irqrestore(&data->lock, flags);
}
static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
@@ -314,9 +314,9 @@ net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data)
mod_timer(&hw_data->send_timer, jiffies + HZ / 10);
}
- spin_lock_irqsave(&hw_data->lock, flags);
+ raw_spin_lock_irqsave(&hw_data->lock, flags);
swap(hw_data->hw_entries, hw_entries);
- spin_unlock_irqrestore(&hw_data->lock, flags);
+ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
return hw_entries;
}
@@ -448,7 +448,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
return;
hw_data = this_cpu_ptr(&dm_hw_cpu_data);
- spin_lock_irqsave(&hw_data->lock, flags);
+ raw_spin_lock_irqsave(&hw_data->lock, flags);
hw_entries = hw_data->hw_entries;
if (!hw_entries)
@@ -477,7 +477,7 @@ net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
}
out:
- spin_unlock_irqrestore(&hw_data->lock, flags);
+ raw_spin_unlock_irqrestore(&hw_data->lock, flags);
}
static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
@@ -1673,7 +1673,7 @@ static struct notifier_block dropmon_net_notifier = {
static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data)
{
- spin_lock_init(&data->lock);
+ raw_spin_lock_init(&data->lock);
skb_queue_head_init(&data->drop_queue);
u64_stats_init(&data->stats.syncp);
}
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index 0ccfd5fa5..0c0bdb058 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -83,7 +83,7 @@ struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr)
return NULL;
*saddr = idst->in_saddr.s_addr;
- return container_of(dst, struct rtable, dst);
+ return dst_rtable(dst);
}
EXPORT_SYMBOL_GPL(dst_cache_get_ip4);
@@ -112,7 +112,7 @@ void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst,
idst = this_cpu_ptr(dst_cache->cache);
dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst,
- rt6_get_cookie((struct rt6_info *)dst));
+ rt6_get_cookie(dst_rt6_info(dst)));
idst->in6_saddr = *saddr;
}
EXPORT_SYMBOL_GPL(dst_cache_set_ip6);
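
Many hunks in this series replace open-coded casts such as (struct rtable *)dst and container_of(dst, struct rt6_info, dst) with dst_rtable() and dst_rt6_info(). Judging from the mechanical replacements, these helpers are thin wrappers roughly along the following lines (a sketch, not the actual header definitions, which may use const-preserving variants):

    /* Sketch only; the real helpers live in the routing headers. */
    static inline struct rtable *dst_rtable(struct dst_entry *dst)
    {
        return container_of(dst, struct rtable, dst);
    }

    static inline struct rt6_info *dst_rt6_info(struct dst_entry *dst)
    {
        return container_of(dst, struct rt6_info, dst);
    }

The change is therefore about readability and type safety rather than behaviour.
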
diff --git a/net/core/filter.c b/net/core/filter.c
index ae5254f71..ce255e0a2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1662,6 +1662,11 @@ static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
static inline int __bpf_try_make_writable(struct sk_buff *skb,
unsigned int write_len)
{
+#ifdef CONFIG_DEBUG_NET
+ /* Avoid a splat in pskb_may_pull_reason() */
+ if (write_len > INT_MAX)
+ return -EINVAL;
+#endif
return skb_ensure_writable(skb, write_len);
}
@@ -2215,7 +2220,7 @@ static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
rcu_read_lock();
if (!nh) {
dst = skb_dst(skb);
- nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
+ nexthop = rt6_nexthop(dst_rt6_info(dst),
&ipv6_hdr(skb)->daddr);
} else {
nexthop = &nh->ipv6_nh;
@@ -2314,8 +2319,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
rcu_read_lock();
if (!nh) {
- struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = container_of(dst, struct rtable, dst);
+ struct rtable *rt = skb_rtable(skb);
neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
} else if (nh->nh_family == AF_INET6) {
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 9d690d32d..b1dc84c4f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -693,11 +693,16 @@ EXPORT_SYMBOL_GPL(__put_net);
* get_net_ns - increment the refcount of the network namespace
* @ns: common namespace (net)
*
- * Returns the net's common namespace.
+ * Returns the net's common namespace, or an ERR_PTR() if its refcount is zero.
*/
struct ns_common *get_net_ns(struct ns_common *ns)
{
- return &get_net(container_of(ns, struct net, ns))->ns;
+ struct net *net;
+
+ net = maybe_get_net(container_of(ns, struct net, ns));
+ if (net)
+ return &net->ns;
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(get_net_ns);
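
Because get_net_ns() can now fail, callers are expected to check for an error pointer rather than assuming success; roughly (a sketch, not taken from the patch):

    struct ns_common *ns = get_net_ns(&net->ns);

    if (IS_ERR(ns))
        return PTR_ERR(ns);    /* the namespace refcount was already zero */
    /* ... use ns, then drop the reference when done ... */
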
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 7004b3399..8c2d5a0bc 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -59,22 +59,22 @@ XDP_METADATA_KFUNC_xxx
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES,
xdp_rx_meta, NETDEV_A_DEV_PAD) ||
nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES,
- xsk_features, NETDEV_A_DEV_PAD)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ xsk_features, NETDEV_A_DEV_PAD))
+ goto err_cancel_msg;
if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) {
if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS,
- netdev->xdp_zc_max_segs)) {
- genlmsg_cancel(rsp, hdr);
- return -EINVAL;
- }
+ netdev->xdp_zc_max_segs))
+ goto err_cancel_msg;
}
genlmsg_end(rsp, hdr);
return 0;
+
+err_cancel_msg:
+ genlmsg_cancel(rsp, hdr);
+ return -EMSGSIZE;
}
static void
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 543007f15..55bcacf67 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -316,7 +316,7 @@ static int netpoll_owner_active(struct net_device *dev)
struct napi_struct *napi;
list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
- if (napi->poll_owner == smp_processor_id())
+ if (READ_ONCE(napi->poll_owner) == smp_processor_id())
return 1;
}
return 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8ba6a4e4b..74e6f9746 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
/* Process one rtnetlink message. */
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ rtnl_dumpit_func dumpit = cb->data;
+ int err;
+
+ /* The previous iteration has already finished; avoid calling ->dumpit()
+ * again, as it may not expect to be called after it reached the end.
+ */
+ if (!dumpit)
+ return 0;
+
+ err = dumpit(skb, cb);
+
+ /* Old dump handlers used to send NLM_DONE in a separate recvmsg().
+ * Some applications which parse netlink manually depend on this.
+ */
+ if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ if (err < 0 && err != -EMSGSIZE)
+ return err;
+ if (!err)
+ cb->data = NULL;
+
+ return skb->len;
+ }
+ return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ struct netlink_dump_control *control)
+{
+ if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+ WARN_ON(control->data);
+ control->data = control->dump;
+ control->dump = rtnl_dumpit;
+ }
+
+ return netlink_dump_start(ssk, skb, nlh, control);
+}
+
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
.module = owner,
.flags = flags,
};
- err = netlink_dump_start(rtnl, skb, nlh, &c);
+ err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
/* netlink_dump_start() will keep a reference on
* module if dump is still in progress.
*/
@@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void)
register_netdevice_notifier(&rtnetlink_dev_notifier);
rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
- rtnl_dump_ifinfo, 0);
+ rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
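
RTNL_FLAG_DUMP_SPLIT_NLM_DONE exists because some userspace parsers assume NLMSG_DONE arrives in its own recvmsg() rather than coalesced behind the last data message. A robust dump reader copes with either layout; a minimal sketch (raw netlink, names illustrative, error handling trimmed):

    #include <sys/socket.h>
    #include <linux/netlink.h>

    /* Drain one rtnetlink dump; works whether NLMSG_DONE is coalesced
     * with data messages or delivered in a separate datagram.
     */
    static int drain_dump(int fd)
    {
        char buf[32768];

        for (;;) {
            int len = recv(fd, buf, sizeof(buf), 0);
            struct nlmsghdr *nlh;

            if (len <= 0)
                return -1;

            for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
                 nlh = NLMSG_NEXT(nlh, len)) {
                if (nlh->nlmsg_type == NLMSG_DONE)
                    return 0;
                if (nlh->nlmsg_type == NLMSG_ERROR)
                    return -1;
                /* ... handle RTM_NEWLINK etc. here ... */
            }
        }
    }
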
diff --git a/net/core/sock.c b/net/core/sock.c
index 0963689a5..09eccc9c5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3743,6 +3743,9 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->unhash(sk);
+ if (sk->sk_socket)
+ sk->sk_socket->sk = NULL;
+
/*
* In this point socket cannot receive new packets, but it is possible
* that some packets are in flight because some CPU runs receiver and
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 8598466a3..01be07b48 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1639,19 +1639,23 @@ void sock_map_close(struct sock *sk, long timeout)
lock_sock(sk);
rcu_read_lock();
- psock = sk_psock_get(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- saved_close = READ_ONCE(sk->sk_prot)->close;
- } else {
+ psock = sk_psock(sk);
+ if (likely(psock)) {
saved_close = psock->saved_close;
sock_map_remove_links(sk, psock);
+ psock = sk_psock_get(sk);
+ if (unlikely(!psock))
+ goto no_psock;
rcu_read_unlock();
sk_psock_stop(psock);
release_sock(sk);
cancel_delayed_work_sync(&psock->work);
sk_psock_put(sk, psock);
+ } else {
+ saved_close = READ_ONCE(sk->sk_prot)->close;
+no_psock:
+ rcu_read_unlock();
+ release_sock(sk);
}
/* Make sure we do not recurse. This is a bug.
diff --git a/net/devlink/core.c b/net/devlink/core.c
index 7f0b09320..f49cd83f1 100644
--- a/net/devlink/core.c
+++ b/net/devlink/core.c
@@ -314,7 +314,7 @@ static void devlink_release(struct work_struct *work)
mutex_destroy(&devlink->lock);
lockdep_unregister_key(&devlink->lock_key);
put_device(devlink->dev);
- kfree(devlink);
+ kvfree(devlink);
}
void devlink_put(struct devlink *devlink)
@@ -420,7 +420,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
if (!devlink_reload_actions_valid(ops))
return NULL;
- devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
+ devlink = kvzalloc(struct_size(devlink, priv, priv_size), GFP_KERNEL);
if (!devlink)
return NULL;
@@ -455,7 +455,7 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
return devlink;
err_xa_alloc:
- kfree(devlink);
+ kvfree(devlink);
return NULL;
}
EXPORT_SYMBOL_GPL(devlink_alloc_ns);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 049c3adeb..4e3651101 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -161,9 +161,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
skb->dev = dev;
skb_reset_mac_header(skb);
- eth = (struct ethhdr *)skb->data;
- skb_pull_inline(skb, ETH_HLEN);
-
+ eth = eth_skb_pull_mac(skb);
eth_skb_pkt_type(skb, dev);
/*
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 5a55270aa..e645d751a 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
int n_stats, ret;
- if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+ if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
return -EOPNOTSUPP;
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index fafb123f7..5622ddd3b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -758,7 +758,9 @@ void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *new
sock_rps_record_flow(newsk);
WARN_ON(!((1 << newsk->sk_state) &
(TCPF_ESTABLISHED | TCPF_SYN_RECV |
- TCPF_CLOSE_WAIT | TCPF_CLOSE)));
+ TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
+ TCPF_CLOSING | TCPF_CLOSE_WAIT |
+ TCPF_CLOSE)));
if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
@@ -1306,8 +1308,8 @@ static int inet_sk_reselect_saddr(struct sock *sk)
int inet_sk_rebuild_header(struct sock *sk)
{
+ struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
struct inet_sock *inet = inet_sk(sk);
- struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
__be32 daddr;
struct ip_options_rcu *inet_opt;
struct flowi4 *fl4;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 8b17d83e5..1eb98440c 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -2012,12 +2012,16 @@ static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
* from there we can determine the new total option length */
iter = 0;
optlen_new = 0;
- while (iter < opt->opt.optlen)
- if (opt->opt.__data[iter] != IPOPT_NOP) {
+ while (iter < opt->opt.optlen) {
+ if (opt->opt.__data[iter] == IPOPT_END) {
+ break;
+ } else if (opt->opt.__data[iter] == IPOPT_NOP) {
+ iter++;
+ } else {
iter += opt->opt.__data[iter + 1];
optlen_new = iter;
- } else
- iter++;
+ }
+ }
hdr_delta = opt->opt.optlen;
opt->opt.optlen = (optlen_new + 3) & ~3;
hdr_delta -= opt->opt.optlen;
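
The rewritten loop walks IPv4 options in the standard way: IPOPT_END (0) terminates the option list, IPOPT_NOP (1) is a single padding byte, and every other option carries its total length in the byte that follows the type. A standalone sketch of the same walk, with invented option bytes:

    #include <stdio.h>

    #define IPOPT_END   0
    #define IPOPT_NOP   1

    int main(void)
    {
        /* NOP pad, a 4-byte router-alert option, END, trailing bytes */
        unsigned char opt[] = { IPOPT_NOP, 0x94, 0x04, 0x00, 0x00,
                                IPOPT_END, 0xff, 0xff };
        unsigned int iter = 0, optlen_new = 0;

        while (iter < sizeof(opt)) {
            if (opt[iter] == IPOPT_END) {
                break;                      /* nothing meaningful follows  */
            } else if (opt[iter] == IPOPT_NOP) {
                iter++;                     /* single padding byte         */
            } else {
                iter += opt[iter + 1];      /* skip the whole option       */
                optlen_new = iter;          /* end of the last real option */
            }
        }

        /* Round up to a 4-byte boundary, as the kernel does. */
        printf("optlen_new=%u padded=%u\n", optlen_new, (optlen_new + 3) & ~3u);
        return 0;
    }
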
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 7a437f0d4..84b5d1ccf 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1683,6 +1683,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
struct nlmsghdr *nlh;
unsigned long tstamp;
u32 preferred, valid;
+ u32 flags;
nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
args->flags);
@@ -1692,7 +1693,13 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
- ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
+
+ flags = READ_ONCE(ifa->ifa_flags);
+ /* Warning: ifm->ifa_flags is an __u8, so it holds only 8 bits.
+ * The full 32-bit value is carried in the IFA_FLAGS attribute.
+ */
+ ifm->ifa_flags = (__u8)flags;
+
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
@@ -1701,7 +1708,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
goto nla_put_failure;
tstamp = READ_ONCE(ifa->ifa_tstamp);
- if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
+ if (!(flags & IFA_F_PERMANENT)) {
preferred = READ_ONCE(ifa->ifa_preferred_lft);
valid = READ_ONCE(ifa->ifa_valid_lft);
if (preferred != INFINITY_LIFE_TIME) {
@@ -1732,7 +1739,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, const struct in_ifaddr *ifa,
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
(ifa->ifa_proto &&
nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
- nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
+ nla_put_u32(skb, IFA_FLAGS, flags) ||
(ifa->ifa_rt_priority &&
nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
@@ -1875,10 +1882,11 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
goto done;
if (fillargs.ifindex) {
- err = -ENODEV;
dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
- if (!dev)
+ if (!dev) {
+ err = -ENODEV;
goto done;
+ }
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto done;
@@ -1890,7 +1898,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
cb->seq = inet_base_seq(tgt_net);
- for_each_netdev_dump(net, dev, ctx->ifindex) {
+ for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
continue;
@@ -2793,7 +2801,7 @@ void __init devinet_init(void)
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf,
RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
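
The split between ifm->ifa_flags and the IFA_FLAGS attribute matters because ifa_flags in struct ifaddrmsg is a single byte: any flag bit at or above bit 8 silently disappears if only the header field is used. A two-line illustration with an invented flag value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t flags = 0x180;           /* bit 7 plus a bit above bit 7 */
        uint8_t  hdr   = (uint8_t)flags;  /* what ifaddrmsg can carry     */

        printf("header=0x%02x attribute=0x%x\n", hdr, flags); /* 0x80 vs 0x180 */
        return 0;
    }
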
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index c484b1c0f..7ad2cafb9 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1050,11 +1050,6 @@ next:
e++;
}
}
-
- /* Don't let NLM_DONE coalesce into a message, even if it could.
- * Some user space expects NLM_DONE in a separate recv().
- */
- err = skb->len;
out:
cb->args[1] = e;
@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
- RTNL_FLAG_DUMP_UNLOCKED);
+ RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
}
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 437e782b9..207482d30 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -483,6 +483,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
struct icmp_bxm *param)
{
struct net_device *route_lookup_dev;
+ struct dst_entry *dst, *dst2;
struct rtable *rt, *rt2;
struct flowi4 fl4_dec;
int err;
@@ -508,16 +509,17 @@ static struct rtable *icmp_route_lookup(struct net *net,
/* No need to clone since we're just using its address. */
rt2 = rt;
- rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
- flowi4_to_flowi(fl4), NULL, 0);
- if (!IS_ERR(rt)) {
+ dst = xfrm_lookup(net, &rt->dst,
+ flowi4_to_flowi(fl4), NULL, 0);
+ rt = dst_rtable(dst);
+ if (!IS_ERR(dst)) {
if (rt != rt2)
return rt;
- } else if (PTR_ERR(rt) == -EPERM) {
+ } else if (PTR_ERR(dst) == -EPERM) {
rt = NULL;
- } else
+ } else {
return rt;
-
+ }
err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
if (err)
goto relookup_failed;
@@ -551,19 +553,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
- flowi4_to_flowi(&fl4_dec), NULL,
- XFRM_LOOKUP_ICMP);
- if (!IS_ERR(rt2)) {
+ dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL,
+ XFRM_LOOKUP_ICMP);
+ rt2 = dst_rtable(dst2);
+ if (!IS_ERR(dst2)) {
dst_release(&rt->dst);
memcpy(fl4, &fl4_dec, sizeof(*fl4));
rt = rt2;
- } else if (PTR_ERR(rt2) == -EPERM) {
+ } else if (PTR_ERR(dst2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
- err = PTR_ERR(rt2);
+ err = PTR_ERR(dst2);
goto relookup_failed;
}
return rt;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 5e9c81566..d6fbcbd23 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -616,7 +616,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
dst = skb_dst(skb);
if (curr_dst != dst) {
hint = ip_extract_route_hint(net, skb,
- ((struct rtable *)dst)->rt_type);
+ dst_rtable(dst)->rt_type);
/* dispatch old sublist */
if (!list_empty(&sublist))
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 39229fd06..9500031a1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -198,7 +198,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = (struct rtable *)dst;
+ struct rtable *rt = dst_rtable(dst);
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
@@ -475,7 +475,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
goto packet_routed;
/* Make sure we can route this packet. */
- rt = (struct rtable *)__sk_dst_check(sk, 0);
+ rt = dst_rtable(__sk_dst_check(sk, 0));
if (!rt) {
__be32 daddr;
@@ -971,7 +971,7 @@ static int __ip_append_data(struct sock *sk,
bool zc = false;
unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
int csummode = CHECKSUM_NONE;
- struct rtable *rt = (struct rtable *)cork->dst;
+ struct rtable *rt = dst_rtable(cork->dst);
bool paged, hold_tskey, extra_uref = false;
unsigned int wmem_alloc_delta = 0;
u32 tskey = 0;
@@ -1390,7 +1390,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
- struct rtable *rt = (struct rtable *)cork->dst;
+ struct rtable *rt = dst_rtable(cork->dst);
struct iphdr *iph;
u8 pmtudisc, ttl;
__be16 df = 0;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 1b8d8ff9a..0e4bd5284 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -543,7 +543,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rt6_info *rt6;
__be32 daddr;
- rt6 = skb_valid_dst(skb) ? (struct rt6_info *)skb_dst(skb) :
+ rt6 = skb_valid_dst(skb) ? dst_rt6_info(skb_dst(skb)) :
NULL;
daddr = md ? dst : tunnel->parms.iph.daddr;
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 69e331799..73e66a088 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
laddr = 0;
indev = __in_dev_get_rcu(skb->dev);
+ if (!indev)
+ return daddr;
in_dev_for_each_ifa_rcu(ifa, indev) {
if (ifa->ifa_flags & IFA_F_SECONDARY)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b814fdab1..3fcf084fb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -132,7 +132,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
@@ -831,28 +832,21 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
u32 mark = skb->mark;
__u8 tos = iph->tos;
- rt = (struct rtable *) dst;
+ rt = dst_rtable(dst);
__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
__ip_do_redirect(rt, skb, &fl4, true);
}
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *)dst;
- struct dst_entry *ret = dst;
+ struct rtable *rt = dst_rtable(dst);
- if (rt) {
- if (dst->obsolete > 0) {
- ip_rt_put(rt);
- ret = NULL;
- } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
- rt->dst.expires) {
- ip_rt_put(rt);
- ret = NULL;
- }
- }
- return ret;
+ if ((dst->obsolete > 0) ||
+ (rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires)
+ sk_dst_reset(sk);
}
/*
@@ -1056,7 +1050,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
- struct rtable *rt = (struct rtable *) dst;
+ struct rtable *rt = dst_rtable(dst);
struct flowi4 fl4;
ip_rt_build_flow_key(&fl4, sk, skb);
@@ -1127,7 +1121,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
- rt = (struct rtable *)odst;
+ rt = dst_rtable(odst);
if (odst->obsolete && !odst->ops->check(odst, 0)) {
rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
if (IS_ERR(rt))
@@ -1136,7 +1130,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
new = true;
}
- __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
+ __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
@@ -1193,7 +1187,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
u32 cookie)
{
- struct rtable *rt = (struct rtable *) dst;
+ struct rtable *rt = dst_rtable(dst);
/* All IPV4 dsts are created with ->obsolete set to the value
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
@@ -1528,10 +1522,8 @@ void rt_del_uncached_list(struct rtable *rt)
static void ipv4_dst_destroy(struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *)dst;
-
ip_dst_metrics_put(dst);
- rt_del_uncached_list(rt);
+ rt_del_uncached_list(dst_rtable(dst));
}
void rt_flush_dev(struct net_device *dev)
@@ -2832,7 +2824,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rtable *ort = (struct rtable *) dst_orig;
+ struct rtable *ort = dst_rtable(dst_orig);
struct rtable *rt;
rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
@@ -2877,9 +2869,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
if (flp4->flowi4_proto) {
flp4->flowi4_oif = rt->dst.dev->ifindex;
- rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
- flowi4_to_flowi(flp4),
- sk, 0);
+ rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
+ flowi4_to_flowi(flp4),
+ sk, 0));
}
return rt;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 66d77faca..77ee1eda3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1159,6 +1159,9 @@ new_segment:
process_backlog++;
+#ifdef CONFIG_SKB_DECRYPTED
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
tcp_skb_entail(sk, skb);
copy = size_goal;
@@ -2637,6 +2640,10 @@ void tcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
break;
+ case TCP_CLOSE_WAIT:
+ if (oldstate == TCP_SYN_RECV)
+ TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+ break;
case TCP_CLOSE:
if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2648,7 +2655,7 @@ void tcp_set_state(struct sock *sk, int state)
inet_put_port(sk);
fallthrough;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
}
diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
index 781b67a52..09c0fa675 100644
--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
struct tcp_ao_key *key;
__be32 sisn, disn;
u8 *traffic_key;
+ int state;
u32 sne = 0;
info = rcu_dereference(tcp_sk(sk)->ao_info);
@@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
disn = 0;
}
+ state = READ_ONCE(sk->sk_state);
/* Fast-path */
- if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+ if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
enum skb_drop_reason err;
struct tcp_ao_key *current_key;
@@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
return SKB_NOT_DROPPED_YET;
}
+ if (unlikely(state == TCP_CLOSE))
+ return SKB_DROP_REASON_TCP_CLOSE;
+
/* Lookup key based on peer address and keyid.
* current_key and rnext_key must not be used on tcp listen
* sockets as otherwise:
@@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
if (th->syn && !th->ack)
goto verify_hash;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+ if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
/* Make the initial syn the likely case here */
if (unlikely(req)) {
sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
/* no way to figure out initial sisn/disn - drop */
return SKB_DROP_REASON_TCP_FLAGS;
}
- } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
disn = info->lisn;
if (th->syn || th->rst)
sisn = th->seq;
else
sisn = info->risn;
} else {
- WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+ WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
return SKB_DROP_REASON_TCP_AOFAILURE;
}
verify_hash:
@@ -1963,8 +1968,10 @@ static int tcp_ao_info_cmd(struct sock *sk, unsigned short int family,
first = true;
}
- if (cmd.ao_required && tcp_ao_required_verify(sk))
- return -EKEYREJECTED;
+ if (cmd.ao_required && tcp_ao_required_verify(sk)) {
+ err = -EKEYREJECTED;
+ goto out;
+ }
/* For sockets in TCP_CLOSED it's possible set keys that aren't
* matching the future peer (address/port/VRF/etc),
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index e33fbe493..b00428085 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -58,7 +58,18 @@ struct dctcp {
};
static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
-module_param(dctcp_shift_g, uint, 0644);
+
+static int dctcp_shift_g_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, 0, 10);
+}
+
+static const struct kernel_param_ops dctcp_shift_g_ops = {
+ .set = dctcp_shift_g_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(dctcp_shift_g, &dctcp_shift_g_ops, &dctcp_shift_g, 0644);
MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a140d9f7a..1054a4403 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6289,6 +6289,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
skb_rbtree_walk_from(data)
tcp_mark_skb_lost(sk, data);
tcp_xmit_retransmit_queue(sk);
+ tp->retrans_stamp = 0;
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPFASTOPENACTIVEFAIL);
return true;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index e0cef75f8..92511b7fd 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2001,7 +2001,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
enum skb_drop_reason *reason)
{
- u32 limit, tail_gso_size, tail_gso_segs;
+ u32 tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -2010,6 +2010,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
bool fragstolen;
u32 gso_segs;
u32 gso_size;
+ u64 limit;
int delta;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -2107,7 +2108,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
__skb_push(skb, hdrlen);
no_coalesce:
- limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
+ /* sk->sk_backlog.len is reset only at the end of __release_sock().
+ * Both sk->sk_backlog.len and sk->sk_rmem_alloc could reach
+ * sk_rcvbuf in normal conditions.
+ */
+ limit = ((u64)READ_ONCE(sk->sk_rcvbuf)) << 1;
+
+ limit += ((u32)READ_ONCE(sk->sk_sndbuf)) >> 1;
/* Only socket owner can try to collapse/prune rx queues
* to reduce memory overhead, so add a little headroom here.
@@ -2115,6 +2122,8 @@ no_coalesce:
*/
limit += 64 * 1024;
+ limit = min_t(u64, limit, UINT_MAX);
+
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
*reason = SKB_DROP_REASON_SOCKET_BACKLOG;
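
The move to a 64-bit limit matters when sk_rcvbuf is close to INT_MAX: doubling it plus the extra headroom no longer fits in 32 bits, and 32-bit arithmetic would wrap to a tiny limit. A quick check of the new arithmetic with illustrative values:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    int main(void)
    {
        uint32_t rcvbuf = INT_MAX;          /* worst-case sk_rcvbuf        */
        uint32_t sndbuf = 4 * 1024 * 1024;
        uint64_t limit;
        uint32_t limit32;

        /* Both sk_backlog.len and sk_rmem_alloc may reach rcvbuf, so the
         * budget is 2 * rcvbuf, computed in 64 bits and clamped at the end.
         */
        limit  = (uint64_t)rcvbuf << 1;
        limit += sndbuf >> 1;
        limit += 64 * 1024;                 /* collapse/prune headroom     */
        if (limit > UINT_MAX)               /* sk_add_backlog() takes u32  */
            limit = UINT_MAX;

        /* The same computation done in 32 bits wraps around. */
        limit32 = (rcvbuf << 1) + (sndbuf >> 1) + 64 * 1024;

        printf("64-bit: %llu, 32-bit (wrapped): %u\n",
               (unsigned long long)limit, limit32);
        return 0;
    }
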
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index d1ad20ce1..f96f68cf7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -483,8 +483,12 @@ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
{
const struct tcp_sock *tp = tcp_sk(sk);
const int timeout = TCP_RTO_MAX * 2;
- u32 rcv_delta;
+ s32 rcv_delta;
+ /* Note: timer interrupt might have been delayed by at least one jiffy,
+ * and tp->rcv_tstamp might very well have been written recently.
+ * rcv_delta can thus be negative.
+ */
rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
if (rcv_delta <= timeout)
return false;
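
The switch of rcv_delta to s32 guards against the wrap described in the new comment: if tp->rcv_tstamp is written just after the scheduled expiry, the unsigned difference becomes a huge positive value and the probe would be declared timed out immediately. A minimal reproduction of the arithmetic with invented jiffies values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t icsk_timeout = 1000;   /* scheduled expiry (in jiffies)        */
        uint32_t rcv_tstamp   = 1002;   /* data arrived just before the handler */

        uint32_t delta_u = icsk_timeout - rcv_tstamp;   /* wraps to ~4.29e9 */
        int32_t  delta_s = icsk_timeout - rcv_tstamp;   /* -2               */

        printf("unsigned=%u signed=%d\n", delta_u, delta_s);
        return 0;
    }
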
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b32cf2eee..72d3bf136 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -427,15 +427,21 @@ static struct sock *udp4_lib_lookup2(struct net *net,
{
struct sock *sk, *result;
int score, badness;
+ bool need_rescore;
result = NULL;
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ need_rescore = false;
+rescore:
+ score = compute_score(need_rescore ? result : sk, net, saddr,
+ sport, daddr, hnum, dif, sdif);
if (score > badness) {
badness = score;
+ if (need_rescore)
+ continue;
+
if (sk->sk_state == TCP_ESTABLISHED) {
result = sk;
continue;
@@ -456,9 +462,14 @@ static struct sock *udp4_lib_lookup2(struct net *net,
if (IS_ERR(result))
continue;
- badness = compute_score(result, net, saddr, sport,
- daddr, hnum, dif, sdif);
-
+ /* compute_score() is too long a function to be
+ * inlined, and calling it again here yields
+ * measurable overhead for some
+ * workloads. Work around it by jumping
+ * backwards to rescore 'result'.
+ */
+ need_rescore = true;
+ goto rescore;
}
}
return result;
@@ -1207,7 +1218,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
if (connected)
- rt = (struct rtable *)sk_dst_check(sk, 0);
+ rt = dst_rtable(sk_dst_check(sk, 0));
if (!rt) {
struct net *net = sock_net(sk);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index c33bca2c3..1853a8415 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -69,7 +69,7 @@ static int xfrm4_get_saddr(struct net *net, int oif,
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
- struct rtable *rt = (struct rtable *)xdst->route;
+ struct rtable *rt = dst_rtable(xdst->route);
const struct flowi4 *fl4 = &fl->u.ip4;
xdst->u.rt.rt_iif = fl4->flowi4_iif;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 1635da072..d285c1f6f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -212,7 +212,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
res = true;
} else {
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
int tmo = net->ipv6.sysctl.icmpv6_time;
struct inet_peer *peer;
@@ -241,7 +241,7 @@ static bool icmpv6_rt_has_prefsrc(struct sock *sk, u8 type,
dst = ip6_route_output(net, sk, fl6);
if (!dst->error) {
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct in6_addr prefsrc;
rt6_get_prefsrc(rt, &prefsrc);
@@ -616,7 +616,7 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr),
- &ipc6, &fl6, (struct rt6_info *)dst,
+ &ipc6, &fl6, dst_rt6_info(dst),
MSG_DONTWAIT)) {
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
@@ -803,7 +803,7 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb)
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
sizeof(struct icmp6hdr), &ipc6, &fl6,
- (struct rt6_info *)dst, MSG_DONTWAIT)) {
+ dst_rt6_info(dst), MSG_DONTWAIT)) {
__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
ip6_flush_pending_frames(sk);
} else {
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index 8c1ce7895..0601bad79 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -38,7 +38,7 @@ static inline struct ila_params *ila_params_lwtunnel(
static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *orig_dst = skb_dst(skb);
- struct rt6_info *rt = (struct rt6_info *)orig_dst;
+ struct rt6_info *rt = dst_rt6_info(orig_dst);
struct ila_lwt *ilwt = ila_lwt_lwtunnel(orig_dst->lwtstate);
struct dst_entry *dst;
int err = -EINVAL;
@@ -70,7 +70,7 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = orig_dst->dev->ifindex;
fl6.flowi6_iif = LOOPBACK_IFINDEX;
- fl6.daddr = *rt6_nexthop((struct rt6_info *)orig_dst,
+ fl6.daddr = *rt6_nexthop(dst_rt6_info(orig_dst),
&ip6h->daddr);
dst = ip6_route_output(net, NULL, &fl6);
diff --git a/net/ipv6/ioam6_iptunnel.c b/net/ipv6/ioam6_iptunnel.c
index 7563f8c6a..bf7120ece 100644
--- a/net/ipv6/ioam6_iptunnel.c
+++ b/net/ipv6/ioam6_iptunnel.c
@@ -351,9 +351,9 @@ do_encap:
goto drop;
if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&ilwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -373,9 +373,9 @@ do_encap:
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index c1f62352a..1ace4ac3e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -965,6 +965,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
if (!fib6_nh->rt6i_pcpu)
return;
+ rcu_read_lock();
/* release the reference to this fib entry from
* all of its cached pcpu routes
*/
@@ -973,7 +974,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
struct rt6_info *pcpu_rt;
ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
- pcpu_rt = *ppcpu_rt;
+
+ /* Paired with xchg() in rt6_get_pcpu_route() */
+ pcpu_rt = READ_ONCE(*ppcpu_rt);
/* only dropping the 'from' reference if the cached route
* is using 'match'. The cached pcpu_rt->from only changes
@@ -987,6 +990,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
fib6_info_release(from);
}
}
+ rcu_read_unlock();
}
struct fib6_nh_pcpu_arg {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 97b0788b3..27d872544 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -120,7 +120,7 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
rcu_read_lock();
- nexthop = rt6_nexthop((struct rt6_info *)dst, daddr);
+ nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
if (unlikely(IS_ERR_OR_NULL(neigh))) {
@@ -599,7 +599,7 @@ int ip6_forward(struct sk_buff *skb)
* send a redirect.
*/
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_GATEWAY)
target = &rt->rt6i_gateway;
else
@@ -856,7 +856,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
struct sk_buff *frag;
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
bool mono_delivery_time = skb->mono_delivery_time;
@@ -1063,7 +1063,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
return NULL;
}
- rt = (struct rt6_info *)dst;
+ rt = dst_rt6_info(dst);
/* Yes, checking route validity in not connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
@@ -1118,7 +1118,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
struct rt6_info *rt;
*dst = ip6_route_output(net, sk, fl6);
- rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
+ rt = (*dst)->error ? NULL : dst_rt6_info(*dst);
rcu_read_lock();
from = rt ? rcu_dereference(rt->from) : NULL;
@@ -1159,7 +1159,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
* dst entry and replace it instead with the
* dst entry of the nexthop router
*/
- rt = (struct rt6_info *) *dst;
+ rt = dst_rt6_info(*dst);
rcu_read_lock();
n = __ipv6_neigh_lookup_noref(rt->dst.dev,
rt6_nexthop(rt, &fl6->daddr));
@@ -1423,7 +1423,7 @@ static int __ip6_append_data(struct sock *sk,
int offset = 0;
bool zc = false;
u32 tskey = 0;
- struct rt6_info *rt = (struct rt6_info *)cork->dst;
+ struct rt6_info *rt = dst_rt6_info(cork->dst);
bool paged, hold_tskey, extra_uref = false;
struct ipv6_txoptions *opt = v6_cork->opt;
int csummode = CHECKSUM_NONE;
@@ -1877,7 +1877,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
struct net *net = sock_net(sk);
struct ipv6hdr *hdr;
struct ipv6_txoptions *opt = v6_cork->opt;
- struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
+ struct rt6_info *rt = dst_rt6_info(cork->base.dst);
struct flowi6 *fl6 = &cork->fl.u.ip6;
unsigned char proto = fl6->flowi6_proto;
@@ -1949,7 +1949,7 @@ out:
int ip6_send_skb(struct sk_buff *skb)
{
struct net *net = sock_net(skb->sk);
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
int err;
err = ip6_local_out(net, skb->sk, skb);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index cb0ee81a0..dd342e6ec 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2273,7 +2273,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
int err;
struct mr_table *mrt;
struct mfc6_cache *cache;
- struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
if (!mrt)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index ae134634c..d914b2325 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1722,7 +1722,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
if (IS_ERR(dst))
return;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_GATEWAY) {
ND_PRINTK(2, warn,
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index ef2059c88..88b3fcacd 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -154,7 +154,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
if (IS_ERR(dst))
return PTR_ERR(dst);
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = READ_ONCE(np->mcast_oif);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 0d896ca7b..2eedf2556 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -598,7 +598,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
- struct rt6_info *rt = (struct rt6_info *)*dstp;
+ struct rt6_info *rt = dst_rt6_info(*dstp);
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
@@ -917,7 +917,7 @@ back_from_confirm:
ipc6.opt = opt;
lock_sock(sk);
err = ip6_append_data(sk, raw6_getfrag, &rfv,
- len, 0, &ipc6, &fl6, (struct rt6_info *)dst,
+ len, 0, &ipc6, &fl6, dst_rt6_info(dst),
msg->msg_flags);
if (err)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index acb4f119e..148bf9e31 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -369,7 +369,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
* the source of the fragment, with the Pointer field set to zero.
*/
nexthdr = hdr->nexthdr;
- if (ipv6frag_thdr_truncated(skb, skb_transport_offset(skb), &nexthdr)) {
+ if (ipv6frag_thdr_truncated(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), &nexthdr)) {
__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
IPSTATS_MIB_INHDRERRORS);
icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1f4b935a0..d7a5ca012 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
struct net_device *dev);
@@ -226,7 +227,7 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
- const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
+ const struct rt6_info *rt = dst_rt6_info(dst);
return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
dst->dev, skb, daddr);
@@ -234,8 +235,8 @@ static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
+ const struct rt6_info *rt = dst_rt6_info(dst);
struct net_device *dev = dst->dev;
- struct rt6_info *rt = (struct rt6_info *)dst;
daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
if (!daddr)
@@ -354,7 +355,7 @@ EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct fib6_info *from;
struct inet6_dev *idev;
@@ -373,7 +374,7 @@ static void ip6_dst_destroy(struct dst_entry *dst)
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
- struct rt6_info *rt = (struct rt6_info *)dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
struct inet6_dev *idev = rt->rt6i_idev;
if (idev && idev->dev != blackhole_netdev) {
@@ -637,6 +638,8 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
rcu_read_lock();
last_probe = READ_ONCE(fib6_nh->last_probe);
idev = __in6_dev_get(dev);
+ if (!idev)
+ goto out;
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
if (READ_ONCE(neigh->nud_state) & NUD_VALID)
@@ -1288,7 +1291,7 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
if (dst->error == 0)
- return (struct rt6_info *) dst;
+ return dst_rt6_info(dst);
dst_release(dst);
@@ -1408,6 +1411,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
struct rt6_info *prev, **p;
p = this_cpu_ptr(res->nh->rt6i_pcpu);
+ /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
prev = xchg(p, NULL);
if (prev) {
dst_dev_put(&prev->dst);
@@ -2647,7 +2651,7 @@ struct dst_entry *ip6_route_output_flags(struct net *net,
rcu_read_lock();
dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
- rt6 = (struct rt6_info *)dst;
+ rt6 = dst_rt6_info(dst);
/* For dst cached in uncached_list, refcnt is already taken. */
if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
dst = &net->ipv6.ip6_null_entry->dst;
@@ -2661,7 +2665,7 @@ EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
+ struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
struct net_device *loopback_dev = net->loopback_dev;
struct dst_entry *new = NULL;
@@ -2744,7 +2748,7 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
struct fib6_info *from;
struct rt6_info *rt;
- rt = container_of(dst, struct rt6_info, dst);
+ rt = dst_rt6_info(dst);
if (rt->sernum)
return rt6_is_valid(rt) ? dst : NULL;
@@ -2770,24 +2774,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
}
EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+ struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
- if (rt) {
- if (rt->rt6i_flags & RTF_CACHE) {
- rcu_read_lock();
- if (rt6_check_expired(rt)) {
- rt6_remove_exception_rt(rt);
- dst = NULL;
- }
- rcu_read_unlock();
- } else {
- dst_release(dst);
- dst = NULL;
+ if (rt->rt6i_flags & RTF_CACHE) {
+ rcu_read_lock();
+ if (rt6_check_expired(rt)) {
+ /* counteract the dst_release() in sk_dst_reset() */
+ dst_hold(dst);
+ sk_dst_reset(sk);
+
+ rt6_remove_exception_rt(rt);
}
+ rcu_read_unlock();
+ return;
}
- return dst;
+ sk_dst_reset(sk);
}
static void ip6_link_failure(struct sk_buff *skb)
@@ -2796,7 +2800,7 @@ static void ip6_link_failure(struct sk_buff *skb)
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
if (rt) {
rcu_read_lock();
if (rt->rt6i_flags & RTF_CACHE) {
@@ -2852,7 +2856,7 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
- struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6_info *rt6 = dst_rt6_info(dst);
/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU)
* IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
@@ -3601,7 +3605,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
if (!dev)
goto out;
- if (idev->cnf.disable_ipv6) {
+ if (!idev || idev->cnf.disable_ipv6) {
NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
err = -EACCES;
goto out;
@@ -4174,7 +4178,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
}
}
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (rt->rt6i_flags & RTF_REJECT) {
net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
return;
@@ -4445,7 +4449,7 @@ static void rtmsg_to_fib6_config(struct net *net,
.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
: RT6_TABLE_MAIN,
.fc_ifindex = rtmsg->rtmsg_ifindex,
- .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
+ .fc_metric = rtmsg->rtmsg_metric,
.fc_expires = rtmsg->rtmsg_info,
.fc_dst_len = rtmsg->rtmsg_dst_len,
.fc_src_len = rtmsg->rtmsg_src_len,
@@ -4475,6 +4479,9 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
rtnl_lock();
switch (cmd) {
case SIOCADDRT:
+ /* Only apply the default fc_metric when adding a route */
+ if (cfg.fc_metric == 0)
+ cfg.fc_metric = IP6_RT_PRIO_USER;
err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
break;
case SIOCDELRT:
@@ -5608,7 +5615,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
int iif, int type, u32 portid, u32 seq,
unsigned int flags)
{
- struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6_info *rt6 = dst_rt6_info(dst);
struct rt6key *rt6_dst, *rt6_src;
u32 *pmetrics, table, rt6_flags;
unsigned char nh_flags = 0;
@@ -6111,7 +6118,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
}
- rt = container_of(dst, struct rt6_info, dst);
+ rt = dst_rt6_info(dst);
if (rt->dst.error) {
err = rt->dst.error;
ip6_rt_put(rt);
@@ -6338,12 +6345,12 @@ static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
if (!write)
return -EINVAL;
- net = (struct net *)ctl->extra1;
- delay = net->ipv6.sysctl.flush_delay;
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (ret)
return ret;
+ net = (struct net *)ctl->extra1;
+ delay = net->ipv6.sysctl.flush_delay;
fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 35508abd7..a31521e27 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -551,6 +551,8 @@ out_unregister_iptun:
#endif
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
out_unregister_genl:
+#endif
+#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC)
genl_unregister_family(&seg6_genl_family);
#endif
out_unregister_pernet:
@@ -564,8 +566,9 @@ void seg6_exit(void)
seg6_hmac_exit();
#endif
#ifdef CONFIG_IPV6_SEG6_LWTUNNEL
+ seg6_local_exit();
seg6_iptunnel_exit();
#endif
- unregister_pernet_subsys(&ip6_segments_ops);
genl_unregister_family(&seg6_genl_family);
+ unregister_pernet_subsys(&ip6_segments_ops);
}
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 861e0366f..bbf5b84a7 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -356,6 +356,7 @@ static int seg6_hmac_init_algo(void)
struct crypto_shash *tfm;
struct shash_desc *shash;
int i, alg_count, cpu;
+ int ret = -ENOMEM;
alg_count = ARRAY_SIZE(hmac_algos);
@@ -366,12 +367,14 @@ static int seg6_hmac_init_algo(void)
algo = &hmac_algos[i];
algo->tfms = alloc_percpu(struct crypto_shash *);
if (!algo->tfms)
- return -ENOMEM;
+ goto error_out;
for_each_possible_cpu(cpu) {
tfm = crypto_alloc_shash(algo->name, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ if (IS_ERR(tfm)) {
+ ret = PTR_ERR(tfm);
+ goto error_out;
+ }
p_tfm = per_cpu_ptr(algo->tfms, cpu);
*p_tfm = tfm;
}
@@ -383,18 +386,22 @@ static int seg6_hmac_init_algo(void)
algo->shashs = alloc_percpu(struct shash_desc *);
if (!algo->shashs)
- return -ENOMEM;
+ goto error_out;
for_each_possible_cpu(cpu) {
shash = kzalloc_node(shsize, GFP_KERNEL,
cpu_to_node(cpu));
if (!shash)
- return -ENOMEM;
+ goto error_out;
*per_cpu_ptr(algo->shashs, cpu) = shash;
}
}
return 0;
+
+error_out:
+ seg6_hmac_exit();
+ return ret;
}
int __init seg6_hmac_init(void)
@@ -412,22 +419,29 @@ int __net_init seg6_hmac_net_init(struct net *net)
void seg6_hmac_exit(void)
{
struct seg6_hmac_algo *algo = NULL;
+ struct crypto_shash *tfm;
+ struct shash_desc *shash;
int i, alg_count, cpu;
alg_count = ARRAY_SIZE(hmac_algos);
for (i = 0; i < alg_count; i++) {
algo = &hmac_algos[i];
- for_each_possible_cpu(cpu) {
- struct crypto_shash *tfm;
- struct shash_desc *shash;
- shash = *per_cpu_ptr(algo->shashs, cpu);
- kfree(shash);
- tfm = *per_cpu_ptr(algo->tfms, cpu);
- crypto_free_shash(tfm);
+ if (algo->shashs) {
+ for_each_possible_cpu(cpu) {
+ shash = *per_cpu_ptr(algo->shashs, cpu);
+ kfree(shash);
+ }
+ free_percpu(algo->shashs);
+ }
+
+ if (algo->tfms) {
+ for_each_possible_cpu(cpu) {
+ tfm = *per_cpu_ptr(algo->tfms, cpu);
+ crypto_free_shash(tfm);
+ }
+ free_percpu(algo->tfms);
}
- free_percpu(algo->tfms);
- free_percpu(algo->shashs);
}
}
EXPORT_SYMBOL(seg6_hmac_exit);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 03b877ff4..098632adc 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -459,34 +459,30 @@ static int seg6_input_core(struct net *net, struct sock *sk,
int err;
err = seg6_do_srh(skb);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ if (unlikely(err))
+ goto drop;
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
if (!dst) {
ip6_route_input(skb);
dst = skb_dst(skb);
if (!dst->error) {
- preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst,
&ipv6_hdr(skb)->saddr);
- preempt_enable();
}
} else {
skb_dst_drop(skb);
skb_dst_set(skb, dst);
}
+ local_bh_enable();
err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
if (unlikely(err))
- return err;
+ goto drop;
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
@@ -494,6 +490,9 @@ static int seg6_input_core(struct net *net, struct sock *sk,
skb_dst(skb)->dev, seg6_input_finish);
return seg6_input_finish(dev_net(skb->dev), NULL, skb);
+drop:
+ kfree_skb(skb);
+ return err;
}
static int seg6_input_nf(struct sk_buff *skb)
@@ -535,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
- preempt_disable();
+ local_bh_disable();
dst = dst_cache_get(&slwt->cache);
- preempt_enable();
+ local_bh_enable();
if (unlikely(!dst)) {
struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -557,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
goto drop;
}
- preempt_disable();
+ local_bh_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
- preempt_enable();
+ local_bh_enable();
}
skb_dst_drop(skb);
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 24e2b4b49..c43494013 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -941,8 +941,8 @@ static int input_action_end_dx6(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx6_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx6_finish);
return input_action_end_dx6_finish(dev_net(skb->dev), NULL, skb);
drop:
@@ -991,8 +991,8 @@ static int input_action_end_dx4(struct sk_buff *skb,
if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled))
return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
- dev_net(skb->dev), NULL, skb, NULL,
- skb_dst(skb)->dev, input_action_end_dx4_finish);
+ dev_net(skb->dev), NULL, skb, skb->dev,
+ NULL, input_action_end_dx4_finish);
return input_action_end_dx4_finish(dev_net(skb->dev), NULL, skb);
drop:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 3f4cba49e..2b2eda5a2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -95,11 +95,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
- const struct rt6_info *rt = (const struct rt6_info *)dst;
-
rcu_assign_pointer(sk->sk_rx_dst, dst);
sk->sk_rx_dst_ifindex = skb->skb_iif;
- sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
+ sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
}
@@ -1440,7 +1438,6 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
*/
newsk->sk_gso_type = SKB_GSO_TCPV6;
- ip6_dst_store(newsk, dst, NULL, NULL);
inet6_sk_rx_dst_set(newsk, skb);
inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
@@ -1451,6 +1448,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ ip6_dst_store(newsk, dst, NULL, NULL);
+
newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
newnp->saddr = ireq->ir_v6_loc_addr;
newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8f7aa8bac..acafa0cdf 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -168,15 +168,21 @@ static struct sock *udp6_lib_lookup2(struct net *net,
{
struct sock *sk, *result;
int score, badness;
+ bool need_rescore;
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
- score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ need_rescore = false;
+rescore:
+ score = compute_score(need_rescore ? result : sk, net, saddr,
+ sport, daddr, hnum, dif, sdif);
if (score > badness) {
badness = score;
+ if (need_rescore)
+ continue;
+
if (sk->sk_state == TCP_ESTABLISHED) {
result = sk;
continue;
@@ -197,8 +203,14 @@ static struct sock *udp6_lib_lookup2(struct net *net,
if (IS_ERR(result))
continue;
- badness = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif);
+ /* compute_score is too long of a function to be
+ * inlined, and calling it again here yields
+ * measurable overhead for some
+ * workloads. Work around it by jumping
+ * backwards to rescore 'result'.
+ */
+ need_rescore = true;
+ goto rescore;
}
}
return result;
@@ -898,11 +910,8 @@ start_lookup:
static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
- if (udp_sk_rx_dst_set(sk, dst)) {
- const struct rt6_info *rt = (const struct rt6_info *)dst;
-
- sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
- }
+ if (udp_sk_rx_dst_set(sk, dst))
+ sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}
/* wrapper for udp_queue_rcv_skb tacking care of csum conversion and
@@ -1573,7 +1582,7 @@ back_from_confirm:
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), &ipc6,
- (struct rt6_info *)dst,
+ dst_rt6_info(dst),
msg->msg_flags, &cork);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
@@ -1600,7 +1609,7 @@ do_append_data:
ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk);
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
- &ipc6, fl6, (struct rt6_info *)dst,
+ &ipc6, fl6, dst_rt6_info(dst),
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
if (err)
udp_v6_flush_pending_frames(sk);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 42fb6996b..4332d4b82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -56,12 +56,18 @@ static int xfrm6_get_saddr(struct net *net, int oif,
{
struct dst_entry *dst;
struct net_device *dev;
+ struct inet6_dev *idev;
dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
if (IS_ERR(dst))
return -EHOSTUNREACH;
- dev = ip6_dst_idev(dst)->dev;
+ idev = ip6_dst_idev(dst);
+ if (!idev) {
+ dst_release(dst);
+ return -EHOSTUNREACH;
+ }
+ dev = idev->dev;
ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
dst_release(dst);
return 0;
@@ -70,7 +76,7 @@ static int xfrm6_get_saddr(struct net *net, int oif,
static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
- struct rt6_info *rt = (struct rt6_info *)xdst->route;
+ struct rt6_info *rt = dst_rt6_info(xdst->route);
xdst->u.dst.dev = dev;
netdev_hold(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8d21ff25f..4a0fb8731 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -887,22 +887,20 @@ pass:
return 1;
}
-/* UDP encapsulation receive handler. See net/ipv4/udp.c.
- * Return codes:
- * 0 : success.
- * <0: error
- * >0: skb should be passed up to userspace as UDP.
+/* UDP encapsulation receive and error receive handlers.
+ * See net/ipv4/udp.c for details.
+ *
+ * Note that these functions are called from inside an
+ * RCU-protected region, but without the socket being locked.
+ *
+ * Hence we use rcu_dereference_sk_user_data to access the
+ * tunnel data structure rather than the usual l2tp_sk_to_tunnel
+ * accessor function.
*/
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;
- /* Note that this is called from the encap_rcv hook inside an
- * RCU-protected region, but without the socket being locked.
- * Hence we use rcu_dereference_sk_user_data to access the
- * tunnel data structure rather the usual l2tp_sk_to_tunnel
- * accessor function.
- */
tunnel = rcu_dereference_sk_user_data(sk);
if (!tunnel)
goto pass_up;
@@ -919,6 +917,29 @@ pass_up:
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
+static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
+ __be16 port, u32 info, u8 *payload)
+{
+ struct l2tp_tunnel *tunnel;
+
+ tunnel = rcu_dereference_sk_user_data(sk);
+ if (!tunnel || tunnel->fd < 0)
+ return;
+
+ sk->sk_err = err;
+ sk_error_report(sk);
+
+ if (ip_hdr(skb)->version == IPVERSION) {
+ if (inet_test_bit(RECVERR, sk))
+ return ip_icmp_error(sk, skb, err, port, info, payload);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (inet6_test_bit(RECVERR6, sk))
+ return ipv6_icmp_error(sk, skb, err, port, info, payload);
+#endif
+ }
+}
+
/************************************************************************
* Transmit handling
***********************************************************************/
@@ -1493,6 +1514,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
.sk_user_data = tunnel,
.encap_type = UDP_ENCAP_L2TPINUDP,
.encap_rcv = l2tp_udp_encap_recv,
+ .encap_err_rcv = l2tp_udp_encap_err_recv,
.encap_destroy = l2tp_udp_encap_destroy,
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 970af3983..19c8cc528 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -459,7 +459,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl4 = &inet->cork.fl.u.ip4;
if (connected)
- rt = (struct rtable *)__sk_dst_check(sk, 0);
+ rt = dst_rtable(__sk_dst_check(sk, 0));
rcu_read_lock();
if (!rt) {
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 7bf14cf9f..8780ec64f 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -630,7 +630,7 @@ back_from_confirm:
ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
- &fl6, (struct rt6_info *)dst,
+ &fl6, dst_rt6_info(dst),
msg->msg_flags);
if (err)
ip6_flush_pending_frames(sk);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f67c1d021..51dc2d9dd 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1607,10 +1607,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
/* abort any running channel switch or color change */
link_conf->csa_active = false;
link_conf->color_change_active = false;
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
ieee80211_free_next_beacon(link);
@@ -3648,7 +3648,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- sdata->csa_blocked_tx = block_tx;
+ sdata->csa_blocked_queues = block_tx;
sdata_info(sdata, "channel switch failed, disconnecting\n");
wiphy_work_queue(local->hw.wiphy, &ifmgd->csa_connection_drop_work);
}
@@ -3734,10 +3734,10 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data)
ieee80211_link_info_change_notify(sdata, link_data, changed);
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
err = drv_post_channel_switch(link_data);
@@ -4012,18 +4012,18 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
goto out;
}
- link_data->csa_chanreq = chanreq;
+ link_data->csa_chanreq = chanreq;
link_conf->csa_active = true;
if (params->block_tx &&
!ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = true;
+ sdata->csa_blocked_queues = true;
}
cfg80211_ch_switch_started_notify(sdata->dev,
- &link_data->csa_chanreq.oper, 0,
+ &link_data->csa_chanreq.oper, link_id,
params->count, params->block_tx);
if (changed) {
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index dce37ba8e..254d74583 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -311,6 +311,18 @@ int drv_assign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ /*
+ * We should perhaps push emulate chanctx down and only
+ * make it call ->config() when the chanctx is actually
+ * assigned here (and unassigned below), but that's yet
+ * another change to all drivers to add assign/unassign
+ * emulation callbacks. Maybe later.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
if (!check_sdata_in_driver(sdata))
return -EIO;
@@ -338,6 +350,11 @@ void drv_unassign_vif_chanctx(struct ieee80211_local *local,
might_sleep();
lockdep_assert_wiphy(local->hw.wiphy);
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
+ local->emulate_chanctx &&
+ !ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
if (!check_sdata_in_driver(sdata))
return;
diff --git a/net/mac80211/he.c b/net/mac80211/he.c
index 9f5ffdc9d..ecbb042dd 100644
--- a/net/mac80211/he.c
+++ b/net/mac80211/he.c
@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
if (!he_spr_ie_elem)
return;
+
+ he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
data = he_spr_ie_elem->optional;
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
- data++;
+ he_obss_pd->non_srg_max_offset = *data++;
+
if (he_spr_ie_elem->he_sr_control &
IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
- he_obss_pd->max_offset = *data++;
he_obss_pd->min_offset = *data++;
+ he_obss_pd->max_offset = *data++;
+ memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+ data += 8;
+ memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
he_obss_pd->enable = true;
}
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index bd507d6b6..70c67c860 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -974,6 +974,7 @@ struct ieee80211_link_data_managed {
bool csa_waiting_bcn;
bool csa_ignored_same_chan;
+ bool csa_blocked_tx;
struct wiphy_delayed_work chswitch_work;
struct wiphy_work request_smps_work;
@@ -1092,7 +1093,7 @@ struct ieee80211_sub_if_data {
unsigned long state;
- bool csa_blocked_tx;
+ bool csa_blocked_queues;
char name[IFNAMSIZ];
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 395de62d9..7c8a421f0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -544,10 +544,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
sdata->vif.bss_conf.csa_active = false;
if (sdata->vif.type == NL80211_IFTYPE_STATION)
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work);
@@ -686,6 +686,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
ieee80211_del_virtual_monitor(local);
ieee80211_recalc_idle(local);
+ ieee80211_recalc_offload(local);
if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
break;
@@ -1121,9 +1122,6 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int ret;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1145,11 +1143,13 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
ieee80211_set_default_queues(sdata);
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
}
set_bit(SDATA_STATE_RUNNING, &sdata->state);
@@ -1187,9 +1187,6 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return;
-
ASSERT_RTNL();
lockdep_assert_wiphy(local->hw.wiphy);
@@ -1209,7 +1206,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
ieee80211_link_release_channel(&sdata->deflink);
- drv_remove_interface(local, sdata);
+ if (ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ drv_remove_interface(local, sdata);
kfree(sdata);
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a6b62169f..c0a5c75cd 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
+ struct ieee80211_sub_if_data *sdata = mpath->sdata;
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct mesh_preq_queue *preq, *tmp;
struct sk_buff *skb;
while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
mesh_path_discard_frame(mpath->sdata, skb);
+
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+ list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+ if (ether_addr_equal(mpath->dst, preq->dst)) {
+ list_del(&preq->list);
+ kfree(preq);
+ --ifmsh->preq_queue_len;
+ }
+ }
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}
/**
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3bbb216a0..497677e3d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1933,13 +1933,14 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link)
WARN_ON(!link->conf->csa_active);
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
link->conf->csa_active = false;
+ link->u.mgd.csa_blocked_tx = false;
link->u.mgd.csa_waiting_bcn = false;
ret = drv_post_channel_switch(link);
@@ -1999,13 +2000,14 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link)
ieee80211_link_unreserve_chanctx(link);
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
link->conf->csa_active = false;
+ link->u.mgd.csa_blocked_tx = false;
drv_abort_channel_switch(link);
}
@@ -2165,12 +2167,13 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
link->csa_chanreq = csa_ie.chanreq;
link->u.mgd.csa_ignored_same_chan = false;
link->u.mgd.beacon_crc_valid = false;
+ link->u.mgd.csa_blocked_tx = csa_ie.mode;
if (csa_ie.mode &&
!ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) {
ieee80211_stop_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = true;
+ sdata->csa_blocked_queues = true;
}
cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper,
@@ -2199,7 +2202,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link,
* reset when the disconnection worker runs.
*/
link->conf->csa_active = true;
- sdata->csa_blocked_tx =
+ link->u.mgd.csa_blocked_tx = csa_ie.mode;
+ sdata->csa_blocked_queues =
csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA);
wiphy_work_queue(sdata->local->hw.wiphy,
@@ -3252,12 +3256,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
}
sdata->vif.bss_conf.csa_active = false;
+ sdata->deflink.u.mgd.csa_blocked_tx = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
sdata->deflink.u.mgd.csa_ignored_same_chan = false;
- if (sdata->csa_blocked_tx) {
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
/* existing TX TSPEC sessions no longer exist */
@@ -3563,19 +3568,32 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
- bool tx;
+ bool tx = false;
lockdep_assert_wiphy(local->hw.wiphy);
if (!ifmgd->associated)
return;
- /*
- * MLO drivers should have HANDLES_QUIET_CSA, so that csa_blocked_tx
- * is always false; if they don't then this may try to transmit the
- * frame but queues will be stopped.
- */
- tx = !sdata->csa_blocked_tx;
+ /* only transmit if we have a link that makes that worthwhile */
+ for (unsigned int link_id = 0;
+ link_id < ARRAY_SIZE(sdata->link);
+ link_id++) {
+ struct ieee80211_link_data *link;
+
+ if (!ieee80211_vif_link_active(&sdata->vif, link_id))
+ continue;
+
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (WARN_ON_ONCE(!link))
+ continue;
+
+ if (link->u.mgd.csa_blocked_tx)
+ continue;
+
+ tx = true;
+ break;
+ }
if (!ifmgd->driver_disconnect) {
unsigned int link_id;
@@ -3608,10 +3626,11 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
/* the other links will be destroyed */
sdata->vif.bss_conf.csa_active = false;
sdata->deflink.u.mgd.csa_waiting_bcn = false;
- if (sdata->csa_blocked_tx) {
+ sdata->deflink.u.mgd.csa_blocked_tx = false;
+ if (sdata->csa_blocked_queues) {
ieee80211_wake_vif_queues(local, sdata,
IEEE80211_QUEUE_STOP_REASON_CSA);
- sdata->csa_blocked_tx = false;
+ sdata->csa_blocked_queues = false;
}
ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c
index 55e5497f8..055a60e90 100644
--- a/net/mac80211/parse.c
+++ b/net/mac80211/parse.c
@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
if (params->mode < IEEE80211_CONN_MODE_HE)
break;
if (len >= sizeof(*elems->he_spr) &&
- len >= ieee80211_he_spr_size(data))
+ len >= ieee80211_he_spr_size(data) - 1)
elems->he_spr = data;
break;
case WLAN_EID_EXT_HE_6GHZ_CAPA:
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 738503125..3da1c5c45 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -708,19 +708,11 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return -EBUSY;
/* For an MLO connection, if a link ID was specified, validate that it
- * is indeed active. If no link ID was specified, select one of the
- * active links.
+ * is indeed active.
*/
- if (ieee80211_vif_is_mld(&sdata->vif)) {
- if (req->tsf_report_link_id >= 0) {
- if (!(sdata->vif.active_links &
- BIT(req->tsf_report_link_id)))
- return -EINVAL;
- } else {
- req->tsf_report_link_id =
- __ffs(sdata->vif.active_links);
- }
- }
+ if (ieee80211_vif_is_mld(&sdata->vif) && req->tsf_report_link_id >= 0 &&
+ !(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
+ return -EINVAL;
if (!__ieee80211_can_leave_ch(sdata))
return -EBUSY;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index da5fdd6f5..aa22f09e6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
/* sync with ieee80211_tx_h_unicast_ps_buf */
- spin_lock(&sta->ps_lock);
+ spin_lock_bh(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
*/
clear_sta_flag(sta, WLAN_STA_PSPOLL);
clear_sta_flag(sta, WLAN_STA_UAPSD);
- spin_unlock(&sta->ps_lock);
+ spin_unlock_bh(&sta->ps_lock);
atomic_dec(&ps->num_sta_ps);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index a237cbcf7..0da5f6082 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1841,7 +1841,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add interfaces */
sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
/* in HW restart it exists already */
WARN_ON(local->resuming);
res = drv_add_interface(local, sdata);
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 8fc790f2a..4385fd3b1 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -81,7 +81,7 @@ static int mpls_xmit(struct sk_buff *skb)
ttl = net->mpls.default_ttl;
else
ttl = ip_hdr(skb)->ttl;
- rt = (struct rtable *)dst;
+ rt = dst_rtable(dst);
} else if (dst->ops->family == AF_INET6) {
if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED)
ttl = tun_encap_info->default_ttl;
@@ -90,7 +90,7 @@ static int mpls_xmit(struct sk_buff *skb)
ttl = net->mpls.default_ttl;
else
ttl = ipv6_hdr(skb)->hop_limit;
- rt6 = (struct rt6_info *)dst;
+ rt6 = dst_rt6_info(dst);
} else {
goto drop;
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 5c17d3914..8bf7ed6d6 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -676,6 +676,7 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
unsigned int add_addr_accept_max;
struct mptcp_addr_info remote;
unsigned int subflows_max;
+ bool sf_created = false;
int i, nr;
add_addr_accept_max = mptcp_pm_get_add_addr_accept_max(msk);
@@ -703,15 +704,18 @@ static void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
if (nr == 0)
return;
- msk->pm.add_addr_accepted++;
- if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
- msk->pm.subflows >= subflows_max)
- WRITE_ONCE(msk->pm.accept_addr, false);
-
spin_unlock_bh(&msk->pm.lock);
for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &addrs[i], &remote);
+ if (__mptcp_subflow_connect(sk, &addrs[i], &remote) == 0)
+ sf_created = true;
spin_lock_bh(&msk->pm.lock);
+
+ if (sf_created) {
+ msk->pm.add_addr_accepted++;
+ if (msk->pm.add_addr_accepted >= add_addr_accept_max ||
+ msk->pm.subflows >= subflows_max)
+ WRITE_ONCE(msk->pm.accept_addr, false);
+ }
}
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
@@ -813,10 +817,13 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
spin_lock_bh(&msk->pm.lock);
removed = true;
- __MPTCP_INC_STATS(sock_net(sk), rm_type);
+ if (rm_type == MPTCP_MIB_RMSUBFLOW)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
}
if (rm_type == MPTCP_MIB_RMSUBFLOW)
__set_bit(rm_id ? rm_id : msk->mpc_endpoint_id, msk->pm.id_avail_bitmap);
+ else if (rm_type == MPTCP_MIB_RMADDR)
+ __MPTCP_INC_STATS(sock_net(sk), rm_type);
if (!removed)
continue;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 965eb69dc..68e4c0864 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2907,9 +2907,14 @@ void mptcp_set_state(struct sock *sk, int state)
if (oldstate != TCP_ESTABLISHED)
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
break;
-
+ case TCP_CLOSE_WAIT:
+ /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+ * MPTCP "accepted" sockets will be created later on. So no
+ * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+ */
+ break;
default:
- if (oldstate == TCP_ESTABLISHED)
+ if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
}
@@ -3726,6 +3731,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
WRITE_ONCE(msk->write_seq, subflow->idsn);
WRITE_ONCE(msk->snd_nxt, subflow->idsn);
+ WRITE_ONCE(msk->snd_una, subflow->idsn);
if (likely(!__mptcp_check_fallback(msk)))
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a10ebf3ee..9d1ee1994 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -308,6 +308,9 @@ struct mptcp_sock {
free_first:1,
rcvspace_init:1;
u32 notsent_lowat;
+ int keepalive_cnt;
+ int keepalive_idle;
+ int keepalive_intvl;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index 73fdf423d..19ee684f9 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -181,8 +181,6 @@ static int mptcp_setsockopt_sol_socket_int(struct mptcp_sock *msk, int optname,
switch (optname) {
case SO_KEEPALIVE:
- mptcp_sol_socket_sync_intval(msk, optname, val);
- return 0;
case SO_DEBUG:
case SO_MARK:
case SO_PRIORITY:
@@ -624,6 +622,31 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
+static int __mptcp_setsockopt_set_val(struct mptcp_sock *msk, int max,
+ int (*set_val)(struct sock *, int),
+ int *msk_val, int val)
+{
+ struct mptcp_subflow_context *subflow;
+ int err = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int ret;
+
+ lock_sock(ssk);
+ ret = set_val(ssk, val);
+ err = err ? : ret;
+ release_sock(ssk);
+ }
+
+ if (!err) {
+ *msk_val = val;
+ sockopt_seq_inc(msk);
+ }
+
+ return err;
+}
+
static int __mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, int val)
{
struct mptcp_subflow_context *subflow;
@@ -820,6 +843,22 @@ static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
case TCP_NODELAY:
ret = __mptcp_setsockopt_sol_tcp_nodelay(msk, val);
break;
+ case TCP_KEEPIDLE:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPIDLE,
+ &tcp_sock_set_keepidle_locked,
+ &msk->keepalive_idle, val);
+ break;
+ case TCP_KEEPINTVL:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPINTVL,
+ &tcp_sock_set_keepintvl,
+ &msk->keepalive_intvl, val);
+ break;
+ case TCP_KEEPCNT:
+ ret = __mptcp_setsockopt_set_val(msk, MAX_TCP_KEEPCNT,
+ &tcp_sock_set_keepcnt,
+ &msk->keepalive_cnt,
+ val);
+ break;
default:
ret = -ENOPROTOOPT;
}
@@ -1322,6 +1361,8 @@ static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
char __user *optval, int __user *optlen)
{
+ struct sock *sk = (void *)msk;
+
switch (optname) {
case TCP_ULP:
case TCP_CONGESTION:
@@ -1340,6 +1381,18 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
return mptcp_put_int_option(msk, optval, optlen, msk->cork);
case TCP_NODELAY:
return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ case TCP_KEEPIDLE:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_idle ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_time) / HZ);
+ case TCP_KEEPINTVL:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_intvl ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_intvl) / HZ);
+ case TCP_KEEPCNT:
+ return mptcp_put_int_option(msk, optval, optlen,
+ msk->keepalive_cnt ? :
+ READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_keepalive_probes));
case TCP_NOTSENT_LOWAT:
return mptcp_put_int_option(msk, optval, optlen, msk->notsent_lowat);
}
@@ -1457,6 +1510,9 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
tcp_set_congestion_control(ssk, msk->ca_name, false, true);
__tcp_sock_set_cork(ssk, !!msk->cork);
__tcp_sock_set_nodelay(ssk, !!msk->nodelay);
+ tcp_sock_set_keepidle_locked(ssk, msk->keepalive_idle);
+ tcp_sock_set_keepintvl(ssk, msk->keepalive_intvl);
+ tcp_sock_set_keepcnt(ssk, msk->keepalive_cnt);
inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 374412ed7..ef0f8f738 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
spinlock_t lock; /* Protect the NCSI device */
unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
+ unsigned int channel_probe_id;/* Current channel ID during probe */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
struct ncsi_request requests[256]; /* Request table */
@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
bool multi_package; /* Enable multiple packages */
bool mlx_multi_host; /* Enable multi host Mellanox */
u32 package_whitelist; /* Packages to configure */
+ unsigned char channel_count; /* Num of channels to probe */
};
struct ncsi_cmd_arg {
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 745c788f1..5ecf611c8 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
break;
case ncsi_dev_state_suspend_gls:
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
+ nca.channel = ndp->channel_probe_id;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+ ndp->channel_probe_id++;
- nd->state = ncsi_dev_state_suspend_dcnt;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
+ nd->state = ncsi_dev_state_suspend_dcnt;
}
break;
@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
struct ncsi_package *np;
- struct ncsi_channel *nc;
struct ncsi_cmd_arg nca;
unsigned char index;
int ret;
@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_cis;
break;
- case ncsi_dev_state_probe_cis:
- ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
- /* Clear initial state */
- nca.type = NCSI_PKT_CMD_CIS;
- nca.package = ndp->active_package->id;
- for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
- nca.channel = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
- nd->state = ncsi_dev_state_probe_gvi;
- if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
- nd->state = ncsi_dev_state_probe_keep_phy;
- break;
case ncsi_dev_state_probe_keep_phy:
ndp->pending_req_num = 1;
@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_gvi;
break;
+ case ncsi_dev_state_probe_cis:
case ncsi_dev_state_probe_gvi:
case ncsi_dev_state_probe_gc:
case ncsi_dev_state_probe_gls:
np = ndp->active_package;
- ndp->pending_req_num = np->channel_num;
+ ndp->pending_req_num = 1;
- /* Retrieve version, capability or link status */
- if (nd->state == ncsi_dev_state_probe_gvi)
+ /* Clear initial state, retrieve version, capability or link status */
+ if (nd->state == ncsi_dev_state_probe_cis)
+ nca.type = NCSI_PKT_CMD_CIS;
+ else if (nd->state == ncsi_dev_state_probe_gvi)
nca.type = NCSI_PKT_CMD_GVI;
else if (nd->state == ncsi_dev_state_probe_gc)
nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nca.type = NCSI_PKT_CMD_GLS;
nca.package = np->id;
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- nca.channel = nc->id;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
+ nca.channel = ndp->channel_probe_id;
- if (nd->state == ncsi_dev_state_probe_gvi)
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
+
+ if (nd->state == ncsi_dev_state_probe_cis) {
+ nd->state = ncsi_dev_state_probe_gvi;
+ if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+ nd->state = ncsi_dev_state_probe_keep_phy;
+ } else if (nd->state == ncsi_dev_state_probe_gvi) {
nd->state = ncsi_dev_state_probe_gc;
- else if (nd->state == ncsi_dev_state_probe_gc)
+ } else if (nd->state == ncsi_dev_state_probe_gc) {
nd->state = ncsi_dev_state_probe_gls;
- else
+ } else {
+ nd->state = ncsi_dev_state_probe_cis;
+ ndp->channel_probe_id++;
+ }
+
+ if (ndp->channel_probe_id == ndp->channel_count) {
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe_dp;
+ }
break;
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
ndp->requests[i].ndp = ndp;
timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
+ ndp->channel_count = NCSI_RESERVED_CHANNEL;
spin_lock_irqsave(&ncsi_dev_lock, flags);
list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
if (!(ndp->flags & NCSI_DEV_PROBED)) {
ndp->package_probe_id = 0;
+ ndp->channel_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index bee290d0f..e28be33bd 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
struct ncsi_rsp_gc_pkt *rsp;
struct ncsi_dev_priv *ndp = nr->ndp;
struct ncsi_channel *nc;
+ struct ncsi_package *np;
size_t size;
/* Find the channel */
rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
- NULL, &nc);
+ &np, &nc);
if (!nc)
return -ENODEV;
@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
*/
nc->vlan_filter.bitmap = U64_MAX;
nc->vlan_filter.n_vids = rsp->vlan_cnt;
+ np->ndp->channel_count = rsp->channel_cnt;
return 0;
}
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 3126911f5..b00fc285b 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -815,12 +815,21 @@ int __init netfilter_init(void)
if (ret < 0)
goto err;
+#ifdef CONFIG_LWTUNNEL
+ ret = netfilter_lwtunnel_init();
+ if (ret < 0)
+ goto err_lwtunnel_pernet;
+#endif
ret = netfilter_log_init();
if (ret < 0)
- goto err_pernet;
+ goto err_log_pernet;
return 0;
-err_pernet:
+err_log_pernet:
+#ifdef CONFIG_LWTUNNEL
+ netfilter_lwtunnel_fini();
+err_lwtunnel_pernet:
+#endif
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 3184cc6be..61431690c 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -53,12 +53,13 @@ MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
/* When the nfnl mutex or ip_set_ref_lock is held: */
-#define ip_set_dereference(p) \
- rcu_dereference_protected(p, \
+#define ip_set_dereference(inst) \
+ rcu_dereference_protected((inst)->ip_set_list, \
lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
- lockdep_is_held(&ip_set_ref_lock))
+ lockdep_is_held(&ip_set_ref_lock) || \
+ (inst)->is_deleted)
#define ip_set(inst, id) \
- ip_set_dereference((inst)->ip_set_list)[id]
+ ip_set_dereference(inst)[id]
#define ip_set_ref_netlink(inst,id) \
rcu_dereference_raw((inst)->ip_set_list)[id]
#define ip_set_dereference_nfnl(p) \
@@ -1133,7 +1134,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info,
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
- tmp = ip_set_dereference(inst->ip_set_list);
+ tmp = ip_set_dereference(inst);
memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
rcu_assign_pointer(inst->ip_set_list, list);
/* Make sure all current packets have passed through */
@@ -1172,23 +1173,50 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
.len = IPSET_MAXNAMELEN - 1 },
};
+/* In order to return quickly when destroying a single set, it is split
+ * into two stages:
+ * - Cancel garbage collector
+ * - Destroy the set itself via call_rcu()
+ */
+
static void
-ip_set_destroy_set(struct ip_set *set)
+ip_set_destroy_set_rcu(struct rcu_head *head)
{
- pr_debug("set: %s\n", set->name);
+ struct ip_set *set = container_of(head, struct ip_set, rcu);
- /* Must call it without holding any lock */
set->variant->destroy(set);
module_put(set->type->me);
kfree(set);
}
static void
-ip_set_destroy_set_rcu(struct rcu_head *head)
+_destroy_all_sets(struct ip_set_net *inst)
{
- struct ip_set *set = container_of(head, struct ip_set, rcu);
+ struct ip_set *set;
+ ip_set_id_t i;
+ bool need_wait = false;
- ip_set_destroy_set(set);
+ /* First cancel gc's: set:list sets are flushed as well */
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ set->variant->cancel_gc(set);
+ if (set->type->features & IPSET_TYPE_NAME)
+ need_wait = true;
+ }
+ }
+ /* Must wait for flush to be really finished */
+ if (need_wait)
+ rcu_barrier();
+ for (i = 0; i < inst->ip_set_max; i++) {
+ set = ip_set(inst, i);
+ if (set) {
+ ip_set(inst, i) = NULL;
+ set->variant->destroy(set);
+ module_put(set->type->me);
+ kfree(set);
+ }
+ }
}
static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
@@ -1202,11 +1230,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
-
/* Commands are serialized and references are
* protected by the ip_set_ref_lock.
* External systems (i.e. xt_set) must call
- * ip_set_put|get_nfnl_* functions, that way we
+ * ip_set_nfnl_get_* functions, that way we
* can safely check references here.
*
* list:set timer can only decrement the reference
@@ -1214,8 +1241,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
* without holding the lock.
*/
if (!attr[IPSET_ATTR_SETNAME]) {
- /* Must wait for flush to be really finished in list:set */
- rcu_barrier();
read_lock_bh(&ip_set_ref_lock);
for (i = 0; i < inst->ip_set_max; i++) {
s = ip_set(inst, i);
@@ -1226,15 +1251,7 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
}
inst->is_destroyed = true;
read_unlock_bh(&ip_set_ref_lock);
- for (i = 0; i < inst->ip_set_max; i++) {
- s = ip_set(inst, i);
- if (s) {
- ip_set(inst, i) = NULL;
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
- ip_set_destroy_set(s);
- }
- }
+ _destroy_all_sets(inst);
/* Modified by ip_set_destroy() only, which is serialized */
inst->is_destroyed = false;
} else {
@@ -1255,12 +1272,12 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info,
features = s->type->features;
ip_set(inst, i) = NULL;
read_unlock_bh(&ip_set_ref_lock);
+ /* Must cancel garbage collectors */
+ s->variant->cancel_gc(s);
if (features & IPSET_TYPE_NAME) {
/* Must wait for flush to be really finished */
rcu_barrier();
}
- /* Must cancel garbage collectors */
- s->variant->cancel_gc(s);
call_rcu(&s->rcu, ip_set_destroy_set_rcu);
}
return 0;
@@ -2365,30 +2382,25 @@ ip_set_net_init(struct net *net)
}
static void __net_exit
-ip_set_net_exit(struct net *net)
+ip_set_net_pre_exit(struct net *net)
{
struct ip_set_net *inst = ip_set_pernet(net);
- struct ip_set *set = NULL;
- ip_set_id_t i;
-
inst->is_deleted = true; /* flag for ip_set_nfnl_put */
+}
- nfnl_lock(NFNL_SUBSYS_IPSET);
- for (i = 0; i < inst->ip_set_max; i++) {
- set = ip_set(inst, i);
- if (set) {
- ip_set(inst, i) = NULL;
- set->variant->cancel_gc(set);
- ip_set_destroy_set(set);
- }
- }
- nfnl_unlock(NFNL_SUBSYS_IPSET);
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+
+ _destroy_all_sets(inst);
kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
.init = ip_set_net_init,
+ .pre_exit = ip_set_net_pre_exit,
.exit = ip_set_net_exit,
.id = &ip_set_net_id,
.size = sizeof(struct ip_set_net),
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 6c3f28bc5..bfae70669 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -79,7 +79,7 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -99,7 +99,7 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
struct set_elem *e;
int ret;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -188,9 +188,10 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
struct list_set *map = set->data;
struct set_adt_elem *d = value;
struct set_elem *e, *next, *prev = NULL;
- int ret;
+ int ret = 0;
- list_for_each_entry(e, &map->members, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -201,6 +202,7 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (d->before == 0) {
ret = 1;
+ goto out;
} else if (d->before > 0) {
next = list_next_entry(e, list);
ret = !list_is_last(&e->list, &map->members) &&
@@ -208,9 +210,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
} else {
ret = prev && prev->id == d->refid;
}
- return ret;
+ goto out;
}
- return 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
static void
@@ -239,7 +243,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
/* Find where to add the new entry */
n = prev = next = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_rcu(e, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -316,9 +320,9 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
{
struct list_set *map = set->data;
struct set_adt_elem *d = value;
- struct set_elem *e, *next, *prev = NULL;
+ struct set_elem *e, *n, *next, *prev = NULL;
- list_for_each_entry(e, &map->members, list) {
+ list_for_each_entry_safe(e, n, &map->members, list) {
if (SET_WITH_TIMEOUT(set) &&
ip_set_timeout_expired(ext_timeout(e, set)))
continue;
@@ -424,14 +428,8 @@ static void
list_set_destroy(struct ip_set *set)
{
struct list_set *map = set->data;
- struct set_elem *e, *n;
- list_for_each_entry_safe(e, n, &map->members, list) {
- list_del(&e->list);
- ip_set_put_byindex(map->net, e->id);
- ip_set_ext_destroy(set, e);
- kfree(e);
- }
+ WARN_ON_ONCE(!list_empty(&map->members));
kfree(map);
set->data = NULL;
@@ -549,6 +547,9 @@ list_set_cancel_gc(struct ip_set *set)
if (SET_WITH_TIMEOUT(set))
timer_shutdown_sync(&map->gc);
+
+ /* Flush list to drop references to other ipsets */
+ list_set_flush(set);
}
static const struct ip_set_type_variant set_variant = {
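
The list_for_each_entry_rcu() conversions above only make sense when the walk runs inside an RCU read-side critical section; the netfilter packet path already provides one, while the netlink paths take it explicitly as in list_set_utest(). The read-side pattern, reduced to a sketch with hypothetical names:

#include <linux/rculist.h>

struct demo_elem {
	struct list_head list;	/* linked with list_add_rcu()/list_del_rcu() */
	u32 id;
};

static bool demo_find(struct list_head *members, u32 id)
{
	struct demo_elem *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, members, list) {
		if (e->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}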
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 65e025917..e1f17392f 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -180,7 +180,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
(!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
(addr_type & IPV6_ADDR_LOOPBACK);
old_rt_is_local = __ip_vs_is_local_route6(
- (struct rt6_info *)skb_dst(skb));
+ dst_rt6_info(skb_dst(skb)));
} else
#endif
{
@@ -318,7 +318,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
- rt = (struct rtable *) dest_dst->dst_cache;
+ rt = dst_rtable(dest_dst->dst_cache);
else {
dest_dst = ip_vs_dest_dst_alloc();
spin_lock_bh(&dest->dst_lock);
@@ -481,7 +481,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
if (dest) {
dest_dst = __ip_vs_dst_check(dest);
if (likely(dest_dst))
- rt = (struct rt6_info *) dest_dst->dst_cache;
+ rt = dst_rt6_info(dest_dst->dst_cache);
else {
u32 cookie;
@@ -501,7 +501,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
ip_vs_dest_dst_free(dest_dst);
goto err_unreach;
}
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
cookie = rt6_get_cookie(rt);
__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
spin_unlock_bh(&dest->dst_lock);
@@ -517,7 +517,7 @@ __ip_vs_get_out_rt_v6(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
rt_mode);
if (!dst)
goto err_unreach;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
}
local = __ip_vs_is_local_route6(rt);
@@ -862,7 +862,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_RT_MODE_RDR);
if (local < 0)
goto tx_error;
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
@@ -1288,7 +1288,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (local)
return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
tdev = rt->dst.dev;
/*
@@ -1590,7 +1590,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
&cp->daddr.in6, NULL, ipvsh, 0, rt_mode);
if (local < 0)
goto tx_error;
- rt = (struct rt6_info *) skb_dst(skb);
+ rt = dst_rt6_info(skb_dst(skb));
/*
* Avoid duplicate tuple in reply direction for NAT traffic
* to local address when connection is sync-ed
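
The open-coded (struct rtable *) and (struct rt6_info *) casts above are replaced with the dst_rtable()/dst_rt6_info() accessors. Their definitions are not part of this patch; assuming they wrap the usual container_of() idiom over the embedded dst_entry, a hedged sketch of what such an accessor looks like:

/* Assumed shape only; the real helpers live in the routing headers. */
static inline struct rt6_info *demo_dst_rt6_info(struct dst_entry *dst)
{
	return container_of(dst, struct rt6_info, dst);
}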
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ee98ce5b..559665467 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -22,9 +22,6 @@
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
-#ifdef CONFIG_LWTUNNEL
-#include <net/netfilter/nf_hooks_lwtunnel.h>
-#endif
#include <linux/rculist_nulls.h>
static bool enable_hooks __read_mostly;
@@ -612,9 +609,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE,
NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM,
#endif
-#ifdef CONFIG_LWTUNNEL
- NF_SYSCTL_CT_LWTUNNEL,
-#endif
__NF_SYSCTL_CT_LAST_SYSCTL,
};
@@ -948,15 +942,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.proc_handler = proc_dointvec_jiffies,
},
#endif
-#ifdef CONFIG_LWTUNNEL
- [NF_SYSCTL_CT_LWTUNNEL] = {
- .procname = "nf_hooks_lwtunnel",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
- },
-#endif
{}
};
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index a05713392..5c1ff07ea 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -77,12 +77,8 @@ EXPORT_SYMBOL_GPL(flow_offload_alloc);
static u32 flow_offload_dst_cookie(struct flow_offload_tuple *flow_tuple)
{
- const struct rt6_info *rt;
-
- if (flow_tuple->l3proto == NFPROTO_IPV6) {
- rt = (const struct rt6_info *)flow_tuple->dst_cache;
- return rt6_get_cookie(rt);
- }
+ if (flow_tuple->l3proto == NFPROTO_IPV6)
+ return rt6_get_cookie(dst_rt6_info(flow_tuple->dst_cache));
return 0;
}
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 5383bed3d..c2c005234 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -434,7 +434,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
+ rt = dst_rtable(tuplehash->tuple.dst_cache);
memset(skb->cb, 0, sizeof(struct inet_skb_parm));
IPCB(skb)->iif = skb->dev->ifindex;
IPCB(skb)->flags = IPSKB_FORWARDED;
@@ -446,7 +446,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
- rt = (struct rtable *)tuplehash->tuple.dst_cache;
+ rt = dst_rtable(tuplehash->tuple.dst_cache);
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
@@ -729,7 +729,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
return NF_ACCEPT;
if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+ rt = dst_rt6_info(tuplehash->tuple.dst_cache);
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->iif = skb->dev->ifindex;
IP6CB(skb)->flags = IP6SKB_FORWARDED;
@@ -741,7 +741,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
switch (tuplehash->tuple.xmit_type) {
case FLOW_OFFLOAD_XMIT_NEIGH:
- rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
+ rt = dst_rt6_info(tuplehash->tuple.dst_cache);
outdev = rt->dst.dev;
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
diff --git a/net/netfilter/nf_hooks_lwtunnel.c b/net/netfilter/nf_hooks_lwtunnel.c
index 00e89ffd7..7cdb59bb4 100644
--- a/net/netfilter/nf_hooks_lwtunnel.c
+++ b/net/netfilter/nf_hooks_lwtunnel.c
@@ -3,6 +3,9 @@
#include <linux/sysctl.h>
#include <net/lwtunnel.h>
#include <net/netfilter/nf_hooks_lwtunnel.h>
+#include <linux/netfilter.h>
+
+#include "nf_internals.h"
static inline int nf_hooks_lwtunnel_get(void)
{
@@ -50,4 +53,68 @@ int nf_hooks_lwtunnel_sysctl_handler(struct ctl_table *table, int write,
return ret;
}
EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_sysctl_handler);
+
+static struct ctl_table nf_lwtunnel_sysctl_table[] = {
+ {
+ .procname = "nf_hooks_lwtunnel",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = nf_hooks_lwtunnel_sysctl_handler,
+ },
+};
+
+static int __net_init nf_lwtunnel_net_init(struct net *net)
+{
+ struct ctl_table_header *hdr;
+ struct ctl_table *table;
+
+ table = nf_lwtunnel_sysctl_table;
+ if (!net_eq(net, &init_net)) {
+ table = kmemdup(nf_lwtunnel_sysctl_table,
+ sizeof(nf_lwtunnel_sysctl_table),
+ GFP_KERNEL);
+ if (!table)
+ goto err_alloc;
+ }
+
+ hdr = register_net_sysctl_sz(net, "net/netfilter", table,
+ ARRAY_SIZE(nf_lwtunnel_sysctl_table));
+ if (!hdr)
+ goto err_reg;
+
+ net->nf.nf_lwtnl_dir_header = hdr;
+
+ return 0;
+err_reg:
+ if (!net_eq(net, &init_net))
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void __net_exit nf_lwtunnel_net_exit(struct net *net)
+{
+ const struct ctl_table *table;
+
+ table = net->nf.nf_lwtnl_dir_header->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf.nf_lwtnl_dir_header);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+}
+
+static struct pernet_operations nf_lwtunnel_net_ops = {
+ .init = nf_lwtunnel_net_init,
+ .exit = nf_lwtunnel_net_exit,
+};
+
+int __init netfilter_lwtunnel_init(void)
+{
+ return register_pernet_subsys(&nf_lwtunnel_net_ops);
+}
+
+void netfilter_lwtunnel_fini(void)
+{
+ unregister_pernet_subsys(&nf_lwtunnel_net_ops);
+}
#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
index 832ae6417..254030230 100644
--- a/net/netfilter/nf_internals.h
+++ b/net/netfilter/nf_internals.h
@@ -29,6 +29,12 @@ void nf_queue_nf_hook_drop(struct net *net);
/* nf_log.c */
int __init netfilter_log_init(void);
+#ifdef CONFIG_LWTUNNEL
+/* nf_hooks_lwtunnel.c */
+int __init netfilter_lwtunnel_init(void);
+void netfilter_lwtunnel_fini(void);
+#endif
+
/* core.c */
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
const struct nf_hook_ops *reg);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 00f4bd21c..f1c31757e 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -169,7 +169,9 @@ instance_destroy_rcu(struct rcu_head *head)
struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
rcu);
+ rcu_read_lock();
nfqnl_flush(inst, NULL, 0);
+ rcu_read_unlock();
kfree(inst);
module_put(THIS_MODULE);
}
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index 37cfe6dd7..b58f62195 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -35,11 +35,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
switch (priv->result) {
case NFT_FIB_RESULT_OIF:
case NFT_FIB_RESULT_OIFNAME:
- hooks = (1 << NF_INET_PRE_ROUTING);
- if (priv->flags & NFTA_FIB_F_IIF) {
- hooks |= (1 << NF_INET_LOCAL_IN) |
- (1 << NF_INET_FORWARD);
- }
+ hooks = (1 << NF_INET_PRE_ROUTING) |
+ (1 << NF_INET_LOCAL_IN) |
+ (1 << NF_INET_FORWARD);
break;
case NFT_FIB_RESULT_ADDRTYPE:
if (priv->flags & NFTA_FIB_F_IIF)
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index ba0d3683a..9139ce38e 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -839,6 +839,9 @@ static int nft_meta_inner_init(const struct nft_ctx *ctx,
struct nft_meta *priv = nft_expr_priv(expr);
unsigned int len;
+ if (!tb[NFTA_META_KEY] || !tb[NFTA_META_DREG])
+ return -EINVAL;
+
priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
switch (priv->key) {
case NFT_META_PROTOCOL:
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 0a689c8e0..50429cbd4 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
int mac_off = skb_mac_header(skb) - skb->data;
u8 *vlanh, *dst_u8 = (u8 *) d;
struct vlan_ethhdr veth;
- u8 vlan_hlen = 0;
-
- if ((skb->protocol == htons(ETH_P_8021AD) ||
- skb->protocol == htons(ETH_P_8021Q)) &&
- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
- vlan_hlen += VLAN_HLEN;
vlanh = (u8 *) &veth;
- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
+ if (offset < VLAN_ETH_HLEN) {
u8 ethlen = len;
- if (vlan_hlen &&
- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
- return false;
- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
return false;
- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
+ if (offset + len > VLAN_ETH_HLEN)
+ ethlen -= offset + len - VLAN_ETH_HLEN;
- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
+ memcpy(dst_u8, vlanh + offset, ethlen);
len -= ethlen;
if (len == 0)
return true;
dst_u8 += ethlen;
- offset = ETH_HLEN + vlan_hlen;
+ offset = ETH_HLEN;
} else {
- offset -= VLAN_HLEN + vlan_hlen;
+ offset -= VLAN_HLEN;
}
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
@@ -154,12 +145,12 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
return pkt->inneroff;
}
-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
{
- unsigned int len = priv->offset + priv->len;
+ unsigned int boundary = offset + len;
/* data past ether src/dst requested, copy needed */
- if (len > offsetof(struct ethhdr, h_proto))
+ if (boundary > offsetof(struct ethhdr, h_proto))
return true;
return false;
@@ -183,7 +174,7 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
if (skb_vlan_tag_present(skb) &&
- nft_payload_need_vlan_copy(priv)) {
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
@@ -659,6 +650,10 @@ static int nft_payload_inner_init(const struct nft_ctx *ctx,
struct nft_payload *priv = nft_expr_priv(expr);
u32 base;
+ if (!tb[NFTA_PAYLOAD_BASE] || !tb[NFTA_PAYLOAD_OFFSET] ||
+ !tb[NFTA_PAYLOAD_LEN] || !tb[NFTA_PAYLOAD_DREG])
+ return -EINVAL;
+
base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
switch (base) {
case NFT_PAYLOAD_TUN_HEADER:
@@ -810,21 +805,79 @@ struct nft_payload_set {
u8 csum_flags;
};
+/* This is not struct vlan_hdr. */
+struct nft_payload_vlan_hdr {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+};
+
+static bool
+nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
+ int *vlan_hlen)
+{
+ struct nft_payload_vlan_hdr *vlanh;
+ __be16 vlan_proto;
+ u16 vlan_tci;
+
+ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
+ *vlan_hlen = VLAN_HLEN;
+ return true;
+ }
+
+ switch (offset) {
+ case offsetof(struct vlan_ethhdr, h_vlan_proto):
+ if (len == 2) {
+ vlan_proto = nft_reg_load_be16(src);
+ skb->vlan_proto = vlan_proto;
+ } else if (len == 4) {
+ vlanh = (struct nft_payload_vlan_hdr *)src;
+ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
+ ntohs(vlanh->h_vlan_TCI));
+ } else {
+ return false;
+ }
+ break;
+ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
+ if (len != 2)
+ return false;
+
+ vlan_tci = ntohs(nft_reg_load_be16(src));
+ skb->vlan_tci = vlan_tci;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void nft_payload_set_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
const struct nft_payload_set *priv = nft_expr_priv(expr);
- struct sk_buff *skb = pkt->skb;
const u32 *src = &regs->data[priv->sreg];
- int offset, csum_offset;
+ int offset, csum_offset, vlan_hlen = 0;
+ struct sk_buff *skb = pkt->skb;
__wsum fsum, tsum;
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
goto err;
- offset = skb_mac_header(skb) - skb->data;
+
+ if (skb_vlan_tag_present(skb) &&
+ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_set_vlan(src, skb,
+ priv->offset, priv->len,
+ &vlan_hlen))
+ goto err;
+
+ if (!vlan_hlen)
+ return;
+ }
+
+ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
break;
case NFT_PAYLOAD_NETWORK_HEADER:
offset = skb_network_offset(skb);
diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c
index 24d977138..14d88394b 100644
--- a/net/netfilter/nft_rt.c
+++ b/net/netfilter/nft_rt.c
@@ -73,14 +73,14 @@ void nft_rt_get_eval(const struct nft_expr *expr,
if (nft_pf(pkt) != NFPROTO_IPV4)
goto err;
- *dest = (__force u32)rt_nexthop((const struct rtable *)dst,
+ *dest = (__force u32)rt_nexthop(dst_rtable(dst),
ip_hdr(skb)->daddr);
break;
case NFT_RT_NEXTHOP6:
if (nft_pf(pkt) != NFPROTO_IPV6)
goto err;
- memcpy(dest, rt6_nexthop((struct rt6_info *)dst,
+ memcpy(dest, rt6_nexthop(dst_rt6_info(dst),
&ipv6_hdr(skb)->daddr),
sizeof(struct in6_addr));
break;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 70480869a..bd2b17b21 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -285,22 +285,14 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
return 0;
}
-static inline void __nr_remove_node(struct nr_node *nr_node)
+static void nr_remove_node_locked(struct nr_node *nr_node)
{
+ lockdep_assert_held(&nr_node_list_lock);
+
hlist_del_init(&nr_node->node_node);
nr_node_put(nr_node);
}
-#define nr_remove_node_locked(__node) \
- __nr_remove_node(__node)
-
-static void nr_remove_node(struct nr_node *nr_node)
-{
- spin_lock_bh(&nr_node_list_lock);
- __nr_remove_node(nr_node);
- spin_unlock_bh(&nr_node_list_lock);
-}
-
static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
{
hlist_del_init(&nr_neigh->neigh_node);
@@ -339,6 +331,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
return -EINVAL;
}
+ spin_lock_bh(&nr_node_list_lock);
nr_node_lock(nr_node);
for (i = 0; i < nr_node->count; i++) {
if (nr_node->routes[i].neighbour == nr_neigh) {
@@ -352,7 +345,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
nr_node->count--;
if (nr_node->count == 0) {
- nr_remove_node(nr_node);
+ nr_remove_node_locked(nr_node);
} else {
switch (i) {
case 0:
@@ -367,12 +360,14 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
nr_node_put(nr_node);
}
nr_node_unlock(nr_node);
+ spin_unlock_bh(&nr_node_list_lock);
return 0;
}
}
nr_neigh_put(nr_neigh);
nr_node_unlock(nr_node);
+ spin_unlock_bh(&nr_node_list_lock);
nr_node_put(nr_node);
return -EINVAL;
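
Folding __nr_remove_node() and its wrappers into nr_remove_node_locked() documents the locking contract with lockdep instead of a naming convention. The general shape of that pattern, with illustrative names (demo_list_lock and demo_node_put are hypothetical):

static void demo_remove_locked(struct demo_node *node)
{
	/* splats at runtime (with CONFIG_PROVE_LOCKING) if the caller
	 * forgot to take the list lock
	 */
	lockdep_assert_held(&demo_list_lock);

	hlist_del_init(&node->link);
	demo_node_put(node);
}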
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 4e7c968cd..5e3ca068f 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,7 +121,8 @@ static void nr_heartbeat_expiry(struct timer_list *t)
is accepted() it isn't 'dead' so doesn't get removed. */
if (sock_flag(sk, SOCK_DESTROY) ||
(sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
- sock_hold(sk);
+ if (sk->sk_state == TCP_LISTEN)
+ sock_hold(sk);
bh_unlock_sock(sk);
nr_destroy_socket(sk);
goto out;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index b133dc553..f456a5911 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1463,6 +1463,19 @@ int nci_core_ntf_packet(struct nci_dev *ndev, __u16 opcode,
ndev->ops->n_core_ops);
}
+static bool nci_valid_size(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+ !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
+ return true;
+}
+
/* ---- NCI TX Data worker thread ---- */
static void nci_tx_work(struct work_struct *work)
@@ -1516,10 +1529,9 @@ static void nci_rx_work(struct work_struct *work)
nfc_send_to_raw_sock(ndev->nfc_dev, skb,
RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);
- if (!nci_plen(skb->data)) {
+ if (!nci_valid_size(skb)) {
kfree_skb(skb);
- kcov_remote_stop();
- break;
+ continue;
}
/* Process frame */
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 6fcd7e2ca..964225580 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -936,6 +936,12 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
pskb_trim(skb, ovs_mac_header_len(key));
}
+ /* Need to set the pkt_type to involve the routing layer. The
+ * packet movement through the OVS datapath doesn't generally
+ * use routing, but this is needed for tunnel cases.
+ */
+ skb->pkt_type = PACKET_OUTGOING;
+
if (likely(!mru ||
(skb->len <= mru + vport->dev->hard_header_len))) {
ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 33b21a0c0..8a848ce72 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -561,7 +561,6 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
*/
key->tp.src = htons(icmp->icmp6_type);
key->tp.dst = htons(icmp->icmp6_code);
- memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -570,6 +569,8 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
struct nd_msg *nd;
int offset;
+ memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
+
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 18f616f48..ea3ebc160 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2522,8 +2522,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
ts = __packet_set_timestamp(po, ph, skb);
__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
- if (!packet_read_pending(&po->tx_ring))
- complete(&po->skb_completion);
+ complete(&po->skb_completion);
}
sock_wfree(skb);
@@ -3800,28 +3799,30 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
case PACKET_TX_RING:
{
union tpacket_req_u req_u;
- int len;
+ ret = -EINVAL;
lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
- len = sizeof(req_u.req);
+ if (optlen < sizeof(req_u.req))
+ break;
+ ret = copy_from_sockptr(&req_u.req, optval,
+ sizeof(req_u.req)) ?
+ -EINVAL : 0;
break;
case TPACKET_V3:
default:
- len = sizeof(req_u.req3);
+ if (optlen < sizeof(req_u.req3))
+ break;
+ ret = copy_from_sockptr(&req_u.req3, optval,
+ sizeof(req_u.req3)) ?
+ -EINVAL : 0;
break;
}
- if (optlen < len) {
- ret = -EINVAL;
- } else {
- if (copy_from_sockptr(&req_u.req, optval, len))
- ret = -EFAULT;
- else
- ret = packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
- }
+ if (!ret)
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
release_sock(sk);
return ret;
}
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index abb0c70ff..654a3cc0d 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -725,6 +725,24 @@ int qrtr_ns_init(void)
if (ret < 0)
goto err_wq;
+ /* As the qrtr ns socket owner and creator is the same module, we have
+ * to decrease the qrtr module reference count to guarantee that it
+ * remains zero after the ns socket is created, otherwise, executing
+ * "rmmod" command is unable to make the qrtr module deleted after the
+ * qrtr module is inserted successfully.
+ *
+ * However, the reference count is increased twice in
+ * sock_create_kern(): one is to increase the reference count of owner
+ * of qrtr socket's proto_ops struct; another is to increment the
+ * reference count of owner of qrtr proto struct. Therefore, we must
+ * decrement the module reference count twice to ensure that it keeps
+ * zero after server's listening socket is created. Of course, we
+ * must bump the module reference count twice as well before the socket
+ * is closed.
+ */
+ module_put(qrtr_ns.sock->ops->owner);
+ module_put(qrtr_ns.sock->sk->sk_prot_creator->owner);
+
return 0;
err_wq:
@@ -739,6 +757,15 @@ void qrtr_ns_remove(void)
{
cancel_work_sync(&qrtr_ns.work);
destroy_workqueue(qrtr_ns.workqueue);
+
+ /* sock_release() expects the two references that were put during
+ * qrtr_ns_init(). This function is only called during module remove,
+ * so try_stop_module() has already set the refcnt to 0. Use
+ * __module_get() instead of try_module_get() to successfully take two
+ * references.
+ */
+ __module_get(qrtr_ns.sock->ops->owner);
+ __module_get(qrtr_ns.sock->sk->sk_prot_creator->owner);
sock_release(qrtr_ns.sock);
}
EXPORT_SYMBOL_GPL(qrtr_ns_remove);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9ee622fb1..2520708b0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -830,7 +830,6 @@ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
u32 max;
if (*index) {
-again:
rcu_read_lock();
p = idr_find(&idrinfo->action_idr, *index);
@@ -839,7 +838,7 @@ again:
* index but did not assign the pointer yet.
*/
rcu_read_unlock();
- goto again;
+ return -EAGAIN;
}
if (!p) {
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index baac083fd..2a96d9c1d 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -41,21 +41,26 @@ static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);
+struct zones_ht_key {
+ struct net *net;
+ u16 zone;
+};
+
struct tcf_ct_flow_table {
struct rhash_head node; /* In zones tables */
struct rcu_work rwork;
struct nf_flowtable nf_ft;
refcount_t ref;
- u16 zone;
+ struct zones_ht_key key;
bool dying;
};
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
- .key_offset = offsetof(struct tcf_ct_flow_table, zone),
- .key_len = sizeof_field(struct tcf_ct_flow_table, zone),
+ .key_offset = offsetof(struct tcf_ct_flow_table, key),
+ .key_len = sizeof_field(struct tcf_ct_flow_table, key),
.automatic_shrinking = true,
};
@@ -316,11 +321,12 @@ static struct nf_flowtable_type flowtable_ct = {
static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
+ struct zones_ht_key key = { .net = net, .zone = params->zone };
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
mutex_lock(&zones_mutex);
- ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
+ ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
goto out_unlock;
@@ -329,7 +335,7 @@ static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
goto err_alloc;
refcount_set(&ct_ft->ref, 1);
- ct_ft->zone = params->zone;
+ ct_ft->key = key;
err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
if (err)
goto err_insert;
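
Widening the zones_ht key from a bare u16 zone to a (net, zone) pair works because rhashtable hashes and compares key_len bytes starting at key_offset; any plain struct can act as the key as long as lookups pass an identically laid-out value and padding is zeroed consistently. A minimal sketch with hypothetical names:

struct demo_key {
	struct net *net;
	u16 zone;
};

struct demo_entry {
	struct rhash_head node;
	struct demo_key key;
};

static const struct rhashtable_params demo_params = {
	.head_offset = offsetof(struct demo_entry, node),
	.key_offset  = offsetof(struct demo_entry, key),
	.key_len     = sizeof_field(struct demo_entry, key),
	.automatic_shrinking = true,
};

static struct demo_entry *demo_lookup(struct rhashtable *ht,
				      struct net *net, u16 zone)
{
	/* designated initializer: unnamed fields start out zeroed, the
	 * comparison below is byte-wise over the whole struct
	 */
	struct demo_key key = { .net = net, .zone = zone };

	return rhashtable_lookup_fast(ht, &key, demo_params);
}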
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 60239378d..6292d6d73 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1389,6 +1389,7 @@ err_out4:
ops->destroy(sch);
qdisc_put_stab(rtnl_dereference(sch->stab));
err_out3:
+ lockdep_unregister_key(&sch->root_lock_key);
netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4a2c763e2..fb32984d7 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -673,6 +673,7 @@ struct Qdisc noop_qdisc = {
.qlen = 0,
.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
},
+ .owner = -1,
};
EXPORT_SYMBOL(noop_qdisc);
@@ -945,7 +946,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
gnet_stats_basic_sync_init(&sch->bstats);
+ lockdep_register_key(&sch->root_lock_key);
spin_lock_init(&sch->q.lock);
+ lockdep_set_class(&sch->q.lock, &sch->root_lock_key);
if (ops->static_flags & TCQ_F_CPUSTATS) {
sch->cpu_bstats =
@@ -980,6 +983,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
return sch;
errout1:
+ lockdep_unregister_key(&sch->root_lock_key);
kfree(sch);
errout:
return ERR_PTR(err);
@@ -1068,6 +1072,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
if (ops->destroy)
ops->destroy(qdisc);
+ lockdep_unregister_key(&qdisc->root_lock_key);
module_put(ops->owner);
netdev_put(dev, &qdisc->dev_tracker);
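
Registering a lock_class_key per qdisc (in place of the htb_set_lockdep_class_child() helper removed in the sch_htb.c hunks below) gives every root lock its own lockdep class. The general per-object dynamic key pattern, sketched with illustrative names:

struct demo_obj {
	spinlock_t lock;
	struct lock_class_key lock_key;
};

static struct demo_obj *demo_alloc(void)
{
	struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->lock_key);
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->lock_key);
	return obj;
}

static void demo_free(struct demo_obj *obj)
{
	/* must be unregistered before the memory is reused */
	lockdep_unregister_key(&obj->lock_key);
	kfree(obj);
}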
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 93e6fb56f..ff3de3787 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1039,13 +1039,6 @@ static void htb_work_func(struct work_struct *work)
rcu_read_unlock();
}
-static void htb_set_lockdep_class_child(struct Qdisc *q)
-{
- static struct lock_class_key child_key;
-
- lockdep_set_class(qdisc_lock(q), &child_key);
-}
-
static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
@@ -1132,7 +1125,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
return -ENOMEM;
}
- htb_set_lockdep_class_child(qdisc);
q->direct_qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -1468,7 +1460,6 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
}
if (q->offload) {
- htb_set_lockdep_class_child(new);
/* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
qdisc_refcount_inc(new);
old_q = htb_graft_helper(dev_queue, new);
@@ -1733,11 +1724,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
cl->parent->common.classid,
NULL);
- if (q->offload) {
- if (new_q)
- htb_set_lockdep_class_child(new_q);
+ if (q->offload)
htb_parent_to_leaf_offload(sch, dev_queue, new_q);
- }
}
sch_tree_lock(sch);
@@ -1947,13 +1935,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
classid, NULL);
if (q->offload) {
- if (new_q) {
- htb_set_lockdep_class_child(new_q);
- /* One ref for cl->leaf.q, the other for
- * dev_queue->qdisc.
- */
+ /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
+ if (new_q)
qdisc_refcount_inc(new_q);
- }
old_q = htb_graft_helper(dev_queue, new_q);
/* No qdisc_put needed. */
WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 79e93a19d..06e03f5cd 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
- removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+ removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
GFP_KERNEL);
if (!removed)
return -ENOMEM;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index a0d54b422..0b150b13b 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1151,11 +1151,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
list_for_each_entry(entry, &new->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- if (!cycle) {
- NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0");
- return -EINVAL;
- }
-
if (cycle < 0 || cycle > INT_MAX) {
NL_SET_ERR_MSG(extack, "'cycle_time' is too big");
return -EINVAL;
@@ -1164,6 +1159,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
new->cycle_time = cycle;
}
+ if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) {
+ NL_SET_ERR_MSG(extack, "'cycle_time' is too small");
+ return -EINVAL;
+ }
+
taprio_calculate_gate_durations(q, new);
return 0;
@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
{
bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
- if (!qopt && !dev->num_tc) {
- NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
- return -EINVAL;
- }
-
- /* If num_tc is already set, it means that the user already
- * configured the mqprio part
- */
- if (dev->num_tc)
+ if (!qopt) {
+ if (!dev->num_tc) {
+ NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+ return -EINVAL;
+ }
return 0;
+ }
/* taprio imposes that traffic classes map 1:n to tx queues */
if (qopt->num_tc > dev->num_tx_queues) {
@@ -1851,6 +1848,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
q->flags = taprio_flags;
+ /* Needed for length_to_duration() during netlink attribute parsing */
+ taprio_set_picos_per_byte(dev, q);
+
err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
if (err < 0)
return err;
@@ -1910,7 +1910,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
goto free_sched;
- taprio_set_picos_per_byte(dev, q);
taprio_update_queue_max_sdu(q, new_admin, stab);
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 24368f755..f7b809c0d 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -415,7 +415,7 @@ out:
if (!IS_ERR_OR_NULL(dst)) {
struct rt6_info *rt;
- rt = (struct rt6_info *)dst;
+ rt = dst_rt6_info(dst);
t->dst_cookie = rt6_get_cookie(rt);
pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
&rt->rt6i_dst.addr, rt->rt6i_dst.plen,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index e849f368e..5a7436a13 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -552,7 +552,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
struct flowi *fl)
{
union sctp_addr *saddr = &t->saddr;
- struct rtable *rt = (struct rtable *)t->dst;
+ struct rtable *rt = dst_rtable(t->dst);
if (rt) {
saddr->v4.sin_family = AF_INET;
@@ -1085,7 +1085,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t)
skb_reset_inner_mac_header(skb);
skb_reset_inner_transport_header(skb);
skb_set_inner_ipproto(skb, IPPROTO_SCTP);
- udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr,
+ udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
sctp_sk(sk)->udp_port, t->encap_port, false, false);
return 0;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 4b52b3b15..5f9f3d4c1 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -460,29 +460,11 @@ out:
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
unsigned long mask)
{
- struct net *nnet = sock_net(nsk);
-
nsk->sk_userlocks = osk->sk_userlocks;
- if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
nsk->sk_sndbuf = osk->sk_sndbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_sndbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
- else
- WRITE_ONCE(nsk->sk_sndbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_wmem));
- }
- if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+ if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
nsk->sk_rcvbuf = osk->sk_rcvbuf;
- } else {
- if (mask == SK_FLAGS_SMC_TO_CLC)
- WRITE_ONCE(nsk->sk_rcvbuf,
- READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
- else
- WRITE_ONCE(nsk->sk_rcvbuf,
- 2 * READ_ONCE(nnet->smc.sysctl_rmem));
- }
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index c7af0220f..369310909 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1875,8 +1875,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
/* slack space should prevent this ever happening: */
- if (unlikely(snd_buf->len > snd_buf->buflen))
+ if (unlikely(snd_buf->len > snd_buf->buflen)) {
+ status = -EIO;
goto wrap_failed;
+ }
/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
* done anyway, so it's safe to put the request on the wire: */
if (maj_stat == GSS_S_CONTEXT_EXPIRED)
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 24de94184..73a90ad87 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1033,17 +1033,11 @@ null_verifier:
static void gss_free_in_token_pages(struct gssp_in_token *in_token)
{
- u32 inlen;
int i;
i = 0;
- inlen = in_token->page_len;
- while (inlen) {
- if (in_token->pages[i])
- put_page(in_token->pages[i]);
- inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
- }
-
+ while (in_token->pages[i])
+ put_page(in_token->pages[i++]);
kfree(in_token->pages);
in_token->pages = NULL;
}
@@ -1075,7 +1069,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
goto out_denied_free;
pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
- in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
+ in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
if (!in_token->pages)
goto out_denied_free;
in_token->page_base = 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 28f3749f6..59b2fbd88 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1071,6 +1071,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
.authflavor = old->cl_auth->au_flavor,
.cred = old->cl_cred,
.stats = old->cl_stats,
+ .timeout = old->cl_timeout,
};
struct rpc_clnt *clnt;
int err;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b33e42933..2b4b1276d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1265,8 +1265,6 @@ svc_generic_init_request(struct svc_rqst *rqstp,
if (rqstp->rq_proc >= versp->vs_nproc)
goto err_bad_proc;
rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
- if (!procp)
- goto err_bad_proc;
/* Initialize storage for argp and resp */
memset(rqstp->rq_argp, 0, procp->pc_argzero);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4f8d7efa4..432557a55 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -244,7 +244,11 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DEVICE_REMOVAL:
pr_info("rpcrdma: removing device %s for %pISpc\n",
ep->re_id->device->name, sap);
- fallthrough;
+ switch (xchg(&ep->re_connect_status, -ENODEV)) {
+ case 0: goto wake_connect_worker;
+ case 1: goto disconnected;
+ }
+ return 0;
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
goto disconnected;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index c1e890a82..500320e5c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2105,6 +2105,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
} else {
n = tipc_node_find_by_id(net, ehdr->id);
}
+ skb_dst_force(skb);
tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
if (!skb)
return;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index f892b0903..b849a3d13 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -174,7 +174,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
local_bh_disable();
ndst = dst_cache_get(cache);
if (dst->proto == htons(ETH_P_IP)) {
- struct rtable *rt = (struct rtable *)ndst;
+ struct rtable *rt = dst_rtable(ndst);
if (!rt) {
struct flowi4 fl = {
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b4674f03d..90b7f253d 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -816,9 +816,17 @@ struct tls_context *tls_ctx_create(struct sock *sk)
return NULL;
mutex_init(&ctx->tx_lock);
- rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
ctx->sk_proto = READ_ONCE(sk->sk_prot);
ctx->sk = sk;
+ /* Release semantic of rcu_assign_pointer() ensures that
+ * ctx->sk_proto is visible before changing sk->sk_prot in
+ * update_sk_prot(), and prevents reading uninitialized value in
+ * tls_{getsockopt, setsockopt}. Note that we do not need a
+ * read barrier in tls_{getsockopt,setsockopt} as there is an
+ * address dependency between sk->sk_proto->{getsockopt,setsockopt}
+ * and ctx->sk_proto.
+ */
+ rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
return ctx;
}
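
The comment added above relies on the release semantics of rcu_assign_pointer(): everything stored before the assignment is visible to a reader that obtains the pointer via rcu_dereference(), because the reader's loads carry an address dependency on the pointer load. The canonical publish/read pair, sketched with hypothetical names:

struct demo_ctx {
	const struct proto *sk_proto;
};

static struct demo_ctx __rcu *demo_ptr;

static void demo_publish(struct demo_ctx *ctx, const struct proto *p)
{
	ctx->sk_proto = p;			/* initialized first ...   */
	rcu_assign_pointer(demo_ptr, ctx);	/* ... then published      */
}

static const struct proto *demo_read(void)
{
	const struct proto *p = NULL;
	struct demo_ctx *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(demo_ptr);
	if (ctx)
		p = ctx->sk_proto;	/* ordered after the pointer load */
	rcu_read_unlock();

	return p;
}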
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9a6ad5974..68a58bc07 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
-static inline int unix_recvq_full(const struct sock *sk)
-{
- return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
static inline int unix_recvq_full_lockless(const struct sock *sk)
{
- return skb_queue_len_lockless(&sk->sk_receive_queue) >
- READ_ONCE(sk->sk_max_ack_backlog);
+ return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
return 0;
}
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
{
- return sk->sk_state != TCP_LISTEN &&
- (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+ return state != TCP_LISTEN &&
+ (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
}
static void unix_write_space(struct sock *sk)
@@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)
struct socket_wq *wq;
rcu_read_lock();
- if (unix_writable(sk)) {
+ if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk_error_report(other);
}
}
- other->sk_state = TCP_CLOSE;
}
static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
skpair = unix_peer(sk);
unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
unix_state_lock(skpair);
/* No more writes */
WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
- if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
WRITE_ONCE(skpair->sk_err, ECONNRESET);
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
@@ -731,7 +724,7 @@ static int unix_listen(struct socket *sock, int backlog)
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
- if (!u->addr)
+ if (!READ_ONCE(u->addr))
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
- sk->sk_state = TCP_LISTEN;
+ WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
/* set credentials so connect can copy them */
init_peercred(sk);
err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
- sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
+ sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->inflight = 0;
@@ -1131,8 +1125,8 @@ static struct sock *unix_find_other(struct net *net,
static int unix_autobind(struct sock *sk)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
u32 lastnum, ordernum;
@@ -1155,6 +1149,7 @@ static int unix_autobind(struct sock *sk)
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ old_hash = sk->sk_hash;
ordernum = get_random_u32();
lastnum = ordernum & 0xFFFFF;
retry:
@@ -1195,8 +1190,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
{
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct mnt_idmap *idmap;
struct unix_address *addr;
@@ -1234,6 +1229,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
if (u->addr)
goto out_unlock;
+ old_hash = sk->sk_hash;
new_hash = unix_bsd_hash(d_backing_inode(dentry));
unix_table_double_lock(net, old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
@@ -1261,8 +1257,8 @@ out:
static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
int addr_len)
{
- unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ unsigned int new_hash, old_hash;
struct net *net = sock_net(sk);
struct unix_address *addr;
int err;
@@ -1280,6 +1276,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
goto out_mutex;
}
+ old_hash = sk->sk_hash;
new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
unix_table_double_lock(net, old_hash, new_hash);
@@ -1369,7 +1366,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
- !unix_sk(sk)->addr) {
+ !READ_ONCE(unix_sk(sk)->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1399,7 +1396,8 @@ restart:
if (err)
goto out_unlock;
- sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+ WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
@@ -1416,13 +1414,20 @@ restart:
unix_peer(sk) = other;
if (!other)
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
- if (other != old_peer)
+ if (other != old_peer) {
unix_dgram_disconnected(sk, old_peer);
+
+ unix_state_lock(old_peer);
+ if (!unix_peer(old_peer))
+ WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+ unix_state_unlock(old_peer);
+ }
+
sock_put(old_peer);
} else {
unix_peer(sk) = other;
@@ -1470,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct sk_buff *skb = NULL;
long timeo;
int err;
- int st;
err = unix_validate_addr(sunaddr, addr_len);
if (err)
@@ -1481,7 +1485,8 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
goto out;
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
@@ -1534,7 +1539,7 @@ restart:
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
- if (unix_recvq_full(other)) {
+ if (unix_recvq_full_lockless(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
@@ -1559,9 +1564,7 @@ restart:
Well, and we have to recheck the state after socket locked.
*/
- st = sk->sk_state;
-
- switch (st) {
+ switch (READ_ONCE(sk->sk_state)) {
case TCP_CLOSE:
/* This is ok... continue with connect */
break;
@@ -1576,7 +1579,7 @@ restart:
unix_state_lock_nested(sk, U_LOCK_SECOND);
- if (sk->sk_state != st) {
+ if (sk->sk_state != TCP_CLOSE) {
unix_state_unlock(sk);
unix_state_unlock(other);
sock_put(other);
@@ -1628,7 +1631,7 @@ restart:
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
- sk->sk_state = TCP_ESTABLISHED;
+ WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1701,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
goto out;
err = -EINVAL;
- if (sk->sk_state != TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1997,14 +2000,15 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
}
if ((test_bit(SOCK_PASSCRED, &sock->flags) ||
- test_bit(SOCK_PASSPIDFD, &sock->flags)) && !u->addr) {
+ test_bit(SOCK_PASSPIDFD, &sock->flags)) &&
+ !READ_ONCE(u->addr)) {
err = unix_autobind(sk);
if (err)
goto out;
}
err = -EMSGSIZE;
- if (len > sk->sk_sndbuf - 32)
+ if (len > READ_ONCE(sk->sk_sndbuf) - 32)
goto out;
if (len > SKB_MAX_ALLOC) {
@@ -2086,7 +2090,7 @@ restart_locked:
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
- sk->sk_state = TCP_CLOSE;
+ WRITE_ONCE(sk->sk_state, TCP_CLOSE);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
@@ -2217,13 +2221,15 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other
maybe_add_creds(skb, sock, other);
skb_get(skb);
+ scm_stat_add(other, skb);
+
+ spin_lock(&other->sk_receive_queue.lock);
if (ousk->oob_skb)
consume_skb(ousk->oob_skb);
-
WRITE_ONCE(ousk->oob_skb, skb);
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
- scm_stat_add(other, skb);
- skb_queue_tail(&other->sk_receive_queue, skb);
sk_send_sigurg(other);
unix_state_unlock(other);
other->sk_data_ready(other);
@@ -2261,7 +2267,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
}
if (msg->msg_namelen) {
- err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+ err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
err = -ENOTCONN;
@@ -2270,7 +2276,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
goto out_err;
}
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
goto pipe_err;
while (sent < len) {
@@ -2282,7 +2288,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
&err, 0);
} else {
/* Keep two messages in the pipe so it schedules better */
- size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+ size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2375,7 +2381,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
if (err)
return err;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
@@ -2389,7 +2395,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2614,8 +2620,10 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
mutex_lock(&u->iolock);
unix_state_lock(sk);
+ spin_lock(&sk->sk_receive_queue.lock);
if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
mutex_unlock(&u->iolock);
return -EINVAL;
@@ -2627,6 +2635,8 @@ static int unix_stream_recv_urg(struct unix_stream_read_state *state)
WRITE_ONCE(u->oob_skb, NULL);
else
skb_get(oob_skb);
+
+ spin_unlock(&sk->sk_receive_queue.lock);
unix_state_unlock(sk);
chunk = state->recv_actor(oob_skb, 0, chunk, state);
@@ -2655,24 +2665,34 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
consume_skb(skb);
skb = NULL;
} else {
+ struct sk_buff *unlinked_skb = NULL;
+
+ spin_lock(&sk->sk_receive_queue.lock);
+
if (skb == u->oob_skb) {
if (copied) {
skb = NULL;
- } else if (sock_flag(sk, SOCK_URGINLINE)) {
- if (!(flags & MSG_PEEK)) {
+ } else if (!(flags & MSG_PEEK)) {
+ if (sock_flag(sk, SOCK_URGINLINE)) {
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
+ } else {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ WRITE_ONCE(u->oob_skb, NULL);
+ unlinked_skb = skb;
+ skb = skb_peek(&sk->sk_receive_queue);
}
- } else if (flags & MSG_PEEK) {
- skb = NULL;
- } else {
- skb_unlink(skb, &sk->sk_receive_queue);
- WRITE_ONCE(u->oob_skb, NULL);
- if (!WARN_ON_ONCE(skb_unref(skb)))
- kfree_skb(skb);
- skb = skb_peek(&sk->sk_receive_queue);
+ } else if (!sock_flag(sk, SOCK_URGINLINE)) {
+ skb = skb_peek_next(skb, &sk->sk_receive_queue);
}
}
+
+ spin_unlock(&sk->sk_receive_queue.lock);
+
+ if (unlinked_skb) {
+ WARN_ON_ONCE(skb_unref(unlinked_skb));
+ kfree_skb(unlinked_skb);
+ }
}
return skb;
}
@@ -2680,7 +2700,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
return -ENOTCONN;
return unix_read_skb(sk, recv_actor);
@@ -2704,7 +2724,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
size_t size = state->size;
unsigned int last_len;
- if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
err = -EINVAL;
goto out;
}
@@ -3035,7 +3055,7 @@ long unix_inq_len(struct sock *sk)
struct sk_buff *skb;
long amount = 0;
- if (sk->sk_state == TCP_LISTEN)
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
@@ -3147,12 +3167,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err))
@@ -3174,14 +3196,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
/* Connection-based need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- sk->sk_state == TCP_CLOSE)
+ state == TCP_CLOSE)
mask |= EPOLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
- if (unix_writable(sk))
+ if (unix_writable(sk, state))
mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
return mask;
@@ -3192,12 +3214,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
{
struct sock *sk = sock->sk, *other;
unsigned int writable;
+ unsigned char state;
__poll_t mask;
u8 shutdown;
sock_poll_wait(file, sock, wait);
mask = 0;
shutdown = READ_ONCE(sk->sk_shutdown);
+ state = READ_ONCE(sk->sk_state);
/* exceptional events? */
if (READ_ONCE(sk->sk_err) ||
@@ -3217,19 +3241,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM;
/* Connection-based need to check for termination and startup */
- if (sk->sk_type == SOCK_SEQPACKET) {
- if (sk->sk_state == TCP_CLOSE)
- mask |= EPOLLHUP;
- /* connection hasn't started yet? */
- if (sk->sk_state == TCP_SYN_SENT)
- return mask;
- }
+ if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+ mask |= EPOLLHUP;
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
- writable = unix_writable(sk);
+ writable = unix_writable(sk, state);
if (writable) {
unix_state_lock(sk);
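
Most of the af_unix changes above annotate lockless readers and locked writers of sk->sk_state (and sk_sndbuf, sk_shutdown, u->addr) so that concurrent access is explicit and data-race tooling stays quiet. The underlying pattern, reduced to a sketch:

static void demo_set_state(struct sock *sk, unsigned char state)
{
	/* writer: called with unix_state_lock(sk) held */
	WRITE_ONCE(sk->sk_state, state);
}

static bool demo_is_established(const struct sock *sk)
{
	/* reader: may run without the state lock, e.g. from poll() */
	return READ_ONCE(sk->sk_state) == TCP_ESTABLISHED;
}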
diff --git a/net/unix/diag.c b/net/unix/diag.c
index ae39538c5..937edf4af 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
u32 *buf;
int i;
- if (sk->sk_state == TCP_LISTEN) {
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_diag_rqlen rql;
- if (sk->sk_state == TCP_LISTEN) {
- rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+ rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
- rep->udiag_state = sk->sk_state;
+ rep->udiag_state = READ_ONCE(sk->sk_state);
rep->pad = 0;
rep->udiag_ino = sk_ino;
sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
goto out_nlmsg_trim;
- if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+ if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
sk_for_each(sk, &net->unx.table.buckets[slot]) {
if (num < s_num)
goto next;
- if (!(req->udiag_states & (1 << sk->sk_state)))
+ if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
goto next;
if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
NETLINK_CB(cb->skb).portid,
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 3fb1b6373..4b1f45e30 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
if (wk) {
list_del_init(&wk->entry);
if (!list_empty(&rdev->wiphy_work_list))
- schedule_work(work);
+ queue_work(system_unbound_wq, work);
spin_unlock_irq(&rdev->wiphy_work_lock);
wk->func(&rdev->wiphy, wk);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 30ff9a470..65c416e8d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -9162,6 +9162,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
struct wiphy *wiphy;
int err, tmp, n_ssids = 0, n_channels, i;
size_t ie_len, size;
+ size_t ssids_offset, ie_offset;
wiphy = &rdev->wiphy;
@@ -9207,21 +9208,20 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
size = struct_size(request, channels, n_channels);
+ ssids_offset = size;
size = size_add(size, array_size(sizeof(*request->ssids), n_ssids));
+ ie_offset = size;
size = size_add(size, ie_len);
request = kzalloc(size, GFP_KERNEL);
if (!request)
return -ENOMEM;
+ request->n_channels = n_channels;
if (n_ssids)
- request->ssids = (void *)&request->channels[n_channels];
+ request->ssids = (void *)request + ssids_offset;
request->n_ssids = n_ssids;
- if (ie_len) {
- if (n_ssids)
- request->ie = (void *)(request->ssids + n_ssids);
- else
- request->ie = (void *)(request->channels + n_channels);
- }
+ if (ie_len)
+ request->ie = (void *)request + ie_offset;
i = 0;
if (scan_freqs) {
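
The nl80211_trigger_scan() hunk above stops deriving request->ssids and request->ie from the flexible channels[] array and instead records the byte offset of each trailing region before the allocation, then adds the offsets to the base pointer. A minimal user-space sketch of that single-allocation layout follows; the struct, field names and helper are invented for illustration, and the kernel additionally uses struct_size()/size_add()/array_size() so the size computation cannot overflow.

#include <stdlib.h>

struct ssid { unsigned char id[32]; };

struct scan_request {
    struct ssid *ssids;
    unsigned char *ie;
    int n_ssids;
    int n_channels;
    int channels[];     /* flexible array, sized at allocation time */
};

static struct scan_request *alloc_request(int n_channels, int n_ssids,
                                          size_t ie_len)
{
    size_t size, ssids_offset, ie_offset;
    struct scan_request *req;

    size = sizeof(*req) + n_channels * sizeof(req->channels[0]);
    ssids_offset = size;            /* SSID array starts here */
    size += n_ssids * sizeof(struct ssid);
    ie_offset = size;               /* IE blob starts here */
    size += ie_len;

    req = calloc(1, size);
    if (!req)
        return NULL;

    req->n_channels = n_channels;
    req->n_ssids = n_ssids;
    if (n_ssids)
        req->ssids = (void *)((char *)req + ssids_offset);
    if (ie_len)
        req->ie = (void *)((char *)req + ie_offset);
    return req;
}
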
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
index e106dcea3..c569c37da 100644
--- a/net/wireless/pmsr.c
+++ b/net/wireless/pmsr.c
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_period = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
out->ftm.burst_period =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+ nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.num_bursts_exp = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
out->ftm.num_bursts_exp =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
if (capa->ftm.max_bursts_exponent >= 0 &&
out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.burst_duration = 15;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
out->ftm.burst_duration =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
out->ftm.ftms_per_burst = 0;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
out->ftm.ftmr_retries = 3;
if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
out->ftm.ftmr_retries =
- nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+ nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
if (out->ftm.request_lci && !capa->ftm.request_lci) {
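
The pmsr changes above read each FTM request attribute with the width it is declared and validated as (u8 or u16) instead of a blanket nla_get_u32(). Reading a wider integer than the attribute actually carries can pull in padding or neighbouring bytes and misplaces the value on big-endian hosts. A simplified illustration with an invented attribute structure, not the real netlink API:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct attr {
    uint16_t len;               /* declared payload length in bytes */
    unsigned char data[8];
};

static uint8_t get_u8(const struct attr *a)  { return a->data[0]; }

static uint32_t get_u32(const struct attr *a)
{
    uint32_t v;

    memcpy(&v, a->data, sizeof(v)); /* reads 4 bytes regardless of a->len */
    return v;
}

int main(void)
{
    struct attr a = { .len = 1, .data = { 0x07, 0xaa, 0xbb, 0xcc } };

    printf("declared u8 : %u\n", get_u8(&a));   /* 7 */
    printf("read as u32 : %u\n", get_u32(&a));  /* picks up bytes past the payload */
    return 0;
}
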
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 5a5dd3ce4..ecea8c08e 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2128,7 +2128,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
struct ieee80211_he_operation *he_oper;
tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
- if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+ if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 &&
+ tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) {
const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
he_oper = (void *)&tmp->data[1];
@@ -2207,12 +2208,16 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
tmp.pub.use_for = data->use_for;
tmp.pub.cannot_use_reasons = data->cannot_use_reasons;
- if (data->bss_source != BSS_SOURCE_DIRECT) {
+ switch (data->bss_source) {
+ case BSS_SOURCE_MBSSID:
tmp.pub.transmitted_bss = data->source_bss;
+ fallthrough;
+ case BSS_SOURCE_STA_PROFILE:
ts = bss_from_pub(data->source_bss)->ts;
tmp.pub.bssid_index = data->bssid_index;
tmp.pub.max_bssid_indicator = data->max_bssid_indicator;
- } else {
+ break;
+ case BSS_SOURCE_DIRECT:
ts = jiffies;
if (channel->band == NL80211_BAND_60GHZ) {
@@ -2227,6 +2232,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
regulatory_hint_found_beacon(wiphy, channel,
gfp);
}
+ break;
}
/*
@@ -2655,6 +2661,7 @@ struct tbtt_info_iter_data {
u8 param_ch_count;
u32 use_for;
u8 mld_id, link_id;
+ bool non_tx;
};
static enum cfg80211_rnr_iter_ret
@@ -2665,14 +2672,20 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
const struct ieee80211_rnr_mld_params *mld_params;
struct tbtt_info_iter_data *data = _data;
u8 link_id;
+ bool non_tx = false;
if (type == IEEE80211_TBTT_INFO_TYPE_TBTT &&
tbtt_info_len >= offsetofend(struct ieee80211_tbtt_info_ge_11,
- mld_params))
- mld_params = (void *)(tbtt_info +
- offsetof(struct ieee80211_tbtt_info_ge_11,
- mld_params));
- else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
+ mld_params)) {
+ const struct ieee80211_tbtt_info_ge_11 *tbtt_info_ge_11 =
+ (void *)tbtt_info;
+
+ non_tx = (tbtt_info_ge_11->bss_params &
+ (IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID |
+ IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID)) ==
+ IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID;
+ mld_params = &tbtt_info_ge_11->mld_params;
+ } else if (type == IEEE80211_TBTT_INFO_TYPE_MLD &&
tbtt_info_len >= sizeof(struct ieee80211_rnr_mld_params))
mld_params = (void *)tbtt_info;
else
@@ -2691,6 +2704,7 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
data->param_ch_count =
le16_get_bits(mld_params->params,
IEEE80211_RNR_MLD_PARAMS_BSS_CHANGE_COUNT);
+ data->non_tx = non_tx;
if (type == IEEE80211_TBTT_INFO_TYPE_TBTT)
data->use_for = NL80211_BSS_USE_FOR_ALL;
@@ -2702,7 +2716,7 @@ cfg802121_mld_ap_rnr_iter(void *_data, u8 type,
static u8
cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
const struct ieee80211_neighbor_ap_info **ap_info,
- u8 *param_ch_count)
+ u8 *param_ch_count, bool *non_tx)
{
struct tbtt_info_iter_data data = {
.mld_id = mld_id,
@@ -2713,6 +2727,7 @@ cfg80211_rnr_info_for_mld_ap(const u8 *ie, size_t ielen, u8 mld_id, u8 link_id,
*ap_info = data.ap_info;
*param_ch_count = data.param_ch_count;
+ *non_tx = data.non_tx;
return data.use_for;
}
@@ -2892,6 +2907,7 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
ssize_t profile_len;
u8 param_ch_count;
u8 link_id, use_for;
+ bool non_tx;
if (!ieee80211_mle_basic_sta_prof_size_ok((u8 *)mle->sta_prof[i],
mle->sta_prof_len[i]))
@@ -2937,10 +2953,24 @@ cfg80211_parse_ml_elem_sta_data(struct wiphy *wiphy,
tx_data->ielen,
mld_id, link_id,
&ap_info,
- &param_ch_count);
+ &param_ch_count,
+ &non_tx);
if (!use_for)
continue;
+ /*
+ * As of 802.11be_D5.0, the specification does not give us any
+ * way of discovering both the MaxBSSID and the Multiple-BSSID
+ * Index. It does seem like the Multiple-BSSID Index element
+ * may be provided, but section 9.4.2.45 explicitly forbids
+ * including a Multiple-BSSID Element (in this case without any
+ * subelements).
+ * Without both pieces of information we cannot calculate the
+ * reference BSSID, so simply ignore the BSS.
+ */
+ if (non_tx)
+ continue;
+
/* We could sanity check the BSSID is included */
if (!ieee80211_operating_class_to_band(ap_info->op_class,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 565511a3f..62f26618f 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -5,7 +5,7 @@
*
* Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
*/
#include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
if (rdev->wiphy.registered && rdev->ops->resume)
ret = rdev_resume(rdev);
rdev->suspended = false;
- schedule_work(&rdev->wiphy_work);
+ queue_work(system_unbound_wq, &rdev->wiphy_work);
wiphy_unlock(&rdev->wiphy);
if (ret)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 2bde8a354..082c6f9c5 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
{
struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
+ int ret;
wdev = dev->ieee80211_ptr;
if (!wdev)
@@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
memset(sinfo, 0, sizeof(*sinfo));
- return rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_lock(&rdev->wiphy);
+ ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
+ wiphy_unlock(&rdev->wiphy);
+
+ return ret;
}
EXPORT_SYMBOL(cfg80211_get_station);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 727aa20be..7d1c0986f 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
- struct net_device *dev = xdp->rxq->dev;
- u32 qid = xdp->rxq->queue_index;
-
if (!xsk_is_bound(xs))
return -ENXIO;
- if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
return -EINVAL;
if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 53d8fabfa..d15459772 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2598,8 +2598,7 @@ static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
if (dst->ops->family == AF_INET6) {
- struct rt6_info *rt = (struct rt6_info *)dst;
- path->path_cookie = rt6_get_cookie(rt);
+ path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
path->u.rt6.rt6i_nfheader_len = nfheader_len;
}
}
@@ -3905,15 +3904,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
/* Impossible. Such dst must be popped before reaches point of failure. */
}
-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
{
- if (dst) {
- if (dst->obsolete) {
- dst_release(dst);
- dst = NULL;
- }
- }
- return dst;
+ if (dst->obsolete)
+ sk_dst_reset(sk);
}
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c
index 32e930c85..8b8ecd65c 100644
--- a/samples/landlock/sandboxer.c
+++ b/samples/landlock/sandboxer.c
@@ -153,7 +153,7 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
const __u64 allowed_access)
{
int ret = 1;
- char *env_port_name, *strport;
+ char *env_port_name, *env_port_name_next, *strport;
struct landlock_net_port_attr net_port = {
.allowed_access = allowed_access,
.port = 0,
@@ -165,7 +165,8 @@ static int populate_ruleset_net(const char *const env_var, const int ruleset_fd,
env_port_name = strdup(env_port_name);
unsetenv(env_var);
- while ((strport = strsep(&env_port_name, ENV_DELIMITER))) {
+ env_port_name_next = env_port_name;
+ while ((strport = strsep(&env_port_name_next, ENV_DELIMITER))) {
net_port.port = atoi(strport);
if (landlock_add_rule(ruleset_fd, LANDLOCK_RULE_NET_PORT,
&net_port, 0)) {
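
The sandboxer change keeps the pointer returned by strdup() separate from the cursor that strsep() advances, so the original allocation remains addressable after the loop. A small stand-alone illustration of why that matters (hypothetical example, not the sandboxer code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    char *list = strdup("80:443:8080");
    char *cursor = list;        /* strsep() moves this, not 'list' */
    char *tok;

    if (!list)
        return 1;

    while ((tok = strsep(&cursor, ":")))
        printf("port %d\n", atoi(tok));

    free(list);                 /* still the pointer strdup() returned */
    return 0;
}
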
diff --git a/scripts/Makefile.vdsoinst b/scripts/Makefile.vdsoinst
index c477d17b0..a81ca7350 100644
--- a/scripts/Makefile.vdsoinst
+++ b/scripts/Makefile.vdsoinst
@@ -21,7 +21,7 @@ $$(dest): $$(src) FORCE
$$(call cmd,install)
# Some architectures create .build-id symlinks
-ifneq ($(filter arm sparc x86, $(SRCARCH)),)
+ifneq ($(filter arm s390 sparc x86, $(SRCARCH)),)
link := $(install-dir)/.build-id/$$(shell $(READELF) -n $$(src) | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p').debug
__default: $$(link)
diff --git a/scripts/atomic/kerneldoc/sub_and_test b/scripts/atomic/kerneldoc/sub_and_test
index d3760f774..96615e508 100644
--- a/scripts/atomic/kerneldoc/sub_and_test
+++ b/scripts/atomic/kerneldoc/sub_and_test
@@ -1,7 +1,7 @@
cat <<EOF
/**
* ${class}${atomicname}() - atomic subtract and test if zero with ${desc_order} ordering
- * @i: ${int} value to add
+ * @i: ${int} value to subtract
* @v: pointer to ${atomic}_t
*
* Atomically updates @v to (@v - @i) with ${desc_order} ordering.
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 81fe1884e..67a509778 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -14,6 +14,7 @@
struct symbol symbol_yes = {
.name = "y",
+ .type = S_TRISTATE,
.curr = { "y", yes },
.menus = LIST_HEAD_INIT(symbol_yes.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -21,6 +22,7 @@ struct symbol symbol_yes = {
struct symbol symbol_mod = {
.name = "m",
+ .type = S_TRISTATE,
.curr = { "m", mod },
.menus = LIST_HEAD_INIT(symbol_mod.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -28,6 +30,7 @@ struct symbol symbol_mod = {
struct symbol symbol_no = {
.name = "n",
+ .type = S_TRISTATE,
.curr = { "n", no },
.menus = LIST_HEAD_INIT(symbol_no.menus),
.flags = SYMBOL_CONST|SYMBOL_VALID,
@@ -788,8 +791,7 @@ const char *sym_get_string_value(struct symbol *sym)
case no:
return "n";
case mod:
- sym_calc_value(modules_sym);
- return (modules_sym->curr.tri == no) ? "n" : "m";
+ return "m";
case yes:
return "y";
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 2f5b91da5..c27c762e6 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1652,10 +1652,11 @@ static void read_symbols(const char *modname)
namespace = get_next_modinfo(&info, "import_ns",
namespace);
}
+
+ if (extra_warn && !get_modinfo(&info, "description"))
+ warn("missing MODULE_DESCRIPTION() in %s\n", modname);
}
- if (extra_warn && !get_modinfo(&info, "description"))
- warn("missing MODULE_DESCRIPTION() in %s\n", modname);
for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
symname = remove_dot(info.strtab + sym->st_name);
diff --git a/scripts/module.lds.S b/scripts/module.lds.S
index bf5bcf283..89ff01a22 100644
--- a/scripts/module.lds.S
+++ b/scripts/module.lds.S
@@ -13,6 +13,7 @@ SECTIONS {
/DISCARD/ : {
*(.discard)
*(.discard.*)
+ *(.export_symbol)
}
__ksymtab 0 : { *(SORT(___ksymtab+*)) }
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index 45beb1c5f..6b5181c66 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -217,7 +217,7 @@ void aa_audit_rule_free(void *vrule)
}
}
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp)
{
struct aa_audit_rule *rule;
@@ -230,14 +230,14 @@ int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- rule = kzalloc(sizeof(struct aa_audit_rule), GFP_KERNEL);
+ rule = kzalloc(sizeof(struct aa_audit_rule), gfp);
if (!rule)
return -ENOMEM;
/* Currently rules are treated as coming from the root ns */
rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr,
- GFP_KERNEL, true, false);
+ gfp, true, false);
if (IS_ERR(rule->label)) {
int err = PTR_ERR(rule->label);
aa_audit_rule_free(rule);
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index acbb03b9b..0c8cc86b4 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -200,7 +200,7 @@ static inline int complain_error(int error)
}
void aa_audit_rule_free(void *vrule);
-int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule);
+int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp);
int aa_audit_rule_known(struct audit_krule *rule);
int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule);
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 11d7c0332..0a4f274f7 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -540,7 +540,7 @@ static inline void ima_free_modsig(struct modsig *modsig)
#else
static inline int ima_filter_rule_init(u32 field, u32 op, char *rulestr,
- void **lsmrule)
+ void **lsmrule, gfp_t gfp)
{
return -EINVAL;
}
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index b37d043d5..1856981e3 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -245,8 +245,8 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file,
const char *audit_cause = "failed";
struct inode *inode = file_inode(file);
struct inode *real_inode = d_real_inode(file_dentry(file));
- const char *filename = file->f_path.dentry->d_name.name;
struct ima_max_digest_data hash;
+ struct name_snapshot filename;
struct kstat stat;
int result = 0;
int length;
@@ -317,9 +317,13 @@ out:
if (file->f_flags & O_DIRECT)
audit_cause = "failed(directio)";
+ take_dentry_name_snapshot(&filename, file->f_path.dentry);
+
integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
- filename, "collect_data", audit_cause,
- result, 0);
+ filename.name.name, "collect_data",
+ audit_cause, result, 0);
+
+ release_dentry_name_snapshot(&filename);
}
return result;
}
@@ -432,6 +436,7 @@ out:
*/
const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
{
+ struct name_snapshot filename;
char *pathname = NULL;
*pathbuf = __getname();
@@ -445,7 +450,10 @@ const char *ima_d_path(const struct path *path, char **pathbuf, char *namebuf)
}
if (!pathname) {
- strscpy(namebuf, path->dentry->d_name.name, NAME_MAX);
+ take_dentry_name_snapshot(&filename, path->dentry);
+ strscpy(namebuf, filename.name.name, NAME_MAX);
+ release_dentry_name_snapshot(&filename);
+
pathname = namebuf;
}
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index c0556907c..09da8e639 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -401,7 +401,8 @@ static void ima_free_rule(struct ima_rule_entry *entry)
kfree(entry);
}
-static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
+static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry,
+ gfp_t gfp)
{
struct ima_rule_entry *nentry;
int i;
@@ -410,7 +411,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
* Immutable elements are copied over as pointers and data; only
* lsm rules can change
*/
- nentry = kmemdup(entry, sizeof(*nentry), GFP_KERNEL);
+ nentry = kmemdup(entry, sizeof(*nentry), gfp);
if (!nentry)
return NULL;
@@ -425,7 +426,8 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
ima_filter_rule_init(nentry->lsm[i].type, Audit_equal,
nentry->lsm[i].args_p,
- &nentry->lsm[i].rule);
+ &nentry->lsm[i].rule,
+ gfp);
if (!nentry->lsm[i].rule)
pr_warn("rule for LSM \'%s\' is undefined\n",
nentry->lsm[i].args_p);
@@ -438,7 +440,7 @@ static int ima_lsm_update_rule(struct ima_rule_entry *entry)
int i;
struct ima_rule_entry *nentry;
- nentry = ima_lsm_copy_rule(entry);
+ nentry = ima_lsm_copy_rule(entry, GFP_KERNEL);
if (!nentry)
return -ENOMEM;
@@ -664,7 +666,7 @@ retry:
}
if (rc == -ESTALE && !rule_reinitialized) {
- lsm_rule = ima_lsm_copy_rule(rule);
+ lsm_rule = ima_lsm_copy_rule(rule, GFP_ATOMIC);
if (lsm_rule) {
rule_reinitialized = true;
goto retry;
@@ -1140,7 +1142,8 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
entry->lsm[lsm_rule].type = audit_type;
result = ima_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal,
entry->lsm[lsm_rule].args_p,
- &entry->lsm[lsm_rule].rule);
+ &entry->lsm[lsm_rule].rule,
+ GFP_KERNEL);
if (!entry->lsm[lsm_rule].rule) {
pr_warn("rule for LSM \'%s\' is undefined\n",
entry->lsm[lsm_rule].args_p);
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
index 6cd0add52..3b2cb8f10 100644
--- a/security/integrity/ima/ima_template_lib.c
+++ b/security/integrity/ima/ima_template_lib.c
@@ -483,7 +483,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
bool size_limit)
{
const char *cur_filename = NULL;
+ struct name_snapshot filename;
u32 cur_filename_len = 0;
+ bool snapshot = false;
+ int ret;
BUG_ON(event_data->filename == NULL && event_data->file == NULL);
@@ -496,7 +499,10 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
}
if (event_data->file) {
- cur_filename = event_data->file->f_path.dentry->d_name.name;
+ take_dentry_name_snapshot(&filename,
+ event_data->file->f_path.dentry);
+ snapshot = true;
+ cur_filename = filename.name.name;
cur_filename_len = strlen(cur_filename);
} else
/*
@@ -505,8 +511,13 @@ static int ima_eventname_init_common(struct ima_event_data *event_data,
*/
cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
out:
- return ima_write_template_field_data(cur_filename, cur_filename_len,
- DATA_FMT_STRING, field_data);
+ ret = ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+
+ if (snapshot)
+ release_dentry_name_snapshot(&filename);
+
+ return ret;
}
/*
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index c15559432..3e43e68a6 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -950,6 +950,7 @@ static int current_check_refer_path(struct dentry *const old_dentry,
bool allow_parent1, allow_parent2;
access_mask_t access_request_parent1, access_request_parent2;
struct path mnt_dir;
+ struct dentry *old_parent;
layer_mask_t layer_masks_parent1[LANDLOCK_NUM_ACCESS_FS] = {},
layer_masks_parent2[LANDLOCK_NUM_ACCESS_FS] = {};
@@ -997,9 +998,17 @@ static int current_check_refer_path(struct dentry *const old_dentry,
mnt_dir.mnt = new_dir->mnt;
mnt_dir.dentry = new_dir->mnt->mnt_root;
+ /*
+ * old_dentry may be the root of the common mount point and
+ * !IS_ROOT(old_dentry) at the same time (e.g. with open_tree() and
+ * OPEN_TREE_CLONE). We do not need to call dget(old_parent) because
+ * we keep a reference to old_dentry.
+ */
+ old_parent = (old_dentry == mnt_dir.dentry) ? old_dentry :
+ old_dentry->d_parent;
+
/* new_dir->dentry is equal to new_dentry->d_parent */
- allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry,
- old_dentry->d_parent,
+ allow_parent1 = collect_domain_accesses(dom, mnt_dir.dentry, old_parent,
&layer_masks_parent1);
allow_parent2 = collect_domain_accesses(
dom, mnt_dir.dentry, new_dir->dentry, &layer_masks_parent2);
diff --git a/security/security.c b/security/security.c
index 0a9a0ac3f..4fd3c8393 100644
--- a/security/security.c
+++ b/security/security.c
@@ -5331,15 +5331,17 @@ void security_key_post_create_or_update(struct key *keyring, struct key *key,
* @op: rule operator
* @rulestr: rule context
* @lsmrule: receive buffer for audit rule struct
+ * @gfp: GFP flag used for kmalloc
*
* Allocate and initialize an LSM audit rule structure.
*
* Return: Return 0 if @lsmrule has been successfully set, -EINVAL in case of
* an invalid rule.
*/
-int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule)
+int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule,
+ gfp_t gfp)
{
- return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule);
+ return call_int_hook(audit_rule_init, field, op, rulestr, lsmrule, gfp);
}
/**
diff --git a/security/selinux/include/audit.h b/security/selinux/include/audit.h
index 52aca7121..29c7d4c86 100644
--- a/security/selinux/include/audit.h
+++ b/security/selinux/include/audit.h
@@ -21,12 +21,14 @@
* @op: the operator the rule uses
* @rulestr: the text "target" of the rule
* @rule: pointer to the new rule structure returned via this
+ * @gfp: GFP flag used for kmalloc
*
* Returns 0 if successful, -errno if not. On success, the rule structure
* will be allocated internally. The caller must free this structure with
* selinux_audit_rule_free() after use.
*/
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule);
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **rule,
+ gfp_t gfp);
/**
* selinux_audit_rule_free - free an selinux audit rule structure.
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index e88b1b6c4..ded250e52 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -3508,7 +3508,8 @@ void selinux_audit_rule_free(void *vrule)
}
}
-int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct selinux_state *state = &selinux_state;
struct selinux_policy *policy;
@@ -3549,7 +3550,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
return -EINVAL;
}
- tmprule = kzalloc(sizeof(struct selinux_audit_rule), GFP_KERNEL);
+ tmprule = kzalloc(sizeof(struct selinux_audit_rule), gfp);
if (!tmprule)
return -ENOMEM;
context_init(&tmprule->au_ctxt);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 146667937..c6df5c00c 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4692,11 +4692,13 @@ static int smack_post_notification(const struct cred *w_cred,
* @op: required testing operator (=, !=, >, <, ...)
* @rulestr: smack label to be audited
* @vrule: pointer to save our own audit rule representation
+ * @gfp: type of the memory for the allocation
*
* Prepare to audit cases where (@field @op @rulestr) is true.
* The label to be audited is created if necessary.
*/
-static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule,
+ gfp_t gfp)
{
struct smack_known *skp;
char **rule = (char **)vrule;
diff --git a/sound/core/init.c b/sound/core/init.c
index 4ed5037d8..b8912de04 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -313,8 +313,8 @@ static int snd_card_init(struct snd_card *card, struct device *parent,
card->number = idx;
#ifdef MODULE
WARN_ON(!module);
- card->module = module;
#endif
+ card->module = module;
INIT_LIST_HEAD(&card->devices);
init_rwsem(&card->controls_rwsem);
rwlock_init(&card->ctl_files_rwlock);
@@ -516,6 +516,14 @@ void snd_card_disconnect(struct snd_card *card)
}
}
+#ifdef CONFIG_PM
+ /* wake up sleepers here before other callbacks to avoid potential
+ * deadlocks with other locks (e.g. in kctls);
+ * this notifies the shutdown so that sleepers abort immediately
+ */
+ wake_up_all(&card->power_sleep);
+#endif
+
/* notify all connected devices about disconnection */
/* at this point, they cannot respond to any calls except release() */
@@ -531,6 +539,11 @@ void snd_card_disconnect(struct snd_card *card)
synchronize_irq(card->sync_irq);
snd_info_card_disconnect(card);
+#ifdef CONFIG_SND_DEBUG
+ debugfs_remove(card->debugfs_root);
+ card->debugfs_root = NULL;
+#endif
+
if (card->registered) {
device_del(&card->card_dev);
card->registered = false;
@@ -543,7 +556,6 @@ void snd_card_disconnect(struct snd_card *card)
}
#ifdef CONFIG_PM
- wake_up(&card->power_sleep);
snd_power_sync_ref(card);
#endif
}
@@ -583,10 +595,6 @@ static int snd_card_do_free(struct snd_card *card)
dev_warn(card->dev, "unable to free card info\n");
/* Not fatal error */
}
-#ifdef CONFIG_SND_DEBUG
- debugfs_remove(card->debugfs_root);
- card->debugfs_root = NULL;
-#endif
if (card->release_completion)
complete(card->release_completion);
if (!card->managed)
diff --git a/sound/core/jack.c b/sound/core/jack.c
index e08b2c4fb..e4bcecdf8 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
};
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+static void snd_jack_remove_debugfs(struct snd_jack *jack);
+
static int snd_jack_dev_disconnect(struct snd_device *device)
{
-#ifdef CONFIG_SND_JACK_INPUT_DEV
struct snd_jack *jack = device->device_data;
+ snd_jack_remove_debugfs(jack);
+
+#ifdef CONFIG_SND_JACK_INPUT_DEV
guard(mutex)(&jack->input_dev_lock);
if (!jack->input_dev)
return 0;
@@ -381,10 +385,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
- debugfs_remove(jack_kctl->jack_debugfs_root);
- jack_kctl->jack_debugfs_root = NULL;
+ struct snd_jack_kctl *jack_kctl;
+
+ list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
+ debugfs_remove(jack_kctl->jack_debugfs_root);
+ jack_kctl->jack_debugfs_root = NULL;
+ }
}
#else /* CONFIG_SND_JACK_INJECTION_DEBUG */
static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
@@ -393,7 +401,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
return 0;
}
-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
+static void snd_jack_remove_debugfs(struct snd_jack *jack)
{
}
#endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
@@ -404,7 +412,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
jack_kctl = kctl->private_data;
if (jack_kctl) {
- snd_jack_debugfs_clear_inject_node(jack_kctl);
list_del(&jack_kctl->list);
kfree(jack_kctl);
}
@@ -497,8 +504,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
.dev_free = snd_jack_dev_free,
#ifdef CONFIG_SND_JACK_INPUT_DEV
.dev_register = snd_jack_dev_register,
- .dev_disconnect = snd_jack_dev_disconnect,
#endif /* CONFIG_SND_JACK_INPUT_DEV */
+ .dev_disconnect = snd_jack_dev_disconnect,
};
if (initial_kctl) {
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index ee6ac649d..d81f776a4 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -157,7 +157,7 @@ static void ump_system_to_one_param_ev(const union snd_ump_midi1_msg *val,
static void ump_system_to_songpos_ev(const union snd_ump_midi1_msg *val,
struct snd_seq_event *ev)
{
- ev->data.control.value = (val->system.parm1 << 7) | val->system.parm2;
+ ev->data.control.value = (val->system.parm2 << 7) | val->system.parm1;
}
/* Encoders for 0xf0 - 0xff */
@@ -368,6 +368,7 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
const union snd_ump_midi1_msg *midi1 = (const union snd_ump_midi1_msg *)event->ump;
union snd_ump_midi2_msg *midi2 = (union snd_ump_midi2_msg *)ev_cvt.ump;
+ struct snd_seq_ump_midi2_bank *cc;
ev_cvt = *event;
memset(&ev_cvt.ump, 0, sizeof(ev_cvt.ump));
@@ -387,11 +388,29 @@ static int cvt_ump_midi1_to_midi2(struct snd_seq_client *dest,
midi2->paf.data = upscale_7_to_32bit(midi1->paf.data);
break;
case UMP_MSG_STATUS_CC:
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ switch (midi1->cc.index) {
+ case UMP_CC_BANK_SELECT:
+ cc->bank_set = 1;
+ cc->cc_bank_msb = midi1->cc.data;
+ return 0; // skip
+ case UMP_CC_BANK_SELECT_LSB:
+ cc->bank_set = 1;
+ cc->cc_bank_lsb = midi1->cc.data;
+ return 0; // skip
+ }
midi2->cc.index = midi1->cc.index;
midi2->cc.data = upscale_7_to_32bit(midi1->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
midi2->pg.program = midi1->pg.program;
+ cc = &dest_port->midi2_bank[midi1->note.channel];
+ if (cc->bank_set) {
+ midi2->pg.bank_valid = 1;
+ midi2->pg.bank_msb = cc->cc_bank_msb;
+ midi2->pg.bank_lsb = cc->cc_bank_lsb;
+ cc->bank_set = 0;
+ }
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
midi2->caf.data = upscale_7_to_32bit(midi1->caf.data);
@@ -419,6 +438,7 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
struct snd_seq_ump_event ev_cvt;
union snd_ump_midi1_msg *midi1 = (union snd_ump_midi1_msg *)ev_cvt.ump;
const union snd_ump_midi2_msg *midi2 = (const union snd_ump_midi2_msg *)event->ump;
+ int err;
u16 v;
ev_cvt = *event;
@@ -443,6 +463,24 @@ static int cvt_ump_midi2_to_midi1(struct snd_seq_client *dest,
midi1->cc.data = downscale_32_to_7bit(midi2->cc.data);
break;
case UMP_MSG_STATUS_PROGRAM:
+ if (midi2->pg.bank_valid) {
+ midi1->cc.status = UMP_MSG_STATUS_CC;
+ midi1->cc.index = UMP_CC_BANK_SELECT;
+ midi1->cc.data = midi2->pg.bank_msb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->cc.index = UMP_CC_BANK_SELECT_LSB;
+ midi1->cc.data = midi2->pg.bank_lsb;
+ err = __snd_seq_deliver_single_event(dest, dest_port,
+ (struct snd_seq_event *)&ev_cvt,
+ atomic, hop);
+ if (err < 0)
+ return err;
+ midi1->note.status = midi2->note.status;
+ }
midi1->pg.program = midi2->pg.program;
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
@@ -691,6 +729,7 @@ static int system_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
return 1;
}
@@ -701,6 +740,7 @@ static int system_1p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
data->system.parm1 = event->data.control.value & 0x7f;
return 1;
@@ -712,9 +752,10 @@ static int system_2p_ev_to_ump_midi1(const struct snd_seq_event *event,
union snd_ump_midi1_msg *data,
unsigned char status)
{
+ data->system.type = UMP_MSG_TYPE_SYSTEM; // override
data->system.status = status;
- data->system.parm1 = (event->data.control.value >> 7) & 0x7f;
- data->system.parm2 = event->data.control.value & 0x7f;
+ data->system.parm1 = event->data.control.value & 0x7f;
+ data->system.parm2 = (event->data.control.value >> 7) & 0x7f;
return 1;
}
@@ -854,7 +895,6 @@ static int pgm_ev_to_ump_midi2(const struct snd_seq_event *event,
data->pg.bank_msb = cc->cc_bank_msb;
data->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
return 1;
}
@@ -1035,6 +1075,8 @@ static const struct seq_ev_to_ump seq_ev_ump_encoders[] = {
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
{ SNDRV_SEQ_EVENT_SENSING, UMP_SYSTEM_STATUS_ACTIVE_SENSING,
system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
+ { SNDRV_SEQ_EVENT_RESET, UMP_SYSTEM_STATUS_RESET,
+ system_ev_to_ump_midi1, system_ev_to_ump_midi2 },
};
static const struct seq_ev_to_ump *find_ump_encoder(int type)
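
The MIDI 1.0 to MIDI 2.0 conversion above buffers Bank Select MSB (CC#0) and LSB (CC#32) per channel and only emits the bank as part of the next Program Change, since UMP MIDI 2.0 carries bank and program in a single message. A stand-alone sketch of that buffering, with invented types and names rather than the snd_seq structures:

#include <stdio.h>

struct bank_state {
    int bank_set;
    unsigned char msb, lsb;
};

struct midi2_program {
    int bank_valid;
    unsigned char bank_msb, bank_lsb, program;
};

/* Returns 1 when a MIDI 2.0 program message should be emitted, 0 when the
 * control change was swallowed into the pending bank state. */
static int feed_midi1(struct bank_state *st, unsigned char index,
                      unsigned char value, int is_program,
                      struct midi2_program *out)
{
    if (!is_program) {
        if (index == 0) {               /* CC#0: bank MSB */
            st->bank_set = 1;
            st->msb = value;
        } else if (index == 32) {       /* CC#32: bank LSB */
            st->bank_set = 1;
            st->lsb = value;
        }
        return 0;                       /* other CCs not modelled here */
    }

    out->program = value;
    out->bank_valid = st->bank_set;
    out->bank_msb = st->msb;
    out->bank_lsb = st->lsb;
    st->bank_set = 0;
    return 1;
}

int main(void)
{
    struct bank_state st = { 0 };
    struct midi2_program pg = { 0 };

    feed_midi1(&st, 0, 5, 0, &pg);      /* bank MSB = 5 */
    feed_midi1(&st, 32, 2, 0, &pg);     /* bank LSB = 2 */
    if (feed_midi1(&st, 0, 42, 1, &pg)) /* program change 42 */
        printf("program %u, bank %u/%u (valid=%d)\n",
               pg.program, pg.bank_msb, pg.bank_lsb, pg.bank_valid);
    return 0;
}
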
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 4d2ee99c1..d104adc75 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -544,6 +544,14 @@ static int snd_timer_start1(struct snd_timer_instance *timeri,
SNDRV_TIMER_IFLG_START))
return -EBUSY;
+ /* check the actual time for the start tick;
+ * bail out as error if it's way too low (< 100us)
+ */
+ if (start) {
+ if ((u64)snd_timer_hw_resolution(timer) * ticks < 100000)
+ return -EINVAL;
+ }
+
if (start)
timeri->ticks = timeri->cticks = ticks;
else if (!timeri->cticks)
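
The snd_timer_start1() guard above multiplies the hardware resolution (nanoseconds per tick) by the requested tick count and rejects anything below 100,000 ns. A trivial worked example of the same arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
    unsigned long resolution_ns = 10000;    /* 10 us per tick (example value) */
    unsigned long ticks = 5;
    unsigned long long period_ns = (unsigned long long)resolution_ns * ticks;

    if (period_ns < 100000ULL)
        printf("rejected: %llu ns is below the 100 us floor\n", period_ns);
    else
        printf("accepted: %llu ns\n", period_ns);
    return 0;
}
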
diff --git a/sound/core/ump.c b/sound/core/ump.c
index fd6a68a54..117c7ecc4 100644
--- a/sound/core/ump.c
+++ b/sound/core/ump.c
@@ -685,10 +685,17 @@ static void seq_notify_protocol(struct snd_ump_endpoint *ump)
*/
int snd_ump_switch_protocol(struct snd_ump_endpoint *ump, unsigned int protocol)
{
+ unsigned int type;
+
protocol &= ump->info.protocol_caps;
if (protocol == ump->info.protocol)
return 0;
+ type = protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK;
+ if (type != SNDRV_UMP_EP_INFO_PROTO_MIDI1 &&
+ type != SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ return 0;
+
ump->info.protocol = protocol;
ump_dbg(ump, "New protocol = %x (caps = %x)\n",
protocol, ump->info.protocol_caps);
diff --git a/sound/core/ump_convert.c b/sound/core/ump_convert.c
index de04799fd..f67c44c83 100644
--- a/sound/core/ump_convert.c
+++ b/sound/core/ump_convert.c
@@ -404,7 +404,6 @@ static int cvt_legacy_cmd_to_ump(struct ump_cvt_to_ump *cvt,
midi2->pg.bank_msb = cc->cc_bank_msb;
midi2->pg.bank_lsb = cc->cc_bank_lsb;
cc->bank_set = 0;
- cc->cc_bank_msb = cc->cc_bank_lsb = 0;
}
break;
case UMP_MSG_STATUS_CHANNEL_PRESSURE:
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index d1f6cdcf1..e7c2ef6c6 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -16,7 +16,7 @@
static int dsp_driver;
module_param(dsp_driver, int, 0444);
-MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF)");
+MODULE_PARM_DESC(dsp_driver, "Force the DSP driver for Intel DSP (0=auto, 1=legacy, 2=SST, 3=SOF, 4=AVS)");
#define FLAG_SST BIT(0)
#define FLAG_SOF BIT(1)
diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c
index d3fa6e136..ec688c60c 100644
--- a/sound/pci/hda/cs35l41_hda.c
+++ b/sound/pci/hda/cs35l41_hda.c
@@ -1362,7 +1362,7 @@ static void cs35l41_hda_unbind(struct device *dev, struct device *master, void *
if (comps[cs35l41->index].dev == dev) {
memset(&comps[cs35l41->index], 0, sizeof(*comps));
sleep_flags = lock_system_sleep();
- device_link_remove(&comps->codec->core.dev, cs35l41->dev);
+ device_link_remove(&cs35l41->codec->core.dev, cs35l41->dev);
unlock_system_sleep(sleep_flags);
}
}
@@ -1857,6 +1857,8 @@ void cs35l41_hda_remove(struct device *dev)
{
struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+ component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
+
pm_runtime_get_sync(cs35l41->dev);
pm_runtime_dont_use_autosuspend(cs35l41->dev);
pm_runtime_disable(cs35l41->dev);
@@ -1864,8 +1866,6 @@ void cs35l41_hda_remove(struct device *dev)
if (cs35l41->halo_initialized)
cs35l41_remove_dsp(cs35l41);
- component_del(cs35l41->dev, &cs35l41_hda_comp_ops);
-
acpi_dev_put(cs35l41->dacpi);
pm_runtime_put_noidle(cs35l41->dev);
diff --git a/sound/pci/hda/cs35l41_hda_property.c b/sound/pci/hda/cs35l41_hda_property.c
index 8fb688e41..4f5e581cd 100644
--- a/sound/pci/hda/cs35l41_hda_property.c
+++ b/sound/pci/hda/cs35l41_hda_property.c
@@ -110,8 +110,8 @@ static const struct cs35l41_config cs35l41_config_table[] = {
{ "10431F62", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 0, 0, 0 },
{ "10433A60", 2, INTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 1, 2, 0, 1000, 4500, 24 },
{ "17AA386F", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
- { "17AA3877", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
- { "17AA3878", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
+ { "17AA3877", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
+ { "17AA3878", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, -1, -1, 0, 0, 0 },
{ "17AA38A9", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{ "17AA38AB", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 2, -1, 0, 0, 0 },
{ "17AA38B4", 2, EXTERNAL, { CS35L41_LEFT, CS35L41_RIGHT, 0, 0 }, 0, 1, -1, 0, 0, 0 },
diff --git a/sound/pci/hda/cs35l56_hda.c b/sound/pci/hda/cs35l56_hda.c
index 558c1f38f..6b77c38a0 100644
--- a/sound/pci/hda/cs35l56_hda.c
+++ b/sound/pci/hda/cs35l56_hda.c
@@ -732,8 +732,6 @@ static void cs35l56_hda_unbind(struct device *dev, struct device *master, void *
if (cs35l56->base.fw_patched)
cs_dsp_power_down(&cs35l56->cs_dsp);
- cs_dsp_remove(&cs35l56->cs_dsp);
-
if (comps[cs35l56->index].dev == dev)
memset(&comps[cs35l56->index], 0, sizeof(*comps));
@@ -1035,7 +1033,7 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
ARRAY_SIZE(cs35l56_hda_dai_config));
ret = cs35l56_force_sync_asp1_registers_from_cache(&cs35l56->base);
if (ret)
- goto err;
+ goto dsp_err;
/*
* By default only enable one ASP1TXn, where n=amplifier index,
@@ -1061,6 +1059,8 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
pm_err:
pm_runtime_disable(cs35l56->base.dev);
+dsp_err:
+ cs_dsp_remove(&cs35l56->cs_dsp);
err:
gpiod_set_value_cansleep(cs35l56->base.reset_gpio, 0);
@@ -1072,11 +1072,13 @@ void cs35l56_hda_remove(struct device *dev)
{
struct cs35l56_hda *cs35l56 = dev_get_drvdata(dev);
+ component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+
pm_runtime_dont_use_autosuspend(cs35l56->base.dev);
pm_runtime_get_sync(cs35l56->base.dev);
pm_runtime_disable(cs35l56->base.dev);
- component_del(cs35l56->base.dev, &cs35l56_hda_comp_ops);
+ cs_dsp_remove(&cs35l56->cs_dsp);
kfree(cs35l56->system_name);
pm_runtime_put_noidle(cs35l56->base.dev);
diff --git a/sound/pci/hda/hda_component.c b/sound/pci/hda/hda_component.c
index cd299d7d8..d02589014 100644
--- a/sound/pci/hda/hda_component.c
+++ b/sound/pci/hda/hda_component.c
@@ -123,6 +123,21 @@ static int hda_comp_match_dev_name(struct device *dev, void *data)
return !strcmp(d + n, tmp);
}
+int hda_component_manager_bind(struct hda_codec *cdc,
+ struct hda_component *comps, int count)
+{
+ int i;
+
+ /* Init shared data */
+ for (i = 0; i < count; ++i) {
+ memset(&comps[i], 0, sizeof(comps[i]));
+ comps[i].codec = cdc;
+ }
+
+ return component_bind_all(hda_codec_dev(cdc), comps);
+}
+EXPORT_SYMBOL_NS_GPL(hda_component_manager_bind, SND_HDA_SCODEC_COMPONENT);
+
int hda_component_manager_init(struct hda_codec *cdc,
struct hda_component *comps, int count,
const char *bus, const char *hid,
@@ -143,7 +158,6 @@ int hda_component_manager_init(struct hda_codec *cdc,
sm->hid = hid;
sm->match_str = match_str;
sm->index = i;
- comps[i].codec = cdc;
component_match_add(dev, &match, hda_comp_match_dev_name, sm);
}
diff --git a/sound/pci/hda/hda_component.h b/sound/pci/hda/hda_component.h
index c80a66691..c70b3de68 100644
--- a/sound/pci/hda/hda_component.h
+++ b/sound/pci/hda/hda_component.h
@@ -75,11 +75,8 @@ int hda_component_manager_init(struct hda_codec *cdc,
void hda_component_manager_free(struct hda_codec *cdc,
const struct component_master_ops *ops);
-static inline int hda_component_manager_bind(struct hda_codec *cdc,
- struct hda_component *comps)
-{
- return component_bind_all(hda_codec_dev(cdc), comps);
-}
+int hda_component_manager_bind(struct hda_codec *cdc,
+ struct hda_component *comps, int count);
static inline void hda_component_manager_unbind(struct hda_codec *cdc,
struct hda_component *comps)
diff --git a/sound/pci/hda/hda_cs_dsp_ctl.c b/sound/pci/hda/hda_cs_dsp_ctl.c
index 463ca0603..9db45d7c1 100644
--- a/sound/pci/hda/hda_cs_dsp_ctl.c
+++ b/sound/pci/hda/hda_cs_dsp_ctl.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <sound/soc.h>
+#include <linux/cleanup.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>
#include "hda_cs_dsp_ctl.h"
@@ -97,11 +98,23 @@ static unsigned int wmfw_convert_flags(unsigned int in)
return out;
}
-static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char *name)
+static void hda_cs_dsp_free_kcontrol(struct snd_kcontrol *kctl)
{
+ struct hda_cs_dsp_coeff_ctl *ctl = (struct hda_cs_dsp_coeff_ctl *)snd_kcontrol_chip(kctl);
struct cs_dsp_coeff_ctl *cs_ctl = ctl->cs_ctl;
+
+ /* NULL priv to prevent a double-free in hda_cs_dsp_control_remove() */
+ cs_ctl->priv = NULL;
+ kfree(ctl);
+}
+
+static void hda_cs_dsp_add_kcontrol(struct cs_dsp_coeff_ctl *cs_ctl,
+ const struct hda_cs_dsp_ctl_info *info,
+ const char *name)
+{
struct snd_kcontrol_new kcontrol = {0};
struct snd_kcontrol *kctl;
+ struct hda_cs_dsp_coeff_ctl *ctl __free(kfree) = NULL;
int ret = 0;
if (cs_ctl->len > ADSP_MAX_STD_CTRL_SIZE) {
@@ -110,6 +123,13 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
return;
}
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return;
+
+ ctl->cs_ctl = cs_ctl;
+ ctl->card = info->card;
+
kcontrol.name = name;
kcontrol.info = hda_cs_dsp_coeff_info;
kcontrol.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
@@ -117,20 +137,22 @@ static void hda_cs_dsp_add_kcontrol(struct hda_cs_dsp_coeff_ctl *ctl, const char
kcontrol.get = hda_cs_dsp_coeff_get;
kcontrol.put = hda_cs_dsp_coeff_put;
- /* Save ctl inside private_data, ctl is owned by cs_dsp,
- * and will be freed when cs_dsp removes the control */
kctl = snd_ctl_new1(&kcontrol, (void *)ctl);
if (!kctl)
return;
- ret = snd_ctl_add(ctl->card, kctl);
+ kctl->private_free = hda_cs_dsp_free_kcontrol;
+ ctl->kctl = kctl;
+
+ /* snd_ctl_add() calls our private_free on error, which will kfree(ctl) */
+ cs_ctl->priv = no_free_ptr(ctl);
+ ret = snd_ctl_add(info->card, kctl);
if (ret) {
dev_err(cs_ctl->dsp->dev, "Failed to add KControl %s = %d\n", kcontrol.name, ret);
return;
}
dev_dbg(cs_ctl->dsp->dev, "Added KControl: %s\n", kcontrol.name);
- ctl->kctl = kctl;
}
static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
@@ -138,7 +160,6 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
{
struct cs_dsp *cs_dsp = cs_ctl->dsp;
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- struct hda_cs_dsp_coeff_ctl *ctl;
const char *region_name;
int ret;
@@ -163,15 +184,7 @@ static void hda_cs_dsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl,
" %.*s", cs_ctl->subname_len - skip, cs_ctl->subname + skip);
}
- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
- if (!ctl)
- return;
-
- ctl->cs_ctl = cs_ctl;
- ctl->card = info->card;
- cs_ctl->priv = ctl;
-
- hda_cs_dsp_add_kcontrol(ctl, name);
+ hda_cs_dsp_add_kcontrol(cs_ctl, info, name);
}
void hda_cs_dsp_add_controls(struct cs_dsp *dsp, const struct hda_cs_dsp_ctl_info *info)
@@ -203,7 +216,9 @@ void hda_cs_dsp_control_remove(struct cs_dsp_coeff_ctl *cs_ctl)
{
struct hda_cs_dsp_coeff_ctl *ctl = cs_ctl->priv;
- kfree(ctl);
+ /* ctl and kctl may already have been removed by ALSA private_free */
+ if (ctl && ctl->kctl)
+ snd_ctl_remove(ctl->card, ctl->kctl);
}
EXPORT_SYMBOL_NS_GPL(hda_cs_dsp_control_remove, SND_HDA_CS_DSP_CONTROLS);
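
hda_cs_dsp_add_kcontrol() above now allocates the wrapper under a scope-based __free(kfree) cleanup and only transfers ownership with no_free_ptr() once the kcontrol's private_free has taken it over. A user-space approximation of that idiom using the GCC/Clang cleanup attribute; the macro and helper names below are invented stand-ins, not the <linux/cleanup.h> ones:

#include <stdio.h>
#include <stdlib.h>

static void free_ptr(void *p)
{
    free(*(void **)p);          /* cleanup handler receives &variable */
}

#define __free_on_exit __attribute__((cleanup(free_ptr)))

/* Take ownership: return the pointer and clear the variable so the cleanup
 * handler sees NULL and frees nothing. */
static void *steal_ptr(void **p)
{
    void *val = *p;

    *p = NULL;
    return val;
}

static int register_object(int fail)
{
    char *obj __free_on_exit = calloc(1, 64);

    if (!obj)
        return -1;
    if (fail)
        return -1;              /* obj is freed automatically here */

    /* Success: hand the pointer to a longer-lived owner. */
    char *owned = steal_ptr((void **)&obj);

    printf("registered %p\n", (void *)owned);
    free(owned);                /* the new owner frees it later */
    return 0;
}

int main(void)
{
    register_object(1);
    register_object(0);
    return 0;
}
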
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index b29739bd3..3e6de1d86 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6793,7 +6793,7 @@ static int comp_bind(struct device *dev)
struct alc_spec *spec = cdc->spec;
int ret;
- ret = hda_component_manager_bind(cdc, spec->comps);
+ ret = hda_component_manager_bind(cdc, spec->comps, ARRAY_SIZE(spec->comps));
if (ret)
return ret;
@@ -10103,7 +10103,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8a2c, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a2d, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 16", ALC287_FIXUP_CS35L41_I2C_2),
- SND_PCI_QUIRK(0x103c, 0x8a2e, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a30, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a31, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
@@ -10148,6 +10147,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8bb3, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8bb4, "HP Slim OMEN", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10168,6 +10169,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c4d, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x103c, 0x8c4e, "HP Omen", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c4f, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c50, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x103c, 0x8c51, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
@@ -10180,8 +10183,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c7b, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7c, "HP ProBook 445 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7d, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c7e, "HP ProBook 465 G11", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8c89, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8a, "HP EliteBook 630", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
@@ -10292,7 +10302,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
- SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
+ SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
@@ -10484,7 +10494,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940 / Yoga Duet 7", ALC298_FIXUP_LENOVO_C940_DUET7),
SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
- SND_PCI_QUIRK(0x17aa, 0x3820, "Yoga Duet 7 13ITL6", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3820, "IdeaPad 330-17IKB 81DM", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x3824, "Legion Y9000X 2020", ALC285_FIXUP_LEGION_Y9000X_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3834, "Lenovo IdeaPad Slim 9i 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
@@ -10495,6 +10505,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
+ SND_PCI_QUIRK(0x17aa, 0x3865, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
@@ -10506,6 +10518,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3882, "Lenovo Yoga Pro 7 14APH8", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x3884, "Y780 YG DUAL", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x3886, "Y780 VECO DUAL", ALC287_FIXUP_TAS2781_I2C),
+ SND_PCI_QUIRK(0x17aa, 0x3891, "Lenovo Yoga Pro 7 14AHP9", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
SND_PCI_QUIRK(0x17aa, 0x38a7, "Y780P AMD YG dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a8, "Y780P AMD VECO dual", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x17aa, 0x38a9, "Thinkbook 16P", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
@@ -10560,6 +10573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
+ SND_PCI_QUIRK(0x1c6c, 0x122a, "Positivo N14AP7", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1c6c, 0x1251, "Positivo N14KP6-TG", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d05, 0x1132, "TongFang PHxTxX1", ALC256_FIXUP_SET_COEF_DEFAULTS),
SND_PCI_QUIRK(0x1d05, 0x1096, "TongFang GMxMRxx", ALC269_FIXUP_NO_SHUTUP),
@@ -10584,7 +10598,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0xf111, 0x0005, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
#if 0
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 75f7674c6..fdee6592c 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -777,11 +777,11 @@ static void tas2781_hda_remove(struct device *dev)
{
struct tas2781_hda *tas_hda = dev_get_drvdata(dev);
+ component_del(tas_hda->dev, &tas2781_hda_comp_ops);
+
pm_runtime_get_sync(tas_hda->dev);
pm_runtime_disable(tas_hda->dev);
- component_del(tas_hda->dev, &tas2781_hda_comp_ops);
-
pm_runtime_put_noidle(tas_hda->dev);
tasdevice_remove(tas_hda->priv);
diff --git a/sound/soc/amd/acp/acp-legacy-common.c b/sound/soc/amd/acp/acp-legacy-common.c
index b5aff3f23..3be7c6d55 100644
--- a/sound/soc/amd/acp/acp-legacy-common.c
+++ b/sound/soc/amd/acp/acp-legacy-common.c
@@ -358,11 +358,25 @@ int smn_read(struct pci_dev *dev, u32 smn_addr)
}
EXPORT_SYMBOL_NS_GPL(smn_read, SND_SOC_ACP_COMMON);
-int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip)
+static void check_acp3x_config(struct acp_chip_info *chip)
{
- struct acpi_device *pdm_dev;
- const union acpi_object *obj;
- u32 pdm_addr, val;
+ u32 val;
+
+ val = readl(chip->base + ACP3X_PIN_CONFIG);
+ switch (val) {
+ case ACP_CONFIG_4:
+ chip->is_i2s_config = true;
+ chip->is_pdm_config = true;
+ break;
+ default:
+ chip->is_pdm_config = true;
+ break;
+ }
+}
+
+static void check_acp6x_config(struct acp_chip_info *chip)
+{
+ u32 val;
val = readl(chip->base + ACP_PIN_CONFIG);
switch (val) {
@@ -371,42 +385,94 @@ int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip)
case ACP_CONFIG_6:
case ACP_CONFIG_7:
case ACP_CONFIG_8:
- case ACP_CONFIG_10:
case ACP_CONFIG_11:
+ case ACP_CONFIG_14:
+ chip->is_pdm_config = true;
+ break;
+ case ACP_CONFIG_9:
+ chip->is_i2s_config = true;
+ break;
+ case ACP_CONFIG_10:
case ACP_CONFIG_12:
case ACP_CONFIG_13:
+ chip->is_i2s_config = true;
+ chip->is_pdm_config = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void check_acp70_config(struct acp_chip_info *chip)
+{
+ u32 val;
+
+ val = readl(chip->base + ACP_PIN_CONFIG);
+ switch (val) {
+ case ACP_CONFIG_4:
+ case ACP_CONFIG_5:
+ case ACP_CONFIG_6:
+ case ACP_CONFIG_7:
+ case ACP_CONFIG_8:
+ case ACP_CONFIG_11:
case ACP_CONFIG_14:
+ case ACP_CONFIG_17:
+ case ACP_CONFIG_18:
+ chip->is_pdm_config = true;
+ break;
+ case ACP_CONFIG_9:
+ chip->is_i2s_config = true;
+ break;
+ case ACP_CONFIG_10:
+ case ACP_CONFIG_12:
+ case ACP_CONFIG_13:
+ case ACP_CONFIG_19:
+ case ACP_CONFIG_20:
+ chip->is_i2s_config = true;
+ chip->is_pdm_config = true;
break;
default:
- return -EINVAL;
+ break;
}
+}
+
+void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip)
+{
+ struct acpi_device *pdm_dev;
+ const union acpi_object *obj;
+ u32 pdm_addr;
switch (chip->acp_rev) {
case ACP3X_DEV:
pdm_addr = ACP_RENOIR_PDM_ADDR;
+ check_acp3x_config(chip);
break;
case ACP6X_DEV:
pdm_addr = ACP_REMBRANDT_PDM_ADDR;
+ check_acp6x_config(chip);
break;
case ACP63_DEV:
pdm_addr = ACP63_PDM_ADDR;
+ check_acp6x_config(chip);
break;
case ACP70_DEV:
pdm_addr = ACP70_PDM_ADDR;
+ check_acp70_config(chip);
break;
default:
- return -EINVAL;
+ break;
}
- pdm_dev = acpi_find_child_device(ACPI_COMPANION(&pci->dev), pdm_addr, 0);
- if (pdm_dev) {
- if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type",
- ACPI_TYPE_INTEGER, &obj) &&
- obj->integer.value == pdm_addr)
- return 0;
+ if (chip->is_pdm_config) {
+ pdm_dev = acpi_find_child_device(ACPI_COMPANION(&pci->dev), pdm_addr, 0);
+ if (pdm_dev) {
+ if (!acpi_dev_get_property(pdm_dev, "acp-audio-device-type",
+ ACPI_TYPE_INTEGER, &obj) &&
+ obj->integer.value == pdm_addr)
+ chip->is_pdm_dev = true;
+ }
}
- return -ENODEV;
}
-EXPORT_SYMBOL_NS_GPL(check_acp_pdm, SND_SOC_ACP_COMMON);
+EXPORT_SYMBOL_NS_GPL(check_acp_config, SND_SOC_ACP_COMMON);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/sound/soc/amd/acp/acp-pci.c b/sound/soc/amd/acp/acp-pci.c
index 5f35b90ea..ad320b29e 100644
--- a/sound/soc/amd/acp/acp-pci.c
+++ b/sound/soc/amd/acp/acp-pci.c
@@ -100,7 +100,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
ret = -EINVAL;
goto release_regions;
}
-
dmic_dev = platform_device_register_data(dev, "dmic-codec", PLATFORM_DEVID_NONE, NULL, 0);
if (IS_ERR(dmic_dev)) {
dev_err(dev, "failed to create DMIC device\n");
@@ -119,6 +118,10 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
if (ret)
goto unregister_dmic_dev;
+ check_acp_config(pci, chip);
+ if (!chip->is_pdm_dev && !chip->is_i2s_config)
+ goto skip_pdev_creation;
+
res = devm_kcalloc(&pci->dev, num_res, sizeof(struct resource), GFP_KERNEL);
if (!res) {
ret = -ENOMEM;
@@ -136,10 +139,6 @@ static int acp_pci_probe(struct pci_dev *pci, const struct pci_device_id *pci_id
}
}
- ret = check_acp_pdm(pci, chip);
- if (ret < 0)
- goto skip_pdev_creation;
-
chip->flag = flag;
memset(&pdevinfo, 0, sizeof(pdevinfo));
diff --git a/sound/soc/amd/acp/amd.h b/sound/soc/amd/acp/amd.h
index 5017e868f..d75b4eb34 100644
--- a/sound/soc/amd/acp/amd.h
+++ b/sound/soc/amd/acp/amd.h
@@ -138,6 +138,9 @@ struct acp_chip_info {
void __iomem *base; /* ACP memory PCI base */
struct platform_device *chip_pdev;
unsigned int flag; /* Distinguish b/w Legacy or Only PDM */
+ bool is_pdm_dev; /* flag set to true when ACP PDM controller exists */
+ bool is_pdm_config; /* flag set to true when PDM configuration is selected from BIOS */
+ bool is_i2s_config; /* flag set to true when I2S configuration is selected from BIOS */
};
struct acp_stream {
@@ -212,6 +215,11 @@ enum acp_config {
ACP_CONFIG_13,
ACP_CONFIG_14,
ACP_CONFIG_15,
+ ACP_CONFIG_16,
+ ACP_CONFIG_17,
+ ACP_CONFIG_18,
+ ACP_CONFIG_19,
+ ACP_CONFIG_20,
};
extern const struct snd_soc_dai_ops asoc_acp_cpu_dai_ops;
@@ -240,7 +248,7 @@ void restore_acp_pdm_params(struct snd_pcm_substream *substream,
int restore_acp_i2s_params(struct snd_pcm_substream *substream,
struct acp_dev_data *adata, struct acp_stream *stream);
-int check_acp_pdm(struct pci_dev *pci, struct acp_chip_info *chip);
+void check_acp_config(struct pci_dev *pci, struct acp_chip_info *chip);
static inline u64 acp_get_byte_count(struct acp_dev_data *adata, int dai_id, int direction)
{
diff --git a/sound/soc/amd/acp/chip_offset_byte.h b/sound/soc/amd/acp/chip_offset_byte.h
index cfd6c4d07..18da734c0 100644
--- a/sound/soc/amd/acp/chip_offset_byte.h
+++ b/sound/soc/amd/acp/chip_offset_byte.h
@@ -20,6 +20,7 @@
#define ACP_SOFT_RESET 0x1000
#define ACP_CONTROL 0x1004
#define ACP_PIN_CONFIG 0x1440
+#define ACP3X_PIN_CONFIG 0x1400
#define ACP_EXTERNAL_INTR_REG_ADDR(adata, offset, ctrl) \
(adata->acp_base + adata->rsrc->irq_reg_offset + offset + (ctrl * 0x04))
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 94685449f..926743142 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -310,8 +310,9 @@ static int cs42l43_startup(struct snd_pcm_substream *substream, struct snd_soc_d
struct snd_soc_component *component = dai->component;
struct cs42l43_codec *priv = snd_soc_component_get_drvdata(component);
struct cs42l43 *cs42l43 = priv->core;
- int provider = !!regmap_test_bits(cs42l43->regmap, CS42L43_ASP_CLK_CONFIG2,
- CS42L43_ASP_MASTER_MODE_MASK);
+ int provider = !dai->id || !!regmap_test_bits(cs42l43->regmap,
+ CS42L43_ASP_CLK_CONFIG2,
+ CS42L43_ASP_MASTER_MODE_MASK);
if (provider)
priv->constraint.mask = CS42L43_PROVIDER_RATE_MASK;
diff --git a/sound/soc/codecs/rt715-sdca-sdw.c b/sound/soc/codecs/rt715-sdca-sdw.c
index ee4501261..9a55e77af 100644
--- a/sound/soc/codecs/rt715-sdca-sdw.c
+++ b/sound/soc/codecs/rt715-sdca-sdw.c
@@ -234,10 +234,10 @@ static int __maybe_unused rt715_dev_resume(struct device *dev)
if (!slave->unattach_request)
goto regmap_sync;
- time = wait_for_completion_timeout(&slave->enumeration_complete,
+ time = wait_for_completion_timeout(&slave->initialization_complete,
msecs_to_jiffies(RT715_PROBE_TIMEOUT));
if (!time) {
- dev_err(&slave->dev, "%s: Enumeration not complete, timed out\n", __func__);
+ dev_err(&slave->dev, "%s: Initialization not complete, timed out\n", __func__);
sdw_show_ping_status(slave->bus, true);
return -ETIMEDOUT;
diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
index 8c9dc318b..c65a4219e 100644
--- a/sound/soc/codecs/tas2552.c
+++ b/sound/soc/codecs/tas2552.c
@@ -2,7 +2,8 @@
/*
* tas2552.c - ALSA SoC Texas Instruments TAS2552 Mono Audio Amplifier
*
- * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com
+ * Copyright (C) 2014 - 2024 Texas Instruments Incorporated -
+ * https://www.ti.com
*
* Author: Dan Murphy <dmurphy@ti.com>
*/
@@ -119,12 +120,14 @@ static const struct snd_soc_dapm_widget tas2552_dapm_widgets[] =
&tas2552_input_mux_control),
SND_SOC_DAPM_AIF_IN("DAC IN", "DAC Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ASI OUT", "DAC Capture", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUT_DRV("ClassD", TAS2552_CFG_2, 7, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("PLL", TAS2552_CFG_2, 3, 0, NULL, 0),
SND_SOC_DAPM_POST("Post Event", tas2552_post_event),
- SND_SOC_DAPM_OUTPUT("OUT")
+ SND_SOC_DAPM_OUTPUT("OUT"),
+ SND_SOC_DAPM_INPUT("DMIC")
};
static const struct snd_soc_dapm_route tas2552_audio_map[] = {
@@ -134,6 +137,7 @@ static const struct snd_soc_dapm_route tas2552_audio_map[] = {
{"ClassD", NULL, "Input selection"},
{"OUT", NULL, "ClassD"},
{"ClassD", NULL, "PLL"},
+ {"ASI OUT", NULL, "DMIC"}
};
#ifdef CONFIG_PM
@@ -538,6 +542,13 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = TAS2552_FORMATS,
},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = TAS2552_FORMATS,
+ },
.ops = &tas2552_speaker_dai_ops,
},
};
diff --git a/sound/soc/codecs/tas2781-fmwlib.c b/sound/soc/codecs/tas2781-fmwlib.c
index 45760fe19..265a8ca25 100644
--- a/sound/soc/codecs/tas2781-fmwlib.c
+++ b/sound/soc/codecs/tas2781-fmwlib.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
//
-// tasdevice-fmw.c -- TASDEVICE firmware support
+// tas2781-fmwlib.c -- TASDEVICE firmware support
//
-// Copyright 2023 Texas Instruments, Inc.
+// Copyright 2023 - 2024 Texas Instruments, Inc.
//
// Author: Shenghao Ding <shenghao-ding@ti.com>
@@ -1878,7 +1878,7 @@ int tas2781_load_calibration(void *context, char *file_name,
{
struct tasdevice_priv *tas_priv = (struct tasdevice_priv *)context;
struct tasdevice *tasdev = &(tas_priv->tasdevice[i]);
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry = NULL;
struct tasdevice_fw *tas_fmw;
struct firmware fmw;
int offset = 0;
@@ -2151,6 +2151,24 @@ static int tasdevice_load_data(struct tasdevice_priv *tas_priv,
return ret;
}
+static void tasdev_load_calibrated_data(struct tasdevice_priv *priv, int i)
+{
+ struct tasdevice_calibration *cal;
+ struct tasdevice_fw *cal_fmw;
+
+ cal_fmw = priv->tasdevice[i].cali_data_fmw;
+
+ /* No calibrated data for current devices, playback will go ahead. */
+ if (!cal_fmw)
+ return;
+
+ cal = cal_fmw->calibrations;
+ if (cal)
+ return;
+
+ load_calib_data(priv, &cal->dev_data);
+}
+
int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
int cfg_no, int rca_conf_no)
{
@@ -2210,21 +2228,9 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
for (i = 0; i < tas_priv->ndev; i++) {
if (tas_priv->tasdevice[i].is_loaderr == true)
continue;
- else if (tas_priv->tasdevice[i].is_loaderr == false
- && tas_priv->tasdevice[i].is_loading == true) {
- struct tasdevice_fw *cal_fmw =
- tas_priv->tasdevice[i].cali_data_fmw;
-
- if (cal_fmw) {
- struct tasdevice_calibration
- *cal = cal_fmw->calibrations;
-
- if (cal)
- load_calib_data(tas_priv,
- &(cal->dev_data));
- }
+ if (tas_priv->tasdevice[i].is_loaderr == false &&
+ tas_priv->tasdevice[i].is_loading == true)
tas_priv->tasdevice[i].cur_prog = prm_no;
- }
}
}
@@ -2245,11 +2251,15 @@ int tasdevice_select_tuningprm_cfg(void *context, int prm_no,
tasdevice_load_data(tas_priv, &(conf->dev_data));
for (i = 0; i < tas_priv->ndev; i++) {
if (tas_priv->tasdevice[i].is_loaderr == true) {
- status |= 1 << (i + 4);
+ status |= BIT(i + 4);
continue;
- } else if (tas_priv->tasdevice[i].is_loaderr == false
- && tas_priv->tasdevice[i].is_loading == true)
+ }
+
+ if (tas_priv->tasdevice[i].is_loaderr == false &&
+ tas_priv->tasdevice[i].is_loading == true) {
+ tasdev_load_calibrated_data(tas_priv, i);
tas_priv->tasdevice[i].cur_conf = cfg_no;
+ }
}
} else
dev_dbg(tas_priv->dev, "%s: Unneeded loading dsp conf %d\n",
@@ -2308,65 +2318,6 @@ out:
}
EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_load, SND_SOC_TAS2781_FMWLIB);
-int tasdevice_prmg_calibdata_load(void *context, int prm_no)
-{
- struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
- struct tasdevice_fw *tas_fmw = tas_priv->fmw;
- struct tasdevice_prog *program;
- int prog_status = 0;
- int i;
-
- if (!tas_fmw) {
- dev_err(tas_priv->dev, "%s: Firmware is NULL\n", __func__);
- goto out;
- }
-
- if (prm_no >= tas_fmw->nr_programs) {
- dev_err(tas_priv->dev,
- "%s: prm(%d) is not in range of Programs %u\n",
- __func__, prm_no, tas_fmw->nr_programs);
- goto out;
- }
-
- for (i = 0, prog_status = 0; i < tas_priv->ndev; i++) {
- if (prm_no >= 0 && tas_priv->tasdevice[i].cur_prog != prm_no) {
- tas_priv->tasdevice[i].cur_conf = -1;
- tas_priv->tasdevice[i].is_loading = true;
- prog_status++;
- }
- tas_priv->tasdevice[i].is_loaderr = false;
- }
-
- if (prog_status) {
- program = &(tas_fmw->programs[prm_no]);
- tasdevice_load_data(tas_priv, &(program->dev_data));
- for (i = 0; i < tas_priv->ndev; i++) {
- if (tas_priv->tasdevice[i].is_loaderr == true)
- continue;
- else if (tas_priv->tasdevice[i].is_loaderr == false
- && tas_priv->tasdevice[i].is_loading == true) {
- struct tasdevice_fw *cal_fmw =
- tas_priv->tasdevice[i].cali_data_fmw;
-
- if (cal_fmw) {
- struct tasdevice_calibration *cal =
- cal_fmw->calibrations;
-
- if (cal)
- load_calib_data(tas_priv,
- &(cal->dev_data));
- }
- tas_priv->tasdevice[i].cur_prog = prm_no;
- }
- }
- }
-
-out:
- return prog_status;
-}
-EXPORT_SYMBOL_NS_GPL(tasdevice_prmg_calibdata_load,
- SND_SOC_TAS2781_FMWLIB);
-
void tasdevice_tuning_switch(void *context, int state)
{
struct tasdevice_priv *tas_priv = (struct tasdevice_priv *) context;
diff --git a/sound/soc/codecs/tas2781-i2c.c b/sound/soc/codecs/tas2781-i2c.c
index b5abff230..9350972df 100644
--- a/sound/soc/codecs/tas2781-i2c.c
+++ b/sound/soc/codecs/tas2781-i2c.c
@@ -2,7 +2,7 @@
//
// ALSA SoC Texas Instruments TAS2563/TAS2781 Audio Smart Amplifier
//
-// Copyright (C) 2022 - 2023 Texas Instruments Incorporated
+// Copyright (C) 2022 - 2024 Texas Instruments Incorporated
// https://www.ti.com
//
// The TAS2563/TAS2781 driver implements a flexible and configurable
@@ -414,7 +414,7 @@ static void tasdevice_fw_ready(const struct firmware *fmw,
__func__, tas_priv->cal_binaryname[i]);
}
- tasdevice_prmg_calibdata_load(tas_priv, 0);
+ tasdevice_prmg_load(tas_priv, 0);
tas_priv->cur_prog = 0;
out:
if (tas_priv->fw_state == TASDEVICE_DSP_FW_FAIL) {
diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c
index d6f7f046c..f634261e4 100644
--- a/sound/soc/intel/avs/boards/ssm4567.c
+++ b/sound/soc/intel/avs/boards/ssm4567.c
@@ -172,7 +172,6 @@ static int avs_ssm4567_probe(struct platform_device *pdev)
card->dapm_routes = card_base_routes;
card->num_dapm_routes = ARRAY_SIZE(card_base_routes);
card->fully_routed = true;
- card->disable_route_checks = true;
ret = snd_soc_fixup_dai_links_platform_name(card, pname);
if (ret)
diff --git a/sound/soc/intel/avs/cldma.c b/sound/soc/intel/avs/cldma.c
index d7a9390b5..585579840 100644
--- a/sound/soc/intel/avs/cldma.c
+++ b/sound/soc/intel/avs/cldma.c
@@ -35,7 +35,7 @@ struct hda_cldma {
unsigned int buffer_size;
unsigned int num_periods;
- unsigned int stream_tag;
+ unsigned char stream_tag;
void __iomem *sd_addr;
struct snd_dma_buffer dmab_data;
diff --git a/sound/soc/intel/avs/icl.c b/sound/soc/intel/avs/icl.c
index d2554c857..284d38f3b 100644
--- a/sound/soc/intel/avs/icl.c
+++ b/sound/soc/intel/avs/icl.c
@@ -66,7 +66,7 @@ struct avs_icl_memwnd2 {
struct avs_icl_memwnd2_desc slot_desc[AVS_ICL_MEMWND2_SLOTS_COUNT];
u8 rsvd[SZ_4K];
};
- u8 slot_array[AVS_ICL_MEMWND2_SLOTS_COUNT][PAGE_SIZE];
+ u8 slot_array[AVS_ICL_MEMWND2_SLOTS_COUNT][SZ_4K];
} __packed;
#define AVS_ICL_SLOT_UNUSED \
@@ -89,8 +89,7 @@ static int avs_icl_slot_offset(struct avs_dev *adev, union avs_icl_memwnd2_slot_
for (i = 0; i < AVS_ICL_MEMWND2_SLOTS_COUNT; i++)
if (desc[i].slot_id.val == slot_type.val)
- return offsetof(struct avs_icl_memwnd2, slot_array) +
- avs_skl_log_buffer_offset(adev, i);
+ return offsetof(struct avs_icl_memwnd2, slot_array) + i * SZ_4K;
return -ENXIO;
}
diff --git a/sound/soc/intel/avs/path.c b/sound/soc/intel/avs/path.c
index e785fc2a7..a44ed33b5 100644
--- a/sound/soc/intel/avs/path.c
+++ b/sound/soc/intel/avs/path.c
@@ -367,6 +367,7 @@ static int avs_asrc_create(struct avs_dev *adev, struct avs_path_module *mod)
struct avs_tplg_module *t = mod->template;
struct avs_asrc_cfg cfg;
+ memset(&cfg, 0, sizeof(cfg));
cfg.base.cpc = t->cfg_base->cpc;
cfg.base.ibs = t->cfg_base->ibs;
cfg.base.obs = t->cfg_base->obs;
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 2cafbc392..72f1bc3b7 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -356,6 +356,7 @@ static int avs_dai_hda_be_prepare(struct snd_pcm_substream *substream, struct sn
stream_info->sig_bits);
format_val = snd_hdac_stream_format(runtime->channels, bits, runtime->rate);
+ snd_hdac_ext_stream_decouple(bus, link_stream, true);
snd_hdac_ext_stream_reset(link_stream);
snd_hdac_ext_stream_setup(link_stream, format_val);
@@ -611,6 +612,7 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so
struct avs_dev *adev = to_avs_dev(dai->dev);
struct hdac_ext_stream *host_stream;
unsigned int format_val;
+ struct hdac_bus *bus;
unsigned int bits;
int ret;
@@ -620,6 +622,8 @@ static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_so
if (hdac_stream(host_stream)->prepared)
return 0;
+ bus = hdac_stream(host_stream)->bus;
+ snd_hdac_ext_stream_decouple(bus, data->host_stream, true);
snd_hdac_stream_reset(hdac_stream(host_stream));
stream_info = snd_soc_dai_get_pcm_stream(dai, substream->stream);
diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c
index 817e54303..7e781a315 100644
--- a/sound/soc/intel/avs/probes.c
+++ b/sound/soc/intel/avs/probes.c
@@ -19,8 +19,11 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
struct avs_probe_cfg cfg = {{0}};
struct avs_module_entry mentry;
u8 dummy;
+ int ret;
- avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
+ ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
+ if (ret)
+ return ret;
/*
* Probe module uses no cycles, audio data format and input and output
@@ -39,11 +42,12 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
static void avs_dsp_delete_probe(struct avs_dev *adev)
{
struct avs_module_entry mentry;
+ int ret;
- avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
-
- /* There is only ever one probe module instance. */
- avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
+ ret = avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
+ if (!ret)
+ /* There is only ever one probe module instance. */
+ avs_dsp_delete_module(adev, mentry.module_id, 0, INVALID_PIPELINE_ID, 0);
}
static inline struct hdac_ext_stream *avs_compr_get_host_stream(struct snd_compr_stream *cstream)
diff --git a/sound/soc/intel/boards/bxt_da7219_max98357a.c b/sound/soc/intel/boards/bxt_da7219_max98357a.c
index 540f7a293..3fe3f38c6 100644
--- a/sound/soc/intel/boards/bxt_da7219_max98357a.c
+++ b/sound/soc/intel/boards/bxt_da7219_max98357a.c
@@ -768,6 +768,7 @@ static struct snd_soc_card broxton_audio_card = {
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = bxt_card_late_probe,
};
diff --git a/sound/soc/intel/boards/bxt_rt298.c b/sound/soc/intel/boards/bxt_rt298.c
index c0eb65c14..afc499be8 100644
--- a/sound/soc/intel/boards/bxt_rt298.c
+++ b/sound/soc/intel/boards/bxt_rt298.c
@@ -574,6 +574,7 @@ static struct snd_soc_card broxton_rt298 = {
.dapm_routes = broxton_rt298_map,
.num_dapm_routes = ARRAY_SIZE(broxton_rt298_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = bxt_card_late_probe,
};
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index 657e46582..4098b2d32 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -649,6 +649,8 @@ static int geminilake_audio_probe(struct platform_device *pdev)
card = &glk_audio_card_rt5682_m98357a;
card->dev = &pdev->dev;
snd_soc_card_set_drvdata(card, ctx);
+ if (!snd_soc_acpi_sof_parent(&pdev->dev))
+ card->disable_route_checks = true;
/* override platform name, if required */
mach = pdev->dev.platform_data;
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index a5d896530..9dbc15f9d 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -639,6 +639,7 @@ static struct snd_soc_card kabylake_audio_card_da7219_m98357a = {
.dapm_routes = kabylake_map,
.num_dapm_routes = ARRAY_SIZE(kabylake_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 98c11ec0a..e662da5af 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -1036,6 +1036,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
.codec_conf = max98927_codec_conf,
.num_configs = ARRAY_SIZE(max98927_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
@@ -1054,6 +1055,7 @@ static struct snd_soc_card kbl_audio_card_max98927 = {
.codec_conf = max98927_codec_conf,
.num_configs = ARRAY_SIZE(max98927_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
@@ -1071,6 +1073,7 @@ static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
.codec_conf = max98373_codec_conf,
.num_configs = ARRAY_SIZE(max98373_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
@@ -1088,6 +1091,7 @@ static struct snd_soc_card kbl_audio_card_max98373 = {
.codec_conf = max98373_codec_conf,
.num_configs = ARRAY_SIZE(max98373_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
index 30e0aca16..894d127c4 100644
--- a/sound/soc/intel/boards/kbl_rt5660.c
+++ b/sound/soc/intel/boards/kbl_rt5660.c
@@ -518,6 +518,7 @@ static struct snd_soc_card kabylake_audio_card_rt5660 = {
.dapm_routes = kabylake_rt5660_map,
.num_dapm_routes = ARRAY_SIZE(kabylake_rt5660_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 9071b1f1c..646e8ff8e 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -966,6 +966,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663_m98927 = {
.codec_conf = max98927_codec_conf,
.num_configs = ARRAY_SIZE(max98927_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
@@ -982,6 +983,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663 = {
.dapm_routes = kabylake_5663_map,
.num_dapm_routes = ARRAY_SIZE(kabylake_5663_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index 178fe9c37..924d5d1de 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -791,6 +791,7 @@ static struct snd_soc_card kabylake_audio_card = {
.codec_conf = max98927_codec_conf,
.num_configs = ARRAY_SIZE(max98927_codec_conf),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = kabylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index 6e172719c..4aa7fd2a0 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -227,6 +227,8 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
ctx->common_hdmi_codec_drv = mach->mach_params.common_hdmi_codec_drv;
hda_soc_card.dev = &pdev->dev;
+ if (!snd_soc_acpi_sof_parent(&pdev->dev))
+ hda_soc_card.disable_route_checks = true;
if (mach->mach_params.dmic_num > 0) {
snprintf(hda_soc_components, sizeof(hda_soc_components),
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index 0e7025834..e4630c331 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -654,6 +654,7 @@ static struct snd_soc_card skylake_audio_card = {
.dapm_routes = skylake_map,
.num_dapm_routes = ARRAY_SIZE(skylake_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = skylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index c59c60e28..9a8044274 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -523,6 +523,7 @@ static struct snd_soc_card skylake_rt286 = {
.dapm_routes = skylake_rt286_map,
.num_dapm_routes = ARRAY_SIZE(skylake_rt286_map),
.fully_routed = true,
+ .disable_route_checks = true,
.late_probe = skylake_card_late_probe,
};
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index a90b43162..5d0125ef1 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -433,6 +433,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
.callback = sof_sdw_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0C0F")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0C10"),
},
/* No Jack */
@@ -495,6 +504,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_BT_OFFLOAD_SSP(1) |
SOF_SSP_BT_OFFLOAD_PRESENT),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OMEN Transcend Gaming Laptop"),
+ },
+ .driver_data = (void *)(RT711_JD2),
+ },
+
/* LunarLake devices */
{
.callback = sof_sdw_quirk_cb,
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index dd2f80652..ef00792e1 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -182,6 +182,9 @@ static int kirkwood_dma_hw_params(struct snd_soc_component *component,
const struct mbus_dram_target_info *dram = mv_mbus_dram_info();
unsigned long addr = substream->runtime->dma_addr;
+ if (!dram)
+ return 0;
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_PLAYBACK_WIN, addr, dram);
diff --git a/sound/soc/mediatek/common/mtk-soundcard-driver.c b/sound/soc/mediatek/common/mtk-soundcard-driver.c
index a58e1e367..000a086a8 100644
--- a/sound/soc/mediatek/common/mtk-soundcard-driver.c
+++ b/sound/soc/mediatek/common/mtk-soundcard-driver.c
@@ -22,7 +22,11 @@ static int set_card_codec_info(struct snd_soc_card *card,
codec_node = of_get_child_by_name(sub_node, "codec");
if (!codec_node) {
- dev_dbg(dev, "%s no specified codec\n", dai_link->name);
+ dev_dbg(dev, "%s no specified codec: setting dummy.\n", dai_link->name);
+
+ dai_link->codecs = &snd_soc_dummy_dlc;
+ dai_link->num_codecs = 1;
+ dai_link->dynamic = 1;
return 0;
}
diff --git a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
index 9ce06821c..49440db37 100644
--- a/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
+++ b/sound/soc/mediatek/mt8192/mt8192-dai-tdm.c
@@ -566,10 +566,10 @@ static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream,
tdm_con |= 1 << DELAY_DATA_SFT;
tdm_con |= get_tdm_lrck_width(format) << LRCK_TDM_WIDTH_SFT;
} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_A) {
- tdm_con |= 0 << DELAY_DATA_SFT;
+ tdm_con |= 1 << DELAY_DATA_SFT;
tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
} else if (tdm_priv->tdm_out_mode == TDM_OUT_DSP_B) {
- tdm_con |= 1 << DELAY_DATA_SFT;
+ tdm_con |= 0 << DELAY_DATA_SFT;
tdm_con |= 0 << LRCK_TDM_WIDTH_SFT;
}
diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
index 7275437ea..6481da318 100644
--- a/sound/soc/sof/debug.c
+++ b/sound/soc/sof/debug.c
@@ -345,8 +345,27 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
debugfs_create_str("fw_path", 0444, fw_profile,
(char **)&plat_data->fw_filename_prefix);
- debugfs_create_str("fw_lib_path", 0444, fw_profile,
- (char **)&plat_data->fw_lib_prefix);
+ /* library path is not valid for IPC3 */
+ if (plat_data->ipc_type != SOF_IPC_TYPE_3) {
+ /*
+ * fw_lib_prefix can be NULL if the vendor/platform does not
+ * support loadable libraries
+ */
+ if (plat_data->fw_lib_prefix) {
+ debugfs_create_str("fw_lib_path", 0444, fw_profile,
+ (char **)&plat_data->fw_lib_prefix);
+ } else {
+ static char *fw_lib_path;
+
+ fw_lib_path = devm_kasprintf(sdev->dev, GFP_KERNEL,
+ "Not supported");
+ if (!fw_lib_path)
+ return -ENOMEM;
+
+ debugfs_create_str("fw_lib_path", 0444, fw_profile,
+ (char **)&fw_lib_path);
+ }
+ }
debugfs_create_str("tplg_path", 0444, fw_profile,
(char **)&plat_data->tplg_filename_prefix);
debugfs_create_str("fw_name", 0444, fw_profile,
diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
index c1682bcdb..6a39ca632 100644
--- a/sound/soc/sof/intel/hda-dai.c
+++ b/sound/soc/sof/intel/hda-dai.c
@@ -439,10 +439,17 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
int link_id)
{
struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
+ struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
const struct hda_dai_widget_dma_ops *ops;
+ struct snd_soc_dai_link_ch_map *ch_maps;
struct hdac_ext_stream *hext_stream;
+ struct snd_soc_dai *dai;
struct snd_sof_dev *sdev;
+ bool cpu_dai_found = false;
+ int cpu_dai_id;
+ int ch_mask;
int ret;
+ int j;
ret = non_hda_dai_hw_params(substream, params, cpu_dai);
if (ret < 0) {
@@ -457,9 +464,29 @@ int sdw_hda_dai_hw_params(struct snd_pcm_substream *substream,
if (!hext_stream)
return -ENODEV;
- /* in the case of SoundWire we need to program the PCMSyCM registers */
+ /*
+ * in the case of SoundWire we need to program the PCMSyCM registers. In case
+ * of aggregated devices, we need to define the channel mask for each sublink
+ * by reconstructing the split done in soc-pcm.c
+ */
+ for_each_rtd_cpu_dais(rtd, cpu_dai_id, dai) {
+ if (dai == cpu_dai) {
+ cpu_dai_found = true;
+ break;
+ }
+ }
+
+ if (!cpu_dai_found)
+ return -ENODEV;
+
+ ch_mask = 0;
+ for_each_link_ch_maps(rtd->dai_link, j, ch_maps) {
+ if (ch_maps->cpu == cpu_dai_id)
+ ch_mask |= ch_maps->ch_mask;
+ }
+
ret = hdac_bus_eml_sdw_map_stream_ch(sof_to_bus(sdev), link_id, cpu_dai->id,
- GENMASK(params_channels(params) - 1, 0),
+ ch_mask,
hdac_stream(hext_stream)->stream_tag,
substream->stream);
if (ret < 0) {
diff --git a/sound/soc/sof/intel/lnl.c b/sound/soc/sof/intel/lnl.c
index aeb4350cc..6055a33bb 100644
--- a/sound/soc/sof/intel/lnl.c
+++ b/sound/soc/sof/intel/lnl.c
@@ -16,6 +16,7 @@
#include "hda-ipc.h"
#include "../sof-audio.h"
#include "mtl.h"
+#include "lnl.h"
#include <sound/hda-mlink.h>
/* LunarLake ops */
@@ -208,7 +209,7 @@ const struct sof_intel_dsp_desc lnl_chip_info = {
.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
- .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_status_reg = LNL_DSP_REG_HFDSC,
.rom_init_timeout = 300,
.ssp_count = MTL_SSP_COUNT,
.d0i3_offset = MTL_HDA_VS_D0I3C,
diff --git a/sound/soc/sof/intel/lnl.h b/sound/soc/sof/intel/lnl.h
new file mode 100644
index 000000000..4f4734fe7
--- /dev/null
+++ b/sound/soc/sof/intel/lnl.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2024 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __SOF_INTEL_LNL_H
+#define __SOF_INTEL_LNL_H
+
+#define LNL_DSP_REG_HFDSC 0x160200 /* DSP core0 status */
+#define LNL_DSP_REG_HFDEC 0x160204 /* DSP core0 error */
+
+#endif /* __SOF_INTEL_LNL_H */
diff --git a/sound/soc/sof/intel/mtl.c b/sound/soc/sof/intel/mtl.c
index 060c34988..050237630 100644
--- a/sound/soc/sof/intel/mtl.c
+++ b/sound/soc/sof/intel/mtl.c
@@ -439,7 +439,7 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
const struct sof_intel_dsp_desc *chip = hda->desc;
- unsigned int status;
+ unsigned int status, target_status;
u32 ipc_hdr, flags;
char *dump_msg;
int ret;
@@ -485,13 +485,40 @@ int mtl_dsp_cl_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
mtl_enable_ipc_interrupts(sdev);
+ if (chip->rom_status_reg == MTL_DSP_ROM_STS) {
+ /*
+ * Workaround: when the ROM status register is pointing to
+ * the SRAM window (MTL_DSP_ROM_STS) the platform cannot catch
+ * ROM_INIT_DONE because of a very short timing window.
+ * Follow the recommendations and skip target state waiting.
+ */
+ return 0;
+ }
+
/*
- * ACE workaround: don't wait for ROM INIT.
- * The platform cannot catch ROM_INIT_DONE because of a very short
- * timing window. Follow the recommendations and skip this part.
+ * step 7:
+ * - Cold/Full boot: wait for ROM init to proceed to download the firmware
+ * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
*/
+ if (imr_boot)
+ target_status = FSR_STATE_FW_ENTERED;
+ else
+ target_status = FSR_STATE_INIT_DONE;
- return 0;
+ ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
+ chip->rom_status_reg, status,
+ (FSR_TO_STATE_CODE(status) == target_status),
+ HDA_DSP_REG_POLL_INTERVAL_US,
+ chip->rom_init_timeout *
+ USEC_PER_MSEC);
+
+ if (!ret)
+ return 0;
+
+ if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
+ dev_err(sdev->dev,
+ "%s: timeout with rom_status_reg (%#x) read\n",
+ __func__, chip->rom_status_reg);
err:
flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;
@@ -503,6 +530,7 @@ err:
dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
+ mtl_enable_interrupts(sdev, false);
mtl_dsp_core_power_down(sdev, SOF_DSP_PRIMARY_CORE);
kfree(dump_msg);
@@ -727,7 +755,7 @@ const struct sof_intel_dsp_desc mtl_chip_info = {
.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
- .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
.rom_init_timeout = 300,
.ssp_count = MTL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
@@ -755,7 +783,7 @@ const struct sof_intel_dsp_desc arl_s_chip_info = {
.ipc_ack = MTL_DSP_REG_HFIPCXIDA,
.ipc_ack_mask = MTL_DSP_REG_HFIPCXIDA_DONE,
.ipc_ctl = MTL_DSP_REG_HFIPCXCTL,
- .rom_status_reg = MTL_DSP_ROM_STS,
+ .rom_status_reg = MTL_DSP_REG_HFFLGPXQWY,
.rom_init_timeout = 300,
.ssp_count = MTL_SSP_COUNT,
.ssp_base_offset = CNL_SSP_BASE_OFFSET,
diff --git a/sound/soc/sof/intel/mtl.h b/sound/soc/sof/intel/mtl.h
index ea8c1b83f..3c56427a9 100644
--- a/sound/soc/sof/intel/mtl.h
+++ b/sound/soc/sof/intel/mtl.h
@@ -70,8 +70,8 @@
#define MTL_DSP_ROM_STS MTL_SRAM_WINDOW_OFFSET(0) /* ROM status */
#define MTL_DSP_ROM_ERROR (MTL_SRAM_WINDOW_OFFSET(0) + 0x4) /* ROM error code */
-#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* ROM debug status */
-#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* ROM debug error code */
+#define MTL_DSP_REG_HFFLGPXQWY 0x163200 /* DSP core0 status */
+#define MTL_DSP_REG_HFFLGPXQWY_ERROR 0x163204 /* DSP core0 error */
#define MTL_DSP_REG_HfIMRIS1 0x162088
#define MTL_DSP_REG_HfIMRIS1_IU_MASK BIT(0)
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index 5cca05842..35941027b 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -217,6 +217,14 @@ sof_ipc4_get_input_pin_audio_fmt(struct snd_sof_widget *swidget, int pin_index)
}
process = swidget->private;
+
+ /*
+ * For process modules without base config extension, base module config
+ * format is used for all input pins
+ */
+ if (process->init_config != SOF_IPC4_MODULE_INIT_CONFIG_TYPE_BASE_CFG_WITH_EXT)
+ return &process->base_config.audio_fmt;
+
base_cfg_ext = process->base_config_ext;
/*
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
index ccc13e991..cd8420e8c 100644
--- a/tools/arch/arm64/include/asm/sysreg.h
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -701,18 +701,18 @@
* Permission Indirection Extension (PIE) permission encodings.
* Encodings with the _O suffix, have overlays applied (Permission Overlay Extension).
*/
-#define PIE_NONE_O 0x0
-#define PIE_R_O 0x1
-#define PIE_X_O 0x2
-#define PIE_RX_O 0x3
-#define PIE_RW_O 0x5
-#define PIE_RWnX_O 0x6
-#define PIE_RWX_O 0x7
-#define PIE_R 0x8
-#define PIE_GCS 0x9
-#define PIE_RX 0xa
-#define PIE_RW 0xc
-#define PIE_RWX 0xe
+#define PIE_NONE_O UL(0x0)
+#define PIE_R_O UL(0x1)
+#define PIE_X_O UL(0x2)
+#define PIE_RX_O UL(0x3)
+#define PIE_RW_O UL(0x5)
+#define PIE_RWnX_O UL(0x6)
+#define PIE_RWX_O UL(0x7)
+#define PIE_R UL(0x8)
+#define PIE_GCS UL(0x9)
+#define PIE_RX UL(0xa)
+#define PIE_RW UL(0xc)
+#define PIE_RWX UL(0xe)
#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4))
diff --git a/tools/arch/x86/intel_sdsi/intel_sdsi.c b/tools/arch/x86/intel_sdsi/intel_sdsi.c
index 2cd92761f..ba2a6b664 100644
--- a/tools/arch/x86/intel_sdsi/intel_sdsi.c
+++ b/tools/arch/x86/intel_sdsi/intel_sdsi.c
@@ -43,7 +43,6 @@
#define METER_CERT_MAX_SIZE 4096
#define STATE_MAX_NUM_LICENSES 16
#define STATE_MAX_NUM_IN_BUNDLE (uint32_t)8
-#define METER_MAX_NUM_BUNDLES 8
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
@@ -154,11 +153,12 @@ struct bundle_encoding {
};
struct meter_certificate {
- uint32_t block_signature;
- uint32_t counter_unit;
+ uint32_t signature;
+ uint32_t version;
uint64_t ppin;
+ uint32_t counter_unit;
uint32_t bundle_length;
- uint32_t reserved;
+ uint64_t reserved;
uint32_t mmrc_encoding;
uint32_t mmrc_counter;
};
@@ -167,6 +167,11 @@ struct bundle_encoding_counter {
uint32_t encoding;
uint32_t counter;
};
+#define METER_BUNDLE_SIZE sizeof(struct bundle_encoding_counter)
+#define BUNDLE_COUNT(length) ((length) / METER_BUNDLE_SIZE)
+#define METER_MAX_NUM_BUNDLES \
+ ((METER_CERT_MAX_SIZE - sizeof(struct meter_certificate)) / \
+ sizeof(struct bundle_encoding_counter))
struct sdsi_dev {
struct sdsi_regs regs;
@@ -334,6 +339,7 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
uint32_t count = 0;
FILE *cert_ptr;
int ret, size;
+ char name[4];
ret = sdsi_update_registers(s);
if (ret)
@@ -375,32 +381,40 @@ static int sdsi_meter_cert_show(struct sdsi_dev *s)
printf("\n");
printf("Meter certificate for device %s\n", s->dev_name);
printf("\n");
- printf("Block Signature: 0x%x\n", mc->block_signature);
- printf("Count Unit: %dms\n", mc->counter_unit);
- printf("PPIN: 0x%lx\n", mc->ppin);
- printf("Feature Bundle Length: %d\n", mc->bundle_length);
- printf("MMRC encoding: %d\n", mc->mmrc_encoding);
- printf("MMRC counter: %d\n", mc->mmrc_counter);
- if (mc->bundle_length % 8) {
+
+ get_feature(mc->signature, name);
+ printf("Signature: %.4s\n", name);
+
+ printf("Version: %d\n", mc->version);
+ printf("Count Unit: %dms\n", mc->counter_unit);
+ printf("PPIN: 0x%lx\n", mc->ppin);
+ printf("Feature Bundle Length: %d\n", mc->bundle_length);
+
+ get_feature(mc->mmrc_encoding, name);
+ printf("MMRC encoding: %.4s\n", name);
+
+ printf("MMRC counter: %d\n", mc->mmrc_counter);
+ if (mc->bundle_length % METER_BUNDLE_SIZE) {
fprintf(stderr, "Invalid bundle length\n");
return -1;
}
- if (mc->bundle_length > METER_MAX_NUM_BUNDLES * 8) {
- fprintf(stderr, "More than %d bundles: %d\n",
- METER_MAX_NUM_BUNDLES, mc->bundle_length / 8);
+ if (mc->bundle_length > METER_MAX_NUM_BUNDLES * METER_BUNDLE_SIZE) {
+ fprintf(stderr, "More than %ld bundles: actual %ld\n",
+ METER_MAX_NUM_BUNDLES, BUNDLE_COUNT(mc->bundle_length));
return -1;
}
- bec = (void *)(mc) + sizeof(mc);
+ bec = (struct bundle_encoding_counter *)(mc + 1);
- printf("Number of Feature Counters: %d\n", mc->bundle_length / 8);
- while (count++ < mc->bundle_length / 8) {
+ printf("Number of Feature Counters: %ld\n", BUNDLE_COUNT(mc->bundle_length));
+ while (count < BUNDLE_COUNT(mc->bundle_length)) {
char feature[5];
feature[4] = '\0';
get_feature(bec[count].encoding, feature);
printf(" %s: %d\n", feature, bec[count].counter);
+ ++count;
}
return 0;
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index 12af57220..da9347552 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -148,7 +148,7 @@ AVXcode:
65: SEG=GS (Prefix)
66: Operand-Size (Prefix)
67: Address-Size (Prefix)
-68: PUSH Iz (d64)
+68: PUSH Iz
69: IMUL Gv,Ev,Iz
6a: PUSH Ib (d64)
6b: IMUL Gv,Ev,Ib
@@ -698,10 +698,10 @@ AVXcode: 2
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-50: vpdpbusd Vx,Hx,Wx (66),(ev)
-51: vpdpbusds Vx,Hx,Wx (66),(ev)
-52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66),(ev) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
-53: vpdpwssds Vx,Hx,Wx (66),(ev) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
+50: vpdpbusd Vx,Hx,Wx (66)
+51: vpdpbusds Vx,Hx,Wx (66)
+52: vdpbf16ps Vx,Hx,Wx (F3),(ev) | vpdpwssd Vx,Hx,Wx (66) | vp4dpwssd Vdqq,Hdqq,Wdq (F2),(ev)
+53: vpdpwssds Vx,Hx,Wx (66) | vp4dpwssds Vdqq,Hdqq,Wdq (F2),(ev)
54: vpopcntb/w Vx,Wx (66),(ev)
55: vpopcntd/q Vx,Wx (66),(ev)
58: vpbroadcastd Vx,Wx (66),(v)
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index cc6e6aae2..958e92acc 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -244,29 +244,101 @@ int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
return fd;
}
-int mount_bpffs_for_pin(const char *name, bool is_dir)
+int create_and_mount_bpffs_dir(const char *dir_name)
{
char err_str[ERR_MAX_LEN];
- char *file;
- char *dir;
+ bool dir_exists;
int err = 0;
- if (is_dir && is_bpffs(name))
+ if (is_bpffs(dir_name))
return err;
- file = malloc(strlen(name) + 1);
- if (!file) {
+ dir_exists = access(dir_name, F_OK) == 0;
+
+ if (!dir_exists) {
+ char *temp_name;
+ char *parent_name;
+
+ temp_name = strdup(dir_name);
+ if (!temp_name) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+ parent_name = dirname(temp_name);
+
+ if (is_bpffs(parent_name)) {
+ /* nothing to do if already mounted */
+ free(temp_name);
+ return err;
+ }
+
+ if (access(parent_name, F_OK) == -1) {
+ p_err("can't create dir '%s' to pin BPF object: parent dir '%s' doesn't exist",
+ dir_name, parent_name);
+ free(temp_name);
+ return -1;
+ }
+
+ free(temp_name);
+ }
+
+ if (block_mount) {
+ p_err("no BPF file system found, not mounting it due to --nomount option");
+ return -1;
+ }
+
+ if (!dir_exists) {
+ err = mkdir(dir_name, S_IRWXU);
+ if (err) {
+ p_err("failed to create dir '%s': %s", dir_name, strerror(errno));
+ return err;
+ }
+ }
+
+ err = mnt_fs(dir_name, "bpf", err_str, ERR_MAX_LEN);
+ if (err) {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount BPF file system on given dir '%s': %s",
+ dir_name, err_str);
+
+ if (!dir_exists)
+ rmdir(dir_name);
+ }
+
+ return err;
+}
+
+int mount_bpffs_for_file(const char *file_name)
+{
+ char err_str[ERR_MAX_LEN];
+ char *temp_name;
+ char *dir;
+ int err = 0;
+
+ if (access(file_name, F_OK) != -1) {
+ p_err("can't pin BPF object: path '%s' already exists", file_name);
+ return -1;
+ }
+
+ temp_name = strdup(file_name);
+ if (!temp_name) {
p_err("mem alloc failed");
return -1;
}
- strcpy(file, name);
- dir = dirname(file);
+ dir = dirname(temp_name);
if (is_bpffs(dir))
/* nothing to do if already mounted */
goto out_free;
+ if (access(dir, F_OK) == -1) {
+ p_err("can't pin BPF object: dir '%s' doesn't exist", dir);
+ err = -1;
+ goto out_free;
+ }
+
if (block_mount) {
p_err("no BPF file system found, not mounting it due to --nomount option");
err = -1;
@@ -276,12 +348,12 @@ int mount_bpffs_for_pin(const char *name, bool is_dir)
err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
- p_err("can't mount BPF file system to pin the object (%s): %s",
- name, err_str);
+ p_err("can't mount BPF file system to pin the object '%s': %s",
+ file_name, err_str);
}
out_free:
- free(file);
+ free(temp_name);
return err;
}
@@ -289,7 +361,7 @@ int do_pin_fd(int fd, const char *name)
{
int err;
- err = mount_bpffs_for_pin(name, false);
+ err = mount_bpffs_for_file(name);
if (err)
return err;
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
index 6b0e5202c..5c39c2ed3 100644
--- a/tools/bpf/bpftool/iter.c
+++ b/tools/bpf/bpftool/iter.c
@@ -76,7 +76,7 @@ static int do_pin(int argc, char **argv)
goto close_obj;
}
- err = mount_bpffs_for_pin(path, false);
+ err = mount_bpffs_for_file(path);
if (err)
goto close_link;
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index b8bb08d10..9eb764fe4 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -142,7 +142,8 @@ const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(const char *path, bool quiet);
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
-int mount_bpffs_for_pin(const char *name, bool is_dir);
+int mount_bpffs_for_file(const char *file_name);
+int create_and_mount_bpffs_dir(const char *dir_name);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 9cb42a336..4c4cf16a4 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1778,7 +1778,10 @@ offload_dev:
goto err_close_obj;
}
- err = mount_bpffs_for_pin(pinfile, !first_prog_only);
+ if (first_prog_only)
+ err = mount_bpffs_for_file(pinfile);
+ else
+ err = create_and_mount_bpffs_dir(pinfile);
if (err)
goto err_close_obj;
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
index 26004f0c5..7bdbcac3c 100644
--- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -102,8 +102,8 @@ int iter(struct bpf_iter__task_file *ctx)
BPF_LINK_TYPE_PERF_EVENT___local)) {
struct bpf_link *link = (struct bpf_link *) file->private_data;
- if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
- BPF_LINK_TYPE_PERF_EVENT___local)) {
+ if (BPF_CORE_READ(link, type) == bpf_core_enum_value(enum bpf_link_type___local,
+ BPF_LINK_TYPE_PERF_EVENT___local)) {
e.has_bpf_cookie = true;
e.bpf_cookie = get_bpf_cookie(link);
}
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index d573f2640..aa43dead2 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -515,7 +515,7 @@ static int do_register(int argc, char **argv)
if (argc == 1)
linkdir = GET_ARG();
- if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
+ if (linkdir && create_and_mount_bpffs_dir(linkdir)) {
p_err("can't mount bpffs for pinning");
return -1;
}
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index d9520cb82..af393c7de 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
static int symbols_patch(struct object *obj)
{
- int err;
+ off_t err;
if (__symbols_patch(obj, &obj->structs) ||
__symbols_patch(obj, &obj->unions) ||
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
index bacfd35c5..5be9d3c74 100644
--- a/tools/include/nolibc/stdlib.h
+++ b/tools/include/nolibc/stdlib.h
@@ -185,7 +185,7 @@ void *realloc(void *old_ptr, size_t new_size)
if (__builtin_expect(!ret, 0))
return NULL;
- memcpy(ret, heap->user_p, heap->len);
+ memcpy(ret, heap->user_p, user_p_len);
munmap(heap, heap->len);
return ret;
}
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 3c42b9f1b..bcd84985f 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -7150,7 +7150,7 @@ struct bpf_fib_lookup {
/* output: MTU value */
__u16 mtu_result;
- };
+ } __attribute__((packed, aligned(2)));
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 97ec005c3..145b8bd1d 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -105,7 +105,7 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
*/
int probe_memcg_account(int token_fd)
{
- const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
index 4e783cc7f..a336786a2 100644
--- a/tools/lib/bpf/features.c
+++ b/tools/lib/bpf/features.c
@@ -22,7 +22,7 @@ int probe_fd(int fd)
static int probe_kern_prog_name(int token_fd)
{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index a2061fcd6..f515cf264 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -7321,9 +7321,9 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL, *tmp;
- int btf_fd, ret, err;
bool own_log_buf = true;
__u32 log_level = prog->log_level;
+ int ret, err;
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -7347,9 +7347,8 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_ifindex = prog->prog_ifindex;
/* specify func_info/line_info only if kernel supports them */
- btf_fd = btf__fd(obj->btf);
- if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
- load_attr.prog_btf_fd = btf_fd;
+ if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
+ load_attr.prog_btf_fd = btf__fd(obj->btf);
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
load_attr.func_info_cnt = prog->func_info_cnt;
@@ -11476,7 +11475,7 @@ static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, stru
n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
if (n < 1) {
- pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
+ pr_warn("kprobe multi pattern is invalid: %s\n", spec);
return -EINVAL;
}
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 9fa75943f..d943d78b7 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -633,11 +633,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
+ char *buf = NULL;
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
- char *buf = NULL;
-
astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
for (int i = 0; subcommands[i]; i++) {
@@ -679,7 +678,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
usage_with_options(usagestr, options);
}
-
+ if (buf) {
+ usagestr[0] = NULL;
+ free(buf);
+ }
return parse_options_end(&ctx);
}
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 3b1259519..6bf2468f5 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -71,6 +71,7 @@ counted. The following modifiers exist:
D - pin the event to the PMU
W - group is weak and will fallback to non-group if not schedulable,
e - group or event are exclusive and do not share the PMU
+ b - use BPF aggregation (see perf stat --bpf-counters)
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 04d89d2ed..d769aa447 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -458,18 +458,19 @@ SHELL = $(SHELL_PATH)
arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools
ifneq ($(OUTPUT),)
- arm64_gen_sysreg_outdir := $(OUTPUT)
+ arm64_gen_sysreg_outdir := $(abspath $(OUTPUT))
else
arm64_gen_sysreg_outdir := $(CURDIR)
endif
arm64-sysreg-defs: FORCE
- $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir)
+ $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \
+ prefix= subdir=
arm64-sysreg-defs-clean:
$(call QUIET_CLEAN,arm64-sysreg-defs)
$(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) O=$(arm64_gen_sysreg_outdir) \
- clean > /dev/null
+ prefix= subdir= clean > /dev/null
beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
index 49331743c..a759eb232 100644
--- a/tools/perf/bench/inject-buildid.c
+++ b/tools/perf/bench/inject-buildid.c
@@ -362,7 +362,7 @@ static int inject_build_id(struct bench_data *data, u64 *max_rss)
return -1;
for (i = 0; i < nr_mmaps; i++) {
- int idx = rand() % (nr_dsos - 1);
+ int idx = rand() % nr_dsos;
struct bench_dso *dso = &dsos[idx];
u64 timestamp = rand() % 1000000;
diff --git a/tools/perf/bench/uprobe.c b/tools/perf/bench/uprobe.c
index 5c71fdc41..b722ff88f 100644
--- a/tools/perf/bench/uprobe.c
+++ b/tools/perf/bench/uprobe.c
@@ -47,7 +47,7 @@ static const char * const bench_uprobe_usage[] = {
#define bench_uprobe__attach_uprobe(prog) \
skel->links.prog = bpf_program__attach_uprobe_opts(/*prog=*/skel->progs.prog, \
/*pid=*/-1, \
- /*binary_path=*/"/lib64/libc.so.6", \
+ /*binary_path=*/"libc.so.6", \
/*func_offset=*/0, \
/*opts=*/&uprobe_opts); \
if (!skel->links.prog) { \
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 6c1cc7976..9cd97fd76 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -809,8 +809,6 @@ int cmd_annotate(int argc, const char **argv)
"Enable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
- OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
- "Show event group information together"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
index 83954af36..de76bbc50 100644
--- a/tools/perf/builtin-daemon.c
+++ b/tools/perf/builtin-daemon.c
@@ -523,7 +523,7 @@ static int daemon_session__control(struct daemon_session *session,
session->base, SESSION_CONTROL);
control = open(control_path, O_WRONLY|O_NONBLOCK);
- if (!control)
+ if (control < 0)
return -1;
if (do_ack) {
@@ -532,7 +532,7 @@ static int daemon_session__control(struct daemon_session *session,
session->base, SESSION_ACK);
ack = open(ack_path, O_RDONLY, O_NONBLOCK);
- if (!ack) {
+ if (ack < 0) {
close(control);
return -1;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ff7e1d6cf..45c755fb5 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1355,8 +1355,6 @@ static int record__open(struct record *rec)
struct record_opts *opts = &rec->opts;
int rc = 0;
- evlist__config(evlist, opts, &callchain_param);
-
evlist__for_each_entry(evlist, pos) {
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
@@ -1958,8 +1956,7 @@ static void record__read_lost_samples(struct record *rec)
if (count.lost) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
@@ -1975,8 +1972,7 @@ static void record__read_lost_samples(struct record *rec)
lost_count = perf_bpf_filter__lost_count(evsel);
if (lost_count) {
if (!lost) {
- lost = zalloc(sizeof(*lost) +
- session->machines.host.id_hdr_size);
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
if (!lost) {
pr_debug("Memory allocation failed\n");
return;
@@ -2483,6 +2479,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
evlist__uniquify_name(rec->evlist);
+ evlist__config(rec->evlist, opts, &callchain_param);
+
/* Debug message used by test scripts */
pr_debug3("perf record opening and mmapping events\n");
if (record__open(rec) != 0) {
@@ -2881,10 +2879,10 @@ out_delete_session:
}
#endif
zstd_fini(&session->zstd_data);
- perf_session__delete(session);
-
if (!opts->no_bpf_event)
evlist__stop_sb_thread(rec->sb_evlist);
+
+ perf_session__delete(session);
return status;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index dcd93ee5f..5b684d2ab 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -428,7 +428,7 @@ static int report__setup_sample_type(struct report *rep)
* compatibility, set the bit if it's an old perf data file.
*/
evlist__for_each_entry(session->evlist, evsel) {
- if (strstr(evsel->name, "arm_spe") &&
+ if (strstr(evsel__name(evsel), "arm_spe") &&
!(sample_type & PERF_SAMPLE_DATA_SRC)) {
evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
sample_type |= PERF_SAMPLE_DATA_SRC;
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index b248c4335..1bfb22347 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2963,8 +2963,11 @@ static int timehist_check_attr(struct perf_sched *sched,
return -1;
}
- if (sched->show_callchain && !evsel__has_callchain(evsel)) {
- pr_info("Samples do not have callchains.\n");
+ /* only need to save callchain related to sched_switch event */
+ if (sched->show_callchain &&
+ evsel__name_is(evsel, "sched:sched_switch") &&
+ !evsel__has_callchain(evsel)) {
+ pr_info("Samples of sched_switch event do not have callchains.\n");
sched->show_callchain = 0;
symbol_conf.use_callchain = 0;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 37088cc0f..2e7148d66 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -3806,7 +3806,7 @@ static int parse_insn_trace(const struct option *opt __maybe_unused,
if (ret < 0)
return ret;
- itrace_parse_synth_opts(opt, "i0ns", 0);
+ itrace_parse_synth_opts(opt, "i0nse", 0);
symbol_conf.nanosecs = true;
return 0;
}
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
index ec2ff78e2..3ab1d3a66 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
@@ -2,71 +2,71 @@
{
"BriefDescription": "Transaction count",
"MetricName": "transaction",
- "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL if has_event(TX_C_TEND) else 0"
},
{
"BriefDescription": "Cycles per Instruction",
"MetricName": "cpi",
- "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS if has_event(INSTRUCTIONS) else 0"
},
{
"BriefDescription": "Problem State Instruction Ratio",
"MetricName": "prbstate",
- "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
},
{
"BriefDescription": "Level One Miss per 100 Instructions",
"MetricName": "l1mp",
- "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100 if has_event(INSTRUCTIONS) else 0"
},
{
"BriefDescription": "Percentage sourced from Level 2 cache",
"MetricName": "l2p",
- "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ) else 0"
},
{
"BriefDescription": "Percentage sourced from Level 3 on same chip cache",
"MetricName": "l3p",
- "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_CHIP_HIT) else 0"
},
{
"BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
"MetricName": "l4lp",
- "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_REQ_DRAWER_HIT) else 0"
},
{
"BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
"MetricName": "l4rp",
- "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_OFF_DRAWER) else 0"
},
{
"BriefDescription": "Percentage sourced from memory",
"MetricName": "memp",
- "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100 if has_event(DCW_ON_CHIP_MEMORY) else 0"
},
{
"BriefDescription": "Cycles per Instructions from Finite cache/memory",
"MetricName": "finite_cpi",
- "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
+ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS if has_event(L1C_TLB2_MISSES) else 0"
},
{
"BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
"MetricName": "est_cpi",
- "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS) if has_event(INSTRUCTIONS) else 0"
},
{
"BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
"MetricName": "scpl1m",
- "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
+ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES) if has_event(L1C_TLB2_MISSES) else 0"
},
{
"BriefDescription": "Estimated TLB CPU percentage of Total CPU",
"MetricName": "tlb_percent",
- "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100 if has_event(CPU_CYCLES) else 0"
},
{
"BriefDescription": "Estimated Cycles per TLB Miss",
"MetricName": "tlb_miss",
- "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) if has_event(DTLB2_MISSES) else 0"
}
]
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index a918e1af7..b22648d12 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -5,4 +5,4 @@ Family-model,Version,Filename,EventType
^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
-^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
+^IBM.393[12].*$,3,cf_z16,core
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index d13ee7683..e05b370b1 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -274,11 +274,8 @@ static int finish_test(struct child_test *child_test, int width)
struct test_suite *t = child_test->test;
int i = child_test->test_num;
int subi = child_test->subtest;
- int out = child_test->process.out;
int err = child_test->process.err;
- bool out_done = out <= 0;
bool err_done = err <= 0;
- struct strbuf out_output = STRBUF_INIT;
struct strbuf err_output = STRBUF_INIT;
int ret;
@@ -290,11 +287,9 @@ static int finish_test(struct child_test *child_test, int width)
pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));
/*
- * Busy loop reading from the child's stdout and stderr that are set to
- * be non-blocking until EOF.
+ * Busy loop reading from the child's stdout/stderr that are set to be
+ * non-blocking until EOF.
*/
- if (!out_done)
- fcntl(out, F_SETFL, O_NONBLOCK);
if (!err_done)
fcntl(err, F_SETFL, O_NONBLOCK);
if (verbose > 1) {
@@ -303,11 +298,8 @@ static int finish_test(struct child_test *child_test, int width)
else
pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
}
- while (!out_done || !err_done) {
- struct pollfd pfds[2] = {
- { .fd = out,
- .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
- },
+ while (!err_done) {
+ struct pollfd pfds[1] = {
{ .fd = err,
.events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
},
@@ -317,21 +309,7 @@ static int finish_test(struct child_test *child_test, int width)
/* Poll to avoid excessive spinning, timeout set for 1000ms. */
poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/1000);
- if (!out_done && pfds[0].revents) {
- errno = 0;
- len = read(out, buf, sizeof(buf) - 1);
-
- if (len <= 0) {
- out_done = errno != EAGAIN;
- } else {
- buf[len] = '\0';
- if (verbose > 1)
- fprintf(stdout, "%s", buf);
- else
- strbuf_addstr(&out_output, buf);
- }
- }
- if (!err_done && pfds[1].revents) {
+ if (!err_done && pfds[0].revents) {
errno = 0;
len = read(err, buf, sizeof(buf) - 1);
@@ -354,14 +332,10 @@ static int finish_test(struct child_test *child_test, int width)
pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
else
pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
- fprintf(stdout, "%s", out_output.buf);
fprintf(stderr, "%s", err_output.buf);
}
- strbuf_release(&out_output);
strbuf_release(&err_output);
print_test_result(t, i, subi, ret, width);
- if (out > 0)
- close(out);
if (err > 0)
close(err);
return 0;
@@ -394,6 +368,7 @@ static int start_test(struct test_suite *test, int i, int subi, struct child_tes
(*child)->process.no_stdout = 1;
(*child)->process.no_stderr = 1;
} else {
+ (*child)->process.stdout_to_stderr = 1;
(*child)->process.out = -1;
(*child)->process.err = -1;
}
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 7a3a7bbbe..29d2f3ee4 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -637,11 +637,11 @@ static int do_test_code_reading(bool try_kcore)
evlist__config(evlist, &opts, NULL);
- evsel = evlist__first(evlist);
-
- evsel->core.attr.comm = 1;
- evsel->core.attr.disabled = 1;
- evsel->core.attr.enable_on_exec = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+ }
ret = evlist__open(evlist);
if (ret < 0) {
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 65dd85207..3302ea0b9 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -188,7 +188,7 @@ arm_cs_etm_snapshot_test() {
arm_cs_etm_basic_test() {
echo "Recording trace with '$*'"
- perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
+ perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1
perf_script_branch_samples ls &&
perf_report_branch_samples ls &&
diff --git a/tools/perf/tests/workloads/datasym.c b/tools/perf/tests/workloads/datasym.c
index ddd40bc63..8e08fc75a 100644
--- a/tools/perf/tests/workloads/datasym.c
+++ b/tools/perf/tests/workloads/datasym.c
@@ -16,6 +16,22 @@ static int datasym(int argc __maybe_unused, const char **argv __maybe_unused)
{
for (;;) {
buf1.data1++;
+ if (buf1.data1 == 123) {
+ /*
+ * Add some 'noise' in the loop to work around errata
+ * 1694299 on Arm N1.
+ *
+ * Bias exists in SPE sampling which can cause the load
+ * and store instructions to be skipped entirely. This
+ * comes and goes randomly depending on the offset the
+ * linker places the datasym loop at in the Perf binary.
+ * With an extra branch in the middle of the loop that
+ * isn't always taken, the instruction stream is no
+ * longer a continuous repeating pattern that interacts
+ * badly with the bias.
+ */
+ buf1.data1++;
+ }
buf1.data2 += buf1.data1;
}
return 0;
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index 603d11283..19503e838 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -203,7 +203,7 @@ void ui_browser__refresh_dimensions(struct ui_browser *browser)
void ui_browser__handle_resize(struct ui_browser *browser)
{
ui__refresh_dimensions(false);
- ui_browser__show(browser, browser->title, ui_helpline__current);
+ ui_browser__show(browser, browser->title ?: "", ui_helpline__current);
ui_browser__refresh(browser);
}
@@ -287,7 +287,8 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
mutex_lock(&ui__lock);
__ui_browser__show_title(browser, title);
- browser->title = title;
+ free(browser->title);
+ browser->title = strdup(title);
zfree(&browser->helpline);
va_start(ap, helpline);
@@ -304,6 +305,7 @@ void ui_browser__hide(struct ui_browser *browser)
mutex_lock(&ui__lock);
ui_helpline__pop();
zfree(&browser->helpline);
+ zfree(&browser->title);
mutex_unlock(&ui__lock);
}
diff --git a/tools/perf/ui/browser.h b/tools/perf/ui/browser.h
index 510ce4554..6e98d5f8f 100644
--- a/tools/perf/ui/browser.h
+++ b/tools/perf/ui/browser.h
@@ -21,7 +21,7 @@ struct ui_browser {
u8 extra_title_lines;
int current_color;
void *priv;
- const char *title;
+ char *title;
char *helpline;
const char *no_samples_msg;
void (*refresh_dimensions)(struct ui_browser *browser);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 50ca92255..79d082155 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -887,10 +887,17 @@ static struct annotated_source *annotated_source__new(void)
static __maybe_unused void annotated_source__delete(struct annotated_source *src)
{
+ struct hashmap_entry *cur;
+ size_t bkt;
+
if (src == NULL)
return;
- hashmap__free(src->samples);
+ if (src->samples) {
+ hashmap__for_each_entry(src->samples, cur, bkt)
+ zfree(&cur->pvalue);
+ hashmap__free(src->samples);
+ }
zfree(&src->histograms);
free(src);
}
@@ -3025,7 +3032,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
annotation__update_column_widths(notes);
}
-static void annotation__calc_lines(struct annotation *notes, struct map *map,
+static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
struct rb_root *root)
{
struct annotation_line *al;
@@ -3033,6 +3040,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
list_for_each_entry(al, &notes->src->source, node) {
double percent_max = 0.0;
+ u64 addr;
int i;
for (i = 0; i < al->data_nr; i++) {
@@ -3048,8 +3056,9 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
if (percent_max <= 0.5)
continue;
- al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
- false, true, notes->start + al->offset);
+ addr = map__rip_2objdump(ms->map, ms->sym->start);
+ al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
+ false, true, ms->sym->start + al->offset);
insert_source_line(&tmp_root, al);
}
@@ -3060,7 +3069,7 @@ static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
{
struct annotation *notes = symbol__annotation(ms->sym);
- annotation__calc_lines(notes, ms->map, root);
+ annotation__calc_lines(notes, ms, root);
}
int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 3684e6009..ef314a579 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1466,6 +1466,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
char *endptr;
bool period_type_set = false;
bool period_set = false;
+ bool iy = false;
synth_opts->set = true;
@@ -1484,6 +1485,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
switch (*p++) {
case 'i':
case 'y':
+ iy = true;
if (p[-1] == 'y')
synth_opts->cycles = true;
else
@@ -1649,7 +1651,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
}
}
out:
- if (synth_opts->instructions || synth_opts->cycles) {
+ if (iy) {
if (!period_type_set)
synth_opts->period_type =
PERF_ITRACE_DEFAULT_PERIOD_TYPE;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 279112606..f93e57e2f 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -1136,6 +1136,68 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
}
+#if defined(HAVE_DWARF_GETLOCATIONS_SUPPORT) || defined(HAVE_DWARF_CFI_SUPPORT)
+static int reg_from_dwarf_op(Dwarf_Op *op)
+{
+ switch (op->atom) {
+ case DW_OP_reg0 ... DW_OP_reg31:
+ return op->atom - DW_OP_reg0;
+ case DW_OP_breg0 ... DW_OP_breg31:
+ return op->atom - DW_OP_breg0;
+ case DW_OP_regx:
+ case DW_OP_bregx:
+ return op->number;
+ default:
+ break;
+ }
+ return -1;
+}
+
+static int offset_from_dwarf_op(Dwarf_Op *op)
+{
+ switch (op->atom) {
+ case DW_OP_reg0 ... DW_OP_reg31:
+ case DW_OP_regx:
+ return 0;
+ case DW_OP_breg0 ... DW_OP_breg31:
+ return op->number;
+ case DW_OP_bregx:
+ return op->number2;
+ default:
+ break;
+ }
+ return -1;
+}
+
+static bool check_allowed_ops(Dwarf_Op *ops, size_t nops)
+{
+ /* The first op is checked separately */
+ ops++;
+ nops--;
+
+ /*
+ * It needs to make sure the location expression matches the given
+ * register and offset exactly.  Thus it rejects any complex
+ * expressions and only allows a few selected operators that don't
+ * change the location.
+ */
+ while (nops) {
+ switch (ops->atom) {
+ case DW_OP_stack_value:
+ case DW_OP_deref_size:
+ case DW_OP_deref:
+ case DW_OP_piece:
+ break;
+ default:
+ return false;
+ }
+ ops++;
+ nops--;
+ }
+ return true;
+}
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT || HAVE_DWARF_CFI_SUPPORT */
+
#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
/**
* die_get_var_innermost_scope - Get innermost scope range of given variable DIE
@@ -1280,7 +1342,7 @@ struct find_var_data {
#define DWARF_OP_DIRECT_REGS 32
static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
- u64 addr_offset, u64 addr_type)
+ u64 addr_offset, u64 addr_type, bool is_pointer)
{
Dwarf_Die type_die;
Dwarf_Word size;
@@ -1294,6 +1356,12 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
if (die_get_real_type(die_mem, &type_die) == NULL)
return false;
+ if (is_pointer && dwarf_tag(&type_die) == DW_TAG_pointer_type) {
+ /* Get the target type of the pointer */
+ if (die_get_real_type(&type_die, &type_die) == NULL)
+ return false;
+ }
+
if (dwarf_aggregate_size(&type_die, &size) < 0)
return false;
@@ -1305,34 +1373,6 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
return true;
}
-static bool check_allowed_ops(Dwarf_Op *ops, size_t nops)
-{
- /* The first op is checked separately */
- ops++;
- nops--;
-
- /*
- * It needs to make sure if the location expression matches to the given
- * register and offset exactly. Thus it rejects any complex expressions
- * and only allows a few of selected operators that doesn't change the
- * location.
- */
- while (nops) {
- switch (ops->atom) {
- case DW_OP_stack_value:
- case DW_OP_deref_size:
- case DW_OP_deref:
- case DW_OP_piece:
- break;
- default:
- return false;
- }
- ops++;
- nops--;
- }
- return true;
-}
-
/* Only checks direct child DIEs in the given scope. */
static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
{
@@ -1361,31 +1401,38 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
if (data->is_fbreg && ops->atom == DW_OP_fbreg &&
data->offset >= (int)ops->number &&
check_allowed_ops(ops, nops) &&
- match_var_offset(die_mem, data, data->offset, ops->number))
+ match_var_offset(die_mem, data, data->offset, ops->number,
+ /*is_pointer=*/false))
return DIE_FIND_CB_END;
/* Only match with a simple case */
if (data->reg < DWARF_OP_DIRECT_REGS) {
/* pointer variables saved in a register 0 to 31 */
if (ops->atom == (DW_OP_reg0 + data->reg) &&
- check_allowed_ops(ops, nops))
+ check_allowed_ops(ops, nops) &&
+ match_var_offset(die_mem, data, data->offset, 0,
+ /*is_pointer=*/true))
return DIE_FIND_CB_END;
/* Local variables accessed by a register + offset */
if (ops->atom == (DW_OP_breg0 + data->reg) &&
check_allowed_ops(ops, nops) &&
- match_var_offset(die_mem, data, data->offset, ops->number))
+ match_var_offset(die_mem, data, data->offset, ops->number,
+ /*is_pointer=*/false))
return DIE_FIND_CB_END;
} else {
/* pointer variables saved in a register 32 or above */
if (ops->atom == DW_OP_regx && ops->number == data->reg &&
- check_allowed_ops(ops, nops))
+ check_allowed_ops(ops, nops) &&
+ match_var_offset(die_mem, data, data->offset, 0,
+ /*is_pointer=*/true))
return DIE_FIND_CB_END;
/* Local variables accessed by a register + offset */
if (ops->atom == DW_OP_bregx && data->reg == ops->number &&
check_allowed_ops(ops, nops) &&
- match_var_offset(die_mem, data, data->offset, ops->number2))
+ match_var_offset(die_mem, data, data->offset, ops->number2,
+					  /*is_pointer=*/false))
return DIE_FIND_CB_END;
}
}
@@ -1447,7 +1494,8 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
continue;
if (check_allowed_ops(ops, nops) &&
- match_var_offset(die_mem, data, data->addr, ops->number))
+ match_var_offset(die_mem, data, data->addr, ops->number,
+ /*is_pointer=*/false))
return DIE_FIND_CB_END;
}
return DIE_FIND_CB_SIBLING;
@@ -1479,41 +1527,69 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
*offset = data.offset;
return result;
}
-#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
-#ifdef HAVE_DWARF_CFI_SUPPORT
-static int reg_from_dwarf_op(Dwarf_Op *op)
+static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
{
- switch (op->atom) {
- case DW_OP_reg0 ... DW_OP_reg31:
- return op->atom - DW_OP_reg0;
- case DW_OP_breg0 ... DW_OP_breg31:
- return op->atom - DW_OP_breg0;
- case DW_OP_regx:
- case DW_OP_bregx:
- return op->number;
- default:
- break;
- }
- return -1;
+ struct die_var_type **var_types = arg;
+ Dwarf_Die type_die;
+ int tag = dwarf_tag(die_mem);
+ Dwarf_Attribute attr;
+ Dwarf_Addr base, start, end;
+ Dwarf_Op *ops;
+ size_t nops;
+ struct die_var_type *vt;
+
+ if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
+ return DIE_FIND_CB_SIBLING;
+
+ if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ /*
+ * Only collect the first location as it can reconstruct the
+ * remaining state by following the instructions.
+ * start = 0 means it covers the whole range.
+ */
+ if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
+ return DIE_FIND_CB_SIBLING;
+
+ if (die_get_real_type(die_mem, &type_die) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ vt = malloc(sizeof(*vt));
+ if (vt == NULL)
+ return DIE_FIND_CB_END;
+
+ vt->die_off = dwarf_dieoffset(&type_die);
+ vt->addr = start;
+ vt->reg = reg_from_dwarf_op(ops);
+ vt->offset = offset_from_dwarf_op(ops);
+ vt->next = *var_types;
+ *var_types = vt;
+
+ return DIE_FIND_CB_SIBLING;
}
-static int offset_from_dwarf_op(Dwarf_Op *op)
+/**
+ * die_collect_vars - Save all variables and parameters
+ * @sc_die: a scope DIE
+ * @var_types: a pointer to save the resulting list
+ *
+ * Collect all variables and parameters in @sc_die and save them to @var_types.
+ * The @var_types is a singly-linked list containing type and location info.
+ * Actual type can be retrieved using dwarf_offdie() with 'die_off' later.
+ *
+ * Callers should free @var_types.
+ */
+void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types)
{
- switch (op->atom) {
- case DW_OP_reg0 ... DW_OP_reg31:
- case DW_OP_regx:
- return 0;
- case DW_OP_breg0 ... DW_OP_breg31:
- return op->number;
- case DW_OP_bregx:
- return op->number2;
- default:
- break;
- }
- return -1;
+ Dwarf_Die die_mem;
+
+ die_find_child(sc_die, __die_collect_vars_cb, (void *)var_types, &die_mem);
}
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+#ifdef HAVE_DWARF_CFI_SUPPORT
/**
* die_get_cfa - Get frame base information
* @dwarf: a Dwarf info
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 85dd527ae..efafd3a1f 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -135,6 +135,15 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
/* Get the list of including scopes */
int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes);
+/* Variable type information */
+struct die_var_type {
+ struct die_var_type *next;
+ u64 die_off;
+ u64 addr;
+ int reg;
+ int offset;
+};
+
#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
/* Get byte offset range of given variable DIE */
@@ -150,6 +159,9 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
Dwarf_Addr addr, Dwarf_Die *die_mem,
int *offset);
+/* Save all variables and parameters in this scope */
+void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types);
+
#else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
@@ -178,6 +190,11 @@ static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unu
return NULL;
}
+static inline void die_collect_vars(Dwarf_Die *sc_die __maybe_unused,
+ struct die_var_type **var_types __maybe_unused)
+{
+}
+
#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
#ifdef HAVE_DWARF_CFI_SUPPORT
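As the kerneldoc for die_collect_vars() notes, callers own the resulting singly-linked list and must free it. A minimal caller sketch, assuming only the die_collect_vars()/struct die_var_type interface declared above (the scope DIE comes from elsewhere and the wrapper name is made up):

    static void collect_and_free_example(Dwarf_Die *sc_die)
    {
    	struct die_var_type *var_types = NULL, *vt;

    	/* Collect variables and parameters of the given scope DIE */
    	die_collect_vars(sc_die, &var_types);

    	/* Walk and release the singly-linked list built above */
    	while (var_types) {
    		vt = var_types;
    		var_types = vt->next;
    		/* vt->die_off can be resolved with dwarf_offdie() if needed */
    		free(vt);
    	}
    }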
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index b450178e3..e733f6b1f 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip)
bool ret = false;
decoder->state.type &= ~INTEL_PT_BRANCH;
+ decoder->state.insn_op = INTEL_PT_OP_OTHER;
+ decoder->state.insn_len = 0;
if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) {
bool ip = decoder->set_fup_cfe_ip;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index f38893e0b..4db9a098f 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -764,6 +764,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
addr_location__init(&al);
intel_pt_insn->length = 0;
+ intel_pt_insn->op = INTEL_PT_OP_OTHER;
if (to_ip && *ip == to_ip)
goto out_no_cache;
@@ -898,6 +899,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
if (to_ip && *ip == to_ip) {
intel_pt_insn->length = 0;
+ intel_pt_insn->op = INTEL_PT_OP_OTHER;
goto out_no_cache;
}
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 527517db3..07c22f765 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1549,8 +1549,8 @@ static int machine__update_kernel_mmap(struct machine *machine,
updated = map__get(orig);
machine->vmlinux_map = updated;
- machine__set_kernel_mmap(machine, start, end);
maps__remove(machine__kernel_maps(machine), orig);
+ machine__set_kernel_mmap(machine, start, end);
err = maps__insert(machine__kernel_maps(machine), updated);
map__put(orig);
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index 8f04d3b7f..59fbbba79 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/perf_event.h>
#include "util/evsel_fprintf.h"
+#include "util/pmu.h"
+#include "util/pmus.h"
#include "trace-event.h"
struct bit_names {
@@ -75,9 +77,12 @@ static void __p_read_format(char *buf, size_t size, u64 value)
}
#define ENUM_ID_TO_STR_CASE(x) case x: return (#x);
-static const char *stringify_perf_type_id(u64 value)
+static const char *stringify_perf_type_id(struct perf_pmu *pmu, u32 type)
{
- switch (value) {
+ if (pmu)
+ return pmu->name;
+
+ switch (type) {
ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE)
ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE)
ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT)
@@ -175,9 +180,9 @@ do { \
#define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64)
#define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64)
-static void __p_type_id(char *buf, size_t size, u64 value)
+static void __p_type_id(struct perf_pmu *pmu, char *buf, size_t size, u64 value)
{
- print_id_unsigned(stringify_perf_type_id(value));
+ print_id_unsigned(stringify_perf_type_id(pmu, value));
}
static void __p_config_hw_id(char *buf, size_t size, u64 value)
@@ -217,8 +222,14 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value)
}
#endif
-static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
+static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value)
{
+ const char *name = perf_pmu__name_from_config(pmu, value);
+
+ if (name) {
+ print_id_hex(name);
+ return;
+ }
switch (type) {
case PERF_TYPE_HARDWARE:
return __p_config_hw_id(buf, size, value);
@@ -246,8 +257,8 @@ static void __p_config_id(char *buf, size_t size, u32 type, u64 value)
#define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val)
#define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val)
#define p_read_format(val) __p_read_format(buf, BUF_SIZE, val)
-#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val)
-#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val)
+#define p_type_id(val) __p_type_id(pmu, buf, BUF_SIZE, val)
+#define p_config_id(val) __p_config_id(pmu, buf, BUF_SIZE, attr->type, val)
#define PRINT_ATTRn(_n, _f, _p, _a) \
do { \
@@ -262,6 +273,7 @@ do { \
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
attr__fprintf_f attr__fprintf, void *priv)
{
+ struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
char buf[BUF_SIZE];
int ret = 0;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index f39cbbc1a..cc349d9cb 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -36,6 +36,18 @@ struct perf_pmu perf_pmu__fake = {
#define UNIT_MAX_LEN 31 /* max length for event unit name */
+enum event_source {
+ /* An event loaded from /sys/devices/<pmu>/events. */
+ EVENT_SRC_SYSFS,
+ /* An event loaded from a CPUID matched json file. */
+ EVENT_SRC_CPU_JSON,
+ /*
+ * An event loaded from a /sys/devices/<pmu>/identifier matched json
+ * file.
+ */
+ EVENT_SRC_SYS_JSON,
+};
+
/**
* struct perf_pmu_alias - An event either read from sysfs or builtin in
* pmu-events.c, created by parsing the pmu-events json files.
@@ -425,9 +437,30 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu,
{
struct perf_pmu_alias *alias;
- if (load && !pmu->sysfs_aliases_loaded)
- pmu_aliases_parse(pmu);
+ if (load && !pmu->sysfs_aliases_loaded) {
+ bool has_sysfs_event;
+ char event_file_name[FILENAME_MAX + 8];
+
+ /*
+ * Test if alias/event 'name' exists in the PMU's sysfs/events
+ * directory. If not skip parsing the sysfs aliases. Sysfs event
+ * name must be all lower or all upper case.
+ */
+ scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name);
+ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
+ event_file_name[i] = tolower(event_file_name[i]);
+
+ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
+ if (!has_sysfs_event) {
+ for (size_t i = 7, n = 7 + strlen(name); i < n; i++)
+ event_file_name[i] = toupper(event_file_name[i]);
+
+ has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name);
+ }
+ if (has_sysfs_event)
+ pmu_aliases_parse(pmu);
+ }
list_for_each_entry(alias, &pmu->aliases, list) {
if (!strcasecmp(alias->name, name))
return alias;
@@ -500,7 +533,7 @@ static int update_alias(const struct pmu_event *pe,
static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
const char *desc, const char *val, FILE *val_fd,
- const struct pmu_event *pe)
+ const struct pmu_event *pe, enum event_source src)
{
struct perf_pmu_alias *alias;
int ret;
@@ -552,25 +585,30 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
}
snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
}
- if (!pe) {
- /* Update an event from sysfs with json data. */
- struct update_alias_data data = {
- .pmu = pmu,
- .alias = alias,
- };
-
+ switch (src) {
+ default:
+ case EVENT_SRC_SYSFS:
alias->from_sysfs = true;
if (pmu->events_table) {
+ /* Update an event from sysfs with json data. */
+ struct update_alias_data data = {
+ .pmu = pmu,
+ .alias = alias,
+ };
if (pmu_events_table__find_event(pmu->events_table, pmu, name,
update_alias, &data) == 0)
- pmu->loaded_json_aliases++;
+ pmu->cpu_json_aliases++;
}
- }
-
- if (!pe)
pmu->sysfs_aliases++;
- else
- pmu->loaded_json_aliases++;
+ break;
+ case EVENT_SRC_CPU_JSON:
+ pmu->cpu_json_aliases++;
+ break;
+ case EVENT_SRC_SYS_JSON:
+ pmu->sys_json_aliases++;
+ break;
+
+ }
list_add_tail(&alias->list, &pmu->aliases);
return 0;
}
@@ -646,7 +684,8 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
}
if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL,
- /*val=*/ NULL, file, /*pe=*/ NULL) < 0)
+ /*val=*/ NULL, file, /*pe=*/ NULL,
+ EVENT_SRC_SYSFS) < 0)
pr_debug("Cannot set up %s\n", name);
fclose(file);
}
@@ -900,7 +939,8 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
{
struct perf_pmu *pmu = vdata;
- perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe);
+ perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL,
+ pe, EVENT_SRC_CPU_JSON);
return 0;
}
@@ -935,13 +975,14 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
return 0;
if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
- pmu_uncore_identifier_match(pe->compat, pmu->id)) {
+ pmu_uncore_identifier_match(pe->compat, pmu->id)) {
perf_pmu__new_alias(pmu,
pe->name,
pe->desc,
pe->event,
/*val_fd=*/ NULL,
- pe);
+ pe,
+ EVENT_SRC_SYS_JSON);
}
return 0;
@@ -1035,6 +1076,12 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
pmu->max_precise = pmu_max_precise(dirfd, pmu);
pmu->alias_name = pmu_find_alias_name(pmu, dirfd);
pmu->events_table = perf_pmu__find_events_table(pmu);
+ /*
+ * Load the sys json events/aliases when loading the PMU as each event
+ * may have a different compat regular expression. We therefore can't
+ * know the number of sys json events/aliases without computing the
+ * regular expressions for them all.
+ */
pmu_add_sys_aliases(pmu);
list_add_tail(&pmu->list, pmus);
@@ -1632,15 +1679,15 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu)
{
size_t nr;
- if (!pmu->sysfs_aliases_loaded)
- pmu_aliases_parse(pmu);
-
- nr = pmu->sysfs_aliases;
+ pmu_aliases_parse(pmu);
+	nr = pmu->sysfs_aliases + pmu->sys_json_aliases;
if (pmu->cpu_aliases_added)
- nr += pmu->loaded_json_aliases;
+ nr += pmu->cpu_json_aliases;
else if (pmu->events_table)
- nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases;
+ nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases;
+ else
+ assert(pmu->cpu_json_aliases == 0);
return pmu->selectable ? nr + 1 : nr;
}
@@ -1693,6 +1740,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
+ pmu_aliases_parse(pmu);
pmu_add_cpu_aliases(pmu);
list_for_each_entry(event, &pmu->aliases, list) {
size_t buf_used;
@@ -2085,3 +2133,22 @@ void perf_pmu__delete(struct perf_pmu *pmu)
zfree(&pmu->id);
free(pmu);
}
+
+const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config)
+{
+ struct perf_pmu_alias *event;
+
+ if (!pmu)
+ return NULL;
+
+ pmu_aliases_parse(pmu);
+ pmu_add_cpu_aliases(pmu);
+ list_for_each_entry(event, &pmu->aliases, list) {
+ struct perf_event_attr attr = {.config = 0,};
+ int ret = perf_pmu__config(pmu, &attr, &event->terms, NULL);
+
+ if (ret == 0 && config == attr.config)
+ return event->name;
+ }
+ return NULL;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index e35d98520..77c59ebc0 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -123,8 +123,10 @@ struct perf_pmu {
const struct pmu_events_table *events_table;
/** @sysfs_aliases: Number of sysfs aliases loaded. */
uint32_t sysfs_aliases;
- /** @sysfs_aliases: Number of json event aliases loaded. */
- uint32_t loaded_json_aliases;
+ /** @cpu_json_aliases: Number of json event aliases loaded specific to the CPUID. */
+ uint32_t cpu_json_aliases;
+ /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */
+ uint32_t sys_json_aliases;
/** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */
bool sysfs_aliases_loaded;
/**
@@ -273,5 +275,6 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
struct perf_pmu *perf_pmus__find_core_pmu(void);
+const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config);
#endif /* __PMU_H */
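A rough usage sketch of the new perf_pmu__name_from_config() helper, along the lines of how perf_event_attr_fprintf.c resolves a raw config back to an event name; the printing function and its output format are illustrative only:

    static void print_config_name_example(struct perf_event_attr *attr)
    {
    	struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type);
    	const char *name = perf_pmu__name_from_config(pmu, attr->config);

    	/* Fall back to the numeric config when no alias matches */
    	if (name)
    		printf("config: %s\n", name);
    	else
    		printf("config: %#llx\n", (unsigned long long)attr->config);
    }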
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 2a0ad9ecf..5c12459e9 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -11,6 +11,7 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
+#include <libgen.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 075c0f79b..0aeb97c11 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -103,6 +103,16 @@ int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char
return EOF;
}
+const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused)
+{
+ return NULL;
+}
+
+struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused)
+{
+ return NULL;
+}
+
int perf_pmus__num_core_pmus(void)
{
return 1;
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index bfc1d705f..91d2f7f65 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -1223,6 +1223,9 @@ static void print_metric_headers(struct perf_stat_config *config,
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
+ if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter)
+ continue;
+
os.evsel = counter;
perf_stat__print_shadow_stats(config, counter, 0,
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 9ebdb8e13..68dbeae8d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1287,7 +1287,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *replacement_map = NULL;
+ struct map *map_ref, *replacement_map = NULL;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1365,6 +1365,24 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
+ /*
+ * Update addresses of vmlinux map. Re-insert it to ensure maps are
+ * correctly ordered. Do this before using maps__merge_in() for the
+ * remaining maps so vmlinux gets split if necessary.
+ */
+ map_ref = map__get(map);
+ maps__remove(kmaps, map_ref);
+
+ map__set_start(map_ref, map__start(replacement_map));
+ map__set_end(map_ref, map__end(replacement_map));
+ map__set_pgoff(map_ref, map__pgoff(replacement_map));
+ map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
+
+ err = maps__insert(kmaps, map_ref);
+ map__put(map_ref);
+ if (err)
+ goto out_err;
+
/* Add new maps */
while (!list_empty(&md.maps)) {
struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
@@ -1372,22 +1390,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
list_del_init(&new_node->node);
- if (RC_CHK_EQUAL(new_map, replacement_map)) {
- struct map *map_ref;
-
- map__set_start(map, map__start(new_map));
- map__set_end(map, map__end(new_map));
- map__set_pgoff(map, map__pgoff(new_map));
- map__set_mapping_type(map, map__mapping_type(new_map));
- /* Ensure maps are correctly ordered */
- map_ref = map__get(map);
- maps__remove(kmaps, map_ref);
- err = maps__insert(kmaps, map_ref);
- map__put(map_ref);
- map__put(new_map);
- if (err)
- goto out_err;
- } else {
+		/* Skip replacement_map, it was already inserted above */
+ if (!RC_CHK_EQUAL(new_map, replacement_map)) {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)
@@ -1969,6 +1973,10 @@ out:
return ret;
}
+/*
+ * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
+ * it returns an error.
+ */
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
@@ -1987,8 +1995,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
else
symtab_type = DSO_BINARY_TYPE__VMLINUX;
- if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
+ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
+ if (vmlinux_allocated)
+ free((char *) vmlinux);
return -1;
+ }
/*
* dso__load_sym() may copy 'dso' which will result in the copies having
@@ -2031,7 +2042,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map)
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
goto out;
- free(filename);
}
out:
return err;
@@ -2183,7 +2193,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
return err;
- free(filename);
}
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
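Given the ownership comment added to dso__load_vmlinux(), call sites that pass vmlinux_allocated == true must not free the filename afterwards, which is why the free(filename) calls above are dropped. A hedged sketch of the resulting convention (the path and wrapper function are hypothetical):

    static int load_vmlinux_example(struct dso *dso, struct map *map)
    {
    	char *filename = strdup("/path/to/vmlinux");	/* hypothetical path */

    	if (!filename)
    		return -1;

    	/*
    	 * dso__load_vmlinux() owns 'filename' on every path, including
    	 * errors, because vmlinux_allocated == true; do not free it here.
    	 */
    	return dso__load_vmlinux(dso, map, filename, /*vmlinux_allocated=*/true);
    }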
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1aa8962dc..515726489 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -39,12 +39,13 @@ int thread__init_maps(struct thread *thread, struct machine *machine)
struct thread *thread__new(pid_t pid, pid_t tid)
{
- char *comm_str;
- struct comm *comm;
RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
struct thread *thread;
if (ADD_RC_CHK(thread, _thread) != NULL) {
+ struct comm *comm;
+ char comm_str[32];
+
thread__set_pid(thread, pid);
thread__set_tid(thread, tid);
thread__set_ppid(thread, -1);
@@ -56,13 +57,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
init_rwsem(thread__namespaces_lock(thread));
init_rwsem(thread__comm_lock(thread));
- comm_str = malloc(32);
- if (!comm_str)
- goto err_thread;
-
- snprintf(comm_str, 32, ":%d", tid);
+ snprintf(comm_str, sizeof(comm_str), ":%d", tid);
comm = comm__new(comm_str, 0, false);
- free(comm_str);
if (!comm)
goto err_thread;
@@ -76,7 +72,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
return thread;
err_thread:
- free(thread);
+ thread__delete(thread);
return NULL;
}
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 35ee41e43..0d0ca63de 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -3,6 +3,7 @@
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
+#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
diff --git a/tools/testing/selftests/alsa/Makefile b/tools/testing/selftests/alsa/Makefile
index 5af9ba8a4..c1ce39874 100644
--- a/tools/testing/selftests/alsa/Makefile
+++ b/tools/testing/selftests/alsa/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
-CFLAGS += $(shell pkg-config --cflags alsa)
+CFLAGS += $(shell pkg-config --cflags alsa) $(KHDR_INCLUDES)
LDLIBS += $(shell pkg-config --libs alsa)
ifeq ($(LDLIBS),)
LDLIBS += -lasound
diff --git a/tools/testing/selftests/arm64/tags/tags_test.c b/tools/testing/selftests/arm64/tags/tags_test.c
index 570116346..955f87c11 100644
--- a/tools/testing/selftests/arm64/tags/tags_test.c
+++ b/tools/testing/selftests/arm64/tags/tags_test.c
@@ -6,6 +6,7 @@
#include <stdint.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
+#include "../../kselftest.h"
#define SHIFT_TAG(tag) ((uint64_t)(tag) << 56)
#define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
@@ -21,6 +22,9 @@ int main(void)
if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
tbi_enabled = 1;
ptr = (struct utsname *)malloc(sizeof(*ptr));
+ if (!ptr)
+ ksft_exit_fail_msg("Failed to allocate utsname buffer\n");
+
if (tbi_enabled)
tag = 0x42;
ptr = (struct utsname *)SET_TAG(ptr, tag);
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 19be9c63d..e812876d7 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -508,6 +508,9 @@ int cgroup_setup_and_join(const char *path) {
/**
* setup_classid_environment() - Setup the cgroupv1 net_cls environment
*
+ * This function should only be called in a custom mount namespace, e.g.
+ * created by running setup_cgroup_environment.
+ *
* After calling this function, cleanup_classid_environment should be called
* once testing is complete.
*
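The ordering described above, cgroup v2 environment first and the cgroupv1 net_cls setup second, with teardown in reverse, is what test_cgroup1_hierarchy() ends up doing; a condensed sketch assuming these helpers (the wrapper function is hypothetical):

    static int classid_env_example(void)
    {
    	int err;

    	err = setup_cgroup_environment();	/* creates the mount namespace */
    	if (err)
    		return err;

    	err = setup_classid_environment();
    	if (err) {
    		cleanup_cgroup_environment();
    		return err;
    	}

    	/* ... exercise the cgroupv1 net_cls hierarchy here ... */

    	cleanup_classid_environment();
    	cleanup_cgroup_environment();
    	return 0;
    }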
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index 6db27a908..be96bf022 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -461,6 +461,8 @@ struct nstoken *open_netns(const char *name)
return token;
fail:
+ if (token->orig_netns_fd != -1)
+ close(token->orig_netns_fd);
free(token);
return NULL;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index a8b53b873..f66ceccd7 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -25,7 +25,7 @@ static void test_lookup_update(void)
int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
struct test_btf_map_in_map *skel;
- int err, key = 0, val, i, fd;
+ int err, key = 0, val, i;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -102,30 +102,6 @@ static void test_lookup_update(void)
CHECK(map1_id == 0, "map1_id", "failed to get ID 1\n");
CHECK(map2_id == 0, "map2_id", "failed to get ID 2\n");
- test_btf_map_in_map__destroy(skel);
- skel = NULL;
-
- /* we need to either wait for or force synchronize_rcu(), before
- * checking for "still exists" condition, otherwise map could still be
- * resolvable by ID, causing false positives.
- *
- * Older kernels (5.8 and earlier) freed map only after two
- * synchronize_rcu()s, so trigger two, to be entirely sure.
- */
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
- CHECK(kern_sync_rcu(), "sync_rcu", "failed\n");
-
- fd = bpf_map_get_fd_by_id(map1_id);
- if (CHECK(fd >= 0, "map1_leak", "inner_map1 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
- fd = bpf_map_get_fd_by_id(map2_id);
- if (CHECK(fd >= 0, "map2_leak", "inner_map2 leaked!\n")) {
- close(fd);
- goto cleanup;
- }
-
cleanup:
test_btf_map_in_map__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
index 74d6d7546..25332e596 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c
@@ -87,9 +87,12 @@ void test_cgroup1_hierarchy(void)
goto destroy;
/* Setup cgroup1 hierarchy */
+ err = setup_cgroup_environment();
+ if (!ASSERT_OK(err, "setup_cgroup_environment"))
+ goto destroy;
err = setup_classid_environment();
if (!ASSERT_OK(err, "setup_classid_environment"))
- goto destroy;
+ goto cleanup_cgroup;
err = join_classid();
if (!ASSERT_OK(err, "join_cgroup1"))
@@ -153,6 +156,8 @@ void test_cgroup1_hierarchy(void)
cleanup:
cleanup_classid_environment();
+cleanup_cgroup:
+ cleanup_cgroup_environment();
destroy:
test_cgroup1_hierarchy__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
index 8269cdee3..38fda42fd 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_multi_test.c
@@ -397,7 +397,7 @@ static void test_attach_api_fails(void)
link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &opts);
if (!ASSERT_ERR(link_fd, "link_fd"))
goto cleanup;
- ASSERT_EQ(link_fd, -ESRCH, "pid_is_wrong");
+ ASSERT_EQ(link_fd, -EINVAL, "pid_is_wrong");
cleanup:
if (link_fd >= 0)
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
index 498d3bdaa..bad0ea167 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c
@@ -107,8 +107,8 @@ void test_xdp_do_redirect(void)
.attach_point = BPF_TC_INGRESS);
memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp));
- *((__u32 *)data) = 0x42; /* metadata test value */
- *((__u32 *)data + 4) = 0;
+ ((__u32 *)data)[0] = 0x42; /* metadata test value */
+ ((__u32 *)data)[1] = 0;
skel = test_xdp_do_redirect__open();
if (!ASSERT_OK_PTR(skel, "skel"))
diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
index e4bfbba6c..c8ec0d036 100644
--- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
+++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c
@@ -61,14 +61,15 @@ SEC("lsm.s/socket_post_create")
int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
int protocol, int kern)
{
+ struct sock *sk = sock->sk;
struct storage *stg;
__u32 pid;
pid = bpf_get_current_pid_tgid() >> 32;
- if (pid != bench_pid)
+ if (pid != bench_pid || !sk)
return 0;
- stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL,
+ stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (stg)
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
index e5e3a8b8d..637e75df2 100644
--- a/tools/testing/selftests/bpf/progs/local_storage.c
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -140,11 +140,12 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
+ if (pid != monitored_pid || !sk)
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0);
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0);
if (!storage)
return 0;
@@ -155,24 +156,24 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
/* This tests that we can associate multiple elements
* with the local storage.
*/
- storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
- if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk))
+ if (bpf_sk_storage_delete(&sk_storage_map2, sk))
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0,
+ storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
- if (bpf_sk_storage_delete(&sk_storage_map, sock->sk))
+ if (bpf_sk_storage_delete(&sk_storage_map, sk))
return 0;
/* Ensure that the sk_storage_map is disconnected from the storage. */
- if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap)
+ if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap)
return 0;
sk_storage_result = 0;
@@ -185,11 +186,12 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
struct local_storage *storage;
+ struct sock *sk = sock->sk;
- if (pid != monitored_pid)
+ if (pid != monitored_pid || !sk)
return 0;
- storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
+ storage = bpf_sk_storage_get(&sk_storage_map, sk, 0,
BPF_LOCAL_STORAGE_GET_F_CREATE);
if (!storage)
return 0;
diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
index 02c11d16b..d7598538a 100644
--- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c
+++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c
@@ -103,11 +103,15 @@ static __always_inline int real_bind(struct socket *sock,
int addrlen)
{
struct sockaddr_ll sa = {};
+ struct sock *sk = sock->sk;
- if (sock->sk->__sk_common.skc_family != AF_PACKET)
+ if (!sk)
+ return 1;
+
+ if (sk->__sk_common.skc_family != AF_PACKET)
return 1;
- if (sock->sk->sk_kern_sock)
+ if (sk->sk_kern_sock)
return 1;
bpf_probe_read_kernel(&sa, sizeof(sa), address);
diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
index baff5ffe9..a9fc30ed4 100644
--- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
+++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c
@@ -8,6 +8,13 @@
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"
+/* The compiler may be able to detect the access to uninitialized
+ memory in the routines performing out of bound memory accesses and
+   emit warnings about it. This is the case for GCC. */
+#if !defined(__clang__)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 024a0faaf..43612de44 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -2104,9 +2104,9 @@ out:
free(options.whitelist);
if (options.blacklist)
free(options.blacklist);
+ close(cg_fd);
if (cg_created)
cleanup_cgroup_environment();
- close(cg_fd);
return err;
}
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index 910044f08..7989ec608 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -72,7 +72,6 @@ cleanup() {
server_listen() {
ip netns exec "${ns2}" nc "${netcat_opt}" -l "${port}" > "${outfile}" &
server_pid=$!
- sleep 0.2
}
client_connect() {
@@ -93,6 +92,16 @@ verify_data() {
fi
}
+wait_for_port() {
+ for i in $(seq 20); do
+ if ip netns exec "${ns2}" ss ${2:--4}OHntl | grep -q "$1"; then
+ return 0
+ fi
+ sleep 0.1
+ done
+ return 1
+}
+
set -e
# no arguments: automated test, run all
@@ -193,6 +202,7 @@ setup
# basic communication works
echo "test basic connectivity"
server_listen
+wait_for_port ${port} ${netcat_opt}
client_connect
verify_data
@@ -204,6 +214,7 @@ ip netns exec "${ns1}" tc filter add dev veth1 egress \
section "encap_${tuntype}_${mac}"
echo "test bpf encap without decap (expect failure)"
server_listen
+wait_for_port ${port} ${netcat_opt}
! client_connect
if [[ "$tuntype" =~ "udp" ]]; then
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 0340d4ca8..432db923b 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -195,10 +195,10 @@ int cg_write_numeric(const char *cgroup, const char *control, long value)
return cg_write(cgroup, control, buf);
}
-int cg_find_unified_root(char *root, size_t len)
+int cg_find_unified_root(char *root, size_t len, bool *nsdelegate)
{
char buf[10 * PAGE_SIZE];
- char *fs, *mount, *type;
+ char *fs, *mount, *type, *options;
const char delim[] = "\n\t ";
if (read_text("/proc/self/mounts", buf, sizeof(buf)) <= 0)
@@ -211,12 +211,14 @@ int cg_find_unified_root(char *root, size_t len)
for (fs = strtok(buf, delim); fs; fs = strtok(NULL, delim)) {
mount = strtok(NULL, delim);
type = strtok(NULL, delim);
- strtok(NULL, delim);
+ options = strtok(NULL, delim);
strtok(NULL, delim);
strtok(NULL, delim);
if (strcmp(type, "cgroup2") == 0) {
strncpy(root, mount, len);
+ if (nsdelegate)
+ *nsdelegate = !!strstr(options, "nsdelegate");
return 0;
}
}
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 1df7f2022..89e8519fb 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -21,7 +21,7 @@ static inline int values_close(long a, long b, int err)
return abs(a - b) <= (a + b) / 100 * err;
}
-extern int cg_find_unified_root(char *root, size_t len);
+extern int cg_find_unified_root(char *root, size_t len, bool *nsdelegate);
extern char *cg_name(const char *root, const char *name);
extern char *cg_name_indexed(const char *root, const char *name, int index);
extern char *cg_control(const char *cgroup, const char *control);
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index 80aa6b237..a5672a91d 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -18,6 +18,8 @@
#include "../kselftest.h"
#include "cgroup_util.h"
+static bool nsdelegate;
+
static int touch_anon(char *buf, size_t size)
{
int fd;
@@ -775,6 +777,9 @@ static int test_cgcore_lesser_ns_open(const char *root)
pid_t pid;
int status;
+ if (!nsdelegate)
+ return KSFT_SKIP;
+
cg_test_a = cg_name(root, "cg_test_a");
cg_test_b = cg_name(root, "cg_test_b");
@@ -862,7 +867,7 @@ int main(int argc, char *argv[])
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), &nsdelegate))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index 24020a2c6..186bf96f6 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -700,7 +700,7 @@ int main(int argc, char *argv[])
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "cpu"))
diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
index b061ed1e0..4034d14ba 100644
--- a/tools/testing/selftests/cgroup/test_cpuset.c
+++ b/tools/testing/selftests/cgroup/test_cpuset.c
@@ -249,7 +249,7 @@ int main(int argc, char *argv[])
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (cg_read_strstr(root, "cgroup.subtree_control", "cpuset"))
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index 8845353ac..8730645d3 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -827,7 +827,7 @@ int main(int argc, char *argv[])
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
diff --git a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
index f0fefeb4c..856f9508e 100644
--- a/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
+++ b/tools/testing/selftests/cgroup/test_hugetlb_memcg.c
@@ -214,7 +214,7 @@ int main(int argc, char **argv)
return ret;
}
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
switch (test_hugetlb_memcg(root)) {
diff --git a/tools/testing/selftests/cgroup/test_kill.c b/tools/testing/selftests/cgroup/test_kill.c
index 615369031..0e5bb6c73 100644
--- a/tools/testing/selftests/cgroup/test_kill.c
+++ b/tools/testing/selftests/cgroup/test_kill.c
@@ -276,7 +276,7 @@ int main(int argc, char *argv[])
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
diff --git a/tools/testing/selftests/cgroup/test_kmem.c b/tools/testing/selftests/cgroup/test_kmem.c
index c82f974b8..137506db0 100644
--- a/tools/testing/selftests/cgroup/test_kmem.c
+++ b/tools/testing/selftests/cgroup/test_kmem.c
@@ -420,7 +420,7 @@ int main(int argc, char **argv)
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
/*
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index c7c957200..b462416b3 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -1314,7 +1314,7 @@ int main(int argc, char **argv)
char root[PATH_MAX];
int i, proc_status, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
/*
diff --git a/tools/testing/selftests/cgroup/test_zswap.c b/tools/testing/selftests/cgroup/test_zswap.c
index f0e488ed9..ef7f39545 100644
--- a/tools/testing/selftests/cgroup/test_zswap.c
+++ b/tools/testing/selftests/cgroup/test_zswap.c
@@ -440,7 +440,7 @@ int main(int argc, char **argv)
char root[PATH_MAX];
int i, ret = EXIT_SUCCESS;
- if (cg_find_unified_root(root, sizeof(root)))
+ if (cg_find_unified_root(root, sizeof(root), NULL))
ksft_exit_skip("cgroup v2 isn't mounted\n");
if (!zswap_configured())
diff --git a/tools/testing/selftests/damon/_damon_sysfs.py b/tools/testing/selftests/damon/_damon_sysfs.py
index d23d7398a..fe77d7e73 100644
--- a/tools/testing/selftests/damon/_damon_sysfs.py
+++ b/tools/testing/selftests/damon/_damon_sysfs.py
@@ -287,6 +287,8 @@ class DamonCtx:
nr_schemes_file = os.path.join(
self.sysfs_dir(), 'schemes', 'nr_schemes')
content, err = read_file(nr_schemes_file)
+ if err is not None:
+ return err
if int(content) != len(self.schemes):
err = write_file(nr_schemes_file, '%d' % len(self.schemes))
if err != None:
diff --git a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
index 6369927e9..48395cfd4 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh
@@ -42,7 +42,7 @@ __mlxsw_only_on_spectrum()
local src=$1; shift
if ! mlxsw_on_spectrum "$rev"; then
- log_test_skip $src:$caller "(Spectrum-$rev only)"
+ log_test_xfail $src:$caller "(Spectrum-$rev only)"
return 1
fi
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
index a88d8a8c8..899b68926 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
@@ -47,7 +47,6 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
RET=0
target=$(${current_test}_get_target "$should_fail")
if ((target == 0)); then
- log_test_skip "'$current_test' should_fail=$should_fail test"
continue
fi
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
index f981c957f..482ebb744 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
@@ -52,7 +52,6 @@ for current_test in ${TESTS:-$ALL_TESTS}; do
RET=0
target=$(${current_test}_get_target "$should_fail")
if ((target == 0)); then
- log_test_skip "'$current_test' [$profile] should_fail=$should_fail test"
continue
fi
${current_test}_setup_prepare
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile
index c2f7cef91..eb4c3b411 100644
--- a/tools/testing/selftests/filesystems/binderfs/Makefile
+++ b/tools/testing/selftests/filesystems/binderfs/Makefile
@@ -3,6 +3,4 @@
CFLAGS += $(KHDR_INCLUDES) -pthread
TEST_GEN_PROGS := binderfs_test
-binderfs_test: binderfs_test.c ../../kselftest.h ../../kselftest_harness.h
-
include ../../lib.mk
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
index b9c21a81d..c0cdad4c4 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
@@ -53,7 +53,7 @@ fi
echo > dynamic_events
-if [ "$FIELDS" ] ; then
+if [ "$FIELDS" -a "$FPROBES" ] ; then
echo "t:tpevent ${TP2} obj_size=s->object_size" >> dynamic_events
echo "f:fpevent ${TP3}%return path=\$retval->name:string" >> dynamic_events
echo "t:tpevent2 ${TP4} p->se.group_node.next->prev" >> dynamic_events
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
index d183b8a8e..1e251ce29 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_entry_arg.tc
@@ -11,7 +11,7 @@ echo 1 > events/tests/enable
echo > trace
cat trace > /dev/null
-function streq() {
+streq() {
test $1 = $2
}
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
index d3a79da21..5f72abe6f 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: Generic dynamic event - check if duplicate events are caught
-# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README
+# requires: dynamic_events "e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]":README events/syscalls/sys_enter_openat
echo 0 > events/enable
diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
index 3f74c09c5..118247b8d 100644
--- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
+++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
@@ -10,7 +10,6 @@ fail() { #msg
}
sample_events() {
- echo > trace
echo 1 > events/kmem/kmem_cache_free/enable
echo 1 > tracing_on
ls > /dev/null
@@ -22,6 +21,7 @@ echo 0 > tracing_on
echo 0 > events/enable
echo "Get the most frequently calling function"
+echo > trace
sample_events
target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
@@ -32,7 +32,16 @@ echo > trace
echo "Test event filter function name"
echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
+
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
@@ -49,7 +58,16 @@ address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
echo "Test event filter function address"
echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
+echo > trace
+sample_events
+max_retry=10
+while [ `grep kmem_cache_free trace| wc -l` -eq 0 ]; do
sample_events
+max_retry=$((max_retry - 1))
+if [ $max_retry -eq 0 ]; then
+ exit_fail
+fi
+done
hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
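The trace is now cleared once per filter case instead of inside sample_events(), and the test re-samples until at least one kmem_cache_free event lands in the trace, presumably because a single short sampling window can end up empty on a quiet system. The generic shape of that bounded retry, sketched with the same condition the test uses:

    max_retry=10
    until grep -q kmem_cache_free trace; do
        sample_events
        max_retry=$((max_retry - 1))
        [ $max_retry -eq 0 ] && exit_fail
    done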
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
index 1f6981ef7..ba19b81ce 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
@@ -30,7 +30,8 @@ find_dot_func() {
fi
grep " [tT] .*\.isra\..*" /proc/kallsyms | cut -f 3 -d " " | while read f; do
- if grep -s $f available_filter_functions; then
+ cnt=`grep -s $f available_filter_functions | wc -l`;
+ if [ $cnt -eq 1 ]; then
echo $f
break
fi
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
index 53b82f36a..e50470b53 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_entry_arg.tc
@@ -11,7 +11,7 @@ echo 1 > events/kprobes/enable
echo > trace
cat trace > /dev/null
-function streq() {
+streq() {
test $1 = $2
}
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi.c b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
index 7f3ca5c78..215c6cb53 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi.c
@@ -360,7 +360,7 @@ out:
int main(int argc, char *argv[])
{
- const char *test_name;
+ char *test_name;
int c, ret;
while ((c = getopt(argc, argv, "bchlot:v:")) != -1) {
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
index 25110c7c0..d7a8e321b 100644
--- a/tools/testing/selftests/kcmp/kcmp_test.c
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -91,7 +91,7 @@ int main(int argc, char **argv)
ksft_print_header();
ksft_set_plan(3);
- fd2 = open(kpath, O_RDWR, 0644);
+ fd2 = open(kpath, O_RDWR);
if (fd2 < 0) {
perror("Can't open file");
ksft_exit_fail();
diff --git a/tools/testing/selftests/kselftest/ktap_helpers.sh b/tools/testing/selftests/kselftest/ktap_helpers.sh
index f2fbb914e..79a125eb2 100644
--- a/tools/testing/selftests/kselftest/ktap_helpers.sh
+++ b/tools/testing/selftests/kselftest/ktap_helpers.sh
@@ -43,7 +43,7 @@ __ktap_test() {
directive="$3" # optional
local directive_str=
- [[ ! -z "$directive" ]] && directive_str="# $directive"
+ [ ! -z "$directive" ] && directive_str="# $directive"
echo $result $KTAP_TESTNO $description $directive_str
@@ -99,7 +99,7 @@ ktap_exit_fail_msg() {
ktap_finished() {
ktap_print_totals
- if [ $(("$KTAP_CNT_PASS" + "$KTAP_CNT_SKIP")) -eq "$KSFT_NUM_TESTS" ]; then
+ if [ $((KTAP_CNT_PASS + KTAP_CNT_SKIP)) -eq "$KSFT_NUM_TESTS" ]; then
exit "$KSFT_PASS"
else
exit "$KSFT_FAIL"
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
index 3c8f2965c..b634969cb 100644
--- a/tools/testing/selftests/kselftest_harness.h
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -1216,7 +1216,7 @@ void __run_test(struct __fixture_metadata *f,
struct __test_metadata *t)
{
struct __test_xfail *xfail;
- char *test_name;
+ char test_name[1024];
const char *diagnostic;
/* reset test struct */
@@ -1227,12 +1227,8 @@ void __run_test(struct __fixture_metadata *f,
memset(t->env, 0, sizeof(t->env));
memset(t->results->reason, 0, sizeof(t->results->reason));
- if (asprintf(&test_name, "%s%s%s.%s", f->name,
- variant->name[0] ? "." : "", variant->name, t->name) == -1) {
- ksft_print_msg("ERROR ALLOCATING MEMORY\n");
- t->exit_code = KSFT_FAIL;
- _exit(t->exit_code);
- }
+ snprintf(test_name, sizeof(test_name), "%s%s%s.%s",
+ f->name, variant->name[0] ? "." : "", variant->name, t->name);
ksft_print_msg(" RUN %s ...\n", test_name);
@@ -1270,7 +1266,6 @@ void __run_test(struct __fixture_metadata *f,
ksft_test_result_code(t->exit_code, test_name,
diagnostic ? "%s" : NULL, diagnostic);
- free(test_name);
}
static int test_harness_run(int argc, char **argv)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index da2cade3b..8ae203d8e 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -7,6 +7,8 @@ else ifneq ($(filter -%,$(LLVM)),)
LLVM_SUFFIX := $(LLVM)
endif
+CLANG := $(LLVM_PREFIX)clang$(LLVM_SUFFIX)
+
CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
@@ -18,7 +20,13 @@ CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
-CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+
+# Default to host architecture if ARCH is not explicitly given.
+ifeq ($(ARCH),)
+CLANG_TARGET_FLAGS := $(shell $(CLANG) -print-target-triple)
+else
+CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
+endif
ifeq ($(CROSS_COMPILE),)
ifeq ($(CLANG_TARGET_FLAGS),)
@@ -30,7 +38,7 @@ else
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
endif # CROSS_COMPILE
-CC := $(LLVM_PREFIX)clang$(LLVM_SUFFIX) $(CLANG_FLAGS) -fintegrated-as
+CC := $(CLANG) $(CLANG_FLAGS) -fintegrated-as
else
CC := $(CROSS_COMPILE)gcc
endif # LLVM
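With LLVM=1 and no explicit ARCH, CLANG_TARGET_FLAGS previously stayed empty unless ARCH matched one of the listed targets; it is now derived from the compiler's own default triple. To see what that resolves to on a given host (the exact triple varies by clang build):

    clang -print-target-triple    # e.g. x86_64-pc-linux-gnu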
diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c
index 533999b6c..e140558e6 100644
--- a/tools/testing/selftests/mm/compaction_test.c
+++ b/tools/testing/selftests/mm/compaction_test.c
@@ -82,12 +82,16 @@ int prereq(void)
return -1;
}
-int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
+int check_compaction(unsigned long mem_free, unsigned long hugepage_size,
+ unsigned long initial_nr_hugepages)
{
+ unsigned long nr_hugepages_ul;
int fd, ret = -1;
int compaction_index = 0;
- char initial_nr_hugepages[10] = {0};
- char nr_hugepages[10] = {0};
+ char nr_hugepages[20] = {0};
+ char init_nr_hugepages[20] = {0};
+
+ sprintf(init_nr_hugepages, "%lu", initial_nr_hugepages);
/* We want to test with 80% of available memory. Else, OOM killer comes
in to play */
@@ -101,21 +105,6 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
goto out;
}
- if (read(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) <= 0) {
- ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
- strerror(errno));
- goto close_fd;
- }
-
- /* Start with the initial condition of 0 huge pages*/
- if (write(fd, "0", sizeof(char)) != sizeof(char)) {
- ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
- strerror(errno));
- goto close_fd;
- }
-
- lseek(fd, 0, SEEK_SET);
-
/* Request a large number of huge pages. The Kernel will allocate
as much as it can */
if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) {
@@ -134,22 +123,27 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
/* We should have been able to request at least 1/3 rd of the memory in
huge pages */
- compaction_index = mem_free/(atoi(nr_hugepages) * hugepage_size);
+ nr_hugepages_ul = strtoul(nr_hugepages, NULL, 10);
+ if (!nr_hugepages_ul) {
+ ksft_print_msg("ERROR: No memory is available as huge pages\n");
+ goto close_fd;
+ }
+ compaction_index = mem_free/(nr_hugepages_ul * hugepage_size);
lseek(fd, 0, SEEK_SET);
- if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
- != strlen(initial_nr_hugepages)) {
+ if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages))
+ != strlen(init_nr_hugepages)) {
ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n",
strerror(errno));
goto close_fd;
}
- ksft_print_msg("Number of huge pages allocated = %d\n",
- atoi(nr_hugepages));
+ ksft_print_msg("Number of huge pages allocated = %lu\n",
+ nr_hugepages_ul);
if (compaction_index > 3) {
- ksft_print_msg("ERROR: Less that 1/%d of memory is available\n"
+ ksft_print_msg("ERROR: Less than 1/%d of memory is available\n"
"as huge pages\n", compaction_index);
goto close_fd;
}
@@ -163,6 +157,41 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
return ret;
}
+int set_zero_hugepages(unsigned long *initial_nr_hugepages)
+{
+ int fd, ret = -1;
+ char nr_hugepages[20] = {0};
+
+ fd = open("/proc/sys/vm/nr_hugepages", O_RDWR | O_NONBLOCK);
+ if (fd < 0) {
+ ksft_print_msg("Failed to open /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
+ goto out;
+ }
+ if (read(fd, nr_hugepages, sizeof(nr_hugepages)) <= 0) {
+ ksft_print_msg("Failed to read from /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
+ goto close_fd;
+ }
+
+ lseek(fd, 0, SEEK_SET);
+
+ /* Start with the initial condition of 0 huge pages */
+ if (write(fd, "0", sizeof(char)) != sizeof(char)) {
+ ksft_print_msg("Failed to write 0 to /proc/sys/vm/nr_hugepages: %s\n",
+ strerror(errno));
+ goto close_fd;
+ }
+
+ *initial_nr_hugepages = strtoul(nr_hugepages, NULL, 10);
+ ret = 0;
+
+ close_fd:
+ close(fd);
+
+ out:
+ return ret;
+}
int main(int argc, char **argv)
{
@@ -173,14 +202,19 @@ int main(int argc, char **argv)
unsigned long mem_free = 0;
unsigned long hugepage_size = 0;
long mem_fragmentable_MB = 0;
+ unsigned long initial_nr_hugepages;
ksft_print_header();
if (prereq() || geteuid())
- return ksft_exit_skip("Prerequisites unsatisfied\n");
+ ksft_exit_skip("Prerequisites unsatisfied\n");
ksft_set_plan(1);
+ /* Start the test without hugepages reducing mem_free */
+ if (set_zero_hugepages(&initial_nr_hugepages))
+ ksft_exit_fail();
+
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
if (setrlimit(RLIMIT_MEMLOCK, &lim))
@@ -224,8 +258,9 @@ int main(int argc, char **argv)
entry = entry->next;
}
- if (check_compaction(mem_free, hugepage_size) == 0)
- return ksft_exit_pass();
+ if (check_compaction(mem_free, hugepage_size,
+ initial_nr_hugepages) == 0)
+ ksft_exit_pass();
- return ksft_exit_fail();
+ ksft_exit_fail();
}
diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c
index 363bf5f80..fe078d6e1 100644
--- a/tools/testing/selftests/mm/cow.c
+++ b/tools/testing/selftests/mm/cow.c
@@ -1779,5 +1779,5 @@ int main(int argc, char **argv)
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c
index ad168d35b..d7eaca5bb 100644
--- a/tools/testing/selftests/mm/gup_longterm.c
+++ b/tools/testing/selftests/mm/gup_longterm.c
@@ -456,5 +456,5 @@ int main(int argc, char **argv)
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/gup_test.c b/tools/testing/selftests/mm/gup_test.c
index 18a49c70d..bdeaac67f 100644
--- a/tools/testing/selftests/mm/gup_test.c
+++ b/tools/testing/selftests/mm/gup_test.c
@@ -1,3 +1,4 @@
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
@@ -228,7 +229,7 @@ int main(int argc, char **argv)
break;
}
ksft_test_result_skip("Please run this test as root\n");
- return ksft_exit_pass();
+ ksft_exit_pass();
}
p = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, filed, 0);
@@ -267,5 +268,5 @@ int main(int argc, char **argv)
free(tid);
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c b/tools/testing/selftests/mm/ksm_functional_tests.c
index d615767e3..508287560 100644
--- a/tools/testing/selftests/mm/ksm_functional_tests.c
+++ b/tools/testing/selftests/mm/ksm_functional_tests.c
@@ -646,5 +646,5 @@ int main(int argc, char **argv)
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c
index 17bcb07f1..ef7d911da 100644
--- a/tools/testing/selftests/mm/madv_populate.c
+++ b/tools/testing/selftests/mm/madv_populate.c
@@ -307,5 +307,5 @@ int main(int argc, char **argv)
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/mdwe_test.c b/tools/testing/selftests/mm/mdwe_test.c
index 1e01d3ddc..200bedcdc 100644
--- a/tools/testing/selftests/mm/mdwe_test.c
+++ b/tools/testing/selftests/mm/mdwe_test.c
@@ -7,7 +7,6 @@
#include <linux/mman.h>
#include <linux/prctl.h>
-#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/auxv.h>
diff --git a/tools/testing/selftests/mm/mkdirty.c b/tools/testing/selftests/mm/mkdirty.c
index 301abb99e..b8a7efe92 100644
--- a/tools/testing/selftests/mm/mkdirty.c
+++ b/tools/testing/selftests/mm/mkdirty.c
@@ -375,5 +375,5 @@ int main(void)
if (err)
ksft_exit_fail_msg("%d out of %d tests failed\n",
err, ksft_test_num());
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c
index d59517ed3..2d785aca7 100644
--- a/tools/testing/selftests/mm/pagemap_ioctl.c
+++ b/tools/testing/selftests/mm/pagemap_ioctl.c
@@ -1484,7 +1484,7 @@ int main(int argc, char *argv[])
ksft_print_header();
if (init_uffd())
- return ksft_exit_pass();
+ ksft_exit_pass();
ksft_set_plan(115);
@@ -1660,5 +1660,5 @@ int main(int argc, char *argv[])
userfaultfd_tests();
close(pagemap_fd);
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c
index 7dbfa53d9..d9dbf8797 100644
--- a/tools/testing/selftests/mm/soft-dirty.c
+++ b/tools/testing/selftests/mm/soft-dirty.c
@@ -209,5 +209,5 @@ int main(int argc, char **argv)
close(pagemap_fd);
- return ksft_exit_pass();
+ ksft_exit_pass();
}
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index cc5629c3d..a70ae10b5 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -8,6 +8,7 @@
#define __UFFD_COMMON_H__
#define _GNU_SOURCE
+#define __SANE_USERSPACE_TYPES__ // Use ll64
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh
index 75528788c..7e7ed6c55 100755
--- a/tools/testing/selftests/net/amt.sh
+++ b/tools/testing/selftests/net/amt.sh
@@ -77,6 +77,7 @@ readonly LISTENER=$(mktemp -u listener-XXXXXXXX)
readonly GATEWAY=$(mktemp -u gateway-XXXXXXXX)
readonly RELAY=$(mktemp -u relay-XXXXXXXX)
readonly SOURCE=$(mktemp -u source-XXXXXXXX)
+readonly SMCROUTEDIR="$(mktemp -d)"
ERR=4
err=0
@@ -85,6 +86,11 @@ exit_cleanup()
for ns in "$@"; do
ip netns delete "${ns}" 2>/dev/null || true
done
+ if [ -f "$SMCROUTEDIR/amt.pid" ]; then
+ smcpid=$(< $SMCROUTEDIR/amt.pid)
+ kill $smcpid
+ fi
+ rm -rf $SMCROUTEDIR
exit $ERR
}
@@ -167,7 +173,7 @@ setup_iptables()
setup_mcast_routing()
{
- ip netns exec "${RELAY}" smcrouted
+ ip netns exec "${RELAY}" smcrouted -P $SMCROUTEDIR/amt.pid
ip netns exec "${RELAY}" smcroutectl a relay_src \
172.17.0.2 239.0.0.1 amtr
ip netns exec "${RELAY}" smcroutectl a relay_src \
@@ -210,8 +216,8 @@ check_features()
test_ipv4_forward()
{
- RESULT4=$(ip netns exec "${LISTENER}" nc -w 1 -l -u 239.0.0.1 4000)
- if [ "$RESULT4" == "172.17.0.2" ]; then
+ RESULT4=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP4-LISTEN:4000,readbytes=128 || true)
+ if echo "$RESULT4" | grep -q "172.17.0.2"; then
printf "TEST: %-60s [ OK ]\n" "IPv4 amt multicast forwarding"
exit 0
else
@@ -222,8 +228,8 @@ test_ipv4_forward()
test_ipv6_forward()
{
- RESULT6=$(ip netns exec "${LISTENER}" nc -w 1 -l -u ff0e::5:6 6000)
- if [ "$RESULT6" == "2001:db8:3::2" ]; then
+ RESULT6=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP6-LISTEN:6000,readbytes=128 || true)
+ if echo "$RESULT6" | grep -q "2001:db8:3::2"; then
printf "TEST: %-60s [ OK ]\n" "IPv6 amt multicast forwarding"
exit 0
else
@@ -236,14 +242,14 @@ send_mcast4()
{
sleep 2
ip netns exec "${SOURCE}" bash -c \
- 'echo 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
+ 'printf "%s %128s" 172.17.0.2 | nc -w 1 -u 239.0.0.1 4000' &
}
send_mcast6()
{
sleep 2
ip netns exec "${SOURCE}" bash -c \
- 'echo 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
+ 'printf "%s %128s" 2001:db8:3::2 | nc -w 1 -u ff0e::5:6 6000' &
}
check_features
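The nc-based listeners are replaced with socat under a 15 s timeout, and the senders pad the payload with printf "%s %128s", presumably so the listener's readbytes=128 limit is reached and socat returns as soon as the datagram arrives instead of sitting out the full timeout. The receive-side pattern, sketched with a placeholder port and expected source address:

    RESULT=$(ip netns exec "${LISTENER}" timeout 15 socat - UDP4-LISTEN:4000,readbytes=128 || true)
    echo "$RESULT" | grep -q "$EXPECTED_SRC"    # placeholders: 4000, $EXPECTED_SRC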
diff --git a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
index a40c0e9bd..eef5cbf6e 100755
--- a/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
+++ b/tools/testing/selftests/net/arp_ndisc_untracked_subnets.sh
@@ -73,25 +73,19 @@ setup_v6() {
# namespaces. veth0 is veth-router, veth1 is veth-host.
# first, set up the interface's link to the namespace
# then, set the interface "up"
- ip -6 -netns ${ROUTER_NS_V6} link add name ${ROUTER_INTF} \
- type veth peer name ${HOST_INTF}
-
- ip -6 -netns ${ROUTER_NS_V6} link set dev ${ROUTER_INTF} up
- ip -6 -netns ${ROUTER_NS_V6} link set dev ${HOST_INTF} netns \
- ${HOST_NS_V6}
+ ip -n ${ROUTER_NS_V6} link add name ${ROUTER_INTF} \
+ type veth peer name ${HOST_INTF} netns ${HOST_NS_V6}
- ip -6 -netns ${HOST_NS_V6} link set dev ${HOST_INTF} up
- ip -6 -netns ${ROUTER_NS_V6} addr add \
- ${ROUTER_ADDR_V6}/${PREFIX_WIDTH_V6} dev ${ROUTER_INTF} nodad
+ # Add tc rule to filter out host na message
+ tc -n ${ROUTER_NS_V6} qdisc add dev ${ROUTER_INTF} clsact
+ tc -n ${ROUTER_NS_V6} filter add dev ${ROUTER_INTF} \
+ ingress protocol ipv6 pref 1 handle 101 \
+ flower src_ip ${HOST_ADDR_V6} ip_proto icmpv6 type 136 skip_hw action pass
HOST_CONF=net.ipv6.conf.${HOST_INTF}
ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.ndisc_notify=1
ip netns exec ${HOST_NS_V6} sysctl -qw ${HOST_CONF}.disable_ipv6=0
- ip -6 -netns ${HOST_NS_V6} addr add ${HOST_ADDR_V6}/${PREFIX_WIDTH_V6} \
- dev ${HOST_INTF}
-
ROUTER_CONF=net.ipv6.conf.${ROUTER_INTF}
-
ip netns exec ${ROUTER_NS_V6} sysctl -w \
${ROUTER_CONF}.forwarding=1 >/dev/null 2>&1
ip netns exec ${ROUTER_NS_V6} sysctl -w \
@@ -99,6 +93,13 @@ setup_v6() {
ip netns exec ${ROUTER_NS_V6} sysctl -w \
${ROUTER_CONF}.accept_untracked_na=${accept_untracked_na} \
>/dev/null 2>&1
+
+ ip -n ${ROUTER_NS_V6} link set dev ${ROUTER_INTF} up
+ ip -n ${HOST_NS_V6} link set dev ${HOST_INTF} up
+ ip -n ${ROUTER_NS_V6} addr add ${ROUTER_ADDR_V6}/${PREFIX_WIDTH_V6} \
+ dev ${ROUTER_INTF} nodad
+ ip -n ${HOST_NS_V6} addr add ${HOST_ADDR_V6}/${PREFIX_WIDTH_V6} \
+ dev ${HOST_INTF}
set +e
}
@@ -162,26 +163,6 @@ arp_test_gratuitous_combinations() {
arp_test_gratuitous 2 1
}
-cleanup_tcpdump() {
- set -e
- [[ ! -z ${tcpdump_stdout} ]] && rm -f ${tcpdump_stdout}
- [[ ! -z ${tcpdump_stderr} ]] && rm -f ${tcpdump_stderr}
- tcpdump_stdout=
- tcpdump_stderr=
- set +e
-}
-
-start_tcpdump() {
- set -e
- tcpdump_stdout=`mktemp`
- tcpdump_stderr=`mktemp`
- ip netns exec ${ROUTER_NS_V6} timeout 15s \
- tcpdump --immediate-mode -tpni ${ROUTER_INTF} -c 1 \
- "icmp6 && icmp6[0] == 136 && src ${HOST_ADDR_V6}" \
- > ${tcpdump_stdout} 2> /dev/null
- set +e
-}
-
verify_ndisc() {
local accept_untracked_na=$1
local same_subnet=$2
@@ -222,8 +203,9 @@ ndisc_test_untracked_advertisements() {
HOST_ADDR_V6=2001:db8:abcd:0012::3
fi
fi
- setup_v6 $1 $2
- start_tcpdump
+ setup_v6 $1
+ slowwait_for_counter 15 1 \
+ tc_rule_handle_stats_get "dev ${ROUTER_INTF} ingress" 101 ".packets" "-n ${ROUTER_NS_V6}"
if verify_ndisc $1 $2; then
printf " TEST: %-60s [ OK ]\n" "${test_msg[*]}"
@@ -231,7 +213,6 @@ ndisc_test_untracked_advertisements() {
printf " TEST: %-60s [FAIL]\n" "${test_msg[*]}"
fi
- cleanup_tcpdump
cleanup_v6
set +e
}
diff --git a/tools/testing/selftests/net/cmsg_sender.c b/tools/testing/selftests/net/cmsg_sender.c
index c79e65581..161db24e3 100644
--- a/tools/testing/selftests/net/cmsg_sender.c
+++ b/tools/testing/selftests/net/cmsg_sender.c
@@ -333,16 +333,17 @@ static const char *cs_ts_info2str(unsigned int info)
return "unknown";
}
-static void
+static unsigned long
cs_read_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz)
{
struct sock_extended_err *see;
struct scm_timestamping *ts;
+ unsigned long ts_seen = 0;
struct cmsghdr *cmsg;
int i, err;
if (!opt.ts.ena)
- return;
+ return 0;
msg->msg_control = cbuf;
msg->msg_controllen = cbuf_sz;
@@ -396,8 +397,11 @@ cs_read_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz)
printf(" %5s ts%d %lluus\n",
cs_ts_info2str(see->ee_info),
i, rel_time);
+ ts_seen |= 1 << see->ee_info;
}
}
+
+ return ts_seen;
}
static void ca_set_sockopts(int fd)
@@ -509,10 +513,16 @@ int main(int argc, char *argv[])
err = ERN_SUCCESS;
if (opt.ts.ena) {
- /* Make sure all timestamps have time to loop back */
- usleep(opt.txtime.delay);
+ unsigned long seen;
+ int i;
- cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
+ /* Make sure all timestamps have time to loop back */
+ for (i = 0; i < 40; i++) {
+ seen = cs_read_cmsg(fd, &msg, cbuf, sizeof(cbuf));
+ if (seen & (1 << SCM_TSTAMP_SND))
+ break;
+ usleep(opt.txtime.delay / 20);
+ }
}
err_out:
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 5e4390cac..04de7a6ba 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -30,6 +30,7 @@ CONFIG_IP_GRE=m
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NF_CONNTRACK=m
+CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_SIT=y
CONFIG_IP_DCCP=m
CONFIG_NF_NAT=m
diff --git a/tools/testing/selftests/net/forwarding/bridge_igmp.sh b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
index 2aa66d2a1..e6a3e04fd 100755
--- a/tools/testing/selftests/net/forwarding/bridge_igmp.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_igmp.sh
@@ -478,10 +478,10 @@ v3exc_timeout_test()
RET=0
local X=("192.0.2.20" "192.0.2.30")
- # GMI should be 3 seconds
+ # GMI should be 5 seconds
ip link set dev br0 type bridge mcast_query_interval 100 \
mcast_query_response_interval 100 \
- mcast_membership_interval 300
+ mcast_membership_interval 500
v3exclude_prepare $h1 $ALL_MAC $ALL_GROUP
ip link set dev br0 type bridge mcast_query_interval 500 \
@@ -489,7 +489,7 @@ v3exc_timeout_test()
mcast_membership_interval 1500
$MZ $h1 -c 1 -b $ALL_MAC -B $ALL_GROUP -t ip "proto=2,p=$MZPKT_ALLOW2" -q
- sleep 3
+ sleep 5
bridge -j -d -s mdb show dev br0 \
| jq -e ".[].mdb[] | \
select(.grp == \"$TEST_GROUP\" and \
diff --git a/tools/testing/selftests/net/forwarding/bridge_mld.sh b/tools/testing/selftests/net/forwarding/bridge_mld.sh
index e2b9ff773..f84ab2e65 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mld.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mld.sh
@@ -478,10 +478,10 @@ mldv2exc_timeout_test()
RET=0
local X=("2001:db8:1::20" "2001:db8:1::30")
- # GMI should be 3 seconds
+ # GMI should be 5 seconds
ip link set dev br0 type bridge mcast_query_interval 100 \
mcast_query_response_interval 100 \
- mcast_membership_interval 300
+ mcast_membership_interval 500
mldv2exclude_prepare $h1
ip link set dev br0 type bridge mcast_query_interval 500 \
@@ -489,7 +489,7 @@ mldv2exc_timeout_test()
mcast_membership_interval 1500
$MZ $h1 -c 1 $MZPKT_ALLOW2 -q
- sleep 3
+ sleep 5
bridge -j -d -s mdb show dev br0 \
| jq -e ".[].mdb[] | \
select(.grp == \"$TEST_GROUP\" and \
diff --git a/tools/testing/selftests/net/forwarding/ethtool_rmon.sh b/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
index 41a34a61f..e78776db8 100755
--- a/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
+++ b/tools/testing/selftests/net/forwarding/ethtool_rmon.sh
@@ -78,7 +78,7 @@ rmon_histogram()
for if in $iface $neigh; do
if ! ensure_mtu $if ${bucket[0]}; then
- log_test_skip "$if does not support the required MTU for $step"
+ log_test_xfail "$if does not support the required MTU for $step"
return
fi
done
@@ -93,7 +93,7 @@ rmon_histogram()
jq -r ".[0].rmon[\"${set}-pktsNtoM\"][]|[.low, .high]|@tsv" 2>/dev/null)
if [ $nbuckets -eq 0 ]; then
- log_test_skip "$iface does not support $set histogram counters"
+ log_test_xfail "$iface does not support $set histogram counters"
return
fi
}
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index e579c2e0c..e78f11140 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -38,32 +38,6 @@ fi
source "$net_forwarding_dir/../lib.sh"
-# timeout in seconds
-slowwait()
-{
- local timeout=$1; shift
-
- local start_time="$(date -u +%s)"
- while true
- do
- local out
- out=$("$@")
- local ret=$?
- if ((!ret)); then
- echo -n "$out"
- return 0
- fi
-
- local current_time="$(date -u +%s)"
- if ((current_time - start_time > timeout)); then
- echo -n "$out"
- return 1
- fi
-
- sleep 0.1
- done
-}
-
##############################################################################
# Sanity checks
@@ -358,14 +332,24 @@ EXIT_STATUS=0
# Per-test return value. Clear at the beginning of each test.
RET=0
+ret_set_ksft_status()
+{
+ local ksft_status=$1; shift
+ local msg=$1; shift
+
+ RET=$(ksft_status_merge $RET $ksft_status)
+ if (( $? )); then
+ retmsg=$msg
+ fi
+}
+
check_err()
{
local err=$1
local msg=$2
- if [[ $RET -eq 0 && $err -ne 0 ]]; then
- RET=$err
- retmsg=$msg
+ if ((err)); then
+ ret_set_ksft_status $ksft_fail "$msg"
fi
}
@@ -374,10 +358,7 @@ check_fail()
local err=$1
local msg=$2
- if [[ $RET -eq 0 && $err -eq 0 ]]; then
- RET=1
- retmsg=$msg
- fi
+ check_err $((!err)) "$msg"
}
check_err_fail()
@@ -393,6 +374,62 @@ check_err_fail()
fi
}
+log_test_result()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+ local result=$1; shift
+ local retmsg=$1; shift
+
+ printf "TEST: %-60s [%s]\n" "$test_name $opt_str" "$result"
+ if [[ $retmsg ]]; then
+ printf "\t%s\n" "$retmsg"
+ fi
+}
+
+pause_on_fail()
+{
+ if [[ $PAUSE_ON_FAIL == yes ]]; then
+ echo "Hit enter to continue, 'q' to quit"
+ read a
+ [[ $a == q ]] && exit 1
+ fi
+}
+
+handle_test_result_pass()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" " OK "
+}
+
+handle_test_result_fail()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" FAIL "$retmsg"
+ pause_on_fail
+}
+
+handle_test_result_xfail()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" XFAIL "$retmsg"
+ pause_on_fail
+}
+
+handle_test_result_skip()
+{
+ local test_name=$1; shift
+ local opt_str=$1; shift
+
+ log_test_result "$test_name" "$opt_str" SKIP "$retmsg"
+}
+
log_test()
{
local test_name=$1
@@ -402,31 +439,28 @@ log_test()
opt_str="($opt_str)"
fi
- if [[ $RET -ne 0 ]]; then
- EXIT_STATUS=1
- printf "TEST: %-60s [FAIL]\n" "$test_name $opt_str"
- if [[ ! -z "$retmsg" ]]; then
- printf "\t%s\n" "$retmsg"
- fi
- if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
- echo "Hit enter to continue, 'q' to quit"
- read a
- [ "$a" = "q" ] && exit 1
- fi
- return 1
+ if ((RET == ksft_pass)); then
+ handle_test_result_pass "$test_name" "$opt_str"
+ elif ((RET == ksft_xfail)); then
+ handle_test_result_xfail "$test_name" "$opt_str"
+ elif ((RET == ksft_skip)); then
+ handle_test_result_skip "$test_name" "$opt_str"
+ else
+ handle_test_result_fail "$test_name" "$opt_str"
fi
- printf "TEST: %-60s [ OK ]\n" "$test_name $opt_str"
- return 0
+ EXIT_STATUS=$(ksft_exit_status_merge $EXIT_STATUS $RET)
+ return $RET
}
log_test_skip()
{
- local test_name=$1
- local opt_str=$2
+ RET=$ksft_skip retmsg= log_test "$@"
+}
- printf "TEST: %-60s [SKIP]\n" "$test_name $opt_str"
- return 0
+log_test_xfail()
+{
+ RET=$ksft_xfail retmsg= log_test "$@"
}
log_info()
@@ -487,33 +521,6 @@ wait_for_trap()
"$@" | grep -q trap
}
-until_counter_is()
-{
- local expr=$1; shift
- local current=$("$@")
-
- echo $((current))
- ((current $expr))
-}
-
-busywait_for_counter()
-{
- local timeout=$1; shift
- local delta=$1; shift
-
- local base=$("$@")
- busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
-}
-
-slowwait_for_counter()
-{
- local timeout=$1; shift
- local delta=$1; shift
-
- local base=$("$@")
- slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
-}
-
setup_wait_dev()
{
local dev=$1; shift
@@ -819,29 +826,6 @@ link_stats_rx_errors_get()
link_stats_get $1 rx errors
}
-tc_rule_stats_get()
-{
- local dev=$1; shift
- local pref=$1; shift
- local dir=$1; shift
- local selector=${1:-.packets}; shift
-
- tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
- | jq ".[1].options.actions[].stats$selector"
-}
-
-tc_rule_handle_stats_get()
-{
- local id=$1; shift
- local handle=$1; shift
- local selector=${1:-.packets}; shift
- local netns=${1:-""}; shift
-
- tc $netns -j -s filter show $id \
- | jq ".[] | select(.options.handle == $handle) | \
- .options.actions[0].stats$selector"
-}
-
ethtool_stats_get()
{
local dev=$1; shift
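RET is now carried as a kselftest status code and merged with ksft_status_merge() (fail takes precedence over skip, skip over xfail, xfail over pass), and log_test() picks the OK/FAIL/XFAIL/SKIP banner from the merged value; log_test_skip() and log_test_xfail() simply force the corresponding status. A minimal sketch of the intended use, with run_step standing in for real test commands:

    RET=0
    run_step
    check_err $? "step failed"     # a non-zero result merges ksft_fail into RET
    log_test "example test"        # prints [ OK ] or [FAIL] (with $retmsg) accordingly

    # a case that cannot run meaningfully on this topology:
    log_test_xfail "example test" "(not offloaded on veth)"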
diff --git a/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
index 7e7d62161..b2d2c6cec 100644
--- a/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
+++ b/tools/testing/selftests/net/forwarding/router_mpath_nh_lib.sh
@@ -69,7 +69,7 @@ nh_stats_test_dispatch_swhw()
nh_stats_do_test "HW $what" "$nh1_id" "$nh2_id" "$group_id" \
nh_stats_get_hw "${mz[@]}"
elif [[ $kind == veth ]]; then
- log_test_skip "HW stats not offloaded on veth topology"
+ log_test_xfail "HW stats not offloaded on veth topology"
fi
}
diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh
index f9fe182df..16372ca16 100644
--- a/tools/testing/selftests/net/lib.sh
+++ b/tools/testing/selftests/net/lib.sh
@@ -10,33 +10,116 @@ BUSYWAIT_TIMEOUT=$((WAIT_TIMEOUT * 1000)) # ms
# Kselftest framework requirement - SKIP code is 4.
ksft_skip=4
# namespace list created by setup_ns
-NS_LIST=""
+NS_LIST=()
##############################################################################
# Helpers
-busywait()
+
+__ksft_status_merge()
{
- local timeout=$1; shift
+ local a=$1; shift
+ local b=$1; shift
+ local -A weights
+ local weight=0
+
+ local i
+ for i in "$@"; do
+ weights[$i]=$((weight++))
+ done
+
+ if [[ ${weights[$a]} > ${weights[$b]} ]]; then
+ echo "$a"
+ return 0
+ else
+ echo "$b"
+ return 1
+ fi
+}
+
+ksft_status_merge()
+{
+ local a=$1; shift
+ local b=$1; shift
+
+ __ksft_status_merge "$a" "$b" \
+ $ksft_pass $ksft_xfail $ksft_skip $ksft_fail
+}
+
+ksft_exit_status_merge()
+{
+ local a=$1; shift
+ local b=$1; shift
+
+ __ksft_status_merge "$a" "$b" \
+ $ksft_xfail $ksft_pass $ksft_skip $ksft_fail
+}
+
+loopy_wait()
+{
+ local sleep_cmd=$1; shift
+ local timeout_ms=$1; shift
local start_time="$(date -u +%s%3N)"
while true
do
local out
- out=$("$@")
- local ret=$?
- if ((!ret)); then
+ if out=$("$@"); then
echo -n "$out"
return 0
fi
local current_time="$(date -u +%s%3N)"
- if ((current_time - start_time > timeout)); then
+ if ((current_time - start_time > timeout_ms)); then
echo -n "$out"
return 1
fi
+
+ $sleep_cmd
done
}
+busywait()
+{
+ local timeout_ms=$1; shift
+
+ loopy_wait : "$timeout_ms" "$@"
+}
+
+# timeout in seconds
+slowwait()
+{
+ local timeout_sec=$1; shift
+
+ loopy_wait "sleep 0.1" "$((timeout_sec * 1000))" "$@"
+}
+
+until_counter_is()
+{
+ local expr=$1; shift
+ local current=$("$@")
+
+ echo $((current))
+ ((current $expr))
+}
+
+busywait_for_counter()
+{
+ local timeout=$1; shift
+ local delta=$1; shift
+
+ local base=$("$@")
+ busywait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+}
+
+slowwait_for_counter()
+{
+ local timeout=$1; shift
+ local delta=$1; shift
+
+ local base=$("$@")
+ slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@"
+}
+
cleanup_ns()
{
local ns=""
@@ -50,6 +133,7 @@ cleanup_ns()
fi
for ns in "$@"; do
+ [ -z "${ns}" ] && continue
ip netns delete "${ns}" &> /dev/null
if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
echo "Warn: Failed to remove namespace $ns"
@@ -63,7 +147,7 @@ cleanup_ns()
cleanup_all_ns()
{
- cleanup_ns $NS_LIST
+ cleanup_ns "${NS_LIST[@]}"
}
# setup netns with given names as prefix. e.g
@@ -72,25 +156,50 @@ setup_ns()
{
local ns=""
local ns_name=""
- local ns_list=""
+ local ns_list=()
+ local ns_exist=
for ns_name in "$@"; do
# Some test may setup/remove same netns multi times
if unset ${ns_name} 2> /dev/null; then
ns="${ns_name,,}-$(mktemp -u XXXXXX)"
eval readonly ${ns_name}="$ns"
+ ns_exist=false
else
eval ns='$'${ns_name}
cleanup_ns "$ns"
-
+ ns_exist=true
fi
if ! ip netns add "$ns"; then
echo "Failed to create namespace $ns_name"
- cleanup_ns "$ns_list"
+ cleanup_ns "${ns_list[@]}"
return $ksft_skip
fi
ip -n "$ns" link set lo up
- ns_list="$ns_list $ns"
+ ! $ns_exist && ns_list+=("$ns")
done
- NS_LIST="$NS_LIST $ns_list"
+ NS_LIST+=("${ns_list[@]}")
+}
+
+tc_rule_stats_get()
+{
+ local dev=$1; shift
+ local pref=$1; shift
+ local dir=$1; shift
+ local selector=${1:-.packets}; shift
+
+ tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \
+ | jq ".[1].options.actions[].stats$selector"
+}
+
+tc_rule_handle_stats_get()
+{
+ local id=$1; shift
+ local handle=$1; shift
+ local selector=${1:-.packets}; shift
+ local netns=${1:-""}; shift
+
+ tc $netns -j -s filter show $id \
+ | jq ".[] | select(.options.handle == $handle) | \
+ .options.actions[0].stats$selector"
}
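busywait() and slowwait() are now both thin wrappers around loopy_wait(), differing only in the sleep command (':' versus 'sleep 0.1') and in timeout units (milliseconds versus seconds), and the counter/tc-stats helpers move into this shared lib so plain-namespace scripts such as arp_ndisc_untracked_subnets.sh can use them. A usage sketch matching the tc-rule wait added above, with device, namespace and handle as placeholders:

    # wait up to 15 s for ingress filter handle 101 on $dev (in netns $ns)
    # to have counted at least one packet
    slowwait_for_counter 15 1 \
            tc_rule_handle_stats_get "dev $dev ingress" 101 ".packets" "-n $ns"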
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index e4403236f..2290125f6 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -125,8 +125,8 @@ init_shapers()
{
local i
for i in $(seq 1 4); do
- tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1
- tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1
+ tc -n $ns1 qdisc add dev ns1eth$i root netem rate 20mbit delay 1ms
+ tc -n $ns2 qdisc add dev ns2eth$i root netem rate 20mbit delay 1ms
done
}
@@ -262,6 +262,8 @@ reset()
TEST_NAME="${1}"
+ MPTCP_LIB_SUBTEST_FLAKY=0 # reset if modified
+
if skip_test; then
MPTCP_LIB_TEST_COUNTER=$((MPTCP_LIB_TEST_COUNTER+1))
last_test_ignored=1
@@ -449,7 +451,9 @@ reset_with_tcp_filter()
# $1: err msg
fail_test()
{
- ret=${KSFT_FAIL}
+ if ! mptcp_lib_subtest_is_flaky; then
+ ret=${KSFT_FAIL}
+ fi
if [ ${#} -gt 0 ]; then
print_fail "${@}"
@@ -2354,9 +2358,10 @@ remove_tests()
if reset "remove invalid addresses"; then
pm_nl_set_limits $ns1 3 3
pm_nl_add_endpoint $ns1 10.0.12.1 flags signal
+ # broadcast IP: no packet for this address will be received on ns1
+ pm_nl_add_endpoint $ns1 224.0.0.1 flags signal
pm_nl_add_endpoint $ns1 10.0.3.1 flags signal
- pm_nl_add_endpoint $ns1 10.0.14.1 flags signal
- pm_nl_set_limits $ns2 3 3
+ pm_nl_set_limits $ns2 2 2
addr_nr_ns1=-3 speed=10 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
@@ -3178,6 +3183,7 @@ fullmesh_tests()
fastclose_tests()
{
if reset_check_counter "fastclose test" "MPTcpExtMPFastcloseTx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=client \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
@@ -3186,6 +3192,7 @@ fastclose_tests()
fi
if reset_check_counter "fastclose server test" "MPTcpExtMPFastcloseRx"; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=1024 fastclose=server \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 0 0 0 1
@@ -3204,6 +3211,7 @@ fail_tests()
{
# single subflow
if reset_with_fail "Infinite map" 1; then
+ MPTCP_LIB_SUBTEST_FLAKY=1
test_linkfail=128 \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0 +1 +0 1 0 1 "$(pedit_action_pkts)"
@@ -3212,7 +3220,8 @@ fail_tests()
# multiple subflows
if reset_with_fail "MP_FAIL MP_RST" 2; then
- tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5
+ MPTCP_LIB_SUBTEST_FLAKY=1
+ tc -n $ns2 qdisc add dev ns2eth1 root netem rate 1mbit delay 5ms
pm_nl_set_limits $ns1 0 1
pm_nl_set_limits $ns2 0 1
pm_nl_add_endpoint $ns2 10.0.2.2 dev ns2eth2 flags subflow
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
index 1b2366220..7322e1e4e 100755
--- a/tools/testing/selftests/net/mptcp/simult_flows.sh
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -216,8 +216,8 @@ run_test()
shift 4
local msg=$*
- [ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
- [ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
+ [ $delay1 -gt 0 ] && delay1="delay ${delay1}ms" || delay1=""
+ [ $delay2 -gt 0 ] && delay2="delay ${delay2}ms" || delay2=""
for dev in ns1eth1 ns1eth2; do
tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
@@ -243,7 +243,7 @@ run_test()
do_transfer $small $large $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -253,7 +253,7 @@ run_test()
do_transfer $large $small $time
lret=$?
mptcp_lib_result_code "${lret}" "${msg}"
- if [ $lret -ne 0 ]; then
+ if [ $lret -ne 0 ] && ! mptcp_lib_subtest_is_flaky; then
ret=$lret
[ $bail -eq 0 ] || exit $ret
fi
@@ -286,7 +286,7 @@ run_test 10 10 0 0 "balanced bwidth"
run_test 10 10 1 25 "balanced bwidth with unbalanced delay"
# we still need some additional infrastructure to pass the following test-cases
-run_test 10 3 0 0 "unbalanced bwidth"
+MPTCP_LIB_SUBTEST_FLAKY=1 run_test 10 3 0 0 "unbalanced bwidth"
run_test 10 3 1 25 "unbalanced bwidth with unbalanced delay"
run_test 10 3 25 1 "unbalanced bwidth with opposed, unbalanced delay"
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 9e2981f2d..9cb059782 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -160,10 +160,12 @@ make_connection()
local is_v6=$1
local app_port=$app4_port
local connect_addr="10.0.1.1"
+ local client_addr="10.0.1.2"
local listen_addr="0.0.0.0"
if [ "$is_v6" = "v6" ]
then
connect_addr="dead:beef:1::1"
+ client_addr="dead:beef:1::2"
listen_addr="::"
app_port=$app6_port
else
@@ -206,6 +208,7 @@ make_connection()
[ "$server_serverside" = 1 ]
then
test_pass
+ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
else
test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
mptcp_lib_result_print_all_tap
@@ -297,7 +300,7 @@ test_announce()
ip netns exec "$ns2"\
./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id $client_addr_id dev\
ns2eth1
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR id:client 10.0.2.2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event $server_evts $ANNOUNCED $server4_token "10.0.2.2" $client_addr_id \
"$client4_port"
@@ -306,7 +309,7 @@ test_announce()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl ann\
dead:beef:2::2 token "$client6_token" id $client_addr_id dev ns2eth1
- print_test "ADD_ADDR6 id:${client_addr_id} dead:beef:2::2 (ns2) => ns1, reuse port"
+ print_test "ADD_ADDR6 id:client dead:beef:2::2 (ns2) => ns1, reuse port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server6_token" "dead:beef:2::2"\
"$client_addr_id" "$client6_port" "v6"
@@ -316,7 +319,7 @@ test_announce()
client_addr_id=$((client_addr_id+1))
ip netns exec "$ns2" ./pm_nl_ctl ann 10.0.2.2 token "$client4_token" id\
$client_addr_id dev ns2eth1 port $new4_port
- print_test "ADD_ADDR id:${client_addr_id} 10.0.2.2 (ns2) => ns1, new port"
+ print_test "ADD_ADDR id:client+1 10.0.2.2 (ns2) => ns1, new port"
sleep 0.5
verify_announce_event "$server_evts" "$ANNOUNCED" "$server4_token" "10.0.2.2"\
"$client_addr_id" "$new4_port"
@@ -327,7 +330,7 @@ test_announce()
# ADD_ADDR from the server to client machine reusing the subflow port
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$app4_port"
@@ -336,7 +339,7 @@ test_announce()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann dead:beef:2::1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR6 id:${server_addr_id} dead:beef:2::1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR6 id:server dead:beef:2::1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "dead:beef:2::1"\
"$server_addr_id" "$app6_port" "v6"
@@ -346,7 +349,7 @@ test_announce()
server_addr_id=$((server_addr_id+1))
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server4_token" id\
$server_addr_id dev ns1eth2 port $new4_port
- print_test "ADD_ADDR id:${server_addr_id} 10.0.2.1 (ns1) => ns2, new port"
+ print_test "ADD_ADDR id:server+1 10.0.2.1 (ns1) => ns2, new port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client4_token" "10.0.2.1"\
"$server_addr_id" "$new4_port"
@@ -380,7 +383,7 @@ test_remove()
local invalid_token=$(( client4_token - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token $invalid_token id\
$client_addr_id > /dev/null 2>&1
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1, invalid token"
+ print_test "RM_ADDR id:client ns2 => ns1, invalid token"
local type
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
@@ -394,7 +397,7 @@ test_remove()
local invalid_id=$(( client_addr_id + 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$invalid_id > /dev/null 2>&1
- print_test "RM_ADDR id:${invalid_id} ns2 => ns1, invalid id"
+ print_test "RM_ADDR id:client+1 ns2 => ns1, invalid id"
type=$(mptcp_lib_evts_get_info type "$server_evts")
if [ "$type" = "" ]
then
@@ -407,7 +410,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -416,7 +419,7 @@ test_remove()
client_addr_id=$(( client_addr_id - 1 ))
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client4_token" id\
$client_addr_id
- print_test "RM_ADDR id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server4_token" "$client_addr_id"
@@ -424,7 +427,7 @@ test_remove()
:>"$server_evts"
ip netns exec "$ns2" ./pm_nl_ctl rem token "$client6_token" id\
$client_addr_id
- print_test "RM_ADDR6 id:${client_addr_id} ns2 => ns1"
+ print_test "RM_ADDR6 id:client-1 ns2 => ns1"
sleep 0.5
verify_remove_event "$server_evts" "$REMOVED" "$server6_token" "$client_addr_id"
@@ -434,7 +437,7 @@ test_remove()
# RM_ADDR from the server to client machine
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -443,7 +446,7 @@ test_remove()
server_addr_id=$(( server_addr_id - 1 ))
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server4_token" id\
$server_addr_id
- print_test "RM_ADDR id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client4_token" "$server_addr_id"
@@ -451,7 +454,7 @@ test_remove()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl rem token "$server6_token" id\
$server_addr_id
- print_test "RM_ADDR6 id:${server_addr_id} ns1 => ns2"
+ print_test "RM_ADDR6 id:server-1 ns1 => ns2"
sleep 0.5
verify_remove_event "$client_evts" "$REMOVED" "$client6_token" "$server_addr_id"
}
@@ -479,8 +482,14 @@ verify_subflow_events()
local locid
local remid
local info
+ local e_dport_txt
- info="${e_saddr} (${e_from}) => ${e_daddr}:${e_dport} (${e_to})"
+ # only display the fixed ports
+ if [ "${e_dport}" -ge "${app4_port}" ] && [ "${e_dport}" -le "${app6_port}" ]; then
+ e_dport_txt=":${e_dport}"
+ fi
+
+ info="${e_saddr} (${e_from}) => ${e_daddr}${e_dport_txt} (${e_to})"
if [ "$e_type" = "$SUB_ESTABLISHED" ]
then
@@ -766,7 +775,7 @@ test_subflows_v4_v6_mix()
:>"$client_evts"
ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
$server_addr_id dev ns1eth2
- print_test "ADD_ADDR4 id:${server_addr_id} 10.0.2.1 (ns1) => ns2, reuse port"
+ print_test "ADD_ADDR4 id:server 10.0.2.1 (ns1) => ns2, reuse port"
sleep 0.5
verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
"$server_addr_id" "$app6_port"
@@ -861,7 +870,7 @@ test_listener()
local listener_pid=$!
sleep 0.5
- print_test "CREATE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CREATE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CREATED $AF_INET 10.0.2.2 $client4_port
# ADD_ADDR from client to server machine reusing the subflow port
@@ -878,13 +887,14 @@ test_listener()
mptcp_lib_kill_wait $listener_pid
sleep 0.5
- print_test "CLOSE_LISTENER 10.0.2.2:$client4_port"
+ print_test "CLOSE_LISTENER 10.0.2.2 (client port)"
verify_listener_events $client_evts $LISTENER_CLOSED $AF_INET 10.0.2.2 $client4_port
}
print_title "Make connections"
make_connection
make_connection "v6"
+print_title "Will be using address IDs ${client_addr_id} (client) and ${server_addr_id} (server)"
test_announce
test_remove
diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh
index 5cae53543..15bca0708 100755
--- a/tools/testing/selftests/net/openvswitch/openvswitch.sh
+++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# OVS kernel module self tests
diff --git a/tools/testing/selftests/power_supply/test_power_supply_properties.sh b/tools/testing/selftests/power_supply/test_power_supply_properties.sh
index df272dfe1..a66b1313e 100755
--- a/tools/testing/selftests/power_supply/test_power_supply_properties.sh
+++ b/tools/testing/selftests/power_supply/test_power_supply_properties.sh
@@ -23,7 +23,7 @@ count_tests() {
total_tests=0
for i in $SUPPLIES; do
- total_tests=$(("$total_tests" + "$NUM_TESTS"))
+ total_tests=$((total_tests + NUM_TESTS))
done
echo "$total_tests"
diff --git a/tools/testing/selftests/powerpc/dexcr/Makefile b/tools/testing/selftests/powerpc/dexcr/Makefile
index 76210f2bc..829ad075b 100644
--- a/tools/testing/selftests/powerpc/dexcr/Makefile
+++ b/tools/testing/selftests/powerpc/dexcr/Makefile
@@ -3,7 +3,7 @@ TEST_GEN_FILES := lsdexcr
include ../../lib.mk
-$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie $(call cc-option,-mno-rop-protect)
+$(OUTPUT)/hashchk_test: CFLAGS += -fno-pie -no-pie $(call cc-option,-mno-rop-protect)
$(TEST_GEN_PROGS): ../harness.c ../utils.c ./dexcr.c
$(TEST_GEN_FILES): ../utils.c ./dexcr.c
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
index 2deac2031..021863f86 100644
--- a/tools/testing/selftests/resctrl/Makefile
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -5,6 +5,8 @@ CFLAGS += $(KHDR_INCLUDES)
TEST_GEN_PROGS := resctrl_tests
+LOCAL_HDRS += $(wildcard *.h)
+
include ../lib.mk
-$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
+$(OUTPUT)/resctrl_tests: $(wildcard *.c)
diff --git a/tools/testing/selftests/riscv/hwprobe/.gitignore b/tools/testing/selftests/riscv/hwprobe/.gitignore
index 8113dc3bd..6e384e80e 100644
--- a/tools/testing/selftests/riscv/hwprobe/.gitignore
+++ b/tools/testing/selftests/riscv/hwprobe/.gitignore
@@ -1 +1,3 @@
hwprobe
+cbo
+which-cpus
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
index 12da0a939..557fb074a 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json
@@ -133,6 +133,50 @@
]
},
{
+ "id": "6f62",
+ "name": "Add taprio Qdisc with too short interval",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 300 sched-entry S 02 1700 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
+ "id": "831f",
+ "name": "Add taprio Qdisc with too short cycle-time",
+ "category": [
+ "qdisc",
+ "taprio"
+ ],
+ "plugins": {
+ "requires": "nsPlugin"
+ },
+ "setup": [
+ "echo \"1 1 8\" > /sys/bus/netdevsim/new_device"
+ ],
+ "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI",
+ "expExitCode": "2",
+ "verifyCmd": "$TC qdisc show dev $ETH",
+ "matchPattern": "qdisc taprio 1: root refcnt",
+ "matchCount": "0",
+ "teardown": [
+ "echo \"1\" > /sys/bus/netdevsim/del_device"
+ ]
+ },
+ {
"id": "3e1e",
"name": "Add taprio Qdisc with an invalid cycle-time",
"category": [
diff --git a/tools/tracing/latency/latency-collector.c b/tools/tracing/latency/latency-collector.c
index 0fd9c747d..cf263fe9d 100644
--- a/tools/tracing/latency/latency-collector.c
+++ b/tools/tracing/latency/latency-collector.c
@@ -935,12 +935,12 @@ static void show_available(void)
}
if (!tracers) {
- warnx(no_tracer_msg);
+ warnx("%s", no_tracer_msg);
return;
}
if (!found) {
- warnx(no_latency_tr_msg);
+ warnx("%s", no_latency_tr_msg);
tracefs_list_free(tracers);
return;
}
@@ -983,7 +983,7 @@ static const char *find_default_tracer(void)
for (i = 0; relevant_tracers[i]; i++) {
valid = tracer_valid(relevant_tracers[i], &notracer);
if (notracer)
- errx(EXIT_FAILURE, no_tracer_msg);
+ errx(EXIT_FAILURE, "%s", no_tracer_msg);
if (valid)
return relevant_tracers[i];
}
@@ -1878,7 +1878,7 @@ static void scan_arguments(int argc, char *argv[])
}
valid = tracer_valid(current_tracer, &notracer);
if (notracer)
- errx(EXIT_FAILURE, no_tracer_msg);
+ errx(EXIT_FAILURE, "%s", no_tracer_msg);
if (!valid)
errx(EXIT_FAILURE,
"The tracer %s is not supported by your kernel!\n", current_tracer);
diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
index 7093fd533..7bd80ee2a 100644
--- a/tools/tracing/rtla/src/timerlat_aa.c
+++ b/tools/tracing/rtla/src/timerlat_aa.c
@@ -16,6 +16,9 @@ enum timelat_state {
TIMERLAT_WAITING_THREAD,
};
+/* Used to fill spaces in the output */
+static const char *spaces = " ";
+
#define MAX_COMM 24
/*
@@ -274,14 +277,17 @@ static int timerlat_aa_nmi_handler(struct trace_seq *s, struct tep_record *recor
taa_data->prev_irq_timstamp = start;
trace_seq_reset(taa_data->prev_irqs_seq);
- trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s \t\t\t%9.2f us\n",
- "nmi", ns_to_usf(duration));
+ trace_seq_printf(taa_data->prev_irqs_seq, " %24s %.*s %9.2f us\n",
+ "nmi",
+ 24, spaces,
+ ns_to_usf(duration));
return 0;
}
taa_data->thread_nmi_sum += duration;
- trace_seq_printf(taa_data->nmi_seq, " %24s \t\t\t%9.2f us\n",
- "nmi", ns_to_usf(duration));
+ trace_seq_printf(taa_data->nmi_seq, " %24s %.*s %9.2f us\n",
+ "nmi",
+ 24, spaces, ns_to_usf(duration));
return 0;
}
@@ -323,8 +329,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
taa_data->prev_irq_timstamp = start;
trace_seq_reset(taa_data->prev_irqs_seq);
- trace_seq_printf(taa_data->prev_irqs_seq, "\t%24s:%-3llu \t\t%9.2f us\n",
- desc, vector, ns_to_usf(duration));
+ trace_seq_printf(taa_data->prev_irqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
+ desc, vector,
+ 15, spaces,
+ ns_to_usf(duration));
return 0;
}
@@ -372,8 +380,10 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
* IRQ interference.
*/
taa_data->thread_irq_sum += duration;
- trace_seq_printf(taa_data->irqs_seq, " %24s:%-3llu \t %9.2f us\n",
- desc, vector, ns_to_usf(duration));
+ trace_seq_printf(taa_data->irqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
+ desc, vector,
+ 24, spaces,
+ ns_to_usf(duration));
return 0;
}
@@ -408,8 +418,10 @@ static int timerlat_aa_softirq_handler(struct trace_seq *s, struct tep_record *r
taa_data->thread_softirq_sum += duration;
- trace_seq_printf(taa_data->softirqs_seq, "\t%24s:%-3llu \t %9.2f us\n",
- softirq_name[vector], vector, ns_to_usf(duration));
+ trace_seq_printf(taa_data->softirqs_seq, " %24s:%-3llu %.*s %9.2f us\n",
+ softirq_name[vector], vector,
+ 24, spaces,
+ ns_to_usf(duration));
return 0;
}
@@ -452,8 +464,10 @@ static int timerlat_aa_thread_handler(struct trace_seq *s, struct tep_record *re
} else {
taa_data->thread_thread_sum += duration;
- trace_seq_printf(taa_data->threads_seq, "\t%24s:%-3llu \t\t%9.2f us\n",
- comm, pid, ns_to_usf(duration));
+ trace_seq_printf(taa_data->threads_seq, " %24s:%-12llu %.*s %9.2f us\n",
+ comm, pid,
+ 15, spaces,
+ ns_to_usf(duration));
}
return 0;
@@ -482,7 +496,8 @@ static int timerlat_aa_stack_handler(struct trace_seq *s, struct tep_record *rec
function = tep_find_function(taa_ctx->tool->trace.tep, caller[i]);
if (!function)
break;
- trace_seq_printf(taa_data->stack_seq, "\t\t-> %s\n", function);
+ trace_seq_printf(taa_data->stack_seq, " %.*s -> %s\n",
+ 14, spaces, function);
}
}
return 0;
@@ -568,23 +583,24 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay;
if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) {
if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time)
- printf(" Previous IRQ interference: \t\t up to %9.2f us\n",
- ns_to_usf(taa_data->prev_irq_duration));
+ printf(" Previous IRQ interference: %.*s up to %9.2f us\n",
+ 16, spaces,
+ ns_to_usf(taa_data->prev_irq_duration));
}
/*
* The delay that the IRQ suffered before starting.
*/
- printf(" IRQ handler delay: %16s %9.2f us (%.2f %%)\n",
- (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
- ns_to_usf(taa_data->timer_irq_start_delay),
- ns_to_per(total, taa_data->timer_irq_start_delay));
+ printf(" IRQ handler delay: %.*s %16s %9.2f us (%.2f %%)\n", 16, spaces,
+ (ns_to_usf(taa_data->timer_exit_from_idle) > 10) ? "(exit from idle)" : "",
+ ns_to_usf(taa_data->timer_irq_start_delay),
+ ns_to_per(total, taa_data->timer_irq_start_delay));
/*
* Timerlat IRQ.
*/
- printf(" IRQ latency: \t\t\t\t %9.2f us\n",
- ns_to_usf(taa_data->tlat_irq_latency));
+ printf(" IRQ latency: %.*s %9.2f us\n", 40, spaces,
+ ns_to_usf(taa_data->tlat_irq_latency));
if (irq) {
/*
@@ -595,15 +611,16 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* so it will be displayed, it is the key.
*/
printf(" Blocking thread:\n");
- printf(" %24s:%-9llu\n",
- taa_data->run_thread_comm, taa_data->run_thread_pid);
+ printf(" %.*s %24s:%-9llu\n", 6, spaces, taa_data->run_thread_comm,
+ taa_data->run_thread_pid);
} else {
/*
* The duration of the IRQ handler that handled the timerlat IRQ.
*/
- printf(" Timerlat IRQ duration: \t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->timer_irq_duration),
- ns_to_per(total, taa_data->timer_irq_duration));
+ printf(" Timerlat IRQ duration: %.*s %9.2f us (%.2f %%)\n",
+ 30, spaces,
+ ns_to_usf(taa_data->timer_irq_duration),
+ ns_to_per(total, taa_data->timer_irq_duration));
/*
* The amount of time that the current thread postponed the scheduler.
@@ -611,13 +628,13 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* Recalling that it is net from NMI/IRQ/Softirq interference, so there
* is no need to compute values here.
*/
- printf(" Blocking thread: \t\t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->thread_blocking_duration),
- ns_to_per(total, taa_data->thread_blocking_duration));
+ printf(" Blocking thread: %.*s %9.2f us (%.2f %%)\n", 36, spaces,
+ ns_to_usf(taa_data->thread_blocking_duration),
+ ns_to_per(total, taa_data->thread_blocking_duration));
- printf(" %24s:%-9llu %9.2f us\n",
- taa_data->run_thread_comm, taa_data->run_thread_pid,
- ns_to_usf(taa_data->thread_blocking_duration));
+ printf(" %.*s %24s:%-9llu %.*s %9.2f us\n", 6, spaces,
+ taa_data->run_thread_comm, taa_data->run_thread_pid,
+ 12, spaces, ns_to_usf(taa_data->thread_blocking_duration));
}
/*
@@ -629,9 +646,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* NMIs can happen during the IRQ, so they are always possible.
*/
if (taa_data->thread_nmi_sum)
- printf(" NMI interference \t\t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->thread_nmi_sum),
- ns_to_per(total, taa_data->thread_nmi_sum));
+ printf(" NMI interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
+ ns_to_usf(taa_data->thread_nmi_sum),
+ ns_to_per(total, taa_data->thread_nmi_sum));
/*
* If it is an IRQ latency, the other factors can be skipped.
@@ -643,9 +660,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* Prints the interference caused by IRQs to the thread latency.
*/
if (taa_data->thread_irq_sum) {
- printf(" IRQ interference \t\t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->thread_irq_sum),
- ns_to_per(total, taa_data->thread_irq_sum));
+ printf(" IRQ interference %.*s %9.2f us (%.2f %%)\n", 36, spaces,
+ ns_to_usf(taa_data->thread_irq_sum),
+ ns_to_per(total, taa_data->thread_irq_sum));
trace_seq_do_printf(taa_data->irqs_seq);
}
@@ -654,9 +671,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* Prints the interference caused by Softirqs to the thread latency.
*/
if (taa_data->thread_softirq_sum) {
- printf(" Softirq interference \t\t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->thread_softirq_sum),
- ns_to_per(total, taa_data->thread_softirq_sum));
+ printf(" Softirq interference %.*s %9.2f us (%.2f %%)\n", 32, spaces,
+ ns_to_usf(taa_data->thread_softirq_sum),
+ ns_to_per(total, taa_data->thread_softirq_sum));
trace_seq_do_printf(taa_data->softirqs_seq);
}
@@ -670,9 +687,9 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
* timer handling latency.
*/
if (taa_data->thread_thread_sum) {
- printf(" Thread interference \t\t\t %9.2f us (%.2f %%)\n",
- ns_to_usf(taa_data->thread_thread_sum),
- ns_to_per(total, taa_data->thread_thread_sum));
+ printf(" Thread interference %.*s %9.2f us (%.2f %%)\n", 33, spaces,
+ ns_to_usf(taa_data->thread_thread_sum),
+ ns_to_per(total, taa_data->thread_thread_sum));
trace_seq_do_printf(taa_data->threads_seq);
}
@@ -682,8 +699,8 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
*/
print_total:
printf("------------------------------------------------------------------------\n");
- printf(" %s latency: \t\t\t %9.2f us (100%%)\n", irq ? "IRQ" : "Thread",
- ns_to_usf(total));
+ printf(" %s latency: %.*s %9.2f us (100%%)\n", irq ? " IRQ" : "Thread",
+ 37, spaces, ns_to_usf(total));
}
static int timerlat_auto_analysis_collect_trace(struct timerlat_aa_context *taa_ctx)
diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c
index 8bd51aab6..5b869caed 100644
--- a/tools/tracing/rtla/src/timerlat_hist.c
+++ b/tools/tracing/rtla/src/timerlat_hist.c
@@ -324,17 +324,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
- if (!params->no_irq)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_irq);
+ if (!params->no_irq) {
+ if (data->hist[cpu].irq_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].min_irq);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
- if (!params->no_thread)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_thread);
+ if (!params->no_thread) {
+ if (data->hist[cpu].thread_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].min_thread);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
- if (params->user_hist)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].min_user);
+ if (params->user_hist) {
+ if (data->hist[cpu].user_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].min_user);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
}
trace_seq_printf(trace->seq, "\n");
@@ -384,17 +396,29 @@ timerlat_print_summary(struct timerlat_hist_params *params,
if (!data->hist[cpu].irq_count && !data->hist[cpu].thread_count)
continue;
- if (!params->no_irq)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_irq);
+ if (!params->no_irq) {
+ if (data->hist[cpu].irq_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].max_irq);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
- if (!params->no_thread)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_thread);
+ if (!params->no_thread) {
+ if (data->hist[cpu].thread_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].max_thread);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
- if (params->user_hist)
- trace_seq_printf(trace->seq, "%9llu ",
- data->hist[cpu].max_user);
+ if (params->user_hist) {
+ if (data->hist[cpu].user_count)
+ trace_seq_printf(trace->seq, "%9llu ",
+ data->hist[cpu].max_user);
+ else
+ trace_seq_printf(trace->seq, " - ");
+ }
}
trace_seq_printf(trace->seq, "\n");
trace_seq_do_printf(trace->seq);
diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c
index 8a3fa6431..2665e0bb5 100644
--- a/tools/tracing/rtla/src/timerlat_top.c
+++ b/tools/tracing/rtla/src/timerlat_top.c
@@ -212,6 +212,8 @@ static void timerlat_top_header(struct osnoise_tool *top)
trace_seq_printf(s, "\n");
}
+static const char *no_value = " -";
+
/*
* timerlat_top_print - prints the output of a given CPU
*/
@@ -239,10 +241,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
trace_seq_printf(s, "%3d #%-9d |", cpu, cpu_data->irq_count);
if (!cpu_data->irq_count) {
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - |");
+ trace_seq_printf(s, "%s %s %s %s |", no_value, no_value, no_value, no_value);
} else {
trace_seq_printf(s, "%9llu ", cpu_data->cur_irq / params->output_divisor);
trace_seq_printf(s, "%9llu ", cpu_data->min_irq / params->output_divisor);
@@ -251,10 +250,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
}
if (!cpu_data->thread_count) {
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " -\n");
+ trace_seq_printf(s, "%s %s %s %s", no_value, no_value, no_value, no_value);
} else {
trace_seq_printf(s, "%9llu ", cpu_data->cur_thread / divisor);
trace_seq_printf(s, "%9llu ", cpu_data->min_thread / divisor);
@@ -271,10 +267,7 @@ static void timerlat_top_print(struct osnoise_tool *top, int cpu)
trace_seq_printf(s, " |");
if (!cpu_data->user_count) {
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " - ");
- trace_seq_printf(s, " -\n");
+ trace_seq_printf(s, "%s %s %s %s\n", no_value, no_value, no_value, no_value);
} else {
trace_seq_printf(s, "%9llu ", cpu_data->cur_user / divisor);
trace_seq_printf(s, "%9llu ", cpu_data->min_user / divisor);
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 0f4e0cf4f..747fe251e 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -510,8 +510,10 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
}
if (folio_test_hwpoison(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
r = -EHWPOISON;
- goto out_unlock;
+ goto out_fput;
}
page = folio_file_page(folio, index);
@@ -522,7 +524,6 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
r = 0;
-out_unlock:
folio_unlock(folio);
out_fput:
fput(file);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ff0a20565..d9ce063c7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4066,12 +4066,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
- int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
+ int last_boosted_vcpu;
unsigned long i;
int yielded = 0;
int try = 3;
int pass;
+ last_boosted_vcpu = READ_ONCE(kvm->last_boosted_vcpu);
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
@@ -4109,7 +4110,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
yielded = kvm_vcpu_yield_to(vcpu);
if (yielded > 0) {
- kvm->last_boosted_vcpu = i;
+ WRITE_ONCE(kvm->last_boosted_vcpu, i);
break;
} else if (yielded < 0) {
try--;