author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-07 02:18:06 +0000
commit     dbbf0dcdfc9a5d90d5146bb195fce97064d92c76 (patch)
tree       bab1434b47a284ca2893dcc0b908d1b95d982e7c
parent     Adding upstream version 6.1.85. (diff)
Adding upstream version 6.1.90. (upstream/6.1.90, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  Documentation/admin-guide/hw-vuln/spectre.rst | 22
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 15
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst | 5
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/Kconfig | 8
-rw-r--r--  arch/arc/boot/dts/hsdk.dts | 1
-rw-r--r--  arch/arm/boot/dts/at91-sama7g5ek.dts | 8
-rw-r--r--  arch/arm/mach-omap2/board-n8x0.c | 5
-rw-r--r--  arch/arm/mach-omap2/common-board-devices.h | 2
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 1
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi | 12
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712-evb.dts | 8
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 3
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622.dtsi | 34
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8183.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi | 6
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8192.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8195.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3328.dtsi | 11
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 1
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 31
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3399.dtsi | 12
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts | 6
-rw-r--r--  arch/arm64/mm/pageattr.c | 3
-rw-r--r--  arch/loongarch/include/asm/perf_event.h | 8
-rw-r--r--  arch/loongarch/mm/fault.c | 4
-rw-r--r--  arch/riscv/include/asm/pgtable.h | 4
-rw-r--r--  arch/x86/Kconfig | 32
-rw-r--r--  arch/x86/boot/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/misc.c | 1
-rw-r--r--  arch/x86/boot/compressed/sev.c | 3
-rw-r--r--  arch/x86/boot/compressed/vmlinux.lds.S | 6
-rw-r--r--  arch/x86/boot/header.S | 211
-rw-r--r--  arch/x86/boot/setup.ld | 14
-rw-r--r--  arch/x86/boot/tools/build.c | 273
-rw-r--r--  arch/x86/events/amd/lbr.c | 6
-rw-r--r--  arch/x86/events/core.c | 1
-rw-r--r--  arch/x86/include/asm/apic.h | 3
-rw-r--r--  arch/x86/include/asm/boot.h | 1
-rw-r--r--  arch/x86/include/asm/coco.h | 5
-rw-r--r--  arch/x86/include/asm/init.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h | 8
-rw-r--r--  arch/x86/include/asm/page_types.h | 12
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 3
-rw-r--r--  arch/x86/include/asm/sev.h | 10
-rw-r--r--  arch/x86/kernel/amd_gart_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/apic.c | 6
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 87
-rw-r--r--  arch/x86/kernel/cpu/common.c | 48
-rw-r--r--  arch/x86/kernel/cpu/cpuid-deps.c | 6
-rw-r--r--  arch/x86/kernel/head64.c | 7
-rw-r--r--  arch/x86/kernel/platform-quirks.c | 1
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/sev-shared.c | 23
-rw-r--r--  arch/x86/kernel/sev.c | 11
-rw-r--r--  arch/x86/kvm/cpuid.c | 1
-rw-r--r--  arch/x86/kvm/cpuid.h | 10
-rw-r--r--  arch/x86/kvm/lapic.c | 3
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 2
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 24
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/mm/mem_encrypt_boot.S | 4
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c | 58
-rw-r--r--  arch/x86/mm/pat/set_memory.c | 6
-rw-r--r--  arch/x86/mm/pti.c | 2
-rw-r--r--  block/blk-stat.c | 2
-rw-r--r--  crypto/algapi.c | 1
-rw-r--r--  drivers/accessibility/speakup/main.c | 2
-rw-r--r--  drivers/acpi/cppc_acpi.c | 72
-rw-r--r--  drivers/acpi/sleep.c | 12
-rw-r--r--  drivers/android/binder.c | 4
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/bluetooth/btintel.c | 2
-rw-r--r--  drivers/bluetooth/btmtk.c | 1
-rw-r--r--  drivers/bluetooth/btmtk.h | 1
-rw-r--r--  drivers/bluetooth/btusb.c | 2
-rw-r--r--  drivers/bluetooth/hci_qca.c | 21
-rw-r--r--  drivers/bus/mhi/host/init.c | 1
-rw-r--r--  drivers/bus/mhi/host/internal.h | 9
-rw-r--r--  drivers/bus/mhi/host/pm.c | 20
-rw-r--r--  drivers/char/random.c | 10
-rw-r--r--  drivers/clk/clk.c | 201
-rw-r--r--  drivers/clk/mediatek/clk-gate.c | 23
-rw-r--r--  drivers/clk/mediatek/clk-gate.h | 7
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-aud.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-eth.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-g3d.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-hif.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2701.c | 22
-rw-r--r--  drivers/clk/mediatek/clk-mt2712-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt2712.c | 24
-rw-r--r--  drivers/clk/mediatek/clk-mt6765.c | 13
-rw-r--r--  drivers/clk/mediatek/clk-mt6779-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6779.c | 21
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-infracfg.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-mm.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-pericfg.c | 6
-rw-r--r--  drivers/clk/mediatek/clk-mt6795-topckgen.c | 6
-rw-r--r--  drivers/clk/mediatek/clk-mt6797-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt6797.c | 7
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-aud.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-eth.c | 8
-rw-r--r--  drivers/clk/mediatek/clk-mt7622-hif.c | 8
-rw-r--r--  drivers/clk/mediatek/clk-mt7622.c | 22
-rw-r--r--  drivers/clk/mediatek/clk-mt7629-eth.c | 7
-rw-r--r--  drivers/clk/mediatek/clk-mt7629-hif.c | 8
-rw-r--r--  drivers/clk/mediatek/clk-mt7629.c | 18
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-eth.c | 10
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-infracfg.c | 7
-rw-r--r--  drivers/clk/mediatek/clk-mt7986-topckgen.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8135.c | 18
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-aud.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-img.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mfgcfg.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8167-vdec.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8167.c | 12
-rw-r--r--  drivers/clk/mediatek/clk-mt8173-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8173.c | 34
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-audio.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183-mm.c | 4
-rw-r--r--  drivers/clk/mediatek/clk-mt8183.c | 36
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mcu.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-mm.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8186-topckgen.c | 9
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-aud.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8192-mm.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8192.c | 90
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-apmixedsys.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-topckgen.c | 9
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vdo0.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8195-vdo1.c | 3
-rw-r--r--  drivers/clk/mediatek/clk-mt8365-mm.c | 5
-rw-r--r--  drivers/clk/mediatek/clk-mt8365.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mt8516-aud.c | 2
-rw-r--r--  drivers/clk/mediatek/clk-mt8516.c | 12
-rw-r--r--  drivers/clk/mediatek/clk-mtk.c | 127
-rw-r--r--  drivers/clk/mediatek/clk-mtk.h | 13
-rw-r--r--  drivers/clk/mediatek/clk-mux.c | 14
-rw-r--r--  drivers/clk/mediatek/clk-mux.h | 3
-rw-r--r--  drivers/comedi/drivers/vmk80xx.c | 35
-rw-r--r--  drivers/cpufreq/cpufreq.c | 17
-rw-r--r--  drivers/cpuidle/driver.c | 3
-rw-r--r--  drivers/dma/idma64.c | 4
-rw-r--r--  drivers/dma/idxd/perfmon.c | 9
-rw-r--r--  drivers/dma/owl-dma.c | 4
-rw-r--r--  drivers/dma/tegra186-gpc-dma.c | 3
-rw-r--r--  drivers/dma/xilinx/xilinx_dpdma.c | 13
-rw-r--r--  drivers/firmware/efi/libstub/Makefile | 7
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 58
-rw-r--r--  drivers/firmware/tegra/bpmp-debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 78
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc21.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_stats.h | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 3
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 3
-rw-r--r--  drivers/gpu/drm/drm_panel_orientation_quirks.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c | 7
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-rm69299.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 50
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 4
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-kye.c | 62
-rw-r--r--  drivers/hid/hid-logitech-dj.c | 4
-rw-r--r--  drivers/hid/hid-quirks.c | 6
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 9
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/ipc.c | 2
-rw-r--r--  drivers/i2c/i2c-core-base.c | 12
-rw-r--r--  drivers/infiniband/core/cm.c | 23
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 3
-rw-r--r--  drivers/infiniband/sw/rxe/rxe.c | 2
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 6
-rw-r--r--  drivers/input/touchscreen/imagis.c | 20
-rw-r--r--  drivers/iommu/intel/svm.c | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 9
-rw-r--r--  drivers/media/cec/core/cec-adap.c | 14
-rw-r--r--  drivers/media/pci/sta2x11/sta2x11_vip.c | 9
-rw-r--r--  drivers/misc/mei/pci-me.c | 2
-rw-r--r--  drivers/misc/vmw_vmci/vmci_datagram.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 16
-rw-r--r--  drivers/mtd/nand/raw/diskonchip.c | 4
-rw-r--r--  drivers/net/dsa/mt7530.c | 287
-rw-r--r--  drivers/net/dsa/mt7530.h | 11
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.h | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/selq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 54
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.h | 3
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 16
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_par.c | 11
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_spi.c | 11
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 4
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 5
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 18
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 5
-rw-r--r--  drivers/net/geneve.c | 4
-rw-r--r--  drivers/net/gtp.c | 3
-rw-r--r--  drivers/net/macsec.c | 44
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 4
-rw-r--r--  drivers/net/tun.c | 18
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 15
-rw-r--r--  drivers/net/virtio_net.c | 26
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/antenna.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c | 9
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 1
-rw-r--r--  drivers/net/wireless/realtek/rtw89/pci.h | 2
-rw-r--r--  drivers/nfc/trf7970a.c | 42
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/pci/bus.c | 49
-rw-r--r--  drivers/pci/pci.c | 78
-rw-r--r--  drivers/pci/pci.h | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 21
-rw-r--r--  drivers/pci/pcie/dpc.c | 5
-rw-r--r--  drivers/pci/quirks.c | 68
-rw-r--r--  drivers/pci/switch/switchtec.c | 158
-rw-r--r--  drivers/phy/freescale/phy-fsl-imx8m-pcie.c | 112
-rw-r--r--  drivers/phy/marvell/phy-mvebu-a3700-comphy.c | 9
-rw-r--r--  drivers/phy/rockchip/phy-rockchip-snps-pcie3.c | 31
-rw-r--r--  drivers/phy/ti/phy-tusb1210.c | 23
-rw-r--r--  drivers/pinctrl/renesas/core.c | 4
-rw-r--r--  drivers/platform/x86/intel/vbtn.c | 5
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 9
-rw-r--r--  drivers/s390/cio/device.c | 13
-rw-r--r--  drivers/s390/cio/qdio_main.c | 28
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_edif.c | 2
-rw-r--r--  drivers/scsi/scsi_lib.c | 52
-rw-r--r--  drivers/scsi/sd.c | 4
-rw-r--r--  drivers/soundwire/dmi-quirks.c | 8
-rw-r--r--  drivers/thermal/thermal_of.c | 12
-rw-r--r--  drivers/thunderbolt/quirks.c | 16
-rw-r--r--  drivers/thunderbolt/switch.c | 48
-rw-r--r--  drivers/thunderbolt/tb.c | 53
-rw-r--r--  drivers/thunderbolt/tb.h | 7
-rw-r--r--  drivers/thunderbolt/usb4.c | 13
-rw-r--r--  drivers/tty/n_gsm.c | 3
-rw-r--r--  drivers/tty/serial/mxs-auart.c | 8
-rw-r--r--  drivers/tty/serial/pmac_zilog.c | 14
-rw-r--r--  drivers/tty/serial/stm32-usart.c | 13
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 6
-rw-r--r--  drivers/usb/core/hub.c | 15
-rw-r--r--  drivers/usb/core/port.c | 4
-rw-r--r--  drivers/usb/core/quirks.c | 7
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 4
-rw-r--r--  drivers/usb/gadget/function/uvc_video.c | 3
-rw-r--r--  drivers/usb/host/pci-quirks.c | 4
-rw-r--r--  drivers/usb/host/sl811-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 11
-rw-r--r--  drivers/usb/host/xhci.c | 23
-rw-r--r--  drivers/usb/host/xhci.h | 9
-rw-r--r--  drivers/usb/serial/option.c | 40
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.c | 1
-rw-r--r--  drivers/vhost/vhost.c | 24
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbmon.c | 7
-rw-r--r--  drivers/video/fbdev/via/accel.c | 4
-rw-r--r--  drivers/virtio/virtio.c | 10
-rw-r--r--  fs/btrfs/backref.c | 12
-rw-r--r--  fs/btrfs/delayed-inode.c | 3
-rw-r--r--  fs/btrfs/export.c | 9
-rw-r--r--  fs/btrfs/qgroup.c | 2
-rw-r--r--  fs/btrfs/send.c | 10
-rw-r--r--  fs/btrfs/transaction.c | 17
-rw-r--r--  fs/btrfs/volumes.c | 12
-rw-r--r--  fs/ext4/mballoc.c | 5
-rw-r--r--  fs/ext4/super.c | 12
-rw-r--r--  fs/isofs/inode.c | 18
-rw-r--r--  fs/nilfs2/dir.c | 2
-rw-r--r--  fs/orangefs/super.c | 2
-rw-r--r--  fs/pstore/zone.c | 2
-rw-r--r--  fs/smb/client/cached_dir.c | 4
-rw-r--r--  fs/smb/client/cifs_spnego.h | 2
-rw-r--r--  fs/smb/client/cifsfs.c | 1
-rw-r--r--  fs/smb/client/cifspdu.h | 100
-rw-r--r--  fs/smb/client/fs_context.c | 12
-rw-r--r--  fs/smb/client/fs_context.h | 2
-rw-r--r--  fs/smb/client/readdir.c | 6
-rw-r--r--  fs/smb/client/smb2pdu.c | 4
-rw-r--r--  fs/smb/client/smb2pdu.h | 4
-rw-r--r--  fs/smb/client/transport.c | 3
-rw-r--r--  fs/smb/common/smb2pdu.h | 2
-rw-r--r--  fs/smb/server/server.c | 13
-rw-r--r--  fs/smb/server/smb2pdu.c | 4
-rw-r--r--  fs/smb/server/vfs.c | 5
-rw-r--r--  fs/sysfs/file.c | 2
-rw-r--r--  fs/sysv/itree.c | 10
-rw-r--r--  include/linux/bootconfig.h | 7
-rw-r--r--  include/linux/dma-fence.h | 7
-rw-r--r--  include/linux/etherdevice.h | 25
-rw-r--r--  include/linux/irqflags.h | 2
-rw-r--r--  include/linux/pci.h | 5
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/randomize_kstack.h | 2
-rw-r--r--  include/linux/rcupdate.h | 4
-rw-r--r--  include/linux/skbuff.h | 11
-rw-r--r--  include/linux/sunrpc/sched.h | 2
-rw-r--r--  include/linux/switchtec.h | 1
-rw-r--r--  include/linux/u64_stats_sync.h | 9
-rw-r--r--  include/linux/usb/hcd.h | 5
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/net/addrconf.h | 4
-rw-r--r--  include/net/af_unix.h | 5
-rw-r--r--  include/net/bluetooth/bluetooth.h | 9
-rw-r--r--  include/net/dsa.h | 8
-rw-r--r--  include/net/ip_tunnels.h | 33
-rw-r--r--  include/net/macsec.h | 1
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 12
-rw-r--r--  include/net/sock.h | 39
-rw-r--r--  include/scsi/scsi_device.h | 51
-rw-r--r--  include/trace/events/rpcgss.h | 4
-rw-r--r--  include/uapi/linux/input-event-codes.h | 1
-rw-r--r--  include/uapi/linux/pci_regs.h | 1
-rw-r--r--  init/Kconfig | 2
-rw-r--r--  init/main.c | 2
-rw-r--r--  io_uring/io_uring.c | 41
-rw-r--r--  io_uring/net.c | 1
-rw-r--r--  kernel/bounds.c | 2
-rw-r--r--  kernel/cpu.c | 3
-rw-r--r--  kernel/dma/direct.c | 9
-rw-r--r--  kernel/fork.c | 18
-rw-r--r--  kernel/kprobes.c | 18
-rw-r--r--  kernel/panic.c | 8
-rw-r--r--  kernel/power/suspend.c | 6
-rw-r--r--  kernel/trace/ring_buffer.c | 8
-rw-r--r--  kernel/trace/trace_events.c | 4
-rw-r--r--  lib/bootconfig.c | 19
-rw-r--r--  lib/stackdepot.c | 4
-rw-r--r--  mm/memory-failure.c | 18
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/batman-adv/translation-table.c | 2
-rw-r--r--  net/bluetooth/hci_request.c | 4
-rw-r--r--  net/bluetooth/l2cap_sock.c | 59
-rw-r--r--  net/bluetooth/mgmt.c | 24
-rw-r--r--  net/bluetooth/sco.c | 30
-rw-r--r--  net/bridge/br_input.c | 15
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 6
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/bridge/netfilter/nf_conntrack_bridge.c | 14
-rw-r--r--  net/core/sock.c | 1
-rw-r--r--  net/core/sysctl_net_core.c | 9
-rw-r--r--  net/dsa/dsa2.c | 24
-rw-r--r--  net/ethernet/eth.c | 12
-rw-r--r--  net/ipv4/icmp.c | 12
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 4
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 4
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/udp.c | 5
-rw-r--r--  net/ipv6/addrconf.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 7
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 4
-rw-r--r--  net/ipv6/udp.c | 5
-rw-r--r--  net/mpls/mpls_gso.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 6
-rw-r--r--  net/netfilter/nf_flow_table_inet.c | 3
-rw-r--r--  net/netfilter/nf_flow_table_ip.c | 10
-rw-r--r--  net/netfilter/nf_tables_api.c | 63
-rw-r--r--  net/netfilter/nft_chain_filter.c | 4
-rw-r--r--  net/netfilter/nft_set_pipapo.c | 14
-rw-r--r--  net/openvswitch/conntrack.c | 9
-rw-r--r--  net/smc/smc_pnet.c | 10
-rw-r--r--  net/unix/af_unix.c | 20
-rw-r--r--  net/unix/garbage.c | 35
-rw-r--r--  net/unix/scm.c | 8
-rw-r--r--  net/xdp/xsk.c | 2
-rw-r--r--  rust/macros/lib.rs | 12
-rw-r--r--  scripts/gcc-plugins/stackleak_plugin.c | 2
-rw-r--r--  sound/firewire/amdtp-stream.c | 12
-rw-r--r--  sound/firewire/amdtp-stream.h | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/intel/boards/sof_sdw.c | 11
-rw-r--r--  sound/soc/soc-core.c | 3
-rw-r--r--  sound/usb/Makefile | 2
-rw-r--r--  sound/usb/mixer_quirks.c | 9
-rw-r--r--  sound/usb/mixer_scarlett2.c (renamed from sound/usb/mixer_scarlett_gen2.c) | 259
-rw-r--r--  sound/usb/mixer_scarlett2.h | 7
-rw-r--r--  sound/usb/mixer_scarlett_gen2.h | 7
-rw-r--r--  tools/iio/iio_utils.c | 2
-rw-r--r--  tools/lib/perf/evlist.c | 18
-rw-r--r--  tools/lib/perf/include/internal/evlist.h | 4
-rw-r--r--  tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c | 1
-rwxr-xr-x  tools/testing/ktest/ktest.pl | 1
-rw-r--r--  tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc | 6
-rw-r--r--  tools/testing/selftests/timers/posix_timers.c | 2
432 files changed, 4065 insertions, 2430 deletions
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 9edb2860a..e0a1be97f 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -439,12 +439,12 @@ The possible values in this file are:
- System is protected by retpoline
* - BHI: BHI_DIS_S
- System is protected by BHI_DIS_S
- * - BHI: SW loop; KVM SW loop
+ * - BHI: SW loop, KVM SW loop
- System is protected by software clearing sequence
- * - BHI: Syscall hardening
- - Syscalls are hardened against BHI
- * - BHI: Syscall hardening; KVM: SW loop
- - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
+ * - BHI: Vulnerable
+ - System is vulnerable to BHI
+ * - BHI: Vulnerable, KVM: SW loop
+ - System is vulnerable; KVM is protected by software clearing sequence
Full mitigation might require a microcode update from the CPU
vendor. When the necessary microcode is not available, the kernel will
@@ -661,18 +661,14 @@ kernel command line.
spectre_bhi=
[X86] Control mitigation of Branch History Injection
- (BHI) vulnerability. Syscalls are hardened against BHI
- regardless of this setting. This setting affects the deployment
+ (BHI) vulnerability. This setting affects the deployment
of the HW BHI control and the SW BHB clearing sequence.
on
- unconditionally enable.
+ (default) Enable the HW or SW mitigation as
+ needed.
off
- unconditionally disable.
- auto
- enable if hardware mitigation
- control(BHI_DIS_S) is available, otherwise
- enable alternate mitigation in KVM.
+ Disable the mitigation.
For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index b2c7b2f01..e6f0570cf 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3283,6 +3283,7 @@
reg_file_data_sampling=off [X86]
retbleed=off [X86]
spec_store_bypass_disable=off [X86,PPC]
+ spectre_bhi=off [X86]
spectre_v2_user=off [X86]
srbds=off [X86,INTEL]
ssbd=force-off [ARM64]
@@ -5734,16 +5735,13 @@
See Documentation/admin-guide/laptops/sonypi.rst
spectre_bhi= [X86] Control mitigation of Branch History Injection
- (BHI) vulnerability. Syscalls are hardened against BHI
- reglardless of this setting. This setting affects the
+ (BHI) vulnerability. This setting affects the
deployment of the HW BHI control and the SW BHB
clearing sequence.
- on - unconditionally enable.
- off - unconditionally disable.
- auto - (default) enable hardware mitigation
- (BHI_DIS_S) if available, otherwise enable
- alternate mitigation in KVM.
+ on - (default) Enable the HW or SW mitigation
+ as needed.
+ off - Disable the mitigation.
spectre_v2= [X86] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
@@ -6605,6 +6603,9 @@
pause after every control message);
o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
delay after resetting its port);
+ p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
+ (Reduce timeout of the SET_ADDRESS
+ request from 5000 ms to 500 ms);
Example: quirks=0781:5580:bk,0a5c:5834:gij
usbhid.mousepoll=
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 6394f5dc2..e3894c928 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -205,6 +205,11 @@ Will increase power usage.
Default: 0 (off)
+mem_pcpu_rsv
+------------
+
+Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+
rmem_default
------------
diff --git a/MAINTAINERS b/MAINTAINERS
index bbfedb0b2..ecf4d0c8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8031,7 +8031,7 @@ M: Geoffrey D. Bennett <g@b4.vu>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
-F: sound/usb/mixer_scarlett_gen2.c
+F: sound/usb/mixer_scarlett2.c
FORCEDETH GIGABIT ETHERNET DRIVER
M: Rain River <rain.1986.08.12@gmail.com>
diff --git a/Makefile b/Makefile
index 5dff9ff99..7ae5cf9ec 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 1
-SUBLEVEL = 85
+SUBLEVEL = 90
EXTRAVERSION =
NAME = Curry Ramen
diff --git a/arch/Kconfig b/arch/Kconfig
index f99fd9a4c..e959abf96 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,14 @@
#
source "arch/$(SRCARCH)/Kconfig"
+config ARCH_CONFIGURES_CPU_MITIGATIONS
+ bool
+
+if !ARCH_CONFIGURES_CPU_MITIGATIONS
+config CPU_MITIGATIONS
+ def_bool y
+endif
+
menu "General architecture-dependent options"
config CRASH_CORE
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 6691f4255..41b980df8 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
- #interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index 4af8a1c96..bede6e88a 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -293,7 +293,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1150000>;
+ regulator-suspend-microvolt = <1150000>;
regulator-mode = <4>;
};
@@ -314,7 +314,7 @@
regulator-state-standby {
regulator-on-in-suspend;
- regulator-suspend-voltage = <1050000>;
+ regulator-suspend-microvolt = <1050000>;
regulator-mode = <4>;
};
@@ -331,7 +331,7 @@
regulator-always-on;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
@@ -346,7 +346,7 @@
regulator-max-microvolt = <3700000>;
regulator-state-standby {
- regulator-suspend-voltage = <1800000>;
+ regulator-suspend-microvolt = <1800000>;
regulator-on-in-suspend;
};
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 5e86145db..8897364e5 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -22,7 +22,6 @@
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <linux/platform_data/mmc-omap.h>
#include <linux/mfd/menelaus.h>
-#include <sound/tlv320aic3x.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
@@ -567,10 +566,6 @@ struct menelaus_platform_data n8x0_menelaus_platform_data = {
.late_init = n8x0_menelaus_late_init,
};
-struct aic3x_pdata n810_aic33_data = {
- .gpio_reset = 118,
-};
-
static int __init n8x0_late_initcall(void)
{
if (!board_caps)
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index b23962c38..69694af71 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -2,12 +2,10 @@
#ifndef __OMAP_COMMON_BOARD_DEVICES__
#define __OMAP_COMMON_BOARD_DEVICES__
-#include <sound/tlv320aic3x.h>
#include <linux/mfd/menelaus.h>
void *n8x0_legacy_init(void);
extern struct menelaus_platform_data n8x0_menelaus_platform_data;
-extern struct aic3x_pdata n810_aic33_data;
#endif /* __OMAP_COMMON_BOARD_DEVICES__ */
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 5b99d602c..9deba798c 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -440,7 +440,6 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
#ifdef CONFIG_MACH_NOKIA_N8X0
OF_DEV_AUXDATA("ti,omap2420-mmc", 0x4809c000, "mmci-omap.0", NULL),
OF_DEV_AUXDATA("menelaus", 0x72, "1-0072", &n8x0_menelaus_platform_data),
- OF_DEV_AUXDATA("tlv320aic3x", 0x18, "2-0018", &n810_aic33_data),
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
index 10370d1a6..dbb298b90 100644
--- a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -38,8 +38,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b010000 0x10000>;
clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
- <&sdhc0_lpcg IMX_LPCG_CLK_0>,
- <&sdhc0_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_0>;
status = "disabled";
@@ -49,8 +49,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b020000 0x10000>;
clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
- <&sdhc1_lpcg IMX_LPCG_CLK_0>,
- <&sdhc1_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_1>;
fsl,tuning-start-tap = <20>;
@@ -62,8 +62,8 @@ conn_subsys: bus@5b000000 {
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
reg = <0x5b030000 0x10000>;
clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
- <&sdhc2_lpcg IMX_LPCG_CLK_0>,
- <&sdhc2_lpcg IMX_LPCG_CLK_5>;
+ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
clock-names = "ipg", "ahb", "per";
power-domains = <&pd IMX_SC_R_SDHC_2>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
index d31a19412..03fd9df16 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt2712-evb.dts
@@ -128,7 +128,7 @@
};
&pio {
- eth_default: eth_default {
+ eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
@@ -155,7 +155,7 @@
};
};
- eth_sleep: eth_sleep {
+ eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
@@ -181,14 +181,14 @@
};
};
- usb0_id_pins_float: usb0_iddig {
+ usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
- usb1_id_pins_float: usb1_iddig {
+ usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 1ac0b2cf3..fde2b165f 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -249,10 +249,11 @@
#clock-cells = <1>;
};
- infracfg: syscon@10001000 {
+ infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
pericfg: syscon@10003000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 7bb316922..f8a320068 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -251,7 +251,7 @@
clock-names = "hif_sel";
};
- cir: cir@10009000 {
+ cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@@ -282,16 +282,14 @@
};
};
- apmixedsys: apmixedsys@10209000 {
- compatible = "mediatek,mt7622-apmixedsys",
- "syscon";
+ apmixedsys: clock-controller@10209000 {
+ compatible = "mediatek,mt7622-apmixedsys";
reg = <0 0x10209000 0 0x1000>;
#clock-cells = <1>;
};
- topckgen: topckgen@10210000 {
- compatible = "mediatek,mt7622-topckgen",
- "syscon";
+ topckgen: clock-controller@10210000 {
+ compatible = "mediatek,mt7622-topckgen";
reg = <0 0x10210000 0 0x1000>;
#clock-cells = <1>;
};
@@ -514,7 +512,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
- reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@@ -734,9 +731,8 @@
power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
};
- ssusbsys: ssusbsys@1a000000 {
- compatible = "mediatek,mt7622-ssusbsys",
- "syscon";
+ ssusbsys: clock-controller@1a000000 {
+ compatible = "mediatek,mt7622-ssusbsys";
reg = <0 0x1a000000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -793,9 +789,8 @@
};
};
- pciesys: pciesys@1a100800 {
- compatible = "mediatek,mt7622-pciesys",
- "syscon";
+ pciesys: clock-controller@1a100800 {
+ compatible = "mediatek,mt7622-pciesys";
reg = <0 0x1a100800 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
@@ -921,12 +916,13 @@
};
};
- hifsys: syscon@1af00000 {
- compatible = "mediatek,mt7622-hifsys", "syscon";
+ hifsys: clock-controller@1af00000 {
+ compatible = "mediatek,mt7622-hifsys";
reg = <0 0x1af00000 0 0x70>;
+ #clock-cells = <1>;
};
- ethsys: syscon@1b000000 {
+ ethsys: clock-controller@1b000000 {
compatible = "mediatek,mt7622-ethsys",
"syscon";
reg = <0 0x1b000000 0 0x1000>;
@@ -966,9 +962,7 @@
};
eth: ethernet@1b100000 {
- compatible = "mediatek,mt7622-eth",
- "mediatek,mt2701-eth",
- "syscon";
+ compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index d5d9b954c..2147e1526 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -1554,6 +1554,7 @@
compatible = "mediatek,mt8183-mfgcfg", "syscon";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
+ power-domains = <&spm MT8183_POWER_DOMAIN_MFG_ASYNC>;
};
gpu: gpu@13040000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index c6080af1e..0814ed6a7 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
@@ -903,7 +903,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -913,7 +913,7 @@
mt6315_6_vbuck3: vbuck3 {
regulator-compatible = "vbuck3";
regulator-name = "Vlcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
@@ -930,7 +930,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <606250>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <800000>;
regulator-enable-ramp-delay = <256>;
regulator-allowed-modes = <0 1 2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192.dtsi b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
index 4ed8a0f18..7ecba8c72 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192.dtsi
@@ -1240,6 +1240,7 @@
reg = <0 0x14001000 0 0x1000>;
interrupts = <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&mmsys CLK_MM_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x1000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_0>,
<CMDQ_EVENT_DISP_STREAM_DONE_ENG_EVENT_1>;
power-domains = <&spm MT8192_POWER_DOMAIN_DISP>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
index 4b8a1c462..9180a73db 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi
@@ -845,7 +845,7 @@
mt6315_6_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vbcpu";
- regulator-min-microvolt = <300000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
@@ -863,7 +863,7 @@
mt6315_7_vbuck1: vbuck1 {
regulator-compatible = "vbuck1";
regulator-name = "Vgpu";
- regulator-min-microvolt = <625000>;
+ regulator-min-microvolt = <400000>;
regulator-max-microvolt = <1193750>;
regulator-enable-ramp-delay = <256>;
regulator-ramp-delay = <6250>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 414cbe345..bdf002e9c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -1492,6 +1492,7 @@
compatible = "mediatek,mt8195-vppsys0";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_1400XXXX 0 0x1000>;
};
smi_sub_common_vpp0_vpp1_2x1: smi@14010000 {
@@ -1597,6 +1598,7 @@
compatible = "mediatek,mt8195-vppsys1";
reg = <0 0x14f00000 0 0x1000>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce1 SUBSYS_14f0XXXX 0 0x1000>;
};
larb5: larb@14f02000 {
@@ -1982,6 +1984,7 @@
reg = <0 0x1c01a000 0 0x1000>;
mboxes = <&gce0 0 CMDQ_THR_PRIO_4>;
#clock-cells = <1>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0xa000 0x1000>;
};
larb20: larb@1b010000 {
@@ -2085,6 +2088,7 @@
interrupts = <GIC_SPI 658 IRQ_TYPE_LEVEL_HIGH 0>;
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>;
clocks = <&vdosys0 CLK_VDO0_DISP_MUTEX0>;
+ mediatek,gce-client-reg = <&gce0 SUBSYS_1c01XXXX 0x6000 0x1000>;
mediatek,gce-events = <CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0>;
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 905a50aa5..d42846eff 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -741,11 +741,20 @@
status = "disabled";
ports {
- hdmi_in: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in: port@0 {
+ reg = <0>;
+
hdmi_in_vop: endpoint {
remote-endpoint = <&vop_out_hdmi>;
};
};
+
+ hdmi_out: port@1 {
+ reg = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 194e48c75..a51e8d049 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -789,7 +789,6 @@
};
&pcie0 {
- bus-scan-delay-ms = <1000>;
ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index aa3e21bd6..937a15005 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -401,16 +401,22 @@
gpio1830-supply = <&vcc_1v8>;
};
-&pmu_io_domains {
- status = "okay";
- pmu1830-supply = <&vcc_1v8>;
-};
-
-&pwm2 {
- status = "okay";
+&pcie_clkreqn_cpm {
+ rockchip,pins =
+ <2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
+ pinctrl-names = "default";
+ pinctrl-0 = <&q7_thermal_pin>;
+
+ gpios {
+ q7_thermal_pin: q7-thermal-pin {
+ rockchip,pins =
+ <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@@ -443,11 +449,20 @@
usb3 {
usb3_id: usb3-id {
rockchip,pins =
- <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_none>;
+ <1 RK_PC2 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
};
+&pmu_io_domains {
+ status = "okay";
+ pmu1830-supply = <&vcc_1v8>;
+};
+
+&pwm2 {
+ status = "okay";
+};
+
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index a7e6eccb1..8363cc13e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1906,6 +1906,7 @@
hdmi: hdmi@ff940000 {
compatible = "rockchip,rk3399-dw-hdmi";
reg = <0x0 0xff940000 0x0 0x20000>;
+ reg-io-width = <4>;
interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru PCLK_HDMI_CTRL>,
<&cru SCLK_HDMI_SFR>,
@@ -1914,13 +1915,16 @@
<&cru PLL_VPLL>;
clock-names = "iahb", "isfr", "cec", "grf", "ref";
power-domains = <&power RK3399_PD_HDCP>;
- reg-io-width = <4>;
rockchip,grf = <&grf>;
#sound-dai-cells = <0>;
status = "disabled";
ports {
- hdmi_in: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ hdmi_in: port@0 {
+ reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1933,6 +1937,10 @@
remote-endpoint = <&vopl_out_hdmi>;
};
};
+
+ hdmi_out: port@1 {
+ reg = <1>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
index 26d7fda27..856fe4b66 100644
--- a/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3568-bpi-r2-pro.dts
@@ -412,6 +412,8 @@
vccio_sd: LDO_REG5 {
regulator-name = "vccio_sd";
+ regulator-always-on;
+ regulator-boot-on;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
@@ -521,9 +523,9 @@
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+ switch@1f {
compatible = "mediatek,mt7531";
- reg = <0>;
+ reg = <0x1f>;
ports {
#address-cells = <1>;
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 826cb200b..425b398f8 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -220,9 +220,6 @@ bool kernel_page_present(struct page *page)
pte_t *ptep;
unsigned long addr = (unsigned long)page_address(page);
- if (!can_set_direct_map())
- return true;
-
pgdp = pgd_offset_k(addr);
if (pgd_none(READ_ONCE(*pgdp)))
return false;
diff --git a/arch/loongarch/include/asm/perf_event.h b/arch/loongarch/include/asm/perf_event.h
index 2a35a0bc2..52b638059 100644
--- a/arch/loongarch/include/asm/perf_event.h
+++ b/arch/loongarch/include/asm/perf_event.h
@@ -7,6 +7,14 @@
#ifndef __LOONGARCH_PERF_EVENT_H__
#define __LOONGARCH_PERF_EVENT_H__
+#include <asm/ptrace.h>
+
#define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->csr_era = (__ip); \
+ (regs)->regs[3] = current_stack_pointer; \
+ (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+}
+
#endif /* __LOONGARCH_PERF_EVENT_H__ */
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index b829ab911..007718d51 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -193,10 +193,10 @@ good_area:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
- if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
- goto bad_area;
if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
goto bad_area;
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
+ goto bad_area;
}
/*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 63055c6ad..2d9416a6a 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -799,8 +799,8 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define PAGE_SHARED __pgprot(0)
#define PAGE_KERNEL __pgprot(0)
#define swapper_pg_dir NULL
-#define TASK_SIZE 0xffffffffUL
-#define VMALLOC_START 0
+#define TASK_SIZE _AC(-1, UL)
+#define VMALLOC_START _AC(0, UL)
#define VMALLOC_END TASK_SIZE
#endif /* !CONFIG_MMU */
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba815ac47..49cea5b81 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -61,6 +61,7 @@ config X86
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_CONFIGURES_CPU_MITIGATIONS
select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64
@@ -2449,17 +2450,17 @@ config CC_HAS_SLS
config CC_HAS_RETURN_THUNK
def_bool $(cc-option,-mfunction-return=thunk-extern)
-menuconfig SPECULATION_MITIGATIONS
- bool "Mitigations for speculative execution vulnerabilities"
+menuconfig CPU_MITIGATIONS
+ bool "Mitigations for CPU vulnerabilities"
default y
help
- Say Y here to enable options which enable mitigations for
- speculative execution hardware vulnerabilities.
+ Say Y here to enable options which enable mitigations for hardware
+ vulnerabilities (usually related to speculative execution).
If you say N, all mitigations will be disabled. You really
should know what you are doing to say so.
-if SPECULATION_MITIGATIONS
+if CPU_MITIGATIONS
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
@@ -2563,31 +2564,16 @@ config MITIGATION_RFDS
stored in floating point, vector and integer registers.
See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
-choice
- prompt "Clear branch history"
+config MITIGATION_SPECTRE_BHI
+ bool "Mitigate Spectre-BHB (Branch History Injection)"
depends on CPU_SUP_INTEL
- default SPECTRE_BHI_ON
+ default y
help
Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
where the branch history buffer is poisoned to speculatively steer
indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
-config SPECTRE_BHI_ON
- bool "on"
- help
- Equivalent to setting spectre_bhi=on command line parameter.
-config SPECTRE_BHI_OFF
- bool "off"
- help
- Equivalent to setting spectre_bhi=off command line parameter.
-config SPECTRE_BHI_AUTO
- bool "auto"
- help
- Equivalent to setting spectre_bhi=auto command line parameter.
-
-endchoice
-
endif
config ARCH_HAS_ADD_PAGES
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 9e38ffaad..3c1b35203 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -91,7 +91,7 @@ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
SETUP_OBJS = $(addprefix $(obj)/,$(setup-y))
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|startup_64\|efi32_stub_entry\|efi64_stub_entry\|efi_pe_entry\|efi32_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) [a-zA-Z] \(startup_32\|efi.._stub_entry\|efi\(32\)\?_pe_entry\|input_data\|kernel_info\|_end\|_ehead\|_text\|_e\?data\|z_.*\)$$/\#define ZO_\2 0x\1/p'
quiet_cmd_zoffset = ZOFFSET $@
cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 3965b2c9e..6e61baff2 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -84,7 +84,7 @@ LDFLAGS_vmlinux += -T
hostprogs := mkpiggy
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
-sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
+sed-voffset := -e 's/^\([0-9a-fA-F]*\) [ABCDGRSTVW] \(_text\|__start_rodata\|__bss_start\|_end\)$$/\#define VO_\2 _AC(0x\1,UL)/p'
quiet_cmd_voffset = VOFFSET $@
cmd_voffset = $(NM) $< | sed -n $(sed-voffset) > $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 8ae7893d7..45435ff88 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -330,6 +330,7 @@ static size_t parse_elf(void *output)
return ehdr.e_entry - LOAD_PHYSICAL_ADDR;
}
+const unsigned long kernel_text_size = VO___start_rodata - VO__text;
const unsigned long kernel_total_size = VO__end - VO__text;
static u8 boot_heap[BOOT_HEAP_SIZE] __aligned(4);
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index d07e665bb..3c5d5c97f 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -118,6 +118,9 @@ static bool fault_in_kernel_space(unsigned long address)
#define __init
#define __pa(x) ((unsigned long)(x))
+#undef __head
+#define __head
+
#define __BOOT_COMPRESSED
/* Basic instruction decoding support needed */
diff --git a/arch/x86/boot/compressed/vmlinux.lds.S b/arch/x86/boot/compressed/vmlinux.lds.S
index 112b2375d..bcf0e4e4c 100644
--- a/arch/x86/boot/compressed/vmlinux.lds.S
+++ b/arch/x86/boot/compressed/vmlinux.lds.S
@@ -42,11 +42,13 @@ SECTIONS
*(.rodata.*)
_erodata = . ;
}
- .data : {
+ .data : ALIGN(0x1000) {
_data = . ;
*(.data)
*(.data.*)
- *(.bss.efistub)
+
+ /* Add 4 bytes of extra space for a CRC-32 checksum */
+ . = ALIGN(. + 4, 0x200);
_edata = . ;
}
. = ALIGN(L1_CACHE_BYTES);
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index d31982509..7593339b5 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -36,65 +36,19 @@ SYSSEG = 0x1000 /* historical load address >> 4 */
#define ROOT_RDONLY 1
#endif
+ .set salign, 0x1000
+ .set falign, 0x200
+
.code16
.section ".bstext", "ax"
-
- .global bootsect_start
-bootsect_start:
#ifdef CONFIG_EFI_STUB
# "MZ", MS-DOS header
.word MZ_MAGIC
-#endif
-
- # Normalize the start address
- ljmp $BOOTSEG, $start2
-
-start2:
- movw %cs, %ax
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %ss
- xorw %sp, %sp
- sti
- cld
-
- movw $bugger_off_msg, %si
-
-msg_loop:
- lodsb
- andb %al, %al
- jz bs_die
- movb $0xe, %ah
- movw $7, %bx
- int $0x10
- jmp msg_loop
-
-bs_die:
- # Allow the user to press a key, then reboot
- xorw %ax, %ax
- int $0x16
- int $0x19
-
- # int 0x19 should never return. In case it does anyway,
- # invoke the BIOS reset code...
- ljmp $0xf000,$0xfff0
-
-#ifdef CONFIG_EFI_STUB
.org 0x3c
#
# Offset to the PE header.
#
.long pe_header
-#endif /* CONFIG_EFI_STUB */
-
- .section ".bsdata", "a"
-bugger_off_msg:
- .ascii "Use a boot loader.\r\n"
- .ascii "\n"
- .ascii "Remove disk and press any key to reboot...\r\n"
- .byte 0
-
-#ifdef CONFIG_EFI_STUB
pe_header:
.long PE_MAGIC
@@ -123,30 +77,26 @@ optional_header:
.byte 0x02 # MajorLinkerVersion
.byte 0x14 # MinorLinkerVersion
- # Filled in by build.c
- .long 0 # SizeOfCode
+ .long ZO__data # SizeOfCode
- .long 0 # SizeOfInitializedData
+ .long ZO__end - ZO__data # SizeOfInitializedData
.long 0 # SizeOfUninitializedData
- # Filled in by build.c
- .long 0x0000 # AddressOfEntryPoint
+ .long setup_size + ZO_efi_pe_entry # AddressOfEntryPoint
- .long 0x0200 # BaseOfCode
+ .long setup_size # BaseOfCode
#ifdef CONFIG_X86_32
.long 0 # data
#endif
extra_header_fields:
- # PE specification requires ImageBase to be 64k aligned
- .set image_base, (LOAD_PHYSICAL_ADDR + 0xffff) & ~0xffff
#ifdef CONFIG_X86_32
- .long image_base # ImageBase
+ .long 0 # ImageBase
#else
- .quad image_base # ImageBase
+ .quad 0 # ImageBase
#endif
- .long 0x20 # SectionAlignment
- .long 0x20 # FileAlignment
+ .long salign # SectionAlignment
+ .long falign # FileAlignment
.word 0 # MajorOperatingSystemVersion
.word 0 # MinorOperatingSystemVersion
.word LINUX_EFISTUB_MAJOR_VERSION # MajorImageVersion
@@ -155,12 +105,9 @@ extra_header_fields:
.word 0 # MinorSubsystemVersion
.long 0 # Win32VersionValue
- #
- # The size of the bzImage is written in tools/build.c
- #
- .long 0 # SizeOfImage
+ .long setup_size + ZO__end # SizeOfImage
- .long 0x200 # SizeOfHeaders
+ .long salign # SizeOfHeaders
.long 0 # CheckSum
.word IMAGE_SUBSYSTEM_EFI_APPLICATION # Subsystem (EFI application)
#ifdef CONFIG_EFI_DXE_MEM_ATTRIBUTES
@@ -191,87 +138,77 @@ extra_header_fields:
# Section table
section_table:
- #
- # The offset & size fields are filled in by build.c.
- #
.ascii ".setup"
.byte 0
.byte 0
- .long 0
- .long 0x0 # startup_{32,64}
- .long 0 # Size of initialized data
- # on disk
- .long 0x0 # startup_{32,64}
- .long 0 # PointerToRelocations
- .long 0 # PointerToLineNumbers
- .word 0 # NumberOfRelocations
- .word 0 # NumberOfLineNumbers
- .long IMAGE_SCN_CNT_CODE | \
- IMAGE_SCN_MEM_READ | \
- IMAGE_SCN_MEM_EXECUTE | \
- IMAGE_SCN_ALIGN_16BYTES # Characteristics
+ .long pecompat_fstart - salign # VirtualSize
+ .long salign # VirtualAddress
+ .long pecompat_fstart - salign # SizeOfRawData
+ .long salign # PointerToRawData
- #
- # The EFI application loader requires a relocation section
- # because EFI applications must be relocatable. The .reloc
- # offset & size fields are filled in by build.c.
- #
- .ascii ".reloc"
- .byte 0
- .byte 0
- .long 0
- .long 0
- .long 0 # SizeOfRawData
- .long 0 # PointerToRawData
- .long 0 # PointerToRelocations
- .long 0 # PointerToLineNumbers
- .word 0 # NumberOfRelocations
- .word 0 # NumberOfLineNumbers
+ .long 0, 0, 0
.long IMAGE_SCN_CNT_INITIALIZED_DATA | \
IMAGE_SCN_MEM_READ | \
- IMAGE_SCN_MEM_DISCARDABLE | \
- IMAGE_SCN_ALIGN_1BYTES # Characteristics
+ IMAGE_SCN_MEM_DISCARDABLE # Characteristics
#ifdef CONFIG_EFI_MIXED
- #
- # The offset & size fields are filled in by build.c.
- #
.asciz ".compat"
- .long 0
- .long 0x0
- .long 0 # Size of initialized data
- # on disk
- .long 0x0
- .long 0 # PointerToRelocations
- .long 0 # PointerToLineNumbers
- .word 0 # NumberOfRelocations
- .word 0 # NumberOfLineNumbers
+
+ .long pecompat_fsize # VirtualSize
+ .long pecompat_fstart # VirtualAddress
+ .long pecompat_fsize # SizeOfRawData
+ .long pecompat_fstart # PointerToRawData
+
+ .long 0, 0, 0
.long IMAGE_SCN_CNT_INITIALIZED_DATA | \
IMAGE_SCN_MEM_READ | \
- IMAGE_SCN_MEM_DISCARDABLE | \
- IMAGE_SCN_ALIGN_1BYTES # Characteristics
+ IMAGE_SCN_MEM_DISCARDABLE # Characteristics
+
+ /*
+ * Put the IA-32 machine type and the associated entry point address in
+ * the .compat section, so loaders can figure out which other execution
+ * modes this image supports.
+ */
+ .pushsection ".pecompat", "a", @progbits
+ .balign salign
+ .globl pecompat_fstart
+pecompat_fstart:
+ .byte 0x1 # Version
+ .byte 8 # Size
+ .word IMAGE_FILE_MACHINE_I386 # PE machine type
+ .long setup_size + ZO_efi32_pe_entry # Entrypoint
+ .byte 0x0 # Sentinel
+ .popsection
+#else
+ .set pecompat_fstart, setup_size
#endif
-
- #
- # The offset & size fields are filled in by build.c.
- #
.ascii ".text"
.byte 0
.byte 0
.byte 0
- .long 0
- .long 0x0 # startup_{32,64}
- .long 0 # Size of initialized data
+ .long ZO__data
+ .long setup_size
+ .long ZO__data # Size of initialized data
# on disk
- .long 0x0 # startup_{32,64}
+ .long setup_size
.long 0 # PointerToRelocations
.long 0 # PointerToLineNumbers
.word 0 # NumberOfRelocations
.word 0 # NumberOfLineNumbers
.long IMAGE_SCN_CNT_CODE | \
IMAGE_SCN_MEM_READ | \
- IMAGE_SCN_MEM_EXECUTE | \
- IMAGE_SCN_ALIGN_16BYTES # Characteristics
+ IMAGE_SCN_MEM_EXECUTE # Characteristics
+
+ .ascii ".data\0\0\0"
+ .long ZO__end - ZO__data # VirtualSize
+ .long setup_size + ZO__data # VirtualAddress
+ .long ZO__edata - ZO__data # SizeOfRawData
+ .long setup_size + ZO__data # PointerToRawData
+
+ .long 0, 0, 0
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE # Characteristics
.set section_count, (. - section_table) / 40
#endif /* CONFIG_EFI_STUB */
@@ -285,12 +222,12 @@ sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */
.globl hdr
hdr:
-setup_sects: .byte 0 /* Filled in by build.c */
+ .byte setup_sects - 1
root_flags: .word ROOT_RDONLY
-syssize: .long 0 /* Filled in by build.c */
+syssize: .long ZO__edata / 16
ram_size: .word 0 /* Obsolete */
vid_mode: .word SVGA_MODE
-root_dev: .word 0 /* Filled in by build.c */
+root_dev: .word 0 /* Default to major/minor 0/0 */
boot_flag: .word 0xAA55
# offset 512, entry point
@@ -578,9 +515,25 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
# define INIT_SIZE VO_INIT_SIZE
#endif
+ .macro __handover_offset
+#ifndef CONFIG_EFI_HANDOVER_PROTOCOL
+ .long 0
+#elif !defined(CONFIG_X86_64)
+ .long ZO_efi32_stub_entry
+#else
+ /* Yes, this is really how we defined it :( */
+ .long ZO_efi64_stub_entry - 0x200
+#ifdef CONFIG_EFI_MIXED
+ .if ZO_efi32_stub_entry != ZO_efi64_stub_entry - 0x200
+ .error "32-bit and 64-bit EFI entry points do not match"
+ .endif
+#endif
+#endif
+ .endm
+
init_size: .long INIT_SIZE # kernel initialization size
-handover_offset: .long 0 # Filled in by build.c
-kernel_info_offset: .long 0 # Filled in by build.c
+handover_offset: __handover_offset
+kernel_info_offset: .long ZO_kernel_info
# End of setup header #####################################################
diff --git a/arch/x86/boot/setup.ld b/arch/x86/boot/setup.ld
index 49546c247..3a2d1360a 100644
--- a/arch/x86/boot/setup.ld
+++ b/arch/x86/boot/setup.ld
@@ -10,10 +10,11 @@ ENTRY(_start)
SECTIONS
{
. = 0;
- .bstext : { *(.bstext) }
- .bsdata : { *(.bsdata) }
+ .bstext : {
+ *(.bstext)
+ . = 495;
+ } =0xffffffff
- . = 495;
.header : { *(.header) }
.entrytext : { *(.entrytext) }
.inittext : { *(.inittext) }
@@ -23,6 +24,9 @@ SECTIONS
.text : { *(.text .text.*) }
.text32 : { *(.text32) }
+ .pecompat : { *(.pecompat) }
+ PROVIDE(pecompat_fsize = setup_size - pecompat_fstart);
+
. = ALIGN(16);
.rodata : { *(.rodata*) }
@@ -38,8 +42,10 @@ SECTIONS
.signature : {
setup_sig = .;
LONG(0x5a5aaa55)
- }
+ setup_size = ALIGN(ABSOLUTE(.), 4096);
+ setup_sects = ABSOLUTE(setup_size / 512);
+ }
. = ALIGN(16);
.bss :
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index bd247692b..10311d77c 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -40,10 +40,6 @@ typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
-#define DEFAULT_MAJOR_ROOT 0
-#define DEFAULT_MINOR_ROOT 0
-#define DEFAULT_ROOT_DEV (DEFAULT_MAJOR_ROOT << 8 | DEFAULT_MINOR_ROOT)
-
/* Minimal number of setup sectors */
#define SETUP_SECT_MIN 5
#define SETUP_SECT_MAX 64
@@ -51,22 +47,7 @@ typedef unsigned int u32;
/* This must be large enough to hold the entire setup */
u8 buf[SETUP_SECT_MAX*512];
-#define PECOFF_RELOC_RESERVE 0x20
-
-#ifdef CONFIG_EFI_MIXED
-#define PECOFF_COMPAT_RESERVE 0x20
-#else
-#define PECOFF_COMPAT_RESERVE 0x0
-#endif
-
-static unsigned long efi32_stub_entry;
-static unsigned long efi64_stub_entry;
-static unsigned long efi_pe_entry;
-static unsigned long efi32_pe_entry;
-static unsigned long kernel_info;
-static unsigned long startup_64;
-static unsigned long _ehead;
-static unsigned long _end;
+static unsigned long _edata;
/*----------------------------------------------------------------------*/
@@ -152,180 +133,6 @@ static void usage(void)
die("Usage: build setup system zoffset.h image");
}
-#ifdef CONFIG_EFI_STUB
-
-static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
-{
- unsigned int pe_header;
- unsigned short num_sections;
- u8 *section;
-
- pe_header = get_unaligned_le32(&buf[0x3c]);
- num_sections = get_unaligned_le16(&buf[pe_header + 6]);
-
-#ifdef CONFIG_X86_32
- section = &buf[pe_header + 0xa8];
-#else
- section = &buf[pe_header + 0xb8];
-#endif
-
- while (num_sections > 0) {
- if (strncmp((char*)section, section_name, 8) == 0) {
- /* section header size field */
- put_unaligned_le32(size, section + 0x8);
-
- /* section header vma field */
- put_unaligned_le32(vma, section + 0xc);
-
- /* section header 'size of initialised data' field */
- put_unaligned_le32(datasz, section + 0x10);
-
- /* section header 'file offset' field */
- put_unaligned_le32(offset, section + 0x14);
-
- break;
- }
- section += 0x28;
- num_sections--;
- }
-}
-
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
-{
- update_pecoff_section_header_fields(section_name, offset, size, size, offset);
-}
-
-static void update_pecoff_setup_and_reloc(unsigned int size)
-{
- u32 setup_offset = 0x200;
- u32 reloc_offset = size - PECOFF_RELOC_RESERVE - PECOFF_COMPAT_RESERVE;
-#ifdef CONFIG_EFI_MIXED
- u32 compat_offset = reloc_offset + PECOFF_RELOC_RESERVE;
-#endif
- u32 setup_size = reloc_offset - setup_offset;
-
- update_pecoff_section_header(".setup", setup_offset, setup_size);
- update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
-
- /*
- * Modify .reloc section contents with a single entry. The
- * relocation is applied to offset 10 of the relocation section.
- */
- put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
- put_unaligned_le32(10, &buf[reloc_offset + 4]);
-
-#ifdef CONFIG_EFI_MIXED
- update_pecoff_section_header(".compat", compat_offset, PECOFF_COMPAT_RESERVE);
-
- /*
- * Put the IA-32 machine type (0x14c) and the associated entry point
- * address in the .compat section, so loaders can figure out which other
- * execution modes this image supports.
- */
- buf[compat_offset] = 0x1;
- buf[compat_offset + 1] = 0x8;
- put_unaligned_le16(0x14c, &buf[compat_offset + 2]);
- put_unaligned_le32(efi32_pe_entry + size, &buf[compat_offset + 4]);
-#endif
-}
-
-static void update_pecoff_text(unsigned int text_start, unsigned int file_sz,
- unsigned int init_sz)
-{
- unsigned int pe_header;
- unsigned int text_sz = file_sz - text_start;
- unsigned int bss_sz = init_sz - file_sz;
-
- pe_header = get_unaligned_le32(&buf[0x3c]);
-
- /*
- * The PE/COFF loader may load the image at an address which is
- * misaligned with respect to the kernel_alignment field in the setup
- * header.
- *
- * In order to avoid relocating the kernel to correct the misalignment,
- * add slack to allow the buffer to be aligned within the declared size
- * of the image.
- */
- bss_sz += CONFIG_PHYSICAL_ALIGN;
- init_sz += CONFIG_PHYSICAL_ALIGN;
-
- /*
- * Size of code: Subtract the size of the first sector (512 bytes)
- * which includes the header.
- */
- put_unaligned_le32(file_sz - 512 + bss_sz, &buf[pe_header + 0x1c]);
-
- /* Size of image */
- put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
-
- /*
- * Address of entry point for PE/COFF executable
- */
- put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
-
- update_pecoff_section_header_fields(".text", text_start, text_sz + bss_sz,
- text_sz, text_start);
-}
-
-static int reserve_pecoff_reloc_section(int c)
-{
- /* Reserve 0x20 bytes for .reloc section */
- memset(buf+c, 0, PECOFF_RELOC_RESERVE);
- return PECOFF_RELOC_RESERVE;
-}
-
-static void efi_stub_defaults(void)
-{
- /* Defaults for old kernel */
-#ifdef CONFIG_X86_32
- efi_pe_entry = 0x10;
-#else
- efi_pe_entry = 0x210;
- startup_64 = 0x200;
-#endif
-}
-
-static void efi_stub_entry_update(void)
-{
- unsigned long addr = efi32_stub_entry;
-
-#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
-#ifdef CONFIG_X86_64
- /* Yes, this is really how we defined it :( */
- addr = efi64_stub_entry - 0x200;
-#endif
-
-#ifdef CONFIG_EFI_MIXED
- if (efi32_stub_entry != addr)
- die("32-bit and 64-bit EFI entry points do not match\n");
-#endif
-#endif
- put_unaligned_le32(addr, &buf[0x264]);
-}
-
-#else
-
-static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
-static inline void update_pecoff_text(unsigned int text_start,
- unsigned int file_sz,
- unsigned int init_sz) {}
-static inline void efi_stub_defaults(void) {}
-static inline void efi_stub_entry_update(void) {}
-
-static inline int reserve_pecoff_reloc_section(int c)
-{
- return 0;
-}
-#endif /* CONFIG_EFI_STUB */
-
-static int reserve_pecoff_compat_section(int c)
-{
- /* Reserve 0x20 bytes for .compat section */
- memset(buf+c, 0, PECOFF_COMPAT_RESERVE);
- return PECOFF_COMPAT_RESERVE;
-}
-
/*
* Parse zoffset.h and find the entry points. We could just #include zoffset.h
* but that would mean tools/build would have to be rebuilt every time. It's
@@ -354,14 +161,7 @@ static void parse_zoffset(char *fname)
p = (char *)buf;
while (p && *p) {
- PARSE_ZOFS(p, efi32_stub_entry);
- PARSE_ZOFS(p, efi64_stub_entry);
- PARSE_ZOFS(p, efi_pe_entry);
- PARSE_ZOFS(p, efi32_pe_entry);
- PARSE_ZOFS(p, kernel_info);
- PARSE_ZOFS(p, startup_64);
- PARSE_ZOFS(p, _ehead);
- PARSE_ZOFS(p, _end);
+ PARSE_ZOFS(p, _edata);
p = strchr(p, '\n');
while (p && (*p == '\r' || *p == '\n'))
@@ -371,17 +171,14 @@ static void parse_zoffset(char *fname)
int main(int argc, char ** argv)
{
- unsigned int i, sz, setup_sectors, init_sz;
+ unsigned int i, sz, setup_sectors;
int c;
- u32 sys_size;
struct stat sb;
FILE *file, *dest;
int fd;
void *kernel;
u32 crc = 0xffffffffUL;
- efi_stub_defaults();
-
if (argc != 5)
usage();
parse_zoffset(argv[3]);
@@ -403,72 +200,27 @@ int main(int argc, char ** argv)
die("Boot block hasn't got boot flag (0xAA55)");
fclose(file);
- c += reserve_pecoff_compat_section(c);
- c += reserve_pecoff_reloc_section(c);
-
/* Pad unused space with zeros */
- setup_sectors = (c + 511) / 512;
+ setup_sectors = (c + 4095) / 4096;
+ setup_sectors *= 8;
if (setup_sectors < SETUP_SECT_MIN)
setup_sectors = SETUP_SECT_MIN;
i = setup_sectors*512;
memset(buf+c, 0, i-c);
- update_pecoff_setup_and_reloc(i);
-
- /* Set the default root device */
- put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
-
/* Open and stat the kernel file */
fd = open(argv[2], O_RDONLY);
if (fd < 0)
die("Unable to open `%s': %m", argv[2]);
if (fstat(fd, &sb))
die("Unable to stat `%s': %m", argv[2]);
- sz = sb.st_size;
+ if (_edata != sb.st_size)
+ die("Unexpected file size `%s': %u != %u", argv[2], _edata,
+ sb.st_size);
+ sz = _edata - 4;
kernel = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
if (kernel == MAP_FAILED)
die("Unable to mmap '%s': %m", argv[2]);
- /* Number of 16-byte paragraphs, including space for a 4-byte CRC */
- sys_size = (sz + 15 + 4) / 16;
-#ifdef CONFIG_EFI_STUB
- /*
- * COFF requires minimum 32-byte alignment of sections, and
- * adding a signature is problematic without that alignment.
- */
- sys_size = (sys_size + 1) & ~1;
-#endif
-
- /* Patch the setup code with the appropriate size parameters */
- buf[0x1f1] = setup_sectors-1;
- put_unaligned_le32(sys_size, &buf[0x1f4]);
-
- init_sz = get_unaligned_le32(&buf[0x260]);
-#ifdef CONFIG_EFI_STUB
- /*
- * The decompression buffer will start at ImageBase. When relocating
- * the compressed kernel to its end, we must ensure that the head
- * section does not get overwritten. The head section occupies
- * [i, i + _ehead), and the destination is [init_sz - _end, init_sz).
- *
- * At present these should never overlap, because 'i' is at most 32k
- * because of SETUP_SECT_MAX, '_ehead' is less than 1k, and the
- * calculation of INIT_SIZE in boot/header.S ensures that
- * 'init_sz - _end' is at least 64k.
- *
- * For future-proofing, increase init_sz if necessary.
- */
-
- if (init_sz - _end < i + _ehead) {
- init_sz = (i + _ehead + _end + 4095) & ~4095;
- put_unaligned_le32(init_sz, &buf[0x260]);
- }
-#endif
- update_pecoff_text(setup_sectors * 512, i + (sys_size * 16), init_sz);
-
- efi_stub_entry_update();
-
- /* Update kernel_info offset. */
- put_unaligned_le32(kernel_info, &buf[0x268]);
crc = partial_crc32(buf, i, crc);
if (fwrite(buf, 1, i, dest) != i)
@@ -479,13 +231,6 @@ int main(int argc, char ** argv)
if (fwrite(kernel, 1, sz, dest) != sz)
die("Writing kernel failed");
- /* Add padding leaving 4 bytes for the checksum */
- while (sz++ < (sys_size*16) - 4) {
- crc = partial_crc32_one('\0', crc);
- if (fwrite("\0", 1, 1, dest) != 1)
- die("Writing padding failed");
- }
-
/* Write the CRC */
put_unaligned_le32(crc, buf);
if (fwrite(buf, 1, 4, dest) != 4)
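Note: after this rewrite, tools/build.c no longer patches the setup header or the PE/COFF metadata at all. It pads the setup to whole 4 KiB pages ((c + 4095) / 4096 pages, i.e. 8 sectors each), verifies that the compressed payload matches the ZO__edata size from zoffset.h, and writes the payload minus its last four bytes so the freshly computed CRC-32 lands in the reserved slot at the end. A standalone sketch of that layout arithmetic, with invented example sizes:

    /*
     * Sketch of the simplified tools/build.c layout computation: the setup is
     * padded to whole 4 KiB pages and the payload's last four bytes are
     * replaced by a CRC-32 over everything written before them.
     * Illustration with example numbers, not the real tool.
     */
    #include <stdio.h>

    #define SETUP_SECT_MIN 5

    int main(void)
    {
        unsigned int c = 18332;          /* bytes of setup read from argv[1] (example) */
        unsigned int edata = 9437184;    /* ZO__edata from zoffset.h (example) */
        unsigned int setup_sectors, setup_bytes, payload;

        setup_sectors = (c + 4095) / 4096;    /* whole 4 KiB pages ... */
        setup_sectors *= 8;                   /* ... in 512-byte sectors */
        if (setup_sectors < SETUP_SECT_MIN)
            setup_sectors = SETUP_SECT_MIN;

        setup_bytes = setup_sectors * 512;
        payload = edata - 4;                  /* last 4 bytes are the CRC slot */

        printf("setup: %u sectors (%u bytes), payload: %u bytes + 4-byte CRC\n",
               setup_sectors, setup_bytes, payload);
        return 0;
    }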
diff --git a/arch/x86/events/amd/lbr.c b/arch/x86/events/amd/lbr.c
index b8fe74e8e..48f4095f5 100644
--- a/arch/x86/events/amd/lbr.c
+++ b/arch/x86/events/amd/lbr.c
@@ -173,9 +173,11 @@ void amd_pmu_lbr_read(void)
/*
* Check if a branch has been logged; if valid = 0, spec = 0
- * then no branch was recorded
+ * then no branch was recorded; if reserved = 1 then an
+ * erroneous branch was recorded (see Erratum 1452)
*/
- if (!entry.to.split.valid && !entry.to.split.spec)
+ if ((!entry.to.split.valid && !entry.to.split.spec) ||
+ entry.to.split.reserved)
continue;
perf_clear_branch_entry_bitfields(br + out);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 30fb4931d..1394312b7 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
while (++i < cpuc->n_events) {
cpuc->event_list[i-1] = cpuc->event_list[i];
cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+ cpuc->assign[i-1] = cpuc->assign[i];
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 3216da707..36ceecd40 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -12,6 +12,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
+#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@@ -109,7 +110,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
- return *((volatile u32 *)(APIC_BASE + reg));
+ return readl((void __iomem *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);
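Note: switching native_apic_mem_read() to readl() pins the MMIO access to a single 32-bit MOV through an __iomem pointer instead of a plain volatile dereference, which the compiler is otherwise free to fold into some other instruction; that matters where the access is trapped and must be decoded (e.g. SEV-ES guests). The userspace sketch below only illustrates the "force a plain MOV" idea against an ordinary variable; it is not kernel code, and the rationale stated here is the usual one for this change, not quoted from the patch.

    /*
     * Userspace illustration (x86-64, GCC/Clang): a plain volatile load may be
     * folded into another instruction, while this accessor pins the load to a
     * single MOV, which is what readl() guarantees for MMIO. Reads an ordinary
     * variable here, not a real APIC page.
     */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_apic_reg = 0x12345678;

    static inline uint32_t mov_read32(const volatile uint32_t *addr)
    {
        uint32_t val;

        /* A single 32-bit MOV from memory, nothing else. */
        asm volatile("movl %1, %0" : "=r" (val) : "m" (*addr));
        return val;
    }

    int main(void)
    {
        uint32_t plain = *(volatile uint32_t *)&fake_apic_reg;
        uint32_t moved = mov_read32(&fake_apic_reg);

        printf("plain=%#x mov=%#x\n", plain, moved);
        return 0;
    }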
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index a38cc0afc..a3e0be047 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -81,6 +81,7 @@
#ifndef __ASSEMBLY__
extern unsigned int output_len;
+extern const unsigned long kernel_text_size;
extern const unsigned long kernel_total_size;
unsigned long decompress_kernel(unsigned char *outbuf, unsigned long virt_addr,
diff --git a/arch/x86/include/asm/coco.h b/arch/x86/include/asm/coco.h
index 1f97d00ad..100a752c3 100644
--- a/arch/x86/include/asm/coco.h
+++ b/arch/x86/include/asm/coco.h
@@ -13,9 +13,10 @@ enum cc_vendor {
};
extern enum cc_vendor cc_vendor;
-extern u64 cc_mask;
#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
+extern u64 cc_mask;
+
static inline void cc_set_mask(u64 mask)
{
RIP_REL_REF(cc_mask) = mask;
@@ -25,6 +26,8 @@ u64 cc_mkenc(u64 val);
u64 cc_mkdec(u64 val);
void cc_random_init(void);
#else
+static const u64 cc_mask = 0;
+
static inline u64 cc_mkenc(u64 val)
{
return val;
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 5f1d3c421..cc9ccf61b 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_INIT_H
#define _ASM_X86_INIT_H
+#define __head __section(".head.text")
+
struct x86_mapping_info {
void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
void *context; /* context for alloc_pgt_page */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index dfcdcafe3..887a17148 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -773,6 +773,7 @@ struct kvm_vcpu_arch {
int cpuid_nent;
struct kvm_cpuid_entry2 *cpuid_entries;
u32 kvm_cpuid_base;
+ bool is_amd_compatible;
u64 reserved_gpa_bits;
int maxphyaddr;
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 41d06822b..853f423b1 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -46,8 +46,8 @@ void __init sme_unmap_bootdata(char *real_mode_data);
void __init sme_early_init(void);
void __init sev_setup_arch(void);
-void __init sme_encrypt_kernel(struct boot_params *bp);
-void __init sme_enable(struct boot_params *bp);
+void sme_encrypt_kernel(struct boot_params *bp);
+void sme_enable(struct boot_params *bp);
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
@@ -80,8 +80,8 @@ static inline void __init sme_unmap_bootdata(char *real_mode_data) { }
static inline void __init sme_early_init(void) { }
static inline void __init sev_setup_arch(void) { }
-static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
-static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sme_encrypt_kernel(struct boot_params *bp) { }
+static inline void sme_enable(struct boot_params *bp) { }
static inline void sev_es_init_vc_handling(void) { }
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index a506a4114..86bd4311d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -11,20 +11,14 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
-#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
-
-#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
-
#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
+/* Cast P*D_MASK to a signed type so that it is sign-extended if
virtual addresses are 32-bits but physical addresses are larger
(ie, 32-bit PAE). */
#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
-#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_MASK) & __PHYSICAL_MASK)
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f6116b66f..f0b9b37c4 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -127,7 +127,7 @@
*/
#define _COMMON_PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY |\
- _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_CC | \
_PAGE_UFFD_WP)
#define _PAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PAT)
#define _HPAGE_CHG_MASK (_COMMON_PAGE_CHG_MASK | _PAGE_PSE | _PAGE_PAT_LARGE)
@@ -153,6 +153,7 @@ enum page_cache_mode {
};
#endif
+#define _PAGE_CC (_AT(pteval_t, cc_mask))
#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))
#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
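Note: _PAGE_CC is defined from cc_mask, which the asm/coco.h hunk earlier guarantees is always available (a const 0 when CONFIG_ARCH_HAS_CC_PLATFORM is off), so _COMMON_PAGE_CHG_MASK now preserves whichever confidential-computing bit the platform actually uses instead of hard-coding the SME/SEV _PAGE_ENC bit. The sketch below mimics the pte_modify()-style "keep the CHG-mask bits, take the rest from the new protection" step with invented bit positions; none of the FAKE_* names exist in the kernel.

    /*
     * Why the change-protection mask matters: bits in the CHG mask are kept
     * from the old pte, everything else comes from the new protection. If the
     * CC bit is in the mask, a protection update cannot strip it.
     * Stand-in bit positions, not real pte bits.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define FAKE_PAGE_PRESENT  (1ULL << 0)
    #define FAKE_PAGE_RW       (1ULL << 1)
    #define FAKE_CC_BIT        (1ULL << 51)       /* stand-in for cc_mask */

    static const uint64_t cc_mask = FAKE_CC_BIT;  /* const 0 on non-CoCo builds */
    #define FAKE_PAGE_CC   cc_mask
    #define FAKE_CHG_MASK  (FAKE_PAGE_PRESENT | FAKE_PAGE_CC)

    int main(void)
    {
        uint64_t pte     = FAKE_PAGE_PRESENT | FAKE_PAGE_RW | FAKE_CC_BIT;
        uint64_t newprot = FAKE_PAGE_PRESENT;     /* e.g. make the page read-only */
        uint64_t newpte  = (pte & FAKE_CHG_MASK) | (newprot & ~FAKE_CHG_MASK);

        printf("old=%#llx new=%#llx (CC bit preserved: %s)\n",
               (unsigned long long)pte, (unsigned long long)newpte,
               (newpte & FAKE_CC_BIT) ? "yes" : "no");
        return 0;
    }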
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index c57dd2115..bcac2e53d 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -192,15 +192,15 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
struct snp_guest_request_ioctl;
void setup_ghcb(void);
-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
- unsigned long npages);
-void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
- unsigned long npages);
+void early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages);
+void early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
+ unsigned long npages);
void snp_set_memory_shared(unsigned long vaddr, unsigned long npages);
void snp_set_memory_private(unsigned long vaddr, unsigned long npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
-void __init __noreturn snp_abort(void);
+void __noreturn snp_abort(void);
void snp_dmi_setup(void);
int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio);
u64 snp_get_unsupported_features(u64 status);
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index 19a0207e5..56a917df4 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -504,7 +504,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
}
a = aper + iommu_size;
- iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
+ iommu_size -= round_up(a, PMD_SIZE) - a;
if (iommu_size < 64*1024*1024) {
pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 770557110..e1672cc77 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1760,11 +1760,11 @@ static int x2apic_state;
static bool x2apic_hw_locked(void)
{
- u64 ia32_cap;
+ u64 x86_arch_cap_msr;
u64 msr;
- ia32_cap = x86_read_arch_cap_msr();
- if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+ if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
return (msr & LEGACY_XAPIC_DISABLED);
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 96bd3ee83..3f38592ec 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -60,6 +60,8 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);
+static u64 __ro_after_init x86_arch_cap_msr;
+
static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
@@ -143,6 +145,8 @@ void __init cpu_select_mitigations(void)
x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
}
+ x86_arch_cap_msr = x86_read_arch_cap_msr();
+
/* Select the proper CPU mitigations before patching alternatives: */
spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
@@ -300,8 +304,6 @@ static const char * const taa_strings[] = {
static void __init taa_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_TAA)) {
taa_mitigation = TAA_MITIGATION_OFF;
return;
@@ -340,9 +342,8 @@ static void __init taa_select_mitigation(void)
* On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
* update is required.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
- !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+ if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
/*
@@ -400,8 +401,6 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
cpu_mitigations_off()) {
@@ -412,8 +411,6 @@ static void __init mmio_select_mitigation(void)
if (mmio_mitigation == MMIO_MITIGATION_OFF)
return;
- ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable CPU buffer clear mitigation for host and VMM, if also affected
* by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -436,7 +433,7 @@ static void __init mmio_select_mitigation(void)
* be propagated to uncore buffers, clearing the Fill buffers on idle
* is required irrespective of SMT state.
*/
- if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+ if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
/*
@@ -446,10 +443,10 @@ static void __init mmio_select_mitigation(void)
* FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
* affected systems.
*/
- if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+ if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
(boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)))
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -507,7 +504,7 @@ static void __init rfds_select_mitigation(void)
if (rfds_mitigation == RFDS_MITIGATION_OFF)
return;
- if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
else
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -658,8 +655,6 @@ void update_srbds_msr(void)
static void __init srbds_select_mitigation(void)
{
- u64 ia32_cap;
-
if (!boot_cpu_has_bug(X86_BUG_SRBDS))
return;
@@ -668,8 +663,7 @@ static void __init srbds_select_mitigation(void)
* are only exposed to SRBDS when TSX is enabled or when CPU is affected
* by Processor MMIO Stale Data vulnerability.
*/
- ia32_cap = x86_read_arch_cap_msr();
- if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+ if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -812,7 +806,7 @@ static void __init gds_select_mitigation(void)
/* Will verify below that mitigation _can_ be disabled */
/* No microcode */
- if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+ if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
if (gds_mitigation == GDS_MITIGATION_FORCE) {
/*
* This only needs to be done on the boot CPU so do it
@@ -1521,20 +1515,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
return SPECTRE_V2_RETPOLINE;
}
+static bool __ro_after_init rrsba_disabled;
+
/* Disable in-kernel use of non-RSB RET predictors */
static void __init spec_ctrl_disable_kernel_rrsba(void)
{
- u64 ia32_cap;
+ if (rrsba_disabled)
+ return;
- if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
+ rrsba_disabled = true;
return;
+ }
- ia32_cap = x86_read_arch_cap_msr();
+ if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+ return;
- if (ia32_cap & ARCH_CAP_RRSBA) {
- x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
- update_spec_ctrl(x86_spec_ctrl_base);
- }
+ x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
@@ -1603,13 +1602,10 @@ static bool __init spec_ctrl_bhi_dis(void)
enum bhi_mitigations {
BHI_MITIGATION_OFF,
BHI_MITIGATION_ON,
- BHI_MITIGATION_AUTO,
};
static enum bhi_mitigations bhi_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_SPECTRE_BHI_ON) ? BHI_MITIGATION_ON :
- IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
- BHI_MITIGATION_AUTO;
+ IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
static int __init spectre_bhi_parse_cmdline(char *str)
{
@@ -1620,8 +1616,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
bhi_mitigation = BHI_MITIGATION_OFF;
else if (!strcmp(str, "on"))
bhi_mitigation = BHI_MITIGATION_ON;
- else if (!strcmp(str, "auto"))
- bhi_mitigation = BHI_MITIGATION_AUTO;
else
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
@@ -1635,9 +1629,12 @@ static void __init bhi_select_mitigation(void)
return;
/* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
- if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
- !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
- return;
+ if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+ spec_ctrl_disable_kernel_rrsba();
+ if (rrsba_disabled)
+ return;
+ }
if (spec_ctrl_bhi_dis())
return;
@@ -1649,9 +1646,6 @@ static void __init bhi_select_mitigation(void)
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
- if (bhi_mitigation == BHI_MITIGATION_AUTO)
- return;
-
/* Mitigate syscalls when the mitigation is forced =on */
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
@@ -1884,8 +1878,6 @@ static void update_indir_branch_cond(void)
/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
-
/*
* Enable the idle clearing if SMT is active on CPUs which are
* affected only by MSBDS and not any other MDS variant.
@@ -1900,7 +1892,7 @@ static void update_mds_branch_idle(void)
if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
- (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+ (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
}
}
@@ -2788,21 +2780,22 @@ static char *pbrsb_eibrs_state(void)
}
}
-static const char * const spectre_bhi_state(void)
+static const char *spectre_bhi_state(void)
{
if (!boot_cpu_has_bug(X86_BUG_BHI))
return "; BHI: Not affected";
- else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW))
return "; BHI: BHI_DIS_S";
- else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
return "; BHI: SW loop, KVM: SW loop";
else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
- !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+ !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) &&
+ rrsba_disabled)
return "; BHI: Retpoline";
- else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
- return "; BHI: Syscall hardening, KVM: SW loop";
+ else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+ return "; BHI: Vulnerable, KVM: SW loop";
- return "; BHI: Vulnerable (Syscall hardening enabled)";
+ return "; BHI: Vulnerable";
}
static ssize_t spectre_v2_show_state(char *buf)
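Note: the bugs.c changes mostly do two things: MSR_IA32_ARCH_CAPABILITIES is read once into the cached x86_arch_cap_msr instead of being re-read in every *_select_mitigation() helper, and spec_ctrl_disable_kernel_rrsba() becomes an idempotent helper whose outcome is tracked in rrsba_disabled so the BHI path can reuse it. A compact standalone sketch of both patterns; read_arch_cap() and the FAKE_* bit are stand-ins, not real kernel symbols.

    /*
     * Read-once cache plus idempotent-disable pattern, as used above.
     * Illustration only; the bit value is made up.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_ARCH_CAP_RRSBA  (1ULL << 19)

    static uint64_t arch_cap_msr;          /* cached once at "boot" */
    static bool rrsba_disabled;

    static uint64_t read_arch_cap(void)    /* stand-in for rdmsrl() */
    {
        return FAKE_ARCH_CAP_RRSBA;
    }

    static void disable_kernel_rrsba(void)
    {
        if (rrsba_disabled)                          /* already handled */
            return;

        if (!(arch_cap_msr & FAKE_ARCH_CAP_RRSBA)) {
            rrsba_disabled = true;                   /* nothing to disable */
            return;
        }

        /* ...the real code sets SPEC_CTRL_RRSBA_DIS_S here... */
        rrsba_disabled = true;
    }

    int main(void)
    {
        arch_cap_msr = read_arch_cap();    /* read the MSR exactly once */

        disable_kernel_rrsba();            /* called from the spectre_v2 path */
        disable_kernel_rrsba();            /* and again from the BHI path */

        printf("rrsba_disabled=%d\n", rrsba_disabled);
        return 0;
    }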
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 08fe77d2a..f2bc651c0 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1308,25 +1308,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
u64 x86_read_arch_cap_msr(void)
{
- u64 ia32_cap = 0;
+ u64 x86_arch_cap_msr = 0;
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
- return ia32_cap;
+ return x86_arch_cap_msr;
}
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
- return (ia32_cap & ARCH_CAP_FBSDP_NO &&
- ia32_cap & ARCH_CAP_PSDP_NO &&
- ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+ return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+ x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
}
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
- if (ia32_cap & ARCH_CAP_RFDS_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
return false;
/*
@@ -1334,7 +1334,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
* indicate that mitigation is needed because guest is running on a
* vulnerable hardware or may migrate to such hardware:
*/
- if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+ if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
return true;
/* Only consult the blacklist when there is no enumeration: */
@@ -1343,11 +1343,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
- u64 ia32_cap = x86_read_arch_cap_msr();
+ u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
- !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1359,7 +1359,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
- !(ia32_cap & ARCH_CAP_SSB_NO) &&
+ !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1367,15 +1367,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
* flag and protect from vendor-specific bugs via the whitelist.
*/
- if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
+ if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
}
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
- !(ia32_cap & ARCH_CAP_MDS_NO)) {
+ !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1394,9 +1394,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* TSX_CTRL check alone is not sufficient for cases when the microcode
* update is not present or running as guest that don't get TSX_CTRL.
*/
- if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+ if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
(cpu_has(c, X86_FEATURE_RTM) ||
- (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+ (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
setup_force_cpu_bug(X86_BUG_TAA);
/*
@@ -1422,7 +1422,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
* nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
*/
- if (!arch_cap_mmio_immune(ia32_cap)) {
+ if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
if (cpu_matches(cpu_vuln_blacklist, MMIO))
setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1430,7 +1430,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
}
if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
- if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+ if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
@@ -1443,7 +1443,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
* disabling AVX2. The only way to do this in HW is to clear XCR0[2],
* which means that AVX will be disabled.
*/
- if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+ if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
boot_cpu_has(X86_FEATURE_AVX))
setup_force_cpu_bug(X86_BUG_GDS);
@@ -1452,11 +1452,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_SRSO);
}
- if (vulnerable_to_rfds(ia32_cap))
+ if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
/* When virtualized, eIBRS could be hidden, assume vulnerable */
- if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+ if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
(boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1466,7 +1466,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
return;
/* Rogue Data Cache Load? No! */
- if (ia32_cap & ARCH_CAP_RDCL_NO)
+ if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
return;
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index c881bcafb..9c19f40b1 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
+ { X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
+ { X86_FEATURE_VAES, X86_FEATURE_AVX },
+ { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
- { X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
- { X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
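Note: the dependency-table corrections tie GFNI to SSE2 (XMM2) and VAES/VPCLMULQDQ to AVX rather than to AVX512VL, so these features are no longer cleared along with AVX-512 on CPUs that support them without it, while still disappearing when their real prerequisite is turned off. The sketch below shows how such a table is typically consumed (clear a feature, then sweep until no dependent feature is left set); the enum values and table rows are illustrative, not the kernel's.

    /*
     * Dependency-table sweep: disabling a feature clears everything that
     * (transitively) depends on it. Illustrative stand-in table.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum { F_XMM2, F_AVX, F_GFNI, F_VAES, F_AVX512VL, NR_FEATURES };

    struct dep { int feature, depends; };

    static const struct dep deps[] = {
        { F_GFNI, F_XMM2 },        /* was tied to AVX512VL before this change */
        { F_VAES, F_AVX  },        /* likewise */
        { F_AVX512VL, F_AVX },
    };

    static bool have[NR_FEATURES] = { true, true, true, true, true };

    static void clear_feature(int f)
    {
        bool changed = true;

        have[f] = false;

        /* Keep sweeping the table until nothing else needs clearing. */
        while (changed) {
            changed = false;
            for (unsigned int i = 0; i < sizeof(deps) / sizeof(deps[0]); i++) {
                int d = deps[i].feature;

                if (have[d] && !have[deps[i].depends]) {
                    have[d] = false;
                    changed = true;
                }
            }
        }
    }

    int main(void)
    {
        clear_feature(F_AVX);    /* e.g. booted with clearcpuid=avx */
        printf("GFNI=%d VAES=%d AVX512VL=%d\n",
               have[F_GFNI], have[F_VAES], have[F_AVX512VL]);
        return 0;
    }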
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 84adf12a7..4fae511b2 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -41,6 +41,7 @@
#include <asm/trapnr.h>
#include <asm/sev.h>
#include <asm/tdx.h>
+#include <asm/init.h>
/*
* Manage page tables very early on.
@@ -84,8 +85,6 @@ static struct desc_ptr startup_gdt_descr = {
.address = 0,
};
-#define __head __section(".head.text")
-
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
return ptr - (void *)_text + (void *)physaddr;
@@ -203,7 +202,7 @@ unsigned long __head __startup_64(unsigned long physaddr,
load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);
/* Is the address not 2M aligned? */
- if (load_delta & ~PMD_PAGE_MASK)
+ if (load_delta & ~PMD_MASK)
for (;;);
/* Include the SME encryption mask in the fixup value */
@@ -588,7 +587,7 @@ static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
}
/* This runs while still in the direct mapping */
-static void startup_64_load_idt(unsigned long physbase)
+static void __head startup_64_load_idt(unsigned long physbase)
{
struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index b348a672f..b525fe6d6 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/pnp.h>
#include <asm/setup.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 7f94dbbc3..a0d3059be 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -137,7 +137,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
log_lvl, d3, d6, d7);
}
- if (cpu_feature_enabled(X86_FEATURE_OSPKE))
+ if (cr4 & X86_CR4_PKE)
printk("%sPKRU: %08x\n", log_lvl, read_pkru());
}
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index 271e70d57..3fe76bf17 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -86,7 +86,8 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void __noreturn sev_es_terminate(unsigned int set, unsigned int reason)
+static void __head __noreturn
+sev_es_terminate(unsigned int set, unsigned int reason)
{
u64 val = GHCB_MSR_TERM_REQ;
@@ -323,13 +324,7 @@ static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid
*/
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
- void *ptr;
-
- asm ("lea cpuid_table_copy(%%rip), %0"
- : "=r" (ptr)
- : "p" (&cpuid_table_copy));
-
- return ptr;
+ return &RIP_REL_REF(cpuid_table_copy);
}
/*
@@ -388,7 +383,7 @@ static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
return xsave_size;
}
-static bool
+static bool __head
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -525,7 +520,8 @@ static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
* Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
* should be treated as fatal by caller.
*/
-static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+static int __head
+snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
@@ -567,7 +563,7 @@ static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_le
* page yet, so it only supports the MSR based communication with the
* hypervisor and only the CPUID exit-code.
*/
-void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
+void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
unsigned int subfn = lower_bits(regs->cx, 32);
unsigned int fn = lower_bits(regs->ax, 32);
@@ -1013,7 +1009,8 @@ struct cc_setup_data {
* Search for a Confidential Computing blob passed in as a setup_data entry
* via the Linux Boot Protocol.
*/
-static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
+static __head
+struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{
struct cc_setup_data *sd = NULL;
struct setup_data *hdr;
@@ -1040,7 +1037,7 @@ static struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
* mapping needs to be updated in sync with all the changes to virtual memory
* layout and related mapping facilities throughout the boot process.
*/
-static void __init setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
+static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
int i;
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e35fcc8d4..f8a8249ae 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -26,6 +26,7 @@
#include <linux/dmi.h>
#include <uapi/linux/sev-guest.h>
+#include <asm/init.h>
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev.h>
@@ -690,7 +691,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
}
}
-static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
+static void __head early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
{
unsigned long paddr_end;
u64 val;
@@ -728,7 +729,7 @@ e_term:
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
}
-void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
+void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
unsigned long npages)
{
/*
@@ -2085,7 +2086,7 @@ fail:
*
* Scan for the blob in that order.
*/
-static __init struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
+static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
@@ -2111,7 +2112,7 @@ found_cc_info:
return cc_info;
}
-bool __init snp_init(struct boot_params *bp)
+bool __head snp_init(struct boot_params *bp)
{
struct cc_blob_sev_info *cc_info;
@@ -2133,7 +2134,7 @@ bool __init snp_init(struct boot_params *bp)
return true;
}
-void __init __noreturn snp_abort(void)
+void __head __noreturn snp_abort(void)
{
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 62a44455c..f02961cbb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -340,6 +340,7 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_update_pv_runtime(vcpu);
+ vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index b1658c0de..18fd2e845 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -125,6 +125,16 @@ static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
}
+static inline bool guest_cpuid_is_amd_compatible(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.is_amd_compatible;
+}
+
+static inline bool guest_cpuid_is_intel_compatible(struct kvm_vcpu *vcpu)
+{
+ return !guest_cpuid_is_amd_compatible(vcpu);
+}
+
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index bfeafe485..c90fef025 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2548,7 +2548,8 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
- if (r && lvt_type == APIC_LVTPC)
+ if (r && lvt_type == APIC_LVTPC &&
+ guest_cpuid_is_intel_compatible(apic->vcpu))
kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
return r;
}
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d30325e29..13134954e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4649,7 +4649,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
context->cpu_role.base.level, is_efer_nx(context),
guest_can_use_gbpages(vcpu),
is_cr4_pse(context),
- guest_cpuid_is_amd_or_hygon(vcpu));
+ guest_cpuid_is_amd_compatible(vcpu));
}
static void
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c1590855..10aff2c9a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7742,8 +7742,28 @@ static u64 vmx_get_perf_capabilities(void)
if (vmx_pebs_supported()) {
perf_cap |= host_perf_cap & PERF_CAP_PEBS_MASK;
- if ((perf_cap & PERF_CAP_PEBS_FORMAT) < 4)
- perf_cap &= ~PERF_CAP_PEBS_BASELINE;
+
+ /*
+ * Disallow adaptive PEBS as it is functionally broken, can be
+ * used by the guest to read *host* LBRs, and can be used to
+ * bypass userspace event filters. To correctly and safely
+ * support adaptive PEBS, KVM needs to:
+ *
+ * 1. Account for the ADAPTIVE flag when (re)programming fixed
+ * counters.
+ *
+ * 2. Gain support from perf (or take direct control of counter
+ * programming) to support events without adaptive PEBS
+ * enabled for the hardware counter.
+ *
+ * 3. Ensure LBR MSRs cannot hold host data on VM-Entry with
+ * adaptive PEBS enabled and MSR_PEBS_DATA_CFG.LBRS=1.
+ *
+ * 4. Document which PMU events are effectively exposed to the
+ * guest via adaptive PEBS, and make adaptive PEBS mutually
+ * exclusive with KVM_SET_PMU_EVENT_FILTER if necessary.
+ */
+ perf_cap &= ~PERF_CAP_PEBS_BASELINE;
}
return perf_cap;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f72476503..a2ea636a2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3278,7 +3278,7 @@ static bool is_mci_status_msr(u32 msr)
static bool can_set_mci_status(struct kvm_vcpu *vcpu)
{
/* McStatusWrEn enabled? */
- if (guest_cpuid_is_amd_or_hygon(vcpu))
+ if (guest_cpuid_is_amd_compatible(vcpu))
return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
return false;
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 9de3d900b..e25288ee3 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -26,7 +26,7 @@ SYM_FUNC_START(sme_encrypt_execute)
* RCX - virtual address of the encryption workarea, including:
* - stack page (PAGE_SIZE)
* - encryption routine page (PAGE_SIZE)
- * - intermediate copy buffer (PMD_PAGE_SIZE)
+ * - intermediate copy buffer (PMD_SIZE)
* R8 - physical address of the pagetables to use for encryption
*/
@@ -123,7 +123,7 @@ SYM_FUNC_START(__enc_copy)
wbinvd /* Invalidate any cache entries */
/* Copy/encrypt up to 2MB at a time */
- movq $PMD_PAGE_SIZE, %r12
+ movq $PMD_SIZE, %r12
1:
cmpq %r12, %r9
jnb 2f
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 06ccbd36e..f17609884 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -41,6 +41,7 @@
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>
+#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>
@@ -93,12 +94,12 @@ struct sme_populate_pgd_data {
* section is 2MB aligned to allow for simple pagetable setup using only
* PMD entries (see vmlinux.lds.S).
*/
-static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch");
+static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
+static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
unsigned long pgd_start, pgd_end, pgd_size;
pgd_t *pgd_p;
@@ -113,7 +114,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
memset(pgd_p, 0, pgd_size);
}
-static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
+static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
pgd_t *pgd;
p4d_t *p4d;
@@ -150,7 +151,7 @@ static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
return pud;
}
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
@@ -166,7 +167,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
+static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
pud_t *pud;
pmd_t *pmd;
@@ -192,17 +193,17 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd_large(ppd);
- ppd->vaddr += PMD_PAGE_SIZE;
- ppd->paddr += PMD_PAGE_SIZE;
+ ppd->vaddr += PMD_SIZE;
+ ppd->paddr += PMD_SIZE;
}
}
-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
while (ppd->vaddr < ppd->vaddr_end) {
sme_populate_pgd(ppd);
@@ -212,7 +213,7 @@ static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
}
}
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
pmdval_t pmd_flags, pteval_t pte_flags)
{
unsigned long vaddr_end;
@@ -224,11 +225,11 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
vaddr_end = ppd->vaddr_end;
/* If start is not 2MB aligned, create PTE entries */
- ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+ ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
__sme_map_range_pte(ppd);
/* Create PMD entries */
- ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+ ppd->vaddr_end = vaddr_end & PMD_MASK;
__sme_map_range_pmd(ppd);
/* If end is not 2MB aligned, create PTE entries */
@@ -236,22 +237,22 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
__sme_map_range_pte(ppd);
}
-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
-static unsigned long __init sme_pgtable_calc(unsigned long len)
+static unsigned long __head sme_pgtable_calc(unsigned long len)
{
unsigned long entries = 0, tables = 0;
@@ -288,7 +289,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
return entries + tables;
}
-void __init sme_encrypt_kernel(struct boot_params *bp)
+void __head sme_encrypt_kernel(struct boot_params *bp)
{
unsigned long workarea_start, workarea_end, workarea_len;
unsigned long execute_start, execute_end, execute_len;
@@ -323,9 +324,8 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
* memory from being cached.
*/
- /* Physical addresses gives us the identity mapped virtual addresses */
- kernel_start = __pa_symbol(_text);
- kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
+ kernel_start = (unsigned long)RIP_REL_REF(_text);
+ kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
kernel_len = kernel_end - kernel_start;
initrd_start = 0;
@@ -343,24 +343,16 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
#endif
/*
- * We're running identity mapped, so we must obtain the address to the
- * SME encryption workarea using rip-relative addressing.
- */
- asm ("lea sme_workarea(%%rip), %0"
- : "=r" (workarea_start)
- : "p" (sme_workarea));
-
- /*
* Calculate required number of workarea bytes needed:
* executable encryption area size:
* stack page (PAGE_SIZE)
* encryption routine page (PAGE_SIZE)
- * intermediate copy buffer (PMD_PAGE_SIZE)
+ * intermediate copy buffer (PMD_SIZE)
* pagetable structures for the encryption of the kernel
* pagetable structures for workarea (in case not currently mapped)
*/
- execute_start = workarea_start;
- execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
+ execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
+ execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
execute_len = execute_end - execute_start;
/*
@@ -383,7 +375,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
* before it is mapped.
*/
workarea_len = execute_len + pgtable_area_len;
- workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
+ workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);
/*
* Set the address to the start of where newly created pagetable
@@ -502,7 +494,7 @@ void __init sme_encrypt_kernel(struct boot_params *bp)
native_write_cr3(__native_read_cr3());
}
-void __init sme_enable(struct boot_params *bp)
+void __head sme_enable(struct boot_params *bp)
{
const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
unsigned int eax, ebx, ecx, edx;
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 5f0ce77a2..68d4f328f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -747,11 +747,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
switch (level) {
case PG_LEVEL_1G:
phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
- offset = virt_addr & ~PUD_PAGE_MASK;
+ offset = virt_addr & ~PUD_MASK;
break;
case PG_LEVEL_2M:
phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
- offset = virt_addr & ~PMD_PAGE_MASK;
+ offset = virt_addr & ~PMD_MASK;
break;
default:
phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
@@ -1041,7 +1041,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
case PG_LEVEL_1G:
ref_prot = pud_pgprot(*(pud_t *)kpte);
ref_pfn = pud_pfn(*(pud_t *)kpte);
- pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
+ pfninc = PMD_SIZE >> PAGE_SHIFT;
lpaddr = address & PUD_MASK;
lpinc = PMD_SIZE;
/*
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ffe3b3a08..78414c6d1 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -592,7 +592,7 @@ static void pti_set_kernel_image_nonglobal(void)
* of the image.
*/
unsigned long start = PFN_ALIGN(_text);
- unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
+ unsigned long end = ALIGN((unsigned long)_end, PMD_SIZE);
/*
* This clears _PAGE_GLOBAL from the entire kernel image.
diff --git a/block/blk-stat.c b/block/blk-stat.c
index da9407b7d..41be89eca 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -28,7 +28,7 @@ void blk_rq_stat_init(struct blk_rq_stat *stat)
/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
- if (!src->nr_samples)
+ if (dst->nr_samples + src->nr_samples <= dst->nr_samples)
return;
dst->min = min(dst->min, src->min);
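Note: the new condition replaces the plain !src->nr_samples check. For unsigned counters, dst + src <= dst holds exactly when src is zero or when the addition would wrap, so the merge is skipped in both the empty-source and the overflow case. A tiny standalone demonstration:

    /*
     * Why "dst + src <= dst" works for unsigned sample counts:
     * it is true for src == 0 and for wraparound, the two cases
     * where merging must be skipped. Not block-layer code.
     */
    #include <stdio.h>
    #include <stdint.h>

    static int must_skip(uint32_t dst, uint32_t src)
    {
        return dst + src <= dst;
    }

    int main(void)
    {
        printf("src=0:    skip=%d\n", must_skip(100, 0));          /* 1 */
        printf("normal:   skip=%d\n", must_skip(100, 50));         /* 0 */
        printf("overflow: skip=%d\n", must_skip(UINT32_MAX, 10));  /* 1 */
        return 0;
    }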
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c73d1359b..5dc9ccdd5 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -290,7 +290,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
- !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 73db0cb44..45d906f17 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -573,7 +573,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
- while (tmpx < vc->vc_cols - 1) {
+ while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 093675b1a..49339f37d 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -163,6 +163,13 @@ show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
+/* Check for a valid access_width; otherwise, fall back to using bit_width */
+#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
+
+/* Shift and apply the mask for CPC reads/writes */
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) & \
+ GENMASK(((reg)->bit_width) - 1, 0))
+
static ssize_t show_feedback_ctrs(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -776,6 +783,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
if (gas_t->address) {
void __iomem *addr;
+ size_t access_width;
if (!osc_cpc_flexible_adr_space_confirmed) {
pr_debug("Flexible address space capability not supported\n");
@@ -783,7 +791,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
goto out_free;
}
- addr = ioremap(gas_t->address, gas_t->bit_width/8);
+ access_width = GET_BIT_WIDTH(gas_t) / 8;
+ addr = ioremap(gas_t->address, access_width);
if (!addr)
goto out_free;
cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
@@ -979,6 +988,7 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
void __iomem *vaddr = NULL;
+ int size;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
@@ -988,14 +998,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
}
*val = 0;
+ size = GET_BIT_WIDTH(reg);
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- &val_u32, width);
+ &val_u32, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
@@ -1004,17 +1014,24 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = val_u32;
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_read_ffh(cpu, reg, val);
else
return acpi_os_read_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
+ val, size);
- switch (reg->bit_width) {
+ switch (size) {
case 8:
*val = readb_relaxed(vaddr);
break;
@@ -1028,27 +1045,37 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
*val = readq_relaxed(vaddr);
break;
default:
- pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
return -EFAULT;
}
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ *val = MASK_VAL(reg, *val);
+
return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
int ret_val = 0;
+ int size;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
+ size = GET_BIT_WIDTH(reg);
+
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
- u32 width = 8 << (reg->access_width - 1);
acpi_status status;
status = acpi_os_write_port((acpi_io_address)reg->address,
- (u32)val, width);
+ (u32)val, size);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to write SystemIO port %llx\n",
reg->address);
@@ -1056,17 +1083,27 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
}
return 0;
- } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+ /*
+ * For registers in PCC space, the register size is determined
+ * by the bit width field; the access size is used to indicate
+ * the PCC subspace id.
+ */
+ size = reg->bit_width;
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+ }
else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
vaddr = reg_res->sys_mem_vaddr;
else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
return cpc_write_ffh(cpu, reg, val);
else
return acpi_os_write_memory((acpi_physical_address)reg->address,
- val, reg->bit_width);
+ val, size);
+
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ val = MASK_VAL(reg, val);
- switch (reg->bit_width) {
+ switch (size) {
case 8:
writeb_relaxed(val, vaddr);
break;
@@ -1080,8 +1117,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
writeq_relaxed(val, vaddr);
break;
default:
- pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
- reg->bit_width, pcc_ss_id);
+ if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+ size, reg->address);
+ } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+ pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+ size, pcc_ss_id);
+ }
ret_val = -EFAULT;
break;
}
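
The cppc_acpi.c hunks above make SystemIO and SystemMemory CPC accesses honor the ACPI access_width encoding and apply bit_offset/bit_width masking on SystemMemory reads and writes. The following standalone sketch (plain userspace C with a simplified stand-in struct and a local GENMASK, not the kernel definitions) shows just that arithmetic:

/* Standalone sketch of the CPPC register sizing/masking arithmetic above.
 * The struct and GENMASK here are simplified stand-ins, not the kernel types. */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

struct cpc_reg_sketch {
	uint8_t bit_width;     /* register field width in bits */
	uint8_t bit_offset;    /* offset of the field within the access unit */
	uint8_t access_width;  /* 0 = unspecified, 1..4 = 8/16/32/64-bit access */
};

/* access_width, when valid, encodes the access size as 8 << (aw - 1);
 * otherwise fall back to bit_width, as the new GET_BIT_WIDTH() macro does. */
static unsigned int get_bit_width(const struct cpc_reg_sketch *reg)
{
	return reg->access_width ? (8u << (reg->access_width - 1)) : reg->bit_width;
}

/* Shift out bit_offset and keep bit_width bits, as MASK_VAL() does on reads. */
static uint64_t mask_val(const struct cpc_reg_sketch *reg, uint64_t val)
{
	return (val >> reg->bit_offset) & GENMASK(reg->bit_width - 1, 0);
}

int main(void)
{
	struct cpc_reg_sketch reg = { .bit_width = 8, .bit_offset = 8, .access_width = 3 };
	uint64_t raw = 0xAABBCCDDull;

	printf("access size: %u bits\n", get_bit_width(&reg));           /* 32 */
	printf("field value: 0x%llx\n",
	       (unsigned long long)mask_val(&reg, raw));                 /* 0xcc */
	return 0;
}

With access_width = 3 the access is 32 bits wide, and the 8-bit field at bit_offset 8 is extracted as 0xcc; the same masking is applied before a SystemMemory write in the patched cpc_write().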
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 539c12fbd..6026e20f0 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -385,18 +385,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
},
},
- /*
- * ASUS B1400CEAE hangs on resume from suspend (see
- * https://bugzilla.kernel.org/show_bug.cgi?id=215742).
- */
- {
- .callback = init_default_s3,
- .ident = "ASUS B1400CEAE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK B1400CEAE"),
- },
- },
{},
};
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 55cd17a13..8c2b7c074 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1707,8 +1707,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
return 0;
+
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
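
The binder_get_object() hunk rejects object offsets that are not 4-byte aligned. For a power-of-two alignment, the kernel's IS_ALIGNED() reduces to a mask test, as this minimal userspace sketch (local macro stand-in for the kernel one) illustrates:

/* Userspace sketch of the alignment test added above; IS_ALIGNED() here is a
 * local stand-in valid for power-of-two alignments. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((uintptr_t)(a) - 1)) == 0)

int main(void)
{
	uint32_t good = 8, bad = 6;

	assert(IS_ALIGNED(good, sizeof(uint32_t)));   /* 8 % 4 == 0 -> accepted */
	assert(!IS_ALIGNED(bad, sizeof(uint32_t)));   /* 6 % 4 != 0 -> rejected */
	printf("unaligned binder object offsets are now rejected\n");
	return 0;
}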
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index a09548630..65fde5717 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -4667,7 +4667,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
* bail out.
*/
if (ap->pflags & ATA_PFLAG_SUSPENDED)
- goto unlock;
+ goto unlock_ap;
if (!sdev)
continue;
@@ -4680,7 +4680,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
if (do_resume) {
ret = scsi_resume_device(sdev);
if (ret == -EWOULDBLOCK)
- goto unlock;
+ goto unlock_scan;
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
@@ -4688,12 +4688,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_lock_irqsave(ap->lock, flags);
if (ret)
- goto unlock;
+ goto unlock_ap;
}
}
-unlock:
+unlock_ap:
spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
mutex_unlock(&ap->scsi_scan_mutex);
/* Reschedule with a delay if scsi_rescan_device() returned an error */
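
The libata-scsi hunks split the single unlock label so each early exit releases exactly the locks it holds: bailing out while the port is suspended drops both ap->lock and scsi_scan_mutex, while the -EWOULDBLOCK path, reached after ap->lock has already been released, drops only the mutex. A rough pthread sketch of that staged-unlock shape (plain mutexes standing in for both kernel locks, error values simplified):

/* Sketch of the staged-unlock pattern the relabelled gotos implement: each
 * error path jumps to the label matching the locks it actually holds. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;

static int rescan(int suspended, int resume_would_block)
{
	int ret = 0;

	pthread_mutex_lock(&scan_mutex);
	pthread_mutex_lock(&ap_lock);

	if (suspended)
		goto unlock_ap;            /* both locks held: drop both */

	pthread_mutex_unlock(&ap_lock);    /* dropped around the resume call ... */
	if (resume_would_block) {
		ret = -1;
		goto unlock_scan;          /* ... so only the mutex is left */
	}
	pthread_mutex_lock(&ap_lock);      /* re-take before the common exit */

unlock_ap:
	pthread_mutex_unlock(&ap_lock);
unlock_scan:
	pthread_mutex_unlock(&scan_mutex);
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", rescan(0, 0), rescan(1, 0), rescan(0, 1));
	return 0;
}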
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index bbad1207c..7a9d2da3c 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -411,7 +411,7 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
return PTR_ERR(skb);
}
- if (skb->len != sizeof(*ver)) {
+ if (!skb || skb->len != sizeof(*ver)) {
bt_dev_err(hdev, "Intel version event size mismatch");
kfree_skb(skb);
return -EILSEQ;
diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
index 809762d64..b77e33777 100644
--- a/drivers/bluetooth/btmtk.c
+++ b/drivers/bluetooth/btmtk.c
@@ -288,4 +288,5 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_MT7622);
MODULE_FIRMWARE(FIRMWARE_MT7663);
MODULE_FIRMWARE(FIRMWARE_MT7668);
+MODULE_FIRMWARE(FIRMWARE_MT7922);
MODULE_FIRMWARE(FIRMWARE_MT7961);
diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h
index 2a88ea8e4..ee0b1d27a 100644
--- a/drivers/bluetooth/btmtk.h
+++ b/drivers/bluetooth/btmtk.h
@@ -4,6 +4,7 @@
#define FIRMWARE_MT7622 "mediatek/mt7622pr2h.bin"
#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+#define FIRMWARE_MT7922 "mediatek/BT_RAM_CODE_MT7922_1_1_hdr.bin"
#define FIRMWARE_MT7961 "mediatek/BT_RAM_CODE_MT7961_1_2_hdr.bin"
#define HCI_EV_WMT 0xe4
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 954f7f3b5..6a772b955 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -535,6 +535,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 33956ddec..179278b80 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1645,6 +1645,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
+ if (!hu->serdev)
+ return true;
+
/* BT SoC attached through the serial bus is handled by the serdev driver.
* So we need to use the device handle of the serdev driver to get the
* status of device may wakeup.
@@ -2257,16 +2260,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855)) {
dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
- power_ctrl_enabled = false;
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
GPIOD_IN);
if (IS_ERR(qcadev->sw_ctrl) &&
(data->soc_type == QCA_WCN6750 ||
data->soc_type == QCA_WCN6855 ||
- data->soc_type == QCA_WCN7850))
- dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ data->soc_type == QCA_WCN7850)) {
+ dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+ return PTR_ERR(qcadev->sw_ctrl);
+ }
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
@@ -2285,10 +2293,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(qcadev->bt_en)) {
- dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
- power_ctrl_enabled = false;
+ dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(qcadev->bt_en);
}
+ if (!qcadev->bt_en)
+ power_ctrl_enabled = false;
+
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
if (IS_ERR(qcadev->susclk)) {
dev_warn(&serdev->dev, "failed to acquire clk\n");
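
The hci_qca probe hunks distinguish the two outcomes of devm_gpiod_get_optional(): an IS_ERR() value now aborts the probe instead of merely warning, while a NULL return (the optional GPIO simply is not described) still just clears power_ctrl_enabled. The sketch below re-creates that triage in plain C; ERR_PTR()/IS_ERR()/PTR_ERR() are local imitations of the kernel convention and gpio_get_optional() is a made-up stand-in provider:

/* Three-way result handling for an optional resource: real error fails the
 * probe, NULL "not wired up" only disables the feature, a pointer is used. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)   ((void *)(long)(err))
#define IS_ERR(ptr)    ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)   ((long)(ptr))

static int dummy_gpio;

static void *gpio_get_optional(int scenario)
{
	if (scenario == 0)
		return &dummy_gpio;        /* GPIO present */
	if (scenario == 1)
		return NULL;               /* optional GPIO absent in firmware */
	return ERR_PTR(-EIO);              /* real failure */
}

static int probe(int scenario)
{
	int power_ctrl_enabled = 1;
	void *bt_en = gpio_get_optional(scenario);

	if (IS_ERR(bt_en))
		return PTR_ERR(bt_en);     /* was only a warning before the patch */
	if (!bt_en)
		power_ctrl_enabled = 0;    /* absent: keep probing, no power ctrl */

	printf("scenario %d: power_ctrl_enabled=%d\n", scenario, power_ctrl_enabled);
	return 0;
}

int main(void)
{
	for (int s = 0; s < 3; s++)
		printf("probe -> %d\n", probe(s));
	return 0;
}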
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 04fbccff6..60c1df048 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -62,6 +62,7 @@ static const char * const mhi_pm_state_str[] = {
[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
+ [MHI_PM_STATE_SYS_ERR_FAIL] = "SYS ERROR Failure",
[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 01fd10a39..6abf09da4 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -88,6 +88,7 @@ enum mhi_pm_state {
MHI_PM_STATE_FW_DL_ERR,
MHI_PM_STATE_SYS_ERR_DETECT,
MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SYS_ERR_FAIL,
MHI_PM_STATE_SHUTDOWN_PROCESS,
MHI_PM_STATE_LD_ERR_FATAL_DETECT,
MHI_PM_STATE_MAX
@@ -104,14 +105,16 @@ enum mhi_pm_state {
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+#define MHI_PM_SYS_ERR_FAIL BIT(10)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
+ MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 8a4362d75..27f8a40f2 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -36,7 +36,10 @@
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
+ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
+ * SYS_ERR_FAIL -> SYS_ERR_DETECT
+ * SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -93,7 +96,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
},
{
MHI_PM_SYS_ERR_PROCESS,
- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_FAIL,
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
@@ -624,7 +632,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
- goto exit_sys_error_transition;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_FAIL);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /* Shutdown may have occurred, otherwise cleanup now */
+ if (cur_state != MHI_PM_SYS_ERR_FAIL)
+ goto exit_sys_error_transition;
}
/*
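
The MHI hunks introduce a SYS_ERR_FAIL state, shift the bit numbers of the states after it, and add a row to dev_state_transitions so mhi_tryset_pm_state() permits SYS_ERR_PROCESS -> SYS_ERR_FAIL -> SYS_ERR_DETECT. A trimmed standalone sketch of that allowed-next-states bitmask table (names abbreviated, table reduced to the states the patch touches):

/* Each state carries a bitmask of the states it may move to; inserting the
 * new SYS_ERR_FAIL bit renumbers every bit after it, as in the patch. */
#include <stdbool.h>
#include <stdio.h>

enum pm_state_bit {
	PM_SYS_ERR_DETECT   = 1 << 8,
	PM_SYS_ERR_PROCESS  = 1 << 9,
	PM_SYS_ERR_FAIL     = 1 << 10,  /* new state */
	PM_SHUTDOWN_PROCESS = 1 << 11,  /* shifted up by the insertion */
	PM_LD_ERR_FATAL     = 1 << 12,  /* shifted up by the insertion */
};

struct pm_transition {
	unsigned int from;
	unsigned int allowed_to;
};

static const struct pm_transition transitions[] = {
	{ PM_SYS_ERR_PROCESS, PM_SYS_ERR_FAIL | PM_SHUTDOWN_PROCESS | PM_LD_ERR_FATAL },
	{ PM_SYS_ERR_FAIL,    PM_SYS_ERR_DETECT | PM_SHUTDOWN_PROCESS | PM_LD_ERR_FATAL },
};

static bool tryset(unsigned int cur, unsigned int next)
{
	for (unsigned int i = 0; i < sizeof(transitions) / sizeof(transitions[0]); i++)
		if (transitions[i].from == cur)
			return transitions[i].allowed_to & next;
	return false;
}

int main(void)
{
	printf("PROCESS -> FAIL:    %d\n", tryset(PM_SYS_ERR_PROCESS, PM_SYS_ERR_FAIL));
	printf("FAIL    -> DETECT:  %d\n", tryset(PM_SYS_ERR_FAIL, PM_SYS_ERR_DETECT));
	printf("FAIL    -> PROCESS: %d\n", tryset(PM_SYS_ERR_FAIL, PM_SYS_ERR_PROCESS));
	return 0;
}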
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d1c8e1c9..fd57eb372 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -683,7 +683,7 @@ static void extract_entropy(void *buf, size_t len)
static void __cold _credit_init_bits(size_t bits)
{
- static struct execute_work set_ready;
+ static DECLARE_WORK(set_ready, crng_set_ready);
unsigned int new, orig, add;
unsigned long flags;
@@ -699,8 +699,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized)
- execute_in_process_context(crng_set_ready, &set_ready);
+ if (static_key_initialized && system_unbound_wq)
+ queue_work(system_unbound_wq, &set_ready);
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
pr_notice("crng init done\n");
@@ -870,8 +870,8 @@ void __init random_init(void)
/*
* If we were initialized by the cpu or bootloader before jump labels
- * are initialized, then we should enable the static branch here, where
- * it's guaranteed that jump labels have been initialized.
+ * or workqueues are initialized, then we should enable the static
+ * branch here, where it's guaranteed that these have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9004e0718..fe1d45eac 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
+ struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+ int ret;
+ struct clk_core *core, *failed;
+
+ /*
+ * Grab the list lock to prevent any new clks from being registered
+ * or unregistered until clk_pm_runtime_put_all().
+ */
+ mutex_lock(&clk_rpm_list_lock);
+
+ /*
+ * Runtime PM "get" all the devices that are needed for the clks
+ * currently registered. Do this without holding the prepare_lock, to
+ * avoid the deadlock.
+ */
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ ret = clk_pm_runtime_get(core);
+ if (ret) {
+ failed = core;
+ pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+ dev_name(failed->dev), failed->name);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+ if (core == failed)
+ break;
+
+ clk_pm_runtime_put(core);
+ }
+ mutex_unlock(&clk_rpm_list_lock);
+
+ return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+ struct clk_core *core;
+
+ hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+ clk_pm_runtime_put(core);
+ mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+ struct device *dev = core->dev;
+
+ if (dev && pm_runtime_enabled(dev)) {
+ core->rpm_enabled = true;
+
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_add_head(&core->rpm_node, &clk_rpm_list);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+}
+
/*** locking ***/
static void clk_prepare_lock(void)
{
@@ -1310,9 +1398,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
- if (clk_pm_runtime_get(core))
- return;
-
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@@ -1321,8 +1406,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
-
- clk_pm_runtime_put(core);
}
static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1338,9 +1421,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
- if (clk_pm_runtime_get(core))
- goto unprepare_out;
-
flags = clk_enable_lock();
if (core->enable_count)
@@ -1365,8 +1445,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
- clk_pm_runtime_put(core);
-unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@@ -1382,12 +1460,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int __init clk_disable_unused(void)
{
struct clk_core *core;
+ int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
+ pr_info("clk: Disabling unused clocks\n");
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
+ /*
+ * Grab the prepare lock to keep the clk topology stable while iterating
+ * over clks.
+ */
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1404,6 +1492,8 @@ static int __init clk_disable_unused(void)
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
+
return 0;
}
late_initcall_sync(clk_disable_unused);
@@ -3115,28 +3205,41 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
int level)
{
int phase;
+ struct clk *clk_user;
+ int multi_node = 0;
- seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
+ seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
level * 3 + 1, "",
- 30 - level * 3, c->name,
+ 35 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate_recalc(c),
clk_core_get_accuracy_recalc(c));
phase = clk_core_get_phase(c);
if (phase >= 0)
- seq_printf(s, "%5d", phase);
+ seq_printf(s, "%-5d", phase);
else
seq_puts(s, "-----");
- seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+ seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
if (c->ops->is_enabled)
- seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+ seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
else if (!c->ops->enable)
- seq_printf(s, " %9c\n", 'Y');
+ seq_printf(s, " %5c ", 'Y');
else
- seq_printf(s, " %9c\n", '?');
+ seq_printf(s, " %5c ", '?');
+
+ hlist_for_each_entry(clk_user, &c->clks, clks_node) {
+ seq_printf(s, "%*s%-*s %-25s\n",
+ level * 3 + 2 + 105 * multi_node, "",
+ 30,
+ clk_user->dev_id ? clk_user->dev_id : "deviceless",
+ clk_user->con_id ? clk_user->con_id : "no_connection_id");
+
+ multi_node = 1;
+ }
+
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -3144,9 +3247,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
- clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
- clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3155,11 +3256,16 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
static int clk_summary_show(struct seq_file *s, void *data)
{
struct clk_core *c;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
- seq_puts(s, " enable prepare protect duty hardware\n");
- seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
- seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty hardware connection\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle enable consumer id\n");
+ seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
clk_prepare_lock();
@@ -3168,6 +3274,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
clk_summary_show_subtree(s, c, 0);
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
return 0;
}
@@ -3214,9 +3321,15 @@ static int clk_dump_show(struct seq_file *s, void *data)
{
struct clk_core *c;
bool first_node = true;
- struct hlist_head **lists = (struct hlist_head **)s->private;
+ struct hlist_head **lists = s->private;
+ int ret;
+
+ ret = clk_pm_runtime_get_all();
+ if (ret)
+ return ret;
seq_putc(s, '{');
+
clk_prepare_lock();
for (; *lists; lists++) {
@@ -3229,6 +3342,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
clk_prepare_unlock();
+ clk_pm_runtime_put_all();
seq_puts(s, "}\n");
return 0;
@@ -3836,8 +3950,6 @@ static int __clk_core_init(struct clk_core *core)
}
clk_core_reparent_orphans_nolock();
-
- kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@@ -4066,6 +4178,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+ struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+ if (core->rpm_enabled) {
+ mutex_lock(&clk_rpm_list_lock);
+ hlist_del(&core->rpm_node);
+ mutex_unlock(&clk_rpm_list_lock);
+ }
+
+ clk_core_free_parent_map(core);
+ kfree_const(core->name);
+ kfree(core);
+}
+
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@@ -4086,6 +4214,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
+ kref_init(&core->ref);
+
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@@ -4098,9 +4228,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
- if (dev && pm_runtime_enabled(dev))
- core->rpm_enabled = true;
core->dev = dev;
+ clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@@ -4140,12 +4269,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
- clk_core_free_parent_map(core);
fail_parents:
fail_ops:
- kfree_const(core->name);
fail_name:
- kfree(core);
+ kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@@ -4225,18 +4352,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
- struct clk_core *core = container_of(ref, struct clk_core, ref);
-
- lockdep_assert_held(&prepare_lock);
-
- clk_core_free_parent_map(core);
- kfree_const(core->name);
- kfree(core);
-}
-
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
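
The clk.c changes move runtime PM handling out from under the prepare_lock: providers register on the new clk_rpm_list, and clk_disable_unused() plus the debugfs dumps first call clk_pm_runtime_get_all(), which resumes every listed provider and, on partial failure, drops only the references already taken. The sketch below shows that acquire-all-with-rollback pattern in isolation (plain counters stand in for runtime PM references; clk_rpm_list_lock and the prepare_lock are omitted):

/* Acquire every resource up front; on a partial failure, release only the
 * ones taken before the failing entry, mirroring the err: loop above. */
#include <stdio.h>

#define NPROVIDERS 5

static int refcount[NPROVIDERS];

static int acquire(int i)
{
	if (i == 3)
		return -1;     /* simulated runtime-resume failure */
	refcount[i]++;
	return 0;
}

static void release(int i)
{
	refcount[i]--;
}

static int get_all(void)
{
	int i, failed = -1;

	for (i = 0; i < NPROVIDERS; i++) {
		if (acquire(i)) {
			failed = i;
			break;
		}
	}
	if (failed < 0)
		return 0;

	/* Drop only the references taken before the failing entry. */
	for (i = 0; i < failed; i++)
		release(i);
	return -1;
}

int main(void)
{
	int ret = get_all();

	for (int i = 0; i < NPROVIDERS; i++)
		printf("provider %d refcount %d\n", i, refcount[i]);
	printf("get_all -> %d\n", ret);
	return 0;
}

In the real code the rollback walks clk_rpm_list again and stops at the core that failed; the index comparison here plays the same role.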
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 0c867136e..67d9e741c 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -152,12 +152,12 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(const char *name,
+static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
const char *parent_name,
struct regmap *regmap, int set_ofs,
int clr_ofs, int sta_ofs, u8 bit,
const struct clk_ops *ops,
- unsigned long flags, struct device *dev)
+ unsigned long flags)
{
struct mtk_clk_gate *cg;
int ret;
@@ -202,10 +202,9 @@ static void mtk_clk_unregister_gate(struct clk_hw *hw)
kfree(cg);
}
-int mtk_clk_register_gates_with_dev(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data,
- struct device *dev)
+int mtk_clk_register_gates(struct device *dev, struct device_node *node,
+ const struct mtk_gate *clks, int num,
+ struct clk_hw_onecell_data *clk_data)
{
int i;
struct clk_hw *hw;
@@ -229,13 +228,13 @@ int mtk_clk_register_gates_with_dev(struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(gate->name, gate->parent_name,
+ hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
regmap,
gate->regs->set_ofs,
gate->regs->clr_ofs,
gate->regs->sta_ofs,
gate->shift, gate->ops,
- gate->flags, dev);
+ gate->flags);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
@@ -261,14 +260,6 @@ err:
return PTR_ERR(hw);
}
-EXPORT_SYMBOL_GPL(mtk_clk_register_gates_with_dev);
-
-int mtk_clk_register_gates(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data)
-{
- return mtk_clk_register_gates_with_dev(node, clks, num, clk_data, NULL);
-}
EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index d9897ef53..1a46b4c56 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -50,15 +50,10 @@ struct mtk_gate {
#define GATE_MTK(_id, _name, _parent, _regs, _shift, _ops) \
GATE_MTK_FLAGS(_id, _name, _parent, _regs, _shift, _ops, 0)
-int mtk_clk_register_gates(struct device_node *node,
+int mtk_clk_register_gates(struct device *dev, struct device_node *node,
const struct mtk_gate *clks, int num,
struct clk_hw_onecell_data *clk_data);
-int mtk_clk_register_gates_with_dev(struct device_node *node,
- const struct mtk_gate *clks, int num,
- struct clk_hw_onecell_data *clk_data,
- struct device *dev);
-
void mtk_clk_unregister_gates(const struct mtk_gate *clks, int num,
struct clk_hw_onecell_data *clk_data);
diff --git a/drivers/clk/mediatek/clk-mt2701-aud.c b/drivers/clk/mediatek/clk-mt2701-aud.c
index 4287bd3f5..03ab212aa 100644
--- a/drivers/clk/mediatek/clk-mt2701-aud.c
+++ b/drivers/clk/mediatek/clk-mt2701-aud.c
@@ -127,8 +127,8 @@ static int clk_mt2701_aud_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_AUD_NR);
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, audio_clks,
+ ARRAY_SIZE(audio_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
diff --git a/drivers/clk/mediatek/clk-mt2701-eth.c b/drivers/clk/mediatek/clk-mt2701-eth.c
index 601358748..924725d67 100644
--- a/drivers/clk/mediatek/clk-mt2701-eth.c
+++ b/drivers/clk/mediatek/clk-mt2701-eth.c
@@ -51,8 +51,8 @@ static int clk_mt2701_eth_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_ETHSYS_NR);
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ ARRAY_SIZE(eth_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt2701-g3d.c b/drivers/clk/mediatek/clk-mt2701-g3d.c
index 8d1fc8e33..501fb99bb 100644
--- a/drivers/clk/mediatek/clk-mt2701-g3d.c
+++ b/drivers/clk/mediatek/clk-mt2701-g3d.c
@@ -45,7 +45,7 @@ static int clk_mt2701_g3dsys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_G3DSYS_NR);
- mtk_clk_register_gates(node, g3d_clks, ARRAY_SIZE(g3d_clks),
+ mtk_clk_register_gates(&pdev->dev, node, g3d_clks, ARRAY_SIZE(g3d_clks),
clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt2701-hif.c b/drivers/clk/mediatek/clk-mt2701-hif.c
index edeeb033a..1ddefc21d 100644
--- a/drivers/clk/mediatek/clk-mt2701-hif.c
+++ b/drivers/clk/mediatek/clk-mt2701-hif.c
@@ -48,8 +48,8 @@ static int clk_mt2701_hif_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_HIFSYS_NR);
- mtk_clk_register_gates(node, hif_clks, ARRAY_SIZE(hif_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, hif_clks,
+ ARRAY_SIZE(hif_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
diff --git a/drivers/clk/mediatek/clk-mt2701-mm.c b/drivers/clk/mediatek/clk-mt2701-mm.c
index eb069f3bc..f4885dffb 100644
--- a/drivers/clk/mediatek/clk-mt2701-mm.c
+++ b/drivers/clk/mediatek/clk-mt2701-mm.c
@@ -76,8 +76,8 @@ static int clk_mt2701_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
index 00d2e81bd..e80fe9c94 100644
--- a/drivers/clk/mediatek/clk-mt2701.c
+++ b/drivers/clk/mediatek/clk-mt2701.c
@@ -679,14 +679,15 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt2701_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt2701_clk_lock, clk_data);
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt2701_clk_lock, clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -789,8 +790,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
}
}
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- infra_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), infra_clk_data);
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
@@ -902,11 +903,12 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
- mtk_clk_register_composites(peri_muxs, ARRAY_SIZE(peri_muxs), base,
- &mt2701_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, peri_muxs,
+ ARRAY_SIZE(peri_muxs), base,
+ &mt2701_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt2712-mm.c b/drivers/clk/mediatek/clk-mt2712-mm.c
index ad6daa8f2..e5264f1ce 100644
--- a/drivers/clk/mediatek/clk-mt2712-mm.c
+++ b/drivers/clk/mediatek/clk-mt2712-mm.c
@@ -117,8 +117,8 @@ static int clk_mt2712_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt2712.c b/drivers/clk/mediatek/clk-mt2712.c
index d6c2cc183..a0f0c9ed4 100644
--- a/drivers/clk/mediatek/clk-mt2712.c
+++ b/drivers/clk/mediatek/clk-mt2712.c
@@ -1320,12 +1320,13 @@ static int clk_mt2712_top_probe(struct platform_device *pdev)
mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
top_clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt2712_clk_lock, top_clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt2712_clk_lock, top_clk_data);
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
&mt2712_clk_lock, top_clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- top_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
@@ -1344,8 +1345,8 @@ static int clk_mt2712_infra_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -1366,8 +1367,8 @@ static int clk_mt2712_peri_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -1395,8 +1396,11 @@ static int clk_mt2712_mcu_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
- mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
- &mt2712_clk_lock, clk_data);
+ r = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
+ ARRAY_SIZE(mcu_muxes), base,
+ &mt2712_clk_lock, clk_data);
+ if (r)
+ dev_err(&pdev->dev, "Could not register composites: %d\n", r);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt6765.c b/drivers/clk/mediatek/clk-mt6765.c
index 2c6a52ff5..c4941523f 100644
--- a/drivers/clk/mediatek/clk-mt6765.c
+++ b/drivers/clk/mediatek/clk-mt6765.c
@@ -743,7 +743,7 @@ static int clk_mt6765_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -782,10 +782,11 @@ static int clk_mt6765_top_probe(struct platform_device *pdev)
clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
&mt6765_clk_lock, clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -820,8 +821,8 @@ static int clk_mt6765_ifr_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ ARRAY_SIZE(ifr_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index eda8cbee3..2cccf62d3 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -93,8 +93,8 @@ static int clk_mt6779_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 39dadc954..7fe9d12b2 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -1223,7 +1223,7 @@ static int clk_mt6779_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -1248,14 +1248,17 @@ static int clk_mt6779_top_probe(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt6779_clk_lock, clk_data);
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
+ &mt6779_clk_lock, clk_data);
- mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt6779_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_muxes,
+ ARRAY_SIZE(top_aud_muxes), base,
+ &mt6779_clk_lock, clk_data);
- mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt6779_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_divs,
+ ARRAY_SIZE(top_aud_divs), base,
+ &mt6779_clk_lock, clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -1267,8 +1270,8 @@ static int clk_mt6779_infra_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt6795-infracfg.c b/drivers/clk/mediatek/clk-mt6795-infracfg.c
index df7eed6e0..8025d171d 100644
--- a/drivers/clk/mediatek/clk-mt6795-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-infracfg.c
@@ -101,7 +101,8 @@ static int clk_mt6795_infracfg_probe(struct platform_device *pdev)
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_gates(node, infra_gates, ARRAY_SIZE(infra_gates), clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, infra_gates,
+ ARRAY_SIZE(infra_gates), clk_data);
if (ret)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt6795-mm.c b/drivers/clk/mediatek/clk-mt6795-mm.c
index fd73f202f..eebb6143a 100644
--- a/drivers/clk/mediatek/clk-mt6795-mm.c
+++ b/drivers/clk/mediatek/clk-mt6795-mm.c
@@ -87,7 +87,8 @@ static int clk_mt6795_mm_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- ret = mtk_clk_register_gates(node, mm_gates, ARRAY_SIZE(mm_gates), clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, mm_gates,
+ ARRAY_SIZE(mm_gates), clk_data);
if (ret)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt6795-pericfg.c b/drivers/clk/mediatek/clk-mt6795-pericfg.c
index cb28d35da..08aaa9b09 100644
--- a/drivers/clk/mediatek/clk-mt6795-pericfg.c
+++ b/drivers/clk/mediatek/clk-mt6795-pericfg.c
@@ -109,11 +109,13 @@ static int clk_mt6795_pericfg_probe(struct platform_device *pdev)
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, peri_gates,
+ ARRAY_SIZE(peri_gates), clk_data);
if (ret)
goto free_clk_data;
- ret = mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
+ ret = mtk_clk_register_composites(&pdev->dev, peri_clks,
+ ARRAY_SIZE(peri_clks), base,
&mt6795_peri_clk_lock, clk_data);
if (ret)
goto unregister_gates;
diff --git a/drivers/clk/mediatek/clk-mt6795-topckgen.c b/drivers/clk/mediatek/clk-mt6795-topckgen.c
index 2948dd1ae..2ab8bf5d6 100644
--- a/drivers/clk/mediatek/clk-mt6795-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt6795-topckgen.c
@@ -552,12 +552,14 @@ static int clk_mt6795_topckgen_probe(struct platform_device *pdev)
if (ret)
goto unregister_fixed_clks;
- ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ ret = mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
&mt6795_top_clk_lock, clk_data);
if (ret)
goto unregister_factors;
- ret = mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs), base,
+ ret = mtk_clk_register_composites(&pdev->dev, top_aud_divs,
+ ARRAY_SIZE(top_aud_divs), base,
&mt6795_top_clk_lock, clk_data);
if (ret)
goto unregister_muxes;
diff --git a/drivers/clk/mediatek/clk-mt6797-mm.c b/drivers/clk/mediatek/clk-mt6797-mm.c
index 99a63f466..d5e9fe445 100644
--- a/drivers/clk/mediatek/clk-mt6797-mm.c
+++ b/drivers/clk/mediatek/clk-mt6797-mm.c
@@ -89,8 +89,8 @@ static int clk_mt6797_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR);
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt6797.c b/drivers/clk/mediatek/clk-mt6797.c
index b362e99c8..0429a80f3 100644
--- a/drivers/clk/mediatek/clk-mt6797.c
+++ b/drivers/clk/mediatek/clk-mt6797.c
@@ -398,7 +398,8 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_fixed_divs, ARRAY_SIZE(top_fixed_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
&mt6797_clk_lock, clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -584,8 +585,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
}
}
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- infra_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), infra_clk_data);
mtk_clk_register_factors(infra_fixed_divs, ARRAY_SIZE(infra_fixed_divs),
infra_clk_data);
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index b17731fa1..e9070d0be 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -114,8 +114,8 @@ static int clk_mt7622_audiosys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, audio_clks,
+ ARRAY_SIZE(audio_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
diff --git a/drivers/clk/mediatek/clk-mt7622-eth.c b/drivers/clk/mediatek/clk-mt7622-eth.c
index a60190e83..ece0f7a7c 100644
--- a/drivers/clk/mediatek/clk-mt7622-eth.c
+++ b/drivers/clk/mediatek/clk-mt7622-eth.c
@@ -69,8 +69,8 @@ static int clk_mt7622_ethsys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ ARRAY_SIZE(eth_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -91,8 +91,8 @@ static int clk_mt7622_sgmiisys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
- mtk_clk_register_gates(node, sgmii_clks, ARRAY_SIZE(sgmii_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, sgmii_clks,
+ ARRAY_SIZE(sgmii_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt7622-hif.c b/drivers/clk/mediatek/clk-mt7622-hif.c
index 55baa6d06..c57ac2273 100644
--- a/drivers/clk/mediatek/clk-mt7622-hif.c
+++ b/drivers/clk/mediatek/clk-mt7622-hif.c
@@ -80,8 +80,8 @@ static int clk_mt7622_ssusbsys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
- mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, ssusb_clks,
+ ARRAY_SIZE(ssusb_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -102,8 +102,8 @@ static int clk_mt7622_pciesys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
- mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, pcie_clks,
+ ARRAY_SIZE(pcie_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index eebbb8790..67a296646 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -615,14 +615,15 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt7622_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt7622_clk_lock, clk_data);
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt7622_clk_lock, clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -635,8 +636,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
clk_data);
@@ -663,7 +664,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -682,10 +683,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
- mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ mtk_clk_register_composites(&pdev->dev, peri_muxes,
+ ARRAY_SIZE(peri_muxes), base,
&mt7622_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
index e1d2635c7..eab838af6 100644
--- a/drivers/clk/mediatek/clk-mt7629-eth.c
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -82,7 +82,8 @@ static int clk_mt7629_ethsys_init(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, eth_clks,
+ CLK_ETH_NR_CLK, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -106,8 +107,8 @@ static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, sgmii_clks[id++],
+ CLK_SGMII_NR_CLK, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
index 3628811a2..804900792 100644
--- a/drivers/clk/mediatek/clk-mt7629-hif.c
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -75,8 +75,8 @@ static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
- mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, ssusb_clks,
+ ARRAY_SIZE(ssusb_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -97,8 +97,8 @@ static int clk_mt7629_pciesys_init(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
- mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, pcie_clks,
+ ARRAY_SIZE(pcie_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
index 01ee45fcd..2019e272d 100644
--- a/drivers/clk/mediatek/clk-mt7629.c
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -566,8 +566,9 @@ static int mtk_topckgen_init(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
- base, &mt7629_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt7629_clk_lock, clk_data);
clk_prepare_enable(clk_data->hws[CLK_TOP_AXI_SEL]->clk);
clk_prepare_enable(clk_data->hws[CLK_TOP_MEM_SEL]->clk);
@@ -585,8 +586,8 @@ static int mtk_infrasys_init(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
clk_data);
@@ -610,10 +611,11 @@ static int mtk_pericfg_init(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
- mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ mtk_clk_register_composites(&pdev->dev, peri_muxes,
+ ARRAY_SIZE(peri_muxes), base,
&mt7629_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -637,7 +639,7 @@ static int mtk_apmixedsys_init(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
clk_data);
- mtk_clk_register_gates(node, apmixed_clks,
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
ARRAY_SIZE(apmixed_clks), clk_data);
clk_prepare_enable(clk_data->hws[CLK_APMIXED_ARMPLL]->clk);
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
index c21e1d672..e04bc6845 100644
--- a/drivers/clk/mediatek/clk-mt7986-eth.c
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -72,8 +72,8 @@ static void __init mtk_sgmiisys_0_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
- mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, sgmii0_clks,
+ ARRAY_SIZE(sgmii0_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -90,8 +90,8 @@ static void __init mtk_sgmiisys_1_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
- mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, sgmii1_clks,
+ ARRAY_SIZE(sgmii1_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -109,7 +109,7 @@ static void __init mtk_ethsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
- mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
index 74e68a719..0a4bf87ee 100644
--- a/drivers/clk/mediatek/clk-mt7986-infracfg.c
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -178,10 +178,11 @@ static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
return -ENOMEM;
mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
- mtk_clk_register_muxes(infra_muxes, ARRAY_SIZE(infra_muxes), node,
+ mtk_clk_register_muxes(&pdev->dev, infra_muxes,
+ ARRAY_SIZE(infra_muxes), node,
&mt7986_clk_lock, clk_data);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
index de5121cf2..c9bf47e60 100644
--- a/drivers/clk/mediatek/clk-mt7986-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -303,7 +303,8 @@ static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
&mt7986_clk_lock, clk_data);
clk_prepare_enable(clk_data->hws[CLK_TOP_SYSAXI_SEL]->clk);
diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c
index 3ea06d2ec..a39ad58e2 100644
--- a/drivers/clk/mediatek/clk-mt8135.c
+++ b/drivers/clk/mediatek/clk-mt8135.c
@@ -536,8 +536,9 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8135_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt8135_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -553,8 +554,8 @@ static void __init mtk_infrasys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -579,10 +580,11 @@ static void __init mtk_pericfg_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
- clk_data);
- mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
- &mt8135_clk_lock, clk_data);
+ mtk_clk_register_gates(NULL, node, peri_gates,
+ ARRAY_SIZE(peri_gates), clk_data);
+ mtk_clk_register_composites(NULL, peri_clks,
+ ARRAY_SIZE(peri_clks), base,
+ &mt8135_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8167-aud.c b/drivers/clk/mediatek/clk-mt8167-aud.c
index b5ac196cd..47a7d89d5 100644
--- a/drivers/clk/mediatek/clk-mt8167-aud.c
+++ b/drivers/clk/mediatek/clk-mt8167-aud.c
@@ -50,7 +50,7 @@ static void __init mtk_audsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
- mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8167-img.c b/drivers/clk/mediatek/clk-mt8167-img.c
index 4e7c0772b..e196b3b89 100644
--- a/drivers/clk/mediatek/clk-mt8167-img.c
+++ b/drivers/clk/mediatek/clk-mt8167-img.c
@@ -42,7 +42,7 @@ static void __init mtk_imgsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, img_clks, ARRAY_SIZE(img_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
index 192714498..602d25f4c 100644
--- a/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
+++ b/drivers/clk/mediatek/clk-mt8167-mfgcfg.c
@@ -40,7 +40,7 @@ static void __init mtk_mfgcfg_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_MFG_NR_CLK);
- mtk_clk_register_gates(node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, mfg_clks, ARRAY_SIZE(mfg_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8167-mm.c b/drivers/clk/mediatek/clk-mt8167-mm.c
index a94961b7b..abc70e122 100644
--- a/drivers/clk/mediatek/clk-mt8167-mm.c
+++ b/drivers/clk/mediatek/clk-mt8167-mm.c
@@ -98,8 +98,8 @@ static int clk_mt8167_mm_probe(struct platform_device *pdev)
data = &mt8167_mmsys_driver_data;
- ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
- clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, data->gates_clk,
+ data->gates_num, clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/mediatek/clk-mt8167-vdec.c b/drivers/clk/mediatek/clk-mt8167-vdec.c
index 38f0ba357..92bc05d99 100644
--- a/drivers/clk/mediatek/clk-mt8167-vdec.c
+++ b/drivers/clk/mediatek/clk-mt8167-vdec.c
@@ -49,7 +49,8 @@ static void __init mtk_vdecsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, vdec_clks, ARRAY_SIZE(vdec_clks),
+ clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
diff --git a/drivers/clk/mediatek/clk-mt8167.c b/drivers/clk/mediatek/clk-mt8167.c
index f900ac4bf..91669ebaf 100644
--- a/drivers/clk/mediatek/clk-mt8167.c
+++ b/drivers/clk/mediatek/clk-mt8167.c
@@ -937,11 +937,12 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, top_clks, ARRAY_SIZE(top_clks), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8167_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt8167_clk_lock, clk_data);
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt8167_clk_lock, clk_data);
@@ -966,8 +967,9 @@ static void __init mtk_infracfg_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
- mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
- &mt8167_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, ifr_muxes,
+ ARRAY_SIZE(ifr_muxes), base,
+ &mt8167_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8173-mm.c b/drivers/clk/mediatek/clk-mt8173-mm.c
index 5826eabdc..444a3d58c 100644
--- a/drivers/clk/mediatek/clk-mt8173-mm.c
+++ b/drivers/clk/mediatek/clk-mt8173-mm.c
@@ -112,8 +112,8 @@ static int clk_mt8173_mm_probe(struct platform_device *pdev)
data = &mt8173_mmsys_driver_data;
- ret = mtk_clk_register_gates(node, data->gates_clk, data->gates_num,
- clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, data->gates_clk,
+ data->gates_num, clk_data);
if (ret)
return ret;
diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c
index b8529ee71..d05c1109b 100644
--- a/drivers/clk/mediatek/clk-mt8173.c
+++ b/drivers/clk/mediatek/clk-mt8173.c
@@ -869,8 +869,9 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8173_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt8173_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -888,8 +889,8 @@ static void __init mtk_infrasys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
mtk_clk_register_cpumuxes(node, cpu_muxes, ARRAY_SIZE(cpu_muxes),
@@ -918,10 +919,11 @@ static void __init mtk_pericfg_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates),
- clk_data);
- mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base,
- &mt8173_clk_lock, clk_data);
+ mtk_clk_register_gates(NULL, node, peri_gates,
+ ARRAY_SIZE(peri_gates), clk_data);
+ mtk_clk_register_composites(NULL, peri_clks,
+ ARRAY_SIZE(peri_clks), base,
+ &mt8173_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -1062,8 +1064,8 @@ static void __init mtk_imgsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_IMG_NR_CLK);
- mtk_clk_register_gates(node, img_clks, ARRAY_SIZE(img_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, img_clks,
+ ARRAY_SIZE(img_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
@@ -1080,8 +1082,8 @@ static void __init mtk_vdecsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_VDEC_NR_CLK);
- mtk_clk_register_gates(node, vdec_clks, ARRAY_SIZE(vdec_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, vdec_clks,
+ ARRAY_SIZE(vdec_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -1097,8 +1099,8 @@ static void __init mtk_vencsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_VENC_NR_CLK);
- mtk_clk_register_gates(node, venc_clks, ARRAY_SIZE(venc_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, venc_clks,
+ ARRAY_SIZE(venc_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -1114,8 +1116,8 @@ static void __init mtk_vencltsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_VENCLT_NR_CLK);
- mtk_clk_register_gates(node, venclt_clks, ARRAY_SIZE(venclt_clks),
- clk_data);
+ mtk_clk_register_gates(NULL, node, venclt_clks,
+ ARRAY_SIZE(venclt_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8183-audio.c b/drivers/clk/mediatek/clk-mt8183-audio.c
index b2d7746ed..f358a6e7a 100644
--- a/drivers/clk/mediatek/clk-mt8183-audio.c
+++ b/drivers/clk/mediatek/clk-mt8183-audio.c
@@ -75,8 +75,8 @@ static int clk_mt8183_audio_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_AUDIO_NR_CLK);
- mtk_clk_register_gates(node, audio_clks, ARRAY_SIZE(audio_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, audio_clks,
+ ARRAY_SIZE(audio_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8183-mm.c b/drivers/clk/mediatek/clk-mt8183-mm.c
index 11ecc6fb0..358031530 100644
--- a/drivers/clk/mediatek/clk-mt8183-mm.c
+++ b/drivers/clk/mediatek/clk-mt8183-mm.c
@@ -90,8 +90,8 @@ static int clk_mt8183_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1860a35a7..786202441 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -1172,8 +1172,8 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -1238,17 +1238,20 @@ static int clk_mt8183_top_probe(struct platform_device *pdev)
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
- mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt8183_clk_lock, top_clk_data);
+ mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
+ &mt8183_clk_lock, top_clk_data);
- mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt8183_clk_lock, top_clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_muxes,
+ ARRAY_SIZE(top_aud_muxes), base,
+ &mt8183_clk_lock, top_clk_data);
- mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt8183_clk_lock, top_clk_data);
+ mtk_clk_register_composites(&pdev->dev, top_aud_divs,
+ ARRAY_SIZE(top_aud_divs), base,
+ &mt8183_clk_lock, top_clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- top_clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
ret = clk_mt8183_reg_mfg_mux_notifier(&pdev->dev,
top_clk_data->hws[CLK_TOP_MUX_MFG]->clk);
@@ -1267,8 +1270,8 @@ static int clk_mt8183_infra_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
- mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r) {
@@ -1290,8 +1293,8 @@ static int clk_mt8183_peri_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
- mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
- clk_data);
+ mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
@@ -1308,8 +1311,9 @@ static int clk_mt8183_mcu_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MCU_NR_CLK);
- mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
- &mt8183_clk_lock, clk_data);
+ mtk_clk_register_composites(&pdev->dev, mcu_muxes,
+ ARRAY_SIZE(mcu_muxes), base,
+ &mt8183_clk_lock, clk_data);
return of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
}
diff --git a/drivers/clk/mediatek/clk-mt8186-mcu.c b/drivers/clk/mediatek/clk-mt8186-mcu.c
index dfc305c1f..e52a2d986 100644
--- a/drivers/clk/mediatek/clk-mt8186-mcu.c
+++ b/drivers/clk/mediatek/clk-mt8186-mcu.c
@@ -65,7 +65,8 @@ static int clk_mt8186_mcu_probe(struct platform_device *pdev)
goto free_mcu_data;
}
- r = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes), base,
+ r = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
+ ARRAY_SIZE(mcu_muxes), base,
NULL, clk_data);
if (r)
goto free_mcu_data;
diff --git a/drivers/clk/mediatek/clk-mt8186-mm.c b/drivers/clk/mediatek/clk-mt8186-mm.c
index 1d33be407..0b7260777 100644
--- a/drivers/clk/mediatek/clk-mt8186-mm.c
+++ b/drivers/clk/mediatek/clk-mt8186-mm.c
@@ -69,7 +69,8 @@ static int clk_mt8186_mm_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
if (r)
goto free_mm_data;
diff --git a/drivers/clk/mediatek/clk-mt8186-topckgen.c b/drivers/clk/mediatek/clk-mt8186-topckgen.c
index d7f2c4663..70b6e008a 100644
--- a/drivers/clk/mediatek/clk-mt8186-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8186-topckgen.c
@@ -715,17 +715,20 @@ static int clk_mt8186_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_fixed_clks;
- r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
+ ARRAY_SIZE(top_mtk_muxes), node,
&mt8186_clk_lock, clk_data);
if (r)
goto unregister_factors;
- r = mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ r = mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
&mt8186_clk_lock, clk_data);
if (r)
goto unregister_muxes;
- r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
+ ARRAY_SIZE(top_adj_divs), base,
&mt8186_clk_lock, clk_data);
if (r)
goto unregister_composite_muxes;
diff --git a/drivers/clk/mediatek/clk-mt8192-aud.c b/drivers/clk/mediatek/clk-mt8192-aud.c
index 8c989bffd..f524188fe 100644
--- a/drivers/clk/mediatek/clk-mt8192-aud.c
+++ b/drivers/clk/mediatek/clk-mt8192-aud.c
@@ -87,7 +87,8 @@ static int clk_mt8192_aud_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, aud_clks,
+ ARRAY_SIZE(aud_clks), clk_data);
if (r)
return r;
diff --git a/drivers/clk/mediatek/clk-mt8192-mm.c b/drivers/clk/mediatek/clk-mt8192-mm.c
index 1be3ff4d4..e9eb4cf83 100644
--- a/drivers/clk/mediatek/clk-mt8192-mm.c
+++ b/drivers/clk/mediatek/clk-mt8192-mm.c
@@ -91,7 +91,8 @@ static int clk_mt8192_mm_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, mm_clks, ARRAY_SIZE(mm_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
if (r)
return r;
diff --git a/drivers/clk/mediatek/clk-mt8192.c b/drivers/clk/mediatek/clk-mt8192.c
index d0f226931..16feb86dc 100644
--- a/drivers/clk/mediatek/clk-mt8192.c
+++ b/drivers/clk/mediatek/clk-mt8192.c
@@ -1100,27 +1100,68 @@ static int clk_mt8192_top_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
- mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
- mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node, &mt8192_clk_lock,
- top_clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, &mt8192_clk_lock,
- top_clk_data);
- mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base, &mt8192_clk_lock,
- top_clk_data);
- r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ r = mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks), top_clk_data);
if (r)
return r;
+ r = mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
+ if (r)
+ goto unregister_fixed_clks;
+
+ r = mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ if (r)
+ goto unregister_early_factors;
+
+ r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
+ ARRAY_SIZE(top_mtk_muxes), node,
+ &mt8192_clk_lock, top_clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_composites(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt8192_clk_lock, top_clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
+ ARRAY_SIZE(top_adj_divs), base,
+ &mt8192_clk_lock, top_clk_data);
+ if (r)
+ goto unregister_top_composites;
+
+ r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
+ if (r)
+ goto unregister_adj_divs_composites;
+
r = clk_mt8192_reg_mfg_mux_notifier(&pdev->dev,
top_clk_data->hws[CLK_TOP_MFG_PLL_SEL]->clk);
if (r)
- return r;
+ goto unregister_gates;
-
- return of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, top_clk_data);
+ if (r)
+ goto unregister_gates;
+
+ return 0;
+
+unregister_gates:
+ mtk_clk_unregister_gates(top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+unregister_adj_divs_composites:
+ mtk_clk_unregister_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), top_clk_data);
+unregister_top_composites:
+ mtk_clk_unregister_composites(top_muxes, ARRAY_SIZE(top_muxes), top_clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), top_clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+unregister_early_factors:
+ mtk_clk_unregister_factors(top_early_divs, ARRAY_SIZE(top_early_divs), top_clk_data);
+unregister_fixed_clks:
+ mtk_clk_unregister_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
top_clk_data);
+ return r;
}
static int clk_mt8192_infra_probe(struct platform_device *pdev)
@@ -1133,20 +1174,23 @@ static int clk_mt8192_infra_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, infra_clks,
+ ARRAY_SIZE(infra_clks), clk_data);
if (r)
goto free_clk_data;
r = mtk_register_reset_controller_with_dev(&pdev->dev, &clk_rst_desc);
if (r)
- goto free_clk_data;
+ goto unregister_gates;
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
- goto free_clk_data;
+ goto unregister_gates;
return r;
+unregister_gates:
+ mtk_clk_unregister_gates(infra_clks, ARRAY_SIZE(infra_clks), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
return r;
@@ -1162,16 +1206,19 @@ static int clk_mt8192_peri_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, peri_clks,
+ ARRAY_SIZE(peri_clks), clk_data);
if (r)
goto free_clk_data;
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
- goto free_clk_data;
+ goto unregister_gates;
return r;
+unregister_gates:
+ mtk_clk_unregister_gates(peri_clks, ARRAY_SIZE(peri_clks), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
return r;
@@ -1188,16 +1235,19 @@ static int clk_mt8192_apmixed_probe(struct platform_device *pdev)
return -ENOMEM;
mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
- r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
if (r)
goto free_clk_data;
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
- goto free_clk_data;
+ goto unregister_gates;
return r;
+unregister_gates:
+ mtk_clk_unregister_gates(apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
free_clk_data:
mtk_free_clk_data(clk_data);
return r;
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
index 0dfed6ec4..1bc917f26 100644
--- a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -124,7 +124,8 @@ static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
if (r)
goto free_apmixed_data;
- r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
if (r)
goto unregister_plls;
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
index 1e016329c..3485ebb17 100644
--- a/drivers/clk/mediatek/clk-mt8195-topckgen.c
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -1262,7 +1262,8 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_fixed_clks;
- r = mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ r = mtk_clk_register_muxes(&pdev->dev, top_mtk_muxes,
+ ARRAY_SIZE(top_mtk_muxes), node,
&mt8195_clk_lock, top_clk_data);
if (r)
goto unregister_factors;
@@ -1281,12 +1282,14 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev)
if (r)
goto unregister_muxes;
- r = mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ r = mtk_clk_register_composites(&pdev->dev, top_adj_divs,
+ ARRAY_SIZE(top_adj_divs), base,
&mt8195_clk_lock, top_clk_data);
if (r)
goto unregister_muxes;
- r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, top_clks,
+ ARRAY_SIZE(top_clks), top_clk_data);
if (r)
goto unregister_composite_divs;
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
index 07b46bfd5..839b73068 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo0.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -104,7 +104,8 @@ static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, vdo0_clks,
+ ARRAY_SIZE(vdo0_clks), clk_data);
if (r)
goto free_vdo0_data;
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
index 835335b9d..7df695b28 100644
--- a/drivers/clk/mediatek/clk-mt8195-vdo1.c
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -131,7 +131,8 @@ static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates(node, vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
+ r = mtk_clk_register_gates(&pdev->dev, node, vdo1_clks,
+ ARRAY_SIZE(vdo1_clks), clk_data);
if (r)
goto free_vdo1_data;
diff --git a/drivers/clk/mediatek/clk-mt8365-mm.c b/drivers/clk/mediatek/clk-mt8365-mm.c
index 5c8bf18ab..22c75a03a 100644
--- a/drivers/clk/mediatek/clk-mt8365-mm.c
+++ b/drivers/clk/mediatek/clk-mt8365-mm.c
@@ -81,9 +81,8 @@ static int clk_mt8365_mm_probe(struct platform_device *pdev)
clk_data = mtk_alloc_clk_data(CLK_MM_NR_CLK);
- ret = mtk_clk_register_gates_with_dev(node, mm_clks,
- ARRAY_SIZE(mm_clks), clk_data,
- dev);
+ ret = mtk_clk_register_gates(dev, node, mm_clks,
+ ARRAY_SIZE(mm_clks), clk_data);
if (ret)
goto err_free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8365.c b/drivers/clk/mediatek/clk-mt8365.c
index adfecb618..c9faa07ec 100644
--- a/drivers/clk/mediatek/clk-mt8365.c
+++ b/drivers/clk/mediatek/clk-mt8365.c
@@ -947,12 +947,13 @@ static int clk_mt8365_top_probe(struct platform_device *pdev)
if (ret)
goto unregister_fixed_clks;
- ret = mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ ret = mtk_clk_register_muxes(&pdev->dev, top_muxes,
+ ARRAY_SIZE(top_muxes), node,
&mt8365_clk_lock, clk_data);
if (ret)
goto unregister_factors;
- ret = mtk_clk_register_composites(top_misc_mux_gates,
+ ret = mtk_clk_register_composites(&pdev->dev, top_misc_mux_gates,
ARRAY_SIZE(top_misc_mux_gates), base,
&mt8365_clk_lock, clk_data);
if (ret)
@@ -1019,8 +1020,8 @@ static int clk_mt8365_infra_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- ret = mtk_clk_register_gates(node, ifr_clks, ARRAY_SIZE(ifr_clks),
- clk_data);
+ ret = mtk_clk_register_gates(&pdev->dev, node, ifr_clks,
+ ARRAY_SIZE(ifr_clks), clk_data);
if (ret)
goto free_clk_data;
@@ -1080,8 +1081,9 @@ static int clk_mt8365_mcu_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- ret = mtk_clk_register_composites(mcu_muxes, ARRAY_SIZE(mcu_muxes),
- base, &mt8365_clk_lock, clk_data);
+ ret = mtk_clk_register_composites(&pdev->dev, mcu_muxes,
+ ARRAY_SIZE(mcu_muxes), base,
+ &mt8365_clk_lock, clk_data);
if (ret)
goto free_clk_data;
diff --git a/drivers/clk/mediatek/clk-mt8516-aud.c b/drivers/clk/mediatek/clk-mt8516-aud.c
index a3dafc719..a6ae8003b 100644
--- a/drivers/clk/mediatek/clk-mt8516-aud.c
+++ b/drivers/clk/mediatek/clk-mt8516-aud.c
@@ -48,7 +48,7 @@ static void __init mtk_audsys_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_AUD_NR_CLK);
- mtk_clk_register_gates(node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, aud_clks, ARRAY_SIZE(aud_clks), clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mt8516.c b/drivers/clk/mediatek/clk-mt8516.c
index 056953d59..6983d3a48 100644
--- a/drivers/clk/mediatek/clk-mt8516.c
+++ b/drivers/clk/mediatek/clk-mt8516.c
@@ -655,11 +655,12 @@ static void __init mtk_topckgen_init(struct device_node *node)
mtk_clk_register_fixed_clks(fixed_clks, ARRAY_SIZE(fixed_clks),
clk_data);
- mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), clk_data);
+ mtk_clk_register_gates(NULL, node, top_clks, ARRAY_SIZE(top_clks), clk_data);
mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
- mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
- &mt8516_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, top_muxes,
+ ARRAY_SIZE(top_muxes), base,
+ &mt8516_clk_lock, clk_data);
mtk_clk_register_dividers(top_adj_divs, ARRAY_SIZE(top_adj_divs),
base, &mt8516_clk_lock, clk_data);
@@ -684,8 +685,9 @@ static void __init mtk_infracfg_init(struct device_node *node)
clk_data = mtk_alloc_clk_data(CLK_IFR_NR_CLK);
- mtk_clk_register_composites(ifr_muxes, ARRAY_SIZE(ifr_muxes), base,
- &mt8516_clk_lock, clk_data);
+ mtk_clk_register_composites(NULL, ifr_muxes,
+ ARRAY_SIZE(ifr_muxes), base,
+ &mt8516_clk_lock, clk_data);
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index d31f01d0b..fa2c1b1c7 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -11,12 +11,15 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-mtk.h"
#include "clk-gate.h"
+#include "clk-mux.h"
static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data,
unsigned int clk_num)
@@ -197,8 +200,8 @@ void mtk_clk_unregister_factors(const struct mtk_fixed_factor *clks, int num,
}
EXPORT_SYMBOL_GPL(mtk_clk_unregister_factors);
-static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
- void __iomem *base, spinlock_t *lock)
+static struct clk_hw *mtk_clk_register_composite(struct device *dev,
+ const struct mtk_composite *mc, void __iomem *base, spinlock_t *lock)
{
struct clk_hw *hw;
struct clk_mux *mux = NULL;
@@ -264,7 +267,7 @@ static struct clk_hw *mtk_clk_register_composite(const struct mtk_composite *mc,
div_ops = &clk_divider_ops;
}
- hw = clk_hw_register_composite(NULL, mc->name, parent_names, num_parents,
+ hw = clk_hw_register_composite(dev, mc->name, parent_names, num_parents,
mux_hw, mux_ops,
div_hw, div_ops,
gate_hw, gate_ops,
@@ -308,7 +311,8 @@ static void mtk_clk_unregister_composite(struct clk_hw *hw)
kfree(mux);
}
-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+int mtk_clk_register_composites(struct device *dev,
+ const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data)
{
@@ -327,7 +331,7 @@ int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
continue;
}
- hw = mtk_clk_register_composite(mc, base, lock);
+ hw = mtk_clk_register_composite(dev, mc, base, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mc->name,
@@ -449,20 +453,81 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
const struct mtk_clk_desc *mcd;
struct clk_hw_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
- int r;
+ void __iomem *base;
+ int num_clks, r;
mcd = of_device_get_match_data(&pdev->dev);
if (!mcd)
return -EINVAL;
- clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ /* Composite clocks need us to pass the iomem pointer */
+ if (mcd->composite_clks) {
+ if (!mcd->shared_io)
+ base = devm_platform_ioremap_resource(pdev, 0);
+ else
+ base = of_iomap(node, 0);
+
+ if (IS_ERR_OR_NULL(base))
+ return IS_ERR(base) ? PTR_ERR(base) : -ENOMEM;
+ }
+
+ devm_pm_runtime_enable(&pdev->dev);
+ /*
+ * Do a pm_runtime_resume_and_get() to work around a possible
+ * deadlock between clk_register() and the genpd framework.
+ */
+ r = pm_runtime_resume_and_get(&pdev->dev);
+ if (r)
+ return r;
+
+ /* Calculate how many clk_hw_onecell_data entries to allocate */
+ num_clks = mcd->num_clks + mcd->num_composite_clks;
+ num_clks += mcd->num_fixed_clks + mcd->num_factor_clks;
+ num_clks += mcd->num_mux_clks;
+
+ clk_data = mtk_alloc_clk_data(num_clks);
if (!clk_data)
return -ENOMEM;
- r = mtk_clk_register_gates_with_dev(node, mcd->clks, mcd->num_clks,
- clk_data, &pdev->dev);
- if (r)
- goto free_data;
+ if (mcd->fixed_clks) {
+ r = mtk_clk_register_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
+ if (r)
+ goto free_data;
+ }
+
+ if (mcd->factor_clks) {
+ r = mtk_clk_register_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+ if (r)
+ goto unregister_fixed_clks;
+ }
+
+ if (mcd->mux_clks) {
+ r = mtk_clk_register_muxes(&pdev->dev, mcd->mux_clks,
+ mcd->num_mux_clks, node,
+ mcd->clk_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+ }
+
+ if (mcd->composite_clks) {
+ /* We don't check mcd->clk_lock here because it's optional */
+ r = mtk_clk_register_composites(&pdev->dev,
+ mcd->composite_clks,
+ mcd->num_composite_clks,
+ base, mcd->clk_lock, clk_data);
+ if (r)
+ goto unregister_muxes;
+ }
+
+ if (mcd->clks) {
+ r = mtk_clk_register_gates(&pdev->dev, node, mcd->clks,
+ mcd->num_clks, clk_data);
+ if (r)
+ goto unregister_composites;
+ }
r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (r)
@@ -477,12 +542,35 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
goto unregister_clks;
}
+ pm_runtime_put(&pdev->dev);
+
return r;
unregister_clks:
- mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->clks)
+ mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+unregister_composites:
+ if (mcd->composite_clks)
+ mtk_clk_unregister_composites(mcd->composite_clks,
+ mcd->num_composite_clks, clk_data);
+unregister_muxes:
+ if (mcd->mux_clks)
+ mtk_clk_unregister_muxes(mcd->mux_clks,
+ mcd->num_mux_clks, clk_data);
+unregister_factors:
+ if (mcd->factor_clks)
+ mtk_clk_unregister_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+unregister_fixed_clks:
+ if (mcd->fixed_clks)
+ mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
free_data:
mtk_free_clk_data(clk_data);
+ if (mcd->shared_io && base)
+ iounmap(base);
+
+ pm_runtime_put(&pdev->dev);
return r;
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_probe);
@@ -494,7 +582,20 @@ int mtk_clk_simple_remove(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
of_clk_del_provider(node);
- mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->clks)
+ mtk_clk_unregister_gates(mcd->clks, mcd->num_clks, clk_data);
+ if (mcd->composite_clks)
+ mtk_clk_unregister_composites(mcd->composite_clks,
+ mcd->num_composite_clks, clk_data);
+ if (mcd->mux_clks)
+ mtk_clk_unregister_muxes(mcd->mux_clks,
+ mcd->num_mux_clks, clk_data);
+ if (mcd->factor_clks)
+ mtk_clk_unregister_factors(mcd->factor_clks,
+ mcd->num_factor_clks, clk_data);
+ if (mcd->fixed_clks)
+ mtk_clk_unregister_fixed_clks(mcd->fixed_clks,
+ mcd->num_fixed_clks, clk_data);
mtk_free_clk_data(clk_data);
return 0;
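
Note on the clk-mtk.c hunks above: mtk_clk_simple_probe() now brackets all registrations with runtime PM so the clock controller's power domain stays up while clk_hw_register() runs. A minimal sketch of that bracket, assuming a probe with the same shape (the function name and the elided registration step are placeholders, not part of the patch):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_clk_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	/* Keep the power domain resumed across registration to avoid the
	 * clk_register()/genpd deadlock mentioned in the comment above. */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	/* ... register fixed clks, factors, muxes, composites, gates ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}
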
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 63ae7941a..880b3d6d8 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -149,7 +149,8 @@ struct mtk_composite {
.flags = 0, \
}
-int mtk_clk_register_composites(const struct mtk_composite *mcs, int num,
+int mtk_clk_register_composites(struct device *dev,
+ const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
struct clk_hw_onecell_data *clk_data);
void mtk_clk_unregister_composites(const struct mtk_composite *mcs, int num,
@@ -195,7 +196,17 @@ void mtk_clk_unregister_ref2usb_tx(struct clk_hw *hw);
struct mtk_clk_desc {
const struct mtk_gate *clks;
size_t num_clks;
+ const struct mtk_composite *composite_clks;
+ size_t num_composite_clks;
+ const struct mtk_fixed_clk *fixed_clks;
+ size_t num_fixed_clks;
+ const struct mtk_fixed_factor *factor_clks;
+ size_t num_factor_clks;
+ const struct mtk_mux *mux_clks;
+ size_t num_mux_clks;
const struct mtk_clk_rst_desc *rst_desc;
+ spinlock_t *clk_lock;
+ bool shared_io;
};
int mtk_clk_simple_probe(struct platform_device *pdev);
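
For illustration only: with the extended struct mtk_clk_desc above, a per-SoC driver can describe its clock tree declaratively and let mtk_clk_simple_probe() handle registration and unwinding. A hypothetical descriptor could look like this (the foo_* tables are made-up placeholders, not taken from the patch):

#include <linux/spinlock.h>
#include "clk-mtk.h"

static const struct mtk_gate foo_gates[] = { /* gate definitions */ };
static const struct mtk_fixed_factor foo_divs[] = { /* factor definitions */ };
static DEFINE_SPINLOCK(foo_clk_lock);

static const struct mtk_clk_desc foo_topck_desc = {
	.clks = foo_gates,
	.num_clks = ARRAY_SIZE(foo_gates),
	.factor_clks = foo_divs,
	.num_factor_clks = ARRAY_SIZE(foo_divs),
	.clk_lock = &foo_clk_lock,
	/* .fixed_clks, .mux_clks and .composite_clks are optional;
	 * .shared_io = true requests of_iomap() instead of
	 * devm_platform_ioremap_resource() for shared register spaces. */
};
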
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index ba1720b9e..c85935542 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -154,9 +154,10 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
-static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
- struct regmap *regmap,
- spinlock_t *lock)
+static struct clk_hw *mtk_clk_register_mux(struct device *dev,
+ const struct mtk_mux *mux,
+ struct regmap *regmap,
+ spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
struct clk_init_data init = {};
@@ -177,7 +178,7 @@ static struct clk_hw *mtk_clk_register_mux(const struct mtk_mux *mux,
clk_mux->lock = lock;
clk_mux->hw.init = &init;
- ret = clk_hw_register(NULL, &clk_mux->hw);
+ ret = clk_hw_register(dev, &clk_mux->hw);
if (ret) {
kfree(clk_mux);
return ERR_PTR(ret);
@@ -198,7 +199,8 @@ static void mtk_clk_unregister_mux(struct clk_hw *hw)
kfree(mux);
}
-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+int mtk_clk_register_muxes(struct device *dev,
+ const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
struct clk_hw_onecell_data *clk_data)
@@ -222,7 +224,7 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
continue;
}
- hw = mtk_clk_register_mux(mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 83ff420f4..7ecb963b0 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -83,7 +83,8 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
-int mtk_clk_register_muxes(const struct mtk_mux *muxes,
+int mtk_clk_register_muxes(struct device *dev,
+ const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
struct clk_hw_onecell_data *clk_data);
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4536ed43f..84dce5184 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -641,33 +641,22 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
- struct usb_endpoint_descriptor *ep_desc;
- int i;
-
- if (iface_desc->desc.bNumEndpoints != 2)
- return -ENODEV;
-
- for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
- ep_desc = &iface_desc->endpoint[i].desc;
-
- if (usb_endpoint_is_int_in(ep_desc) ||
- usb_endpoint_is_bulk_in(ep_desc)) {
- if (!devpriv->ep_rx)
- devpriv->ep_rx = ep_desc;
- continue;
- }
+ struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
+ int ret;
- if (usb_endpoint_is_int_out(ep_desc) ||
- usb_endpoint_is_bulk_out(ep_desc)) {
- if (!devpriv->ep_tx)
- devpriv->ep_tx = ep_desc;
- continue;
- }
- }
+ if (devpriv->model == VMK8061_MODEL)
+ ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
+ &ep_tx_desc, NULL, NULL);
+ else
+ ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
+ &ep_rx_desc, &ep_tx_desc);
- if (!devpriv->ep_rx || !devpriv->ep_tx)
+ if (ret)
return -ENODEV;
+ devpriv->ep_rx = ep_rx_desc;
+ devpriv->ep_tx = ep_tx_desc;
+
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;
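
The vmk80xx change above switches to the USB core's endpoint lookup helper. usb_find_common_endpoints() scans an altsetting once for the first bulk-in/bulk-out/int-in/int-out endpoints and returns -ENXIO if any requested endpoint is missing. A minimal usage sketch (the wrapper name is hypothetical):

#include <linux/usb.h>

static int example_find_endpoints(struct usb_interface *intf,
				  struct usb_endpoint_descriptor **rx,
				  struct usb_endpoint_descriptor **tx)
{
	/* Request only the interrupt pair; pass NULL for the bulk slots.
	 * Returns 0 on success or -ENXIO if either endpoint is missing. */
	return usb_find_common_endpoints(intf->cur_altsetting,
					 NULL, NULL, rx, tx);
}
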
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c8912756f..91efa23e0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1525,7 +1525,8 @@ static int cpufreq_online(unsigned int cpu)
if (cpufreq_driver->ready)
cpufreq_driver->ready(policy);
- if (cpufreq_thermal_control_enabled(cpufreq_driver))
+ /* Register cpufreq cooling only for a new policy */
+ if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
policy->cdev = of_cpufreq_cooling_register(policy);
pr_debug("initialization complete\n");
@@ -1609,11 +1610,6 @@ static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
else
policy->last_policy = policy->policy;
- if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
- cpufreq_cooling_unregister(policy->cdev);
- policy->cdev = NULL;
- }
-
if (has_target())
cpufreq_exit_governor(policy);
@@ -1674,6 +1670,15 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
return;
}
+ /*
+ * Unregister cpufreq cooling once all the CPUs of the policy are
+ * removed.
+ */
+ if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
+ cpufreq_cooling_unregister(policy->cdev);
+ policy->cdev = NULL;
+ }
+
/* We did light-weight exit earlier, do full tear down now */
if (cpufreq_driver->offline)
cpufreq_driver->exit(policy);
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index f70aa17e2..c594e28ad 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
#include <linux/cpumask.h>
#include <linux/tick.h>
#include <linux/cpu.h>
+#include <linux/math64.h>
#include "cpuidle.h"
@@ -185,7 +186,7 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency_ns = 0;
if (s->exit_latency > 0)
- s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
+ s->exit_latency_ns = mul_u32_u32(s->exit_latency, NSEC_PER_USEC);
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
}
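
Context for the cpuidle hunk above: exit_latency is a u32 in microseconds, so multiplying it by NSEC_PER_USEC in 32-bit arithmetic can wrap before the result is widened to the s64 exit_latency_ns. A small sketch of the difference (illustrative only):

#include <linux/math64.h>
#include <linux/time64.h>

static u64 example_latency_ns(u32 exit_latency_us)
{
	/* e.g. 5,000,000 us * 1000 = 5,000,000,000, which wraps to
	 * 705,032,704 if done as a u32 multiply; mul_u32_u32() performs
	 * the multiplication in 64 bits, so no wrap occurs. */
	return mul_u32_u32(exit_latency_us, NSEC_PER_USEC);
}
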
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index f4c07ad3b..af8777a1e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
+ /* Since IRQ may be shared, check if DMA controller is powered on */
+ if (status == GENMASK(31, 0))
+ return IRQ_NONE;
+
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */
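
The idma64 hunk above guards a shared interrupt line: MMIO reads from a powered-down device return all ones, so a status of 0xffffffff means the interrupt is not ours. The general pattern as a hypothetical standalone handler (sketch only):

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t example_shared_irq(int irq, void *dev_id)
{
	void __iomem *regs = dev_id;	/* hypothetical per-device cookie */
	u32 status = readl(regs);

	/* Device is suspended/powered off: the IRQ belongs to someone else */
	if (status == GENMASK(31, 0))
		return IRQ_NONE;

	/* ... handle and clear the reported events here ... */
	return IRQ_HANDLED;
}
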
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index d73004f47..612ef13b7 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -529,14 +529,11 @@ static int perf_event_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
target = cpumask_any_but(cpu_online_mask, cpu);
-
/* migrate events if there is a valid target */
- if (target < nr_cpu_ids)
+ if (target < nr_cpu_ids) {
cpumask_set_cpu(target, &perfmon_dsa_cpu_mask);
- else
- target = -1;
-
- perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ perf_pmu_migrate_context(&idxd_pmu->pmu, cpu, target);
+ }
return 0;
}
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index b6e0ac831..0819f19c8 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -249,7 +249,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
- writel(val, pchan->base + reg);
+ writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@@ -273,7 +273,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
- writel(val, od->base + reg);
+ writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 75af3488a..e70b7c41d 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -742,6 +742,9 @@ static int tegra_dma_get_residual(struct tegra_dma_channel *tdc)
bytes_xfer = dma_desc->bytes_xfer +
sg_req[dma_desc->sg_idx].len - (wcount * 4);
+ if (dma_desc->bytes_req == bytes_xfer)
+ return 0;
+
residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req);
return residual;
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index 84dc5240a..93938ed80 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -214,7 +214,8 @@ struct xilinx_dpdma_tx_desc {
* @running: true if the channel is running
* @first_frame: flag for the first frame of stream
* @video_group: flag if multi-channel operation is needed for video channels
- * @lock: lock to access struct xilinx_dpdma_chan
+ * @lock: lock to access struct xilinx_dpdma_chan. Must be taken before
+ * @vchan.lock, if both are to be held.
* @desc_pool: descriptor allocation pool
* @err_task: error IRQ bottom half handler
* @desc: References to descriptors being processed
@@ -1097,12 +1098,14 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
* Complete the active descriptor, if any, promote the pending
* descriptor to active, and queue the next transfer, if any.
*/
+ spin_lock(&chan->vchan.lock);
if (chan->desc.active)
vchan_cookie_complete(&chan->desc.active->vdesc);
chan->desc.active = pending;
chan->desc.pending = NULL;
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
out:
spin_unlock_irqrestore(&chan->lock, flags);
@@ -1264,10 +1267,12 @@ static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
unsigned long flags;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
if (vchan_issue_pending(&chan->vchan))
xilinx_dpdma_chan_queue_transfer(chan);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
+ spin_unlock_irqrestore(&chan->lock, flags);
}
static int xilinx_dpdma_config(struct dma_chan *dchan,
@@ -1495,7 +1500,9 @@ static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->vchan.lock);
xilinx_dpdma_chan_queue_transfer(chan);
+ spin_unlock(&chan->vchan.lock);
spin_unlock_irqrestore(&chan->lock, flags);
}
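
The xilinx_dpdma hunks above make every path take chan->lock before chan->vchan.lock, matching the updated @lock kerneldoc; keeping a single global order rules out ABBA deadlocks between the vsync IRQ, issue_pending() and the error tasklet. A self-contained sketch of the pattern, with hypothetical names in place of the driver's private types:

#include <linux/spinlock.h>

struct example_chan {
	spinlock_t lock;	/* outer lock (chan->lock in the driver) */
	spinlock_t vchan_lock;	/* inner lock (chan->vchan.lock) */
};

static void example_locked_queue(struct example_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan_lock);

	/* ... promote pending descriptors and queue the next transfer ... */

	spin_unlock(&chan->vchan_lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}
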
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 473ef1842..748781c25 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -102,13 +102,6 @@ lib-y := $(patsubst %.o,%.stub.o,$(lib-y))
# https://bugs.llvm.org/show_bug.cgi?id=46480
STUBCOPY_FLAGS-y += --remove-section=.note.gnu.property
-#
-# For x86, bootloaders like systemd-boot or grub-efi do not zero-initialize the
-# .bss section, so the .bss section of the EFI stub needs to be included in the
-# .data section of the compressed kernel to ensure initialization. Rename the
-# .bss section here so it's easy to pick out in the linker script.
-#
-STUBCOPY_FLAGS-$(CONFIG_X86) += --rename-section .bss=.bss.efistub,load,alloc
STUBCOPY_RELOC-$(CONFIG_X86_32) := R_386_32
STUBCOPY_RELOC-$(CONFIG_X86_64) := R_X86_64_64
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index dc50dda40..55468debd 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -227,6 +227,15 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
rounded_end = roundup(start + size, EFI_PAGE_SIZE);
if (memattr != NULL) {
+ status = efi_call_proto(memattr, set_memory_attributes,
+ rounded_start,
+ rounded_end - rounded_start,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to set EFI_MEMORY_RO attribute\n");
+ return status;
+ }
+
status = efi_call_proto(memattr, clear_memory_attributes,
rounded_start,
rounded_end - rounded_start,
@@ -426,9 +435,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_system_table_t *sys_table_arg)
{
- struct boot_params *boot_params;
- struct setup_header *hdr;
- void *image_base;
+ static struct boot_params boot_params __page_aligned_bss;
+ struct setup_header *hdr = &boot_params.hdr;
efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
int options_size = 0;
efi_status_t status;
@@ -449,57 +457,25 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
efi_exit(handle, status);
}
- image_base = efi_table_attr(image, image_base);
-
- status = efi_allocate_pages(sizeof(struct boot_params),
- (unsigned long *)&boot_params, ULONG_MAX);
- if (status != EFI_SUCCESS) {
- efi_err("Failed to allocate lowmem for boot params\n");
- efi_exit(handle, status);
- }
-
- memset(boot_params, 0x0, sizeof(struct boot_params));
-
- hdr = &boot_params->hdr;
-
- /* Copy the setup header from the second sector to boot_params */
- memcpy(&hdr->jump, image_base + 512,
- sizeof(struct setup_header) - offsetof(struct setup_header, jump));
-
- /*
- * Fill out some of the header fields ourselves because the
- * EFI firmware loader doesn't load the first sector.
- */
+ /* Assign the setup_header fields that the kernel actually cares about */
hdr->root_flags = 1;
hdr->vid_mode = 0xffff;
- hdr->boot_flag = 0xAA55;
hdr->type_of_loader = 0x21;
+ hdr->initrd_addr_max = INT_MAX;
/* Convert unicode cmdline to ascii */
cmdline_ptr = efi_convert_cmdline(image, &options_size);
if (!cmdline_ptr)
goto fail;
- efi_set_u64_split((unsigned long)cmdline_ptr,
- &hdr->cmd_line_ptr, &boot_params->ext_cmd_line_ptr);
-
- hdr->ramdisk_image = 0;
- hdr->ramdisk_size = 0;
+ efi_set_u64_split((unsigned long)cmdline_ptr, &hdr->cmd_line_ptr,
+ &boot_params.ext_cmd_line_ptr);
- /*
- * Disregard any setup data that was provided by the bootloader:
- * setup_data could be pointing anywhere, and we have no way of
- * authenticating or validating the payload.
- */
- hdr->setup_data = 0;
-
- efi_stub_entry(handle, sys_table_arg, boot_params);
+ efi_stub_entry(handle, sys_table_arg, &boot_params);
/* not reached */
fail:
- efi_free(sizeof(struct boot_params), (unsigned long)boot_params);
-
efi_exit(handle, status);
}
@@ -811,7 +787,7 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry)
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_total_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size);
}
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 9d3874cda..34e415247 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -81,7 +81,7 @@ static const char *get_filename(struct tegra_bpmp *bpmp,
root_path_buf = kzalloc(root_path_buf_len, GFP_KERNEL);
if (!root_path_buf)
- goto out;
+ return NULL;
root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf,
root_path_buf_len);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 260e6a331..7d5fbaaba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1779,6 +1779,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 4c661e024..49a47807c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1400,6 +1400,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure the object fits at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1426,21 +1457,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@@ -1493,17 +1517,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
- return -EINVAL;
- if (saddr + size <= saddr || offset + size <= offset)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if ((bo && offset + size > amdgpu_bo_size(bo)) ||
- (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@@ -1517,7 +1533,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@@ -1604,10 +1620,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
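
In the amdgpu_vm change above, the new helper rejects unaligned or wrapping ranges before any allocation happens. check_add_overflow() from <linux/overflow.h> stores the sum in its third argument and evaluates to true when the addition wrapped; a minimal sketch:

#include <linux/overflow.h>
#include <linux/types.h>

static bool example_range_wraps(u64 start, u64 size)
{
	u64 end;

	/* true when start + size does not fit in 64 bits */
	return check_add_overflow(start, size, &end);
}
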
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index 66a6f7a37..5a5787bfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -1531,44 +1531,70 @@ static void gfx_v11_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX, data);
}
-static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+static u32 gfx_v11_0_get_sa_active_bitmap(struct amdgpu_device *adev)
{
- u32 data, mask;
+ u32 gc_disabled_sa_mask, gc_user_disabled_sa_mask, sa_mask;
+
+ gc_disabled_sa_mask = RREG32_SOC15(GC, 0, regCC_GC_SA_UNIT_DISABLE);
+ gc_disabled_sa_mask = REG_GET_FIELD(gc_disabled_sa_mask,
+ CC_GC_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ gc_user_disabled_sa_mask = RREG32_SOC15(GC, 0, regGC_USER_SA_UNIT_DISABLE);
+ gc_user_disabled_sa_mask = REG_GET_FIELD(gc_user_disabled_sa_mask,
+ GC_USER_SA_UNIT_DISABLE,
+ SA_DISABLE);
+ sa_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_sh_per_se *
+ adev->gfx.config.max_shader_engines);
- data = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
- data |= RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ return sa_mask & (~(gc_disabled_sa_mask | gc_user_disabled_sa_mask));
+}
- data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
- data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;
+static u32 gfx_v11_0_get_rb_active_bitmap(struct amdgpu_device *adev)
+{
+ u32 gc_disabled_rb_mask, gc_user_disabled_rb_mask;
+ u32 rb_mask;
- mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se);
+ gc_disabled_rb_mask = RREG32_SOC15(GC, 0, regCC_RB_BACKEND_DISABLE);
+ gc_disabled_rb_mask = REG_GET_FIELD(gc_disabled_rb_mask,
+ CC_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = RREG32_SOC15(GC, 0, regGC_USER_RB_BACKEND_DISABLE);
+ gc_user_disabled_rb_mask = REG_GET_FIELD(gc_user_disabled_rb_mask,
+ GC_USER_RB_BACKEND_DISABLE,
+ BACKEND_DISABLE);
+ rb_mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se *
+ adev->gfx.config.max_shader_engines);
- return (~data) & mask;
+ return rb_mask & (~(gc_disabled_rb_mask | gc_user_disabled_rb_mask));
}
static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
{
- int i, j;
- u32 data;
- u32 active_rbs = 0;
- u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
- adev->gfx.config.max_sh_per_se;
+ u32 rb_bitmap_width_per_sa;
+ u32 max_sa;
+ u32 active_sa_bitmap;
+ u32 global_active_rb_bitmap;
+ u32 active_rb_bitmap = 0;
+ u32 i;
- mutex_lock(&adev->grbm_idx_mutex);
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
- gfx_v11_0_select_se_sh(adev, i, j, 0xffffffff);
- data = gfx_v11_0_get_rb_active_bitmap(adev);
- active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
- rb_bitmap_width_per_sh);
- }
+ /* query sa bitmap from SA_UNIT_DISABLE registers */
+ active_sa_bitmap = gfx_v11_0_get_sa_active_bitmap(adev);
+ /* query rb bitmap from RB_BACKEND_DISABLE registers */
+ global_active_rb_bitmap = gfx_v11_0_get_rb_active_bitmap(adev);
+
+ /* generate active rb bitmap according to active sa bitmap */
+ max_sa = adev->gfx.config.max_shader_engines *
+ adev->gfx.config.max_sh_per_se;
+ rb_bitmap_width_per_sa = adev->gfx.config.max_backends_per_se /
+ adev->gfx.config.max_sh_per_se;
+ for (i = 0; i < max_sa; i++) {
+ if (active_sa_bitmap & (1 << i))
+ active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
}
- gfx_v11_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.config.backend_enable_mask = active_rbs;
- adev->gfx.config.num_rbs = hweight32(active_rbs);
+ active_rb_bitmap &= global_active_rb_bitmap;
+ adev->gfx.config.backend_enable_mask = active_rb_bitmap;
+ adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
}
#define DEFAULT_SH_MEM_BASES (0x6000)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 856db876a..c7af36370 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -345,17 +345,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
- ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
- amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
- SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
- SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
- amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
- amdgpu_ring_write(ring, ref_and_mask); /* reference */
- amdgpu_ring_write(ring, ref_and_mask); /* mask */
- amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
- SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ if (ring->me > 1) {
+ amdgpu_asic_flush_hdp(adev, ring);
+ } else {
+ ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+ SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+ SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+ amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+ amdgpu_ring_write(ring, ref_and_mask); /* reference */
+ amdgpu_ring_write(ring, ref_and_mask); /* mask */
+ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 56af7b5ab..56cc59629 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -460,10 +460,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
- return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
- return false;
default:
return true;
}
@@ -780,10 +778,35 @@ static int soc21_common_suspend(void *handle)
return soc21_common_hw_fini(adev);
}
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg1, sol_reg2;
+
+ /* Reset on resume only in the following suspend-abort case:
+ * 1) dGPU only (not an APU), and
+ * 2) the S3 suspend was aborted while the TOS is still active.
+ */
+ if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+ !adev->suspend_complete) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+ msleep(100);
+ sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return (sol_reg1 != sol_reg2);
+ }
+
+ return false;
+}
+
static int soc21_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc21_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend aborted, resetting...");
+ soc21_asic_reset(adev);
+ }
+
return soc21_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index e191d38f3..3f403afd6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -765,8 +765,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
* nodes, but not more than args->num_of_nodes as that is
* the amount of memory allocated by user
*/
- pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
- args->num_of_nodes), GFP_KERNEL);
+ pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+ GFP_KERNEL);
if (!pa)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 0b87034d9..1b7b29426 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1805,6 +1805,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
while (halt_if_hws_hang)
schedule();
+ kfd_hws_hang(dqm);
return -ETIME;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 187f5b27f..29d2003fb 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -112,20 +112,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -222,11 +227,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
index a974f86e7..37c645a88 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
@@ -216,9 +216,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
index 4220fd8fd..54cd86060 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h
@@ -57,10 +57,10 @@ void mod_stats_update_event(struct mod_stats *mod_stats,
unsigned int length);
void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns);
+ unsigned long long timestamp_in_ns);
void mod_stats_update_freesync(struct mod_stats *mod_stats,
unsigned int v_total_min,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 6d9760eac..21b374d12 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -222,8 +222,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (!en && !adev->in_s0ix)
+ if (!en && !adev->in_s0ix) {
+ /* Add a GFX reset as a workaround just before sending the
+ * MP1_UNLOAD message, to prevent GC/RLC/PMFW from entering
+ * an invalid state.
+ */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+ SMU_RESET_MODE_2, NULL);
+ if (ret)
+ return ret;
+
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 564838603..a4a23b962 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -190,6 +190,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
struct ast_private *ast = to_ast_private(dev);
u8 video_on_off = on;
+ u32 i = 0;
// Video On/Off
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -202,6 +203,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
// wait 1 ms
mdelay(1);
+ if (++i > 200)
+ break;
}
}
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 7847020de..9a6580604 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -781,6 +781,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
+ /* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@@ -849,7 +850,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@@ -879,6 +879,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
+ mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index d5c15292a..aa93129c3 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -117,6 +117,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = {
+ .width = 1080,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.width = 1200,
.height = 1920,
@@ -279,6 +285,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
},
.driver_data = (void *)&lcd720x1280_rightside_up,
+ }, { /* GPD Win Mini */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = (void *)&lcd1080x1920_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -336,6 +348,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Legion Go 8APU1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 25dcdde5f..5147718f3 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2152,7 +2152,7 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
- if (pipe == INVALID_PIPE ||
+ if (new_cdclk_state->disable_pipes ||
old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
@@ -2181,7 +2181,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
&new_cdclk_state->actual))
return;
- if (pipe != INVALID_PIPE &&
+ if (!new_cdclk_state->disable_pipes &&
old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
@@ -2634,6 +2634,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -2793,6 +2794,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index c674879a8..c4b3e5938 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 706e2d956..76277eb3e 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3683,7 +3683,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 5eac99021..6615e4153 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -110,6 +110,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
if (!intel_vrr_is_capable(connector))
return;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index c8ad8f37e..58a03da16 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -32,6 +32,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "i915_drv.h"
@@ -98,12 +99,34 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma))
+ intel_gt_pm_get(vma->vm->gt);
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1365,7 +1388,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1385,8 +1408,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1522,8 +1551,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
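
The i915_vma.c hunks above change two related lifetimes: a GT wakeref is now held for the whole active period of non-GGTT VMAs (taken in __i915_vma_active() and dropped asynchronously in __i915_vma_retire()), and the runtime-pm wakeref in i915_vma_pin_ww() is taken unconditionally before vm->mutex because runtime resume may allocate. The sketch below shows only the second, generic ordering rule under hypothetical names (rpm_get(), rpm_put(), vm_lock); it is an illustration, not the driver's code.

#include <pthread.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-ins: on real hardware rpm_get() may sleep and
 * allocate while resuming the device, which is why it must not be
 * called with vm_lock held.
 */
static void rpm_get(void) { }
static void rpm_put(void) { }

static int pin_vma(void)
{
	rpm_get();                       /* always, and outside the mutex */
	pthread_mutex_lock(&vm_lock);
	/* ... update PTEs ... */
	pthread_mutex_unlock(&vm_lock);
	rpm_put();                       /* unconditionally paired */
	return 0;
}

int main(void)
{
	return pin_vma();
}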
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 189903b65..48cf59338 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
+#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@@ -1675,7 +1676,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@@ -1761,26 +1762,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
- fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
- fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
+ fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
- bios->legacy.i2c_indices.crt, 1, 1);
+ bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
- all_heads, 0);
+ all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
- all_heads, 1);
+ all_heads, DCB_OUTPUT_B);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
index 4bf486b57..cb05f7f48 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
+static void of_fini(void *p)
+{
+ kfree(p);
+}
+
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
- .fini = (void(*)(void *))kfree,
+ .fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index c51bac761..9fe5b6a36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
- if (refcount_inc_not_zero(&iobj->maps))
+ if (refcount_inc_not_zero(&iobj->maps)) {
+ /* read barrier matches the wmb on refcount set */
+ smp_rmb();
return iobj->map;
+ }
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
+ /* barrier to ensure the ptrs are written before refcount is set */
+ smp_wmb();
refcount_set(&iobj->maps, 1);
}
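
The smp_wmb()/smp_rmb() pair added above is the classic publish/consume pattern: write the payload (memory.ptrs) before making the refcount non-zero, and order the reader that observed the non-zero count before it dereferences the map. A userspace sketch of the same pairing using C11 release/acquire atomics follows; all names are illustrative only.

#include <stdatomic.h>
#include <stddef.h>

struct instobj {
	const void *ptrs;   /* payload that must be visible before publication */
	atomic_int maps;    /* stands in for the iobj->maps refcount */
};

static void publish_map(struct instobj *obj, const void *ptrs)
{
	obj->ptrs = ptrs;
	/* release store: like smp_wmb() before refcount_set(&iobj->maps, 1) */
	atomic_store_explicit(&obj->maps, 1, memory_order_release);
}

static const void *get_map(struct instobj *obj)
{
	/* acquire load: like smp_rmb() after refcount_inc_not_zero() */
	if (atomic_load_explicit(&obj->maps, memory_order_acquire) > 0)
		return obj->ptrs;
	return NULL;
}

int main(void)
{
	struct instobj obj = { .ptrs = NULL, .maps = 0 };
	int payload = 42;

	publish_map(&obj, &payload);
	return get_map(&obj) ? 0 : 1;
}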
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index ec228c269..b380bbb0e 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -261,8 +261,6 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
mipi_dsi_detach(ctx->dsi);
- mipi_dsi_device_unregister(ctx->dsi);
-
drm_panel_remove(&ctx->panel);
}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 368d26da0..9febc8b73 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
+ struct qxl_release *release;
+ int count = 0, sc = 0;
+ bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
+ release = container_of(fence, struct qxl_release, base);
+ have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
- if (!wait_event_timeout(qdev->release_event,
- (dma_fence_is_signaled(fence) ||
- (qxl_io_notify_oom(qdev), 0)),
- timeout))
- return 0;
+retry:
+ sc++;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ qxl_io_notify_oom(qdev);
+
+ for (count = 0; count < 11; count++) {
+ if (!qxl_queue_garbage_collect(qdev, true))
+ break;
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+ }
+
+ if (dma_fence_is_signaled(fence))
+ goto signaled;
+
+ if (have_drawable_releases || sc < 4) {
+ if (sc > 2)
+ /* back off */
+ usleep_range(500, 1000);
+
+ if (time_after(jiffies, end))
+ return 0;
+
+ if (have_drawable_releases && sc > 300) {
+ DMA_FENCE_WARN(fence,
+ "failed to wait on release %llu after spincount %d\n",
+ fence->context & ~0xf0000000, sc);
+ goto signaled;
+ }
+ goto retry;
+ }
+ /*
+ * yeah, original sync_obj_wait gave up after 3 spins when
+ * have_drawable_releases is not set.
+ */
+signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
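
The qxl_fence_wait() rewrite above replaces a single wait_event_timeout() with a retry loop: poll the fence, kick garbage collection up to 11 times, back off with a short sleep after a couple of spins, and warn after a large spin count instead of hanging silently. Below is a stripped-down, userspace-only sketch of that retry/backoff shape; fence_signaled() and collect_garbage() are hypothetical stand-ins, stubbed so the sketch terminates.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-ins, stubbed so the sketch runs to completion. */
static bool fence_signaled(void)
{
	static int polls;
	return ++polls > 5;
}

static bool collect_garbage(void) { return true; }

static int wait_with_backoff(void)
{
	int sc = 0;

	for (;;) {
		sc++;

		if (fence_signaled())
			return 0;

		/* try to make progress before sleeping */
		for (int i = 0; i < 11 && collect_garbage(); i++)
			if (fence_signaled())
				return 0;

		if (sc > 2)
			usleep(750);   /* back off, like usleep_range(500, 1000) */

		if (sc > 300) {
			fprintf(stderr, "still not signaled after %d spins\n", sc);
			return -1;
		}
	}
}

int main(void)
{
	return wait_with_backoff();
}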
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index eb0802015..7e6648b27 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1415,9 +1415,6 @@ static int vc4_prepare_fb(struct drm_plane *plane,
drm_gem_plane_helper_prepare_fb(plane, state);
- if (plane->state->fb == state->fb)
- return 0;
-
return vc4_bo_inc_usecnt(bo);
}
@@ -1426,7 +1423,7 @@ static void vc4_cleanup_fb(struct drm_plane *plane,
{
struct vc4_bo *bo;
- if (plane->state->fb == state->fb || !state->fb)
+ if (!state->fb)
return;
bo = to_vc4_bo(&drm_fb_dma_get_gem_obj(state->fb, 0)->base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9d7a1b710..53f63ad65 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -663,11 +663,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
- /* TTM currently doesn't fully support SEV encryption. */
- if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
- return -EINVAL;
-
- if (vmw_force_coherent)
+ /*
+ * When running with SEV we always want dma mappings, because
+ * otherwise ttm tt pool pages will bounce through swiotlb and run
+ * it out of available space.
+ */
+ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index aa571b75c..b1aed051b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -793,6 +793,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct vmw_private *vmw = vmw_priv(crtc->dev);
struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
@@ -800,9 +801,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
bool has_primary = new_state->plane_mask &
drm_plane_mask(crtc->primary);
- /* We always want to have an active plane with an active CRTC */
- if (has_primary != new_state->enable)
- return -EINVAL;
+ /*
+ * This is fine in general, but broken userspace might expect
+ * some actual rendering, so give a clue as to why it's blank.
+ */
+ if (new_state->enable && !has_primary)
+ drm_dbg_driver(&vmw->drm,
+ "CRTC without a primary plane will be blank.\n");
if (new_state->connector_mask != connector_mask &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index b02d27936..b116600b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -246,10 +246,10 @@ struct vmw_framebuffer_bo {
static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB1555,
};
static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 1be454baf..405d88b08 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -717,10 +717,10 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
-#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_DEVICE_ID_KYE_PENSKETCH_M912 0x5015
#define USB_DEVICE_ID_KYE_EASYPEN_M406XE 0x5019
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2 0x501A
#define USB_VENDOR_ID_LABTEC 0x1020
#define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index da903138e..dc57e9d4a 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -602,6 +602,18 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[74] = 0x08;
}
break;
+ case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+ "Genius Gila Gaming Mouse");
+ break;
+ case USB_DEVICE_ID_GENIUS_MANTICORE:
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+ "Genius Manticore Keyboard");
+ break;
+ case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
+ "Genius Gx Imperator Keyboard");
+ break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
if (*rsize == EASYPEN_I405X_RDESC_ORIG_SIZE) {
rdesc = easypen_i405x_rdesc_fixed;
@@ -638,18 +650,6 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(pensketch_m912_rdesc_fixed);
}
break;
- case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
- rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
- "Genius Gila Gaming Mouse");
- break;
- case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
- rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
- "Genius Gx Imperator Keyboard");
- break;
- case USB_DEVICE_ID_GENIUS_MANTICORE:
- rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
- "Genius Manticore Keyboard");
- break;
}
return rdesc;
}
@@ -717,26 +717,26 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
switch (id->product) {
+ case USB_DEVICE_ID_GENIUS_MANTICORE:
+ /*
+ * The manticore keyboard needs to have all the interfaces
+ * opened at least once to be fully functional.
+ */
+ if (hid_hw_open(hdev))
+ hid_hw_close(hdev);
+ break;
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
- case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
- case USB_DEVICE_ID_KYE_EASYPEN_M406XE:
case USB_DEVICE_ID_KYE_PENSKETCH_M912:
+ case USB_DEVICE_ID_KYE_EASYPEN_M406XE:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2:
ret = kye_tablet_enable(hdev);
if (ret) {
hid_err(hdev, "tablet enabling failed\n");
goto enabling_err;
}
break;
- case USB_DEVICE_ID_GENIUS_MANTICORE:
- /*
- * The manticore keyboard needs to have all the interfaces
- * opened at least once to be fully functional.
- */
- if (hid_hw_open(hdev))
- hid_hw_close(hdev);
- break;
}
return 0;
@@ -749,23 +749,23 @@ err:
static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_EASYPEN_I405X) },
+ USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ USB_DEVICE_ID_GENIUS_MANTICORE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
+ USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_EASYPEN_M610X) },
+ USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_EASYPEN_M406XE) },
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
+ USB_DEVICE_ID_KYE_PENSKETCH_M912) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_GENIUS_MANTICORE) },
+ USB_DEVICE_ID_KYE_EASYPEN_M406XE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
- USB_DEVICE_ID_KYE_PENSKETCH_M912) },
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
{ }
};
MODULE_DEVICE_TABLE(hid, kye_devices);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 08768e5ac..57697605b 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
}
break;
case REPORT_TYPE_MOUSE:
- workitem->reports_supported |= STD_MOUSE | HIDPP;
- if (djrcv_dev->type == recvr_type_mouse_only)
- workitem->reports_supported |= MULTIMEDIA;
+ workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
break;
}
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 608840663..debc49272 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -107,12 +107,12 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_1f4a), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 969f8eb08..0b05bb1e4 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -61,7 +61,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
-#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@@ -193,15 +192,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
msgs[n].len = recv_len;
msgs[n].buf = recv_buf;
n++;
-
- set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
ret = i2c_transfer(client->adapter, msgs, n);
- if (recv_len)
- clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
if (ret != n)
return ret < 0 ? ret : -EIO;
@@ -569,9 +563,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
- if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
- return IRQ_HANDLED;
-
i2c_hid_get_input(ihid);
return IRQ_HANDLED;
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index a49c6affd..dd5fc6087 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->devc = &pdev->dev;
ishtp_device_init(dev);
init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
}
dev->ops = &ish_hw_ops;
- dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
return dev;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5e3976ba5..1ebc95379 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2075,13 +2075,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
- * takes place. adap->algo->master_xfer existence isn't checked.
+ * takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
+ if (!adap->algo->master_xfer) {
+ dev_dbg(&adap->dev, "I2C level transfers not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@@ -2148,11 +2153,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
- if (!adap->algo->master_xfer) {
- dev_dbg(&adap->dev, "I2C level transfers not supported\n");
- return -EOPNOTSUPP;
- }
-
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index b7f902344..950fe2059 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@@ -1025,13 +1026,26 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
+ enum ib_cm_state old_state)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+}
+
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
+ enum ib_cm_state old_state;
struct cm_work *work;
+ int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
+ old_state = cm_id->state;
retest:
switch (cm_id->state) {
case IB_CM_LISTEN:
@@ -1135,7 +1149,14 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
+ do {
+ ret = wait_for_completion_timeout(&cm_id_priv->comp,
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+ cm_destroy_id_wait_timeout(cm_id, old_state);
+ } while (!ret);
+
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
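
The cm_destroy_id() hunk replaces an unbounded wait_for_completion() with a loop that wakes up every CM_DESTROY_ID_WAIT_TIMEOUT milliseconds to log the stuck cm_id instead of hanging silently. A generic sketch of that bounded-wait-with-diagnostics shape follows; wait_released() is a hypothetical stand-in for wait_for_completion_timeout() and is stubbed so the snippet runs.

#include <stdbool.h>
#include <stdio.h>

#define DESTROY_WAIT_TIMEOUT_MS 10000

/* Hypothetical stand-in: returns true once the object's last reference
 * is gone. The stub reports "released" on the second call.
 */
static bool wait_released(unsigned int timeout_ms)
{
	static int calls;
	(void)timeout_ms;
	return ++calls > 1;
}

static void destroy_wait(void)
{
	while (!wait_released(DESTROY_WAIT_TIMEOUT_MS))
		/* timeout expired: emit a diagnostic, then keep waiting */
		fprintf(stderr, "still waiting for id release after %d ms\n",
			DESTROY_WAIT_TIMEOUT_MS);
}

int main(void)
{
	destroy_wait();
	return 0;
}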
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9c8a7b206..e61efed32 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -188,7 +188,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
- if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
+ !mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 51daac5c4..be3ddfbf3 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -33,6 +33,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
+
+ mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 258d5fe3d..aa32371f0 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -1196,7 +1196,11 @@ static int rmi_driver_probe(struct device *dev)
}
rmi_driver_set_input_params(rmi_dev, data->input);
data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
- "%s/input0", dev_name(dev));
+ "%s/input0", dev_name(dev));
+ if (!data->input->phys) {
+ retval = -ENOMEM;
+ goto err;
+ }
}
retval = rmi_init_functions(data);
diff --git a/drivers/input/touchscreen/imagis.c b/drivers/input/touchscreen/imagis.c
index e2697e6c6..2636e1c94 100644
--- a/drivers/input/touchscreen/imagis.c
+++ b/drivers/input/touchscreen/imagis.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -23,12 +24,9 @@
#define IST3038C_I2C_RETRY_COUNT 3
#define IST3038C_MAX_FINGER_NUM 10
#define IST3038C_X_MASK GENMASK(23, 12)
-#define IST3038C_X_SHIFT 12
#define IST3038C_Y_MASK GENMASK(11, 0)
#define IST3038C_AREA_MASK GENMASK(27, 24)
-#define IST3038C_AREA_SHIFT 24
#define IST3038C_FINGER_COUNT_MASK GENMASK(15, 12)
-#define IST3038C_FINGER_COUNT_SHIFT 12
#define IST3038C_FINGER_STATUS_MASK GENMASK(9, 0)
struct imagis_ts {
@@ -92,8 +90,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
goto out;
}
- finger_count = (intr_message & IST3038C_FINGER_COUNT_MASK) >>
- IST3038C_FINGER_COUNT_SHIFT;
+ finger_count = FIELD_GET(IST3038C_FINGER_COUNT_MASK, intr_message);
if (finger_count > IST3038C_MAX_FINGER_NUM) {
dev_err(&ts->client->dev,
"finger count %d is more than maximum supported\n",
@@ -101,7 +98,7 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
goto out;
}
- finger_pressed = intr_message & IST3038C_FINGER_STATUS_MASK;
+ finger_pressed = FIELD_GET(IST3038C_FINGER_STATUS_MASK, intr_message);
for (i = 0; i < finger_count; i++) {
error = imagis_i2c_read_reg(ts,
@@ -118,12 +115,11 @@ static irqreturn_t imagis_interrupt(int irq, void *dev_id)
input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER,
finger_pressed & BIT(i));
touchscreen_report_pos(ts->input_dev, &ts->prop,
- (finger_status & IST3038C_X_MASK) >>
- IST3038C_X_SHIFT,
- finger_status & IST3038C_Y_MASK, 1);
+ FIELD_GET(IST3038C_X_MASK, finger_status),
+ FIELD_GET(IST3038C_Y_MASK, finger_status),
+ true);
input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR,
- (finger_status & IST3038C_AREA_MASK) >>
- IST3038C_AREA_SHIFT);
+ FIELD_GET(IST3038C_AREA_MASK, finger_status));
}
input_mt_sync_frame(ts->input_dev);
@@ -210,7 +206,7 @@ static int imagis_init_input_dev(struct imagis_ts *ts)
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 16, 0, 0);
touchscreen_parse_properties(input_dev, true, &ts->prop);
if (!ts->prop.max_x || !ts->prop.max_y) {
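
The imagis.c conversion above drops the hand-written *_SHIFT macros because FIELD_GET() derives the shift from the mask itself. The snippet below is a plain C rendering of that idea for the three masks used here, shown for illustration only; GENMASK and FIELD_GET are redefined locally (using the GCC/Clang __builtin_ctz builtin) rather than taken from <linux/bitfield.h>.

#include <stdint.h>
#include <stdio.h>

/* Local, illustration-only versions of the kernel helpers. */
#define GENMASK(h, l)        ((~0u << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

#define X_MASK    GENMASK(23, 12)
#define Y_MASK    GENMASK(11, 0)
#define AREA_MASK GENMASK(27, 24)

int main(void)
{
	uint32_t finger_status = 0x05123456;  /* example register value */

	/* The shifts (12, 0, 24) now come from the masks, not separate macros. */
	printf("x=%u y=%u area=%u\n",
	       FIELD_GET(X_MASK, finger_status),
	       FIELD_GET(Y_MASK, finger_status),
	       FIELD_GET(AREA_MASK, finger_status));
	return 0;
}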
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 03b253589..cb862ab96 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -71,7 +71,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
struct page *pages;
int irq, ret;
- pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+ pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4d03fb3a8..f9ab5cfc9 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4535,13 +4535,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
set_bit(i, bitmap);
}
- if (err) {
- if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i);
-
- its_lpi_free(bitmap, base, nr_ids);
- its_free_prop_table(vprop_page);
- }
+ if (err)
+ its_vpe_irq_domain_free(domain, virq, i);
return err;
}
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 4bc2a7050..c761ac35e 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1121,20 +1121,6 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (valid_la && min_len) {
/* These messages have special length requirements */
switch (cmd) {
- case CEC_MSG_TIMER_STATUS:
- if (msg->msg[2] & 0x10) {
- switch (msg->msg[2] & 0xf) {
- case CEC_OP_PROG_INFO_NOT_ENOUGH_SPACE:
- case CEC_OP_PROG_INFO_MIGHT_NOT_BE_ENOUGH_SPACE:
- if (msg->len < 5)
- valid_la = false;
- break;
- }
- } else if ((msg->msg[2] & 0xf) == CEC_OP_PROG_ERROR_DUPLICATE) {
- if (msg->len < 5)
- valid_la = false;
- }
- break;
case CEC_MSG_RECORD_ON:
switch (msg->msg[2]) {
case CEC_OP_RECORD_SRC_OWN:
diff --git a/drivers/media/pci/sta2x11/sta2x11_vip.c b/drivers/media/pci/sta2x11/sta2x11_vip.c
index 8535e49a4..1f7ab56de 100644
--- a/drivers/media/pci/sta2x11/sta2x11_vip.c
+++ b/drivers/media/pci/sta2x11/sta2x11_vip.c
@@ -756,7 +756,7 @@ static const struct video_device video_dev_template = {
/**
* vip_irq - interrupt routine
* @irq: Number of interrupt ( not used, correct number is assumed )
- * @vip: local data structure containing all information
+ * @data: local data structure containing all information
*
* check for both frame interrupts set ( top and bottom ).
* check FIFO overflow, but limit number of log messages after open.
@@ -766,8 +766,9 @@ static const struct video_device video_dev_template = {
*
* IRQ_HANDLED, interrupt done.
*/
-static irqreturn_t vip_irq(int irq, struct sta2x11_vip *vip)
+static irqreturn_t vip_irq(int irq, void *data)
{
+ struct sta2x11_vip *vip = data;
unsigned int status;
status = reg_read(vip, DVP_ITS);
@@ -1049,9 +1050,7 @@ static int sta2x11_vip_init_one(struct pci_dev *pdev,
spin_lock_init(&vip->slock);
- ret = request_irq(pdev->irq,
- (irq_handler_t) vip_irq,
- IRQF_SHARED, KBUILD_MODNAME, vip);
+ ret = request_irq(pdev->irq, vip_irq, IRQF_SHARED, KBUILD_MODNAME, vip);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
ret = -ENODEV;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index f8219cbd2..a617f64a3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -116,7 +116,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)},
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
index f50d22882..a0ad1f3a6 100644
--- a/drivers/misc/vmw_vmci/vmci_datagram.c
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -234,7 +234,8 @@ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
dg_info->in_dg_host_queue = true;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, dg_size);
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
@@ -377,7 +378,8 @@ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
dg_info->in_dg_host_queue = false;
dg_info->entry = dst_entry;
- memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+ dg_info->msg = *dg;
+ memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size);
INIT_WORK(&dg_info->work, dg_delayed_dispatch);
schedule_work(&dg_info->work);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index a5ab2af3e..e37fb2557 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2831,6 +2831,11 @@ static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = true;
+ spin_unlock_irqrestore(&host->lock, flags);
/* Drop the performance vote */
dev_pm_opp_set_rate(dev, 0);
@@ -2845,6 +2850,7 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ unsigned long flags;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
@@ -2863,7 +2869,15 @@ static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
dev_pm_opp_set_rate(dev, msm_host->clk_rate);
- return sdhci_msm_ice_resume(msm_host);
+ ret = sdhci_msm_ice_resume(msm_host);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->runtime_suspended = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
}
static const struct dev_pm_ops sdhci_msm_pm_ops = {
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 5d2ddb037..2068025d5 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
- 0xffffffff };
+};
static struct mtd_info *doclist = NULL;
@@ -1552,7 +1552,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
- for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+ for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 07065c1af..1aba0cf38 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -419,6 +419,20 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}
+/* If port 6 is available as a CPU port, always prefer that as the default,
+ * otherwise don't care.
+ */
+static struct dsa_port *
+mt753x_preferred_default_local_cpu_port(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp = dsa_to_port(ds, 6);
+
+ if (dsa_port_is_cpu(cpu_dp))
+ return cpu_dp;
+
+ return NULL;
+}
+
/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
@@ -998,20 +1012,173 @@ unlock_exit:
mutex_unlock(&priv->reg_mutex);
}
-/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
- * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
- * must only be propagated to C-VLAN and MAC Bridge components. That means
- * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
- * these frames are supposed to be processed by the CPU (software). So we make
- * the switch only forward them to the CPU port. And if received from a CPU
- * port, forward to a single port. The software is responsible of making the
- * switch conform to the latter by setting a single port as destination port on
- * the special tag.
+/* In Clause 5 of IEEE Std 802-2014, two sublayers of the data link layer (DLL)
+ * of the Open Systems Interconnection basic reference model (OSI/RM) are
+ * described; the medium access control (MAC) and logical link control (LLC)
+ * sublayers. The MAC sublayer is the one facing the physical layer.
+ *
+ * In 8.2 of IEEE Std 802.1Q-2022, the Bridge architecture is described. A
+ * Bridge component comprises a MAC Relay Entity for interconnecting the Ports
+ * of the Bridge, at least two Ports, and higher layer entities with at least a
+ * Spanning Tree Protocol Entity included.
+ *
+ * Each Bridge Port also functions as an end station and shall provide the MAC
+ * Service to an LLC Entity. Each instance of the MAC Service is provided to a
+ * distinct LLC Entity that supports protocol identification, multiplexing, and
+ * demultiplexing, for protocol data unit (PDU) transmission and reception by
+ * one or more higher layer entities.
+ *
+ * It is described in 8.13.9 of IEEE Std 802.1Q-2022 that in a Bridge, the LLC
+ * Entity associated with each Bridge Port is modeled as being directly
+ * connected to the attached Local Area Network (LAN).
+ *
+ * On the switch with CPU port architecture, CPU port functions as Management
+ * Port, and the Management Port functionality is provided by software which
+ * functions as an end station. Software is connected to an IEEE 802 LAN that is
+ * wholly contained within the system that incorporates the Bridge. Software
+ * provides access to the LLC Entity associated with each Bridge Port by the
+ * value of the source port field on the special tag on the frame received by
+ * software.
+ *
+ * We call frames that carry control information to determine the active
+ * topology and current extent of each Virtual Local Area Network (VLAN), i.e.,
+ * spanning tree or Shortest Path Bridging (SPB) and Multiple VLAN Registration
+ * Protocol Data Units (MVRPDUs), and frames from other link constrained
+ * protocols, such as Extensible Authentication Protocol over LAN (EAPOL) and
+ * Link Layer Discovery Protocol (LLDP), link-local frames. They are not
+ * forwarded by a Bridge. Permanently configured entries in the filtering
+ * database (FDB) ensure that such frames are discarded by the Forwarding
+ * Process. In 8.6.3 of IEEE Std 802.1Q-2022, this is described in detail:
+ *
+ * Each of the reserved MAC addresses specified in Table 8-1
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]) shall be
+ * permanently configured in the FDB in C-VLAN components and ERs.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-2
+ * (01-80-C2-00-00-[01,02,03,04,05,06,07,08,09,0A,0E]) shall be permanently
+ * configured in the FDB in S-VLAN components.
+ *
+ * Each of the reserved MAC addresses specified in Table 8-3
+ * (01-80-C2-00-00-[01,02,04,0E]) shall be permanently configured in the FDB in
+ * TPMR components.
+ *
+ * The FDB entries for reserved MAC addresses shall specify filtering for all
+ * Bridge Ports and all VIDs. Management shall not provide the capability to
+ * modify or remove entries for reserved MAC addresses.
+ *
+ * The addresses in Table 8-1, Table 8-2, and Table 8-3 determine the scope of
+ * propagation of PDUs within a Bridged Network, as follows:
+ *
+ * The Nearest Bridge group address (01-80-C2-00-00-0E) is an address that no
+ * conformant Two-Port MAC Relay (TPMR) component, Service VLAN (S-VLAN)
+ * component, Customer VLAN (C-VLAN) component, or MAC Bridge can forward.
+ * PDUs transmitted using this destination address, or any other addresses
+ * that appear in Table 8-1, Table 8-2, and Table 8-3
+ * (01-80-C2-00-00-[00,01,02,03,04,05,06,07,08,09,0A,0B,0C,0D,0E,0F]), can
+ * therefore travel no further than those stations that can be reached via a
+ * single individual LAN from the originating station.
+ *
+ * The Nearest non-TPMR Bridge group address (01-80-C2-00-00-03), is an
+ * address that no conformant S-VLAN component, C-VLAN component, or MAC
+ * Bridge can forward; however, this address is relayed by a TPMR component.
+ * PDUs using this destination address, or any of the other addresses that
+ * appear in both Table 8-1 and Table 8-2 but not in Table 8-3
+ * (01-80-C2-00-00-[00,03,05,06,07,08,09,0A,0B,0C,0D,0F]), will be relayed by
+ * any TPMRs but will propagate no further than the nearest S-VLAN component,
+ * C-VLAN component, or MAC Bridge.
+ *
+ * The Nearest Customer Bridge group address (01-80-C2-00-00-00) is an address
+ * that no conformant C-VLAN component, MAC Bridge can forward; however, it is
+ * relayed by TPMR components and S-VLAN components. PDUs using this
+ * destination address, or any of the other addresses that appear in Table 8-1
+ * but not in either Table 8-2 or Table 8-3 (01-80-C2-00-00-[00,0B,0C,0D,0F]),
+ * will be relayed by TPMR components and S-VLAN components but will propagate
+ * no further than the nearest C-VLAN component or MAC Bridge.
+ *
+ * Because the LLC Entity associated with each Bridge Port is provided via CPU
+ * port, we must not filter these frames but forward them to CPU port.
+ *
+ * In a Bridge, the transmission Port is largely decided by ingress and egress
+ * rules, FDB, and spanning tree Port State functions of the Forwarding Process.
+ * For link-local frames, only CPU port should be designated as destination port
+ * in the FDB, and the other functions of the Forwarding Process must not
+ * interfere with the decision of the transmission Port. We call this process
+ * trapping frames to CPU port.
+ *
+ * Therefore, on the switch with CPU port architecture, link-local frames must
+ * be trapped to CPU port, and certain link-local frames received by a Port of a
+ * Bridge comprising a TPMR component or an S-VLAN component must be excluded
+ * from it.
+ *
+ * A Bridge of the switch with CPU port architecture cannot comprise a Two-Port
+ * MAC Relay (TPMR) component as a TPMR component supports only a subset of the
+ * functionality of a MAC Bridge. A Bridge comprising two Ports (Management Port
+ * doesn't count) of this architecture will either function as a standard MAC
+ * Bridge or a standard VLAN Bridge.
+ *
+ * Therefore, a Bridge of this architecture can only comprise S-VLAN components,
+ * C-VLAN components, or MAC Bridge components. Since there's no TPMR component,
+ * we don't need to relay PDUs using the destination addresses specified in the
+ * Nearest non-TPMR section, and the portion of the Nearest Customer Bridge
+ * section where they must be relayed by TPMR components.
+ *
+ * One option to trap link-local frames to CPU port is to add static FDB entries
+ * with CPU port designated as destination port. However, because
+ * Independent VLAN Learning (IVL) is being used on every VID, each entry only
+ * applies to a single VLAN Identifier (VID). For a Bridge comprising a MAC
+ * Bridge component or a C-VLAN component, there would have to be 16 times 4096
+ * entries. This switch intellectual property can only hold a maximum of 2048
+ * entries. Using this option, there also isn't a mechanism to prevent
+ * link-local frames from being discarded when the spanning tree Port State of
+ * the reception Port is discarding.
+ *
+ * The remaining option is to utilise the BPC, RGAC1, RGAC2, RGAC3, and RGAC4
+ * registers. Whilst this applies to every VID, it doesn't contain all of the
+ * reserved MAC addresses without affecting the remaining Standard Group MAC
+ * Addresses. The REV_UN frame tag utilised using the RGAC4 register covers the
+ * remaining 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F] destination
+ * addresses. It also includes the 01-80-C2-00-00-22 to 01-80-C2-00-00-FF
+ * destination addresses which may be relayed by MAC Bridges or VLAN Bridges.
+ * The latter option provides better but not complete conformance.
*
- * This switch intellectual property cannot conform to this part of the standard
- * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
- * DAs, it also includes :22-FF which the scope of propagation is not supposed
- * to be restricted for these MAC DAs.
+ * This switch intellectual property also does not provide a mechanism to trap
+ * link-local frames with specific destination addresses to CPU port by Bridge,
+ * to conform to the filtering rules for the distinct Bridge components.
+ *
+ * Therefore, regardless of the type of the Bridge component, link-local frames
+ * with these destination addresses will be trapped to CPU port:
+ *
+ * 01-80-C2-00-00-[00,01,02,03,0E]
+ *
+ * In a Bridge comprising a MAC Bridge component or a C-VLAN component:
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A,0B,0C,0D,0F]
+ *
+ * In a Bridge comprising an S-VLAN component:
+ *
+ * Link-local frames with these destination addresses will be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-00
+ *
+ * Link-local frames with these destination addresses won't be trapped to CPU
+ * port which won't conform to IEEE Std 802.1Q-2022:
+ *
+ * 01-80-C2-00-00-[04,05,06,07,08,09,0A]
+ *
+ * To trap link-local frames to CPU port as conformantly as this switch
+ * intellectual property allows, link-local frames are made to be regarded as
+ * Bridge Protocol Data Units (BPDUs). This is because this switch intellectual
+ * property only lets the frames regarded as BPDUs bypass the spanning tree Port
+ * State function of the Forwarding Process.
+ *
+ * The only remaining interference is the ingress rules. When the reception Port
+ * has no PVID assigned on software, VLAN-untagged frames won't be allowed in.
+ * There doesn't seem to be a mechanism on the switch intellectual property to
+ * have link-local frames bypass this function of the Forwarding Process.
*/
static void
mt753x_trap_frames(struct mt7530_priv *priv)
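
The comment block above enumerates which of the reserved group addresses 01-80-C2-00-00-00 through 01-80-C2-00-00-0F the hardware can and cannot trap per Bridge component. As a purely illustrative aid (not driver code), this is what membership in that reserved range looks like when spelled out:

#include <stdbool.h>
#include <stdint.h>

/* Returns true for the sixteen link-local group addresses
 * 01-80-C2-00-00-00 .. 01-80-C2-00-00-0F discussed above.
 */
static bool is_reserved_group_da(const uint8_t mac[6])
{
	static const uint8_t prefix[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	for (int i = 0; i < 5; i++)
		if (mac[i] != prefix[i])
			return false;

	return mac[5] <= 0x0f;   /* last octet selects the group address */
}

int main(void)
{
	const uint8_t stp_da[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };

	return is_reserved_group_da(stp_da) ? 0 : 1;
}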
@@ -1019,35 +1186,43 @@ mt753x_trap_frames(struct mt7530_priv *priv)
/* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
* VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_BPC,
+ MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_BPDU_FR |
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
- MT753X_R01_PORT_FW_MASK,
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC1,
+ MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_BPDU_FR |
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_BPDU_FR |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
- MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ mt7530_rmw(priv, MT753X_RGAC2,
+ MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_BPDU_FR |
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_BPDU_FR |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
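
The mt7530_rmw() calls above pack several fields into one register write, clearing the full field masks and then setting the new values in a single read-modify-write. The following minimal userspace C sketch illustrates that composition only; the bit positions mirror the MT753X_BPC layout added later in this patch, while the helper macros, the initial register value and the field values marked "placeholder" are illustrative and not the driver's actual constants.

#include <stdint.h>
#include <stdio.h>

/* Rough stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define SK_GENMASK(h, l)	((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l)))
#define SK_FIELD_PREP(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))

/* Field layout mirroring the MT753X_BPC hunk in mt7530.h below. */
#define PAE_BPDU_FR		(1u << 25)
#define PAE_EG_TAG_MASK		SK_GENMASK(24, 22)
#define PAE_PORT_FW_MASK	SK_GENMASK(18, 16)

/* Read-modify-write: clear the bits covered by @mask, then set @val. */
static uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t bpc = 0xffffffff;	/* placeholder initial register value */

	bpc = rmw(bpc, PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK,
		  PAE_BPDU_FR |
		  SK_FIELD_PREP(PAE_EG_TAG_MASK, 4) |	/* placeholder: egress untagged */
		  SK_FIELD_PREP(PAE_PORT_FW_MASK, 4));	/* placeholder: trap to CPU only */

	printf("BPC after rmw: 0x%08x\n", (unsigned int)bpc);
	return 0;
}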
static int
@@ -1075,6 +1250,13 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
if (priv->id == ID_MT7530 || priv->id == ID_MT7621)
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+ /* Add the CPU port to the CPU port bitmap for MT7531. Trapped frames
+ * will be forwarded to the CPU port that is affine to the inbound user
+ * port.
+ */
+ if (priv->id == ID_MT7531)
+ mt7530_set(priv, MT7531_CFC, MT7531_CPU_PMAP(BIT(port)));
+
/* CPU port gets connected to all user ports of
* the switch.
*/
@@ -2252,8 +2434,6 @@ mt7530_setup(struct dsa_switch *ds)
SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
SYS_CTRL_REG_RST);
- mt7530_pll_setup(priv);
-
/* Lower Tx driving for TRGMII path */
for (i = 0; i < NUM_TRGMII_CTRL; i++)
mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
@@ -2271,6 +2451,9 @@ mt7530_setup(struct dsa_switch *ds)
priv->p6_interface = PHY_INTERFACE_MODE_NA;
+ if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ mt7530_pll_setup(priv);
+
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2300,6 +2483,9 @@ mt7530_setup(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
if (ret)
@@ -2370,16 +2556,8 @@ static int
mt7531_setup_common(struct dsa_switch *ds)
{
struct mt7530_priv *priv = ds->priv;
- struct dsa_port *cpu_dp;
int ret, i;
- /* BPDU to CPU port */
- dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
- BIT(cpu_dp->index));
- break;
- }
-
mt753x_trap_frames(priv);
/* Enable and reset MIB counters */
@@ -2416,6 +2594,9 @@ mt7531_setup_common(struct dsa_switch *ds)
PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
}
+ /* Allow mirroring frames received on the local port (monitor port). */
+ mt7530_set(priv, MT753X_AGC, LOCAL_EN);
+
/* Flush the FDB table */
ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
if (ret < 0)
@@ -2494,18 +2675,25 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_interface = PHY_INTERFACE_MODE_NA;
priv->p6_interface = PHY_INTERFACE_MODE_NA;
- /* Enable PHY core PLL, since phy_device has not yet been created
- * provided for phy_[read,write]_mmd_indirect is called, we provide
- * our own mt7531_ind_mmd_phy_[read,write] to complete this
- * function.
+ /* Enable Energy-Efficient Ethernet (EEE) and PHY core PLL. Since the
+ * phy_device has not yet been created and phy_[read,write]_mmd_indirect
+ * cannot be called yet, we provide our own mt7531_ind_mmd_phy_[read,write]
+ * to complete this function.
*/
val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
- val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
CORE_PLL_GROUP4, val);
+ /* Disable EEE advertisement on the switch PHYs. */
+ for (i = MT753X_CTRL_PHY_ADDR;
+ i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
+ 0);
+ }
+
mt7531_setup_common(ds);
/* Setup VLAN ID 0 for VLAN-unaware bridges */
@@ -3239,6 +3427,7 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
.setup = mt753x_setup,
+ .preferred_default_local_cpu_port = mt753x_preferred_default_local_cpu_port,
.get_strings = mt7530_get_strings,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index fa2afa67c..6441e8d7f 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -31,6 +31,10 @@ enum mt753x_id {
#define SYSC_REG_RSTCTRL 0x34
#define RESET_MCM BIT(2)
+/* Register for ARL global control */
+#define MT753X_AGC 0xc
+#define LOCAL_EN BIT(7)
+
/* Registers to mac forward control for unknown frames */
#define MT7530_MFC 0x10
#define BC_FFP(x) (((x) & 0xff) << 24)
@@ -53,6 +57,7 @@ enum mt753x_id {
#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
MT7531_CFC : MT7530_MFC)
@@ -63,6 +68,7 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
+#define MT753X_PAE_BPDU_FR BIT(25)
#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
@@ -73,20 +79,24 @@ enum mt753x_id {
/* Register for :01 and :02 MAC DA frame control */
#define MT753X_RGAC1 0x28
+#define MT753X_R02_BPDU_FR BIT(25)
#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_BPDU_FR BIT(9)
#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_BPDU_FR BIT(25)
#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_BPDU_FR BIT(9)
#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
@@ -663,6 +673,7 @@ enum mt7531_clk_skew {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_RG_SYSPLL_DMY2 BIT(6)
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 633b321d7..4db689372 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -362,7 +362,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
- size = io_sq->bounce_buf_ctrl.buffer_size *
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);
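
The (size_t) cast above matters because multiplying two 32-bit operands wraps before the result is widened for the allocation size. A small standalone C sketch of the pattern; the variable names and values are illustrative, not taken from the ENA driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buffer_size = 1u << 20;	/* illustrative values only */
	uint32_t buffers_num = 8192;

	/* The 32-bit multiply wraps before the assignment widens it... */
	size_t bad = buffer_size * buffers_num;
	/* ...so promote one operand first and the product is computed in the
	 * wider size_t type (64-bit on LP64 systems).
	 */
	size_t good = (size_t)buffer_size * buffers_num;

	printf("wrapped: %zu, promoted: %zu\n", bad, good);
	return 0;
}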
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 9e82e7b9c..5e37b18ac 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1203,8 +1203,11 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
bool print_once = true;
+ bool is_xdp_ring;
u32 i;
+ is_xdp_ring = ENA_IS_XDP_INDEX(tx_ring->adapter, tx_ring->qid);
+
for (i = 0; i < tx_ring->ring_size; i++) {
struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
@@ -1224,10 +1227,15 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
ena_unmap_tx_buff(tx_ring, tx_info);
- dev_kfree_skb_any(tx_info->skb);
+ if (is_xdp_ring)
+ xdp_return_frame(tx_info->xdpf);
+ else
+ dev_kfree_skb_any(tx_info->skb);
}
- netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
- tx_ring->qid));
+
+ if (!is_xdp_ring)
+ netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
@@ -3797,10 +3805,11 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
{
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
- int i, budget, rc;
+ int qid, budget, rc;
int io_queue_count;
io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
+
/* Make sure the driver doesn't turn the device in other process */
smp_rmb();
@@ -3813,27 +3822,29 @@ static void check_for_missing_completions(struct ena_adapter *adapter)
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
- budget = ENA_MONITORED_TX_QUEUES;
+ budget = min_t(u32, io_queue_count, ENA_MONITORED_TX_QUEUES);
- for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
- tx_ring = &adapter->tx_ring[i];
- rx_ring = &adapter->rx_ring[i];
+ qid = adapter->last_monitored_tx_qid;
+
+ while (budget) {
+ qid = (qid + 1) % io_queue_count;
+
+ tx_ring = &adapter->tx_ring[qid];
+ rx_ring = &adapter->rx_ring[qid];
rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
if (unlikely(rc))
return;
- rc = !ENA_IS_XDP_INDEX(adapter, i) ?
+ rc = !ENA_IS_XDP_INDEX(adapter, qid) ?
check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
if (unlikely(rc))
return;
budget--;
- if (!budget)
- break;
}
- adapter->last_monitored_tx_qid = i % io_queue_count;
+ adapter->last_monitored_tx_qid = qid;
}
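
The rewritten loop above walks the queues round-robin with a budget capped at the queue count, so queues beyond the saved start index are no longer skipped when the scan restarts. A compact sketch of that pattern, with illustrative names only:

#include <stdio.h>

/* Visit at most min(budget, queue_count) queues, resuming after the queue
 * inspected last time and wrapping around so every queue eventually gets
 * a turn.
 */
static void scan_round_robin(int *last_qid, int queue_count, int budget)
{
	int qid = *last_qid;

	if (budget > queue_count)
		budget = queue_count;

	while (budget--) {
		qid = (qid + 1) % queue_count;
		printf("checking queue %d\n", qid);	/* stand-in for the real check */
	}

	*last_qid = qid;
}

int main(void)
{
	int last = 0;

	scan_round_robin(&last, 4, 8);	/* budget larger than the queue count */
	scan_round_robin(&last, 4, 2);	/* resumes after the last queue checked */
	return 0;
}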
/* trigger napi schedule after 2 consecutive detections */
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 7f8767215..5b6209f5a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2033,12 +2033,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
- if (bp->flags & B44_FLAG_PAUSE_AUTO) {
- b44_halt(bp);
- b44_init_rings(bp);
- b44_init_hw(bp, B44_FULL_RESET);
- } else {
- __b44_set_flow_ctrl(bp, bp->flags);
+ if (netif_running(dev)) {
+ if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+ b44_halt(bp);
+ b44_init_rings(bp);
+ b44_init_hw(bp, B44_FULL_RESET);
+ } else {
+ __b44_set_flow_ctrl(bp, bp->flags);
+ }
}
spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4950fde82..b04c5b51e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -147,10 +147,11 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
- phy_fw_ver, PHY_FW_VER_LEN);
- strscpy(buf, bp->fw_ver, buf_len);
- snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
- "bc %d.%d.%d%s%s",
+ phy_fw_ver, sizeof(phy_fw_ver));
+ /* This may become truncated. */
+ scnprintf(buf, buf_len,
+ "%sbc %d.%d.%d%s%s",
+ bp->fw_ver,
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
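
Folding the strscpy()+snprintf() pair into one bounded format call removes the hand-computed remaining length that could run past the destination. A userspace approximation of the same idea; the version strings and numbers are made up for the example:

#include <stdio.h>

int main(void)
{
	char buf[32];
	const char *fw_ver = "1.713.36-0";	/* made-up versions */
	unsigned int bc_ver = 0x070d15;
	int n;

	/* One bounded call: the output may be truncated, but the buffer can
	 * never be overrun and no remaining length is tracked by hand.
	 */
	n = snprintf(buf, sizeof(buf), "%sbc %u.%u.%u", fw_ver,
		     (bc_ver & 0xff0000) >> 16, (bc_ver & 0xff00) >> 8,
		     bc_ver & 0xff);
	if (n >= (int)sizeof(buf))
		puts("version string was truncated");
	puts(buf);
	return 0;
}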
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bda3ccc28..f920976c3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1132,7 +1132,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
}
memset(version, 0, sizeof(version));
- bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ bnx2x_fill_fw_str(bp, version, sizeof(version));
strlcat(info->fw_version, version, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 02808513f..ea310057f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6163,8 +6163,8 @@ static void bnx2x_link_int_ack(struct link_params *params,
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
- str[0] = '\0';
- (*len)--;
+ if (*len)
+ str[0] = '\0';
return 0;
}
@@ -6173,7 +6173,7 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
u16 ret;
if (*len < 10) {
- /* Need more than 10chars for this format */
+ /* Need more than 10 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6188,8 +6188,8 @@ static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
{
u16 ret;
- if (*len < 10) {
- /* Need more than 10chars for this format */
+ if (*len < 9) {
+ /* Need more than 9 chars for this format */
bnx2x_null_format_ver(num, str, len);
return -EINVAL;
}
@@ -6208,7 +6208,7 @@ int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
int status = 0;
u8 *ver_p = version;
u16 remain_len = len;
- if (version == NULL || params == NULL)
+ if (version == NULL || params == NULL || len == 0)
return -EINVAL;
bp = params->bp;
@@ -11546,7 +11546,7 @@ static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
str[2] = (spirom_ver & 0xFF0000) >> 16;
str[3] = (spirom_ver & 0xFF000000) >> 24;
str[4] = '\0';
- *len -= 5;
+ *len -= 4;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f810b5dc2..77ea19bcd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1697,7 +1697,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1707,7 +1707,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
@@ -1723,7 +1723,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1734,7 +1734,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->sw_stats.rx.rx_oom_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -1950,11 +1950,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
cp_cons, agg_bufs,
false);
- if (!frag_len) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!frag_len)
+ goto oom_next_rx;
}
xdp_active = true;
}
@@ -1977,9 +1974,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
bnxt_xdp_buff_frags_free(rxr, &xdp);
}
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
} else {
u32 payload;
@@ -1990,29 +1985,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
payload = 0;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
payload | len);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
}
if (agg_bufs) {
if (!xdp_active) {
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
- if (!skb) {
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
- }
+ if (!skb)
+ goto oom_next_rx;
} else {
skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
if (!skb) {
/* we should be able to free the old skb here */
bnxt_xdp_buff_frags_free(rxr, &xdp);
- cpr->sw_stats.rx.rx_oom_discards += 1;
- rc = -ENOMEM;
- goto next_rx;
+ goto oom_next_rx;
}
}
}
@@ -2090,6 +2077,11 @@ next_rx_no_prod_no_len:
*raw_cons = tmp_raw_cons;
return rc;
+
+oom_next_rx:
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ rc = -ENOMEM;
+ goto next_rx;
}
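
The new oom_next_rx label above replaces several copies of the same three-line error path. The shape of that cleanup, in a generic standalone form with illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool alloc_first_buffer(void)  { return true; }	/* stand-ins */
static bool alloc_second_buffer(void) { return false; }
static void count_oom_discard(void)   { puts("rx_oom_discards++"); }

/* Every allocation failure funnels into one label, so the drop counter and
 * the error code are maintained in exactly one place.
 */
static int consume_packet(void)
{
	if (!alloc_first_buffer())
		goto oom;
	if (!alloc_second_buffer())
		goto oom;

	/* ... normal completion handling ... */
	return 0;

oom:
	count_oom_discard();
	return -ENOMEM;
}

int main(void)
{
	printf("consume_packet() = %d\n", consume_packet());
	return 0;
}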
/* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2135,7 +2127,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
return rc;
}
@@ -10564,6 +10556,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
+ if (bp->ptp_cfg)
+ atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
return 0;
@@ -11810,6 +11804,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_rtnl_unlock_sp(bp);
}
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(bp->pdev);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -11823,12 +11827,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
if (val == 0xffff)
bp->fw_reset_min_dsecs = 0;
- bnxt_tx_disable(bp);
- bnxt_disable_napi(bp);
- bnxt_disable_int_sync(bp);
- bnxt_free_irq(bp);
- bnxt_clear_int_mode(bp);
- pci_disable_device(bp->pdev);
+ bnxt_fw_fatal_close(bp);
}
__bnxt_close_nic(bp, true, false);
bnxt_vf_reps_free(bp);
@@ -13976,6 +13975,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
+ bool abort = false;
netdev_info(netdev, "PCI I/O error detected\n");
@@ -13984,16 +13984,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
bnxt_ulp_stop(bp);
- if (state == pci_channel_io_perm_failure) {
+ if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ netdev_err(bp->dev, "Firmware reset already in progress\n");
+ abort = true;
+ }
+
+ if (abort || state == pci_channel_io_perm_failure) {
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
- if (state == pci_channel_io_frozen)
+ /* Link is not reliable anymore if state is pci_channel_io_frozen
+ * so we disable bus master to prevent any potential bad DMAs before
+ * freeing kernel memory.
+ */
+ if (state == pci_channel_io_frozen) {
set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+ bnxt_fw_fatal_close(bp);
+ }
if (netif_running(netdev))
- bnxt_close(netdev);
+ __bnxt_close_nic(bp, true, true);
if (pci_is_enabled(pdev))
pci_disable_device(pdev);
@@ -14079,6 +14090,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
}
reset_exit:
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_clear_reservations(bp, true);
rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a9db1ed74..9efd4b962 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16173,8 +16173,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
if (val < MAX_FRAME_SIZE_DEFAULT)
- dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
- pf->hw.port, val);
+ dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+ pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
/* Add a filter to drop all Flow control frames from any VSI from being
* transmitted. By doing so we stop a malicious VF from sending out
@@ -16716,7 +16716,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
- i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+ i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index b9c4b311c..53b9fe35d 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3632,6 +3632,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
}
/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+ struct tc_mqprio_qopt *mqprio_qopt)
+{
+ struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+ int i;
+
+ if (adapter->num_tc != mqprio_qopt->num_tc)
+ return false;
+
+ for (i = 0; i < adapter->num_tc; i++) {
+ if (ch[i].count != mqprio_qopt->count[i] ||
+ ch[i].offset != mqprio_qopt->offset[i])
+ return false;
+ }
+ return true;
+}
+
+/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type_data: tc offload data
@@ -3688,7 +3716,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
- if (adapter->num_tc == num_tc)
+ if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 652ef09ee..ec6628aac 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -663,7 +663,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
int ret;
int i;
- if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 4b71392f6..e64bef490 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -493,7 +493,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
- vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
+ vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID;
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
@@ -539,12 +539,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
*/
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
-
- vsi = ice_find_vsi(pf, vsi_id);
-
- return (vsi && (vsi->vf == vf));
+ return vsi_id == ICE_VF_VSI_ID;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
index b5a3fd8ad..6073d3b2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h
@@ -18,6 +18,15 @@
*/
#define ICE_MAX_MACADDR_PER_VF 18
+/* VFs only get a single VSI. For ice hardware, the VF does not need to know
+ * its VSI index. However, the virtchnl interface requires a VSI number,
+ * mainly due to legacy hardware.
+ *
+ * Since the VF doesn't need this information, report a static value to the VF
+ * instead of leaking any information about the PF or hardware setup.
+ */
+#define ICE_VF_VSI_ID 1
+
struct ice_virtchnl_ops {
int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index bb99302ea..67080d505 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4237,18 +4237,18 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
*/
rvu_write64(rvu, blkaddr, NIX_AF_CFG,
rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x40ULL);
+ }
- /* Set chan/link to backpressure TL3 instead of TL2 */
- rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
+ /* Set chan/link to backpressure TL3 instead of TL2 */
+ rvu_write64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL, 0x01);
- /* Disable SQ manager's sticky mode operation (set TM6 = 0)
- * This sticky mode is known to cause SQ stalls when multiple
- * SQs are mapped to same SMQ and transmitting pkts at a time.
- */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
- cfg &= ~BIT_ULL(15);
- rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
- }
+ /* Disable SQ manager's sticky mode operation (set TM6 = 0)
+ * This sticky mode is known to cause SQ stalls when multiple
+ * SQs are mapped to same SMQ and transmitting pkts at a time.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS);
+ cfg &= ~BIT_ULL(15);
+ rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 2842195ee..1e887d640 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -82,24 +82,25 @@ int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
txq_ix = mlx5e_qid_from_qos(chs, node_qid);
- WARN_ON(node_qid > priv->htb_max_qos_sqs);
- if (node_qid == priv->htb_max_qos_sqs) {
- struct mlx5e_sq_stats *stats, **stats_list = NULL;
-
- if (priv->htb_max_qos_sqs == 0) {
- stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
- sizeof(*stats_list),
- GFP_KERNEL);
- if (!stats_list)
- return -ENOMEM;
- }
+ WARN_ON(node_qid >= mlx5e_htb_cur_leaf_nodes(priv->htb));
+ if (!priv->htb_qos_sq_stats) {
+ struct mlx5e_sq_stats **stats_list;
+
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list), GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+
+ WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+ }
+
+ if (!priv->htb_qos_sq_stats[node_qid]) {
+ struct mlx5e_sq_stats *stats;
+
stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats) {
- kvfree(stats_list);
+ if (!stats)
return -ENOMEM;
- }
- if (stats_list)
- WRITE_ONCE(priv->htb_qos_sq_stats, stats_list);
+
WRITE_ONCE(priv->htb_qos_sq_stats[node_qid], stats);
/* Order htb_max_qos_sqs increment after writing the array pointer.
* Pairs with smp_load_acquire in en_stats.c.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
index f675b1926..f66bbc846 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/selq.c
@@ -57,6 +57,7 @@ int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
+ mutex_lock(selq->state_lock);
WARN_ON_ONCE(selq->is_prepared);
kvfree(selq->standby);
@@ -67,6 +68,7 @@ void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
kvfree(selq->standby);
selq->standby = NULL;
+ mutex_unlock(selq->state_lock);
}
void mlx5e_selq_prepare_params(struct mlx5e_selq *selq, struct mlx5e_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index a7832a018..48cf69184 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -1703,6 +1703,7 @@ static const struct macsec_ops macsec_offload_ops = {
.mdo_add_secy = mlx5e_macsec_add_secy,
.mdo_upd_secy = mlx5e_macsec_upd_secy,
.mdo_del_secy = mlx5e_macsec_del_secy,
+ .rx_uses_md_dst = true,
};
bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 58eacba6d..ad51edf55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -45,6 +45,10 @@ struct arfs_table {
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
+enum {
+ MLX5E_ARFS_STATE_ENABLED,
+};
+
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
@@ -60,6 +64,7 @@ struct mlx5e_arfs_tables {
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
+ unsigned long state;
};
struct arfs_tuple {
@@ -170,6 +175,8 @@ int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
return err;
}
}
+ set_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
return 0;
}
@@ -454,6 +461,8 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs)
int i;
int j;
+ clear_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state);
+
spin_lock_bh(&arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
@@ -621,17 +630,8 @@ static void arfs_handle_work(struct work_struct *work)
struct mlx5_flow_handle *rule;
arfs = mlx5e_fs_get_arfs(priv->fs);
- mutex_lock(&priv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- spin_lock_bh(&arfs->arfs_lock);
- hlist_del(&arfs_rule->hlist);
- spin_unlock_bh(&arfs->arfs_lock);
-
- mutex_unlock(&priv->state_lock);
- kfree(arfs_rule);
- goto out;
- }
- mutex_unlock(&priv->state_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state))
+ return;
if (!arfs_rule->rule) {
rule = arfs_add_rule(priv, arfs_rule);
@@ -744,6 +744,11 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
+ if (!test_bit(MLX5E_ARFS_STATE_ENABLED, &arfs->state)) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -EPERM;
+ }
+
arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 9910a0480..e7d396434 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5578,9 +5578,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
- mutex_lock(&priv->state_lock);
mlx5e_selq_cleanup(&priv->selq);
- mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb_max_qos_sqs; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index e6674118b..164e10b5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1752,8 +1752,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
+ /* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
- if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
+ if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ad32b80e8..01c0e1ee9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -679,8 +679,10 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return err;
}
- if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags))
+ if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &flags)) {
mlx5_lag_port_sel_destroy(ldev);
+ ldev->buckets = 1;
+ }
if (mlx5_lag_has_drop_rule(ldev))
mlx5_lag_drop_rule_cleanup(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e2a985ec2..f36a416ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -792,7 +792,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
- EMAD, DISCARD);
+ EMAD, FORWARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 41eac7dfb..685bcf8cb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -780,7 +780,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+ mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+ mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@@ -791,6 +793,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
}
static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+ /* The entry markers are relative to the current chunk and therefore
+ * need to be reset together with the chunk marker.
+ */
+ ctx->current_vchunk = NULL;
+ ctx->start_ventry = NULL;
+ ctx->stop_ventry = NULL;
+}
+
+static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
@@ -812,7 +825,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
- vregion->rehash.ctx.current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@@ -885,10 +898,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+ struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
- cancel_delayed_work_sync(&vregion->rehash.dw);
+ if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+ ctx->hints_priv)
+ ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@@ -1254,8 +1271,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
- ventry->entry, activity);
+ struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+ int err;
+
+ mutex_lock(&vregion->lock);
+ err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+ activity);
+ mutex_unlock(&vregion->lock);
+ return err;
}
static int
@@ -1289,6 +1312,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+ WARN_ON(vchunk->chunk2);
+
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@@ -1307,7 +1332,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@@ -1330,6 +1355,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+ if (list_empty(&vchunk->ventry_list))
+ goto out;
+
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@@ -1339,6 +1367,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
+ WARN_ON(ventry->vchunk != vchunk);
+
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@@ -1379,6 +1409,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
+out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@@ -1392,6 +1423,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
+ if (list_empty(&vregion->vchunk_list))
+ return 0;
+
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@@ -1420,16 +1454,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
- mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
+ if (ctx->this_is_rollback)
+ return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
- ctx->current_vchunk = NULL;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@@ -1440,7 +1475,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
- mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@@ -1489,6 +1523,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
+ mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@@ -1541,7 +1576,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+ return;
}
if (*credits >= 0)
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e5ec0a363..31f75b4a6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -368,7 +368,6 @@ union ks8851_tx_hdr {
* @rdfifo: FIFO read callback
* @wrfifo: FIFO write callback
* @start_xmit: start_xmit() implementation callback
- * @rx_skb: rx_skb() implementation callback
* @flush_tx_work: flush_tx_work() implementation callback
*
* The @statelock is used to protect information in the structure which may
@@ -423,8 +422,6 @@ struct ks8851_net {
struct sk_buff *txp, bool irq);
netdev_tx_t (*start_xmit)(struct sk_buff *skb,
struct net_device *dev);
- void (*rx_skb)(struct ks8851_net *ks,
- struct sk_buff *skb);
void (*flush_tx_work)(struct ks8851_net *ks);
};
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 0bf13b38b..d4cdf3d4f 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -232,16 +232,6 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
}
/**
- * ks8851_rx_skb - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb(struct ks8851_net *ks, struct sk_buff *skb)
-{
- ks->rx_skb(ks, skb);
-}
-
-/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
*
@@ -309,7 +299,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- ks8851_rx_skb(ks, skb);
+ __netif_rx(skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -340,6 +330,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
unsigned long flags;
unsigned int status;
+ local_bh_disable();
+
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
@@ -416,6 +408,8 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
+ local_bh_enable();
+
return IRQ_HANDLED;
}
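
Replacing the per-bus rx_skb callbacks with a direct __netif_rx() call in the common RX path only works because the interrupt handler above now brackets its work with local_bh_disable()/local_bh_enable(): __netif_rx() expects bottom halves to be disabled. A trimmed, illustrative kernel-style sketch of that shape; the handler and the example_dequeue_rx() helper are hypothetical and not part of the ks8851 driver:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical helper standing in for the driver's FIFO read path. */
static struct sk_buff *example_dequeue_rx(void *dev_id)
{
	return NULL;
}

/* Threaded IRQ handler: the bottom-half-disabled requirement of
 * __netif_rx() is met by wrapping the whole handler body.
 */
static irqreturn_t example_threaded_irq(int irq, void *dev_id)
{
	struct sk_buff *skb;

	local_bh_disable();

	while ((skb = example_dequeue_rx(dev_id)) != NULL)
		__netif_rx(skb);

	local_bh_enable();

	return IRQ_HANDLED;
}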
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 7f4904248..96fb0ffce 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -210,16 +210,6 @@ static void ks8851_wrfifo_par(struct ks8851_net *ks, struct sk_buff *txp,
iowrite16_rep(ksp->hw_addr, txp->data, len / 2);
}
-/**
- * ks8851_rx_skb_par - receive skbuff
- * @ks: The device state.
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_par(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
static unsigned int ks8851_rdreg16_par_txqcr(struct ks8851_net *ks)
{
return ks8851_rdreg16_par(ks, KS_TXQCR);
@@ -298,7 +288,6 @@ static int ks8851_probe_par(struct platform_device *pdev)
ks->rdfifo = ks8851_rdfifo_par;
ks->wrfifo = ks8851_wrfifo_par;
ks->start_xmit = ks8851_start_xmit_par;
- ks->rx_skb = ks8851_rx_skb_par;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
IRQ_RXI | /* RX done */ \
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 88e26c120..4dcbff789 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -299,16 +299,6 @@ static unsigned int calc_txlen(unsigned int len)
}
/**
- * ks8851_rx_skb_spi - receive skbuff
- * @ks: The device state
- * @skb: The skbuff
- */
-static void ks8851_rx_skb_spi(struct ks8851_net *ks, struct sk_buff *skb)
-{
- netif_rx(skb);
-}
-
-/**
* ks8851_tx_work - process tx packet(s)
 * @work: The work structure that was scheduled.
*
@@ -435,7 +425,6 @@ static int ks8851_probe_spi(struct spi_device *spi)
ks->rdfifo = ks8851_rdfifo_spi;
ks->wrfifo = ks8851_wrfifo_spi;
ks->start_xmit = ks8851_start_xmit_spi;
- ks->rx_skb = ks8851_rx_skb_spi;
ks->flush_tx_work = ks8851_flush_tx_work_spi;
#define STD_IRQ (IRQ_LCI | /* Link Change */ \
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 32709d21a..212bf6f4e 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -730,7 +730,7 @@ static int sparx5_port_pcs_low_set(struct sparx5 *sparx5,
bool sgmii = false, inband_aneg = false;
int err;
- if (port->conf.inband) {
+ if (conf->inband) {
if (conf->portmode == PHY_INTERFACE_MODE_SGMII ||
conf->portmode == PHY_INTERFACE_MODE_QSGMII)
inband_aneg = true; /* Cisco-SGMII in-band-aneg */
@@ -947,7 +947,7 @@ int sparx5_port_pcs_set(struct sparx5 *sparx5,
if (err)
return -EINVAL;
- if (port->conf.inband) {
+ if (conf->inband) {
/* Enable/disable 1G counters in ASM */
spx5_rmw(ASM_PORT_CFG_CSC_STAT_DIS_SET(high_speed_dev),
ASM_PORT_CFG_CSC_STAT_DIS,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index fcc3faecb..d33cf8ee7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3216,9 +3216,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
napi_enable(&qcq->napi);
- if (qcq->flags & IONIC_QCQ_F_INTR)
+ if (qcq->flags & IONIC_QCQ_F_INTR) {
+ irq_set_affinity_hint(qcq->intr.vector,
+ &qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 76fabeae5..33df06a2d 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -2549,6 +2549,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chan = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chan = common->tx_chns;
struct device *dev = common->dev;
struct devlink_port *dl_port;
struct am65_cpsw_port *port;
@@ -2567,6 +2569,22 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
return ret;
}
+ /* The DMA Channels are not guaranteed to be in a clean state.
+ * Reset and disable them to ensure that they are back to the
+ * clean state and ready to be used.
+ */
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(tx_chan[i].tx_chn, &tx_chan[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(tx_chan[i].tx_chn);
+ }
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(rx_chan->rx_chn, i, rx_chan,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(rx_chan->rx_chn);
+
ret = am65_cpsw_nuss_register_devlink(common);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 9948ac14e..c1bdf045e 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -649,6 +649,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
struct am65_cpts_skb_cb_data *skb_cb =
(struct am65_cpts_skb_cb_data *)skb->cb;
+ if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+ ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+ (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+ mtype_seqid = skb_cb->skb_mtype_seqid;
+
if (mtype_seqid == skb_cb->skb_mtype_seqid) {
u64 ns = event->timestamp;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 3f8da6f0b..488ca1c85 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -930,7 +930,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@@ -1028,7 +1028,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!pskb_inet_may_pull(skb))
+ if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7086acfed..05b5914d8 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1110,11 +1110,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
+ struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
- hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);
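
Switching to hlist_for_each_entry_safe() matters because the loop body deletes and frees the node it is standing on; the _safe variant caches the next pointer before the body runs. The same hazard shown on a plain userspace singly-linked list, for illustration only:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Deleting while iterating: read `next` before freeing the current node,
 * which is exactly what the kernel's *_for_each_entry_safe() iterators
 * do on the caller's behalf.
 */
static void free_all(struct node *head)
{
	struct node *cur = head, *next;

	while (cur) {
		next = cur->next;	/* must be read before free(cur) */
		free(cur);
		cur = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->val = i;
		n->next = head;
		head = n;
	}
	free_all(head);
	puts("list freed without touching freed memory");
	return 0;
}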
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 209ee9f35..8a8fd7411 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1007,10 +1007,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
struct metadata_dst *md_dst;
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
+ bool is_macsec_md_dst;
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
md_dst = skb_metadata_dst(skb);
+ is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct sk_buff *nskb;
@@ -1021,10 +1023,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
* the SecTAG, so we have to deduce which port to deliver to.
*/
if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
- if (md_dst && md_dst->type == METADATA_MACSEC &&
- (!find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci)))
+ const struct macsec_ops *ops;
+
+ ops = macsec_get_ops(macsec, NULL);
+
+ if (ops->rx_uses_md_dst && !is_macsec_md_dst)
continue;
+ if (is_macsec_md_dst) {
+ struct macsec_rx_sc *rx_sc;
+
+ /* All drivers that implement MACsec offload
+ * support using skb metadata destinations must
+ * indicate that they do so.
+ */
+ DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+ rx_sc = find_rx_sc(&macsec->secy,
+ md_dst->u.macsec_info.sci);
+ if (!rx_sc)
+ continue;
+ /* device indicated macsec offload occurred */
+ skb->dev = ndev;
+ skb->pkt_type = PACKET_HOST;
+ eth_skb_pkt_type(skb, ndev);
+ ret = RX_HANDLER_ANOTHER;
+ goto out;
+ }
+
+ /* This datapath is insecure because it is unable to
+ * enforce isolation of broadcast/multicast traffic and
+ * unicast traffic with promiscuous mode on the macsec
+ * netdev. Since the core stack has no mechanism to
+ * check that the hardware did indeed receive MACsec
+ * traffic, it is possible that the response handling
+ * done by the MACsec port was to a plaintext packet.
+ * This violates the MACsec protocol standard.
+ */
if (ether_addr_equal_64bits(hdr->h_dest,
ndev->dev_addr)) {
/* exact match, divert skb to this port */
@@ -1040,11 +1074,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
break;
nskb->dev = ndev;
- if (ether_addr_equal_64bits(hdr->h_dest,
- ndev->broadcast))
- nskb->pkt_type = PACKET_BROADCAST;
- else
- nskb->pkt_type = PACKET_MULTICAST;
+ eth_skb_pkt_type(nskb, ndev);
__netif_rx(nskb);
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 3f882bce3..d126273da 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -262,7 +262,7 @@ static int xpcs_soft_reset(struct dw_xpcs *xpcs,
dev = MDIO_MMD_VEND2;
break;
default:
- return -1;
+ return -EINVAL;
}
ret = xpcs_write(xpcs, dev, MDIO_CTRL1, MDIO_CTRL1_RESET);
@@ -904,7 +904,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
return ret;
break;
default:
- return -1;
+ return -EINVAL;
}
if (compat->pma_config) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 922d6f16d..4af1ba5d0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2121,14 +2121,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
- pr_err("unexpected GSO type: "
- "0x%x, gso_size %d, hdr_len %d\n",
- sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
- tun16_to_cpu(tun, gso.hdr_len));
- print_hex_dump(KERN_ERR, "tun: ",
- DUMP_PREFIX_NONE,
- 16, 1, skb->head,
- min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+
+ if (net_ratelimit()) {
+ netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
+ sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
+ tun16_to_cpu(tun, gso.hdr_len));
+ print_hex_dump(KERN_ERR, "tun: ",
+ DUMP_PREFIX_NONE,
+ 16, 1, skb->head,
+ min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
+ }
WARN_ON_ONCE(1);
return -EINVAL;
}
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index e0e9b4c53..21b6c4d94 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1317,6 +1317,8 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
netif_set_tso_max_size(dev->net, 16384);
+ ax88179_reset(dev);
+
return 0;
}
@@ -1454,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
- skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
- skb_trim(ax_skb, pkt_len);
-
- /* Skip IP alignment pseudo header */
- skb_pull(ax_skb, 2);
+ skb_put(ax_skb, pkt_len);
+ memcpy(ax_skb->data, skb->data + 2, pkt_len);
- skb->truesize = pkt_len_plus_padd +
- SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);
@@ -1695,7 +1692,6 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1708,7 +1704,6 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 45f1a871b..32cddb633 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2948,19 +2948,35 @@ static int virtnet_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfu
static int virtnet_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc)
{
struct virtnet_info *vi = netdev_priv(dev);
+ bool update = false;
int i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
+ if (!vi->has_rss)
+ return -EOPNOTSUPP;
+
for (i = 0; i < vi->rss_indir_table_size; ++i)
vi->ctrl->rss.indirection_table[i] = indir[i];
+ update = true;
}
- if (key)
+
+ if (key) {
+ /* If either _F_HASH_REPORT or _F_RSS are negotiated, the
+ * device provides hash calculation capabilities, that is,
+ * hash_key is configured.
+ */
+ if (!vi->has_rss && !vi->has_rss_hash_report)
+ return -EOPNOTSUPP;
+
memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
+ update = true;
+ }
- virtnet_commit_rss_command(vi);
+ if (update)
+ virtnet_commit_rss_command(vi);
return 0;
}
@@ -3852,13 +3868,15 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT))
vi->has_rss_hash_report = true;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_RSS)) {
vi->has_rss = true;
- if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_indir_table_size =
virtio_cread16(vdev, offsetof(struct virtio_net_config,
rss_max_indirection_table_length));
+ }
+
+ if (vi->has_rss || vi->has_rss_hash_report) {
vi->rss_key_size =
virtio_cread8(vdev, offsetof(struct virtio_net_config, rss_max_key_size));
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 619dd71c9..fbd36dff9 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1662,6 +1662,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+ return false;
+
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
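
The new check above skips FDB learning for frames whose inner source MAC is not a valid unicast address (all-zeros, or with the group bit set), since such an entry would poison the forwarding table. A standalone sketch of that test; the helper name only mirrors, and is not, the kernel's is_valid_ether_addr():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A source MAC is usable for learning only if it is neither all-zeros nor
 * a group (multicast/broadcast) address, i.e. bit 0 of the first octet is
 * clear.
 */
static bool sketch_valid_ether_addr(const uint8_t addr[6])
{
	static const uint8_t zero[6];

	if (!memcmp(addr, zero, 6))
		return false;
	if (addr[0] & 0x01)
		return false;
	return true;
}

int main(void)
{
	const uint8_t ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("unicast ok: %d, broadcast ok: %d\n",
	       sketch_valid_ether_addr(ucast), sketch_valid_ether_addr(bcast));
	return 0;
}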
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index a62ee05c5..4bea36cc7 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -105,7 +105,7 @@ static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
.ch_cfg = ath11k_mhi_channels_qca6390,
.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 988222cea..acc84e671 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -643,7 +643,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
} else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
+ antcomb->rssi_lna2) {
/* set to A-B */
conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 86ff17493..c3a602197 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -83,6 +83,15 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* ACEPC W5 Pro Cherry Trail Z8350 HDMI stick, same wifi as the T8 */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "3"),
+ DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Chuwi Hi8 Pro with D2D3_Hi8Pro.233 BIOS */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 8c5b97fb1..5b0b4bb2b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -48,6 +48,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (!pasn)
return -ENOBUFS;
+ iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
switch (pasn->cipher) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index acd8803db..b20d64dbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2650,7 +2650,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
if (ver_handler->version != scan_ver)
continue;
- return ver_handler->handler(mvm, vif, params, type, uid);
+ err = ver_handler->handler(mvm, vif, params, type, uid);
+ return err ? : uid;
}
err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
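The new return statement uses GCC's binary "a ?: b" form: propagate a non-zero error from the version-specific handler, otherwise return the scan uid. A tiny illustration of that idiom (names are made up):

/* Return a negative error code from the handler, or the positive uid
 * on success.  "a ?: b" is GCC's shorthand for "a ? a : b" without
 * evaluating a twice.
 */
static int run_versioned_handler(int (*handler)(void *ctx), void *ctx, int uid)
{
	int err = handler(ctx);

	return err ? : uid;
}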
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4d4db5f68..7f30e6add 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -505,6 +505,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 179740607..d982c5dc0 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -546,7 +546,7 @@
#define RTW89_PCI_TXWD_NUM_MAX 512
#define RTW89_PCI_TXWD_PAGE_SIZE 128
#define RTW89_PCI_ADDRINFO_MAX 4
-#define RTW89_PCI_RX_BUF_SIZE 11460
+#define RTW89_PCI_RX_BUF_SIZE (11454 + 40) /* +40 for rtw89_rxdesc_long_v2 */
#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
#define RTW89_PCI_MULTITAG 8
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 21d68664f..7968baa62 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
- struct regulator *regulator;
+ struct regulator *vin_regulator;
+ struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
- ret = regulator_disable(trf->regulator);
+ ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
- trf->regulator = devm_regulator_get(&spi->dev, "vin");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+ if (IS_ERR(trf->vin_regulator)) {
+ ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
- uvolts = regulator_get_voltage(trf->regulator);
+ uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
- trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
- if (IS_ERR(trf->regulator)) {
- ret = PTR_ERR(trf->regulator);
+ trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(trf->vddio_regulator)) {
+ ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- ret = regulator_enable(trf->regulator);
+ ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
- goto err_destroy_lock;
+ goto err_disable_vin_regulator;
}
- if (regulator_get_voltage(trf->regulator) == 1800000) {
+ if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
- goto err_disable_regulator;
+ goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
- regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+ regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+ regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
- regulator_disable(trf->regulator);
+ regulator_disable(trf->vddio_regulator);
+ regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);
}
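The driver now tracks the VIN and VDD_IO supplies separately and unwinds them in reverse order on probe failure. A stripped-down sketch of that acquire/enable/unwind sequence, assuming the same "vin" and "vdd-io" supply names; error logging is omitted:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Acquire and enable two supplies, unwinding in reverse order on error. */
static int enable_two_supplies(struct device *dev,
			       struct regulator **vin,
			       struct regulator **vddio)
{
	int ret;

	*vin = devm_regulator_get(dev, "vin");
	if (IS_ERR(*vin))
		return PTR_ERR(*vin);

	ret = regulator_enable(*vin);
	if (ret)
		return ret;

	*vddio = devm_regulator_get(dev, "vdd-io");
	if (IS_ERR(*vddio)) {
		ret = PTR_ERR(*vddio);
		goto err_disable_vin;
	}

	ret = regulator_enable(*vddio);
	if (ret)
		goto err_disable_vin;

	return 0;

err_disable_vin:
	regulator_disable(*vin);
	return ret;
}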
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d0129099..5ff09f2ca 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3471,6 +3471,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index feafa378b..aa2fba1c0 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -379,21 +379,8 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
EXPORT_SYMBOL(pci_bus_add_devices);
-/** pci_walk_bus - walk devices on/under bus, calling callback.
- * @top bus whose devices should be walked
- * @cb callback to be called for each device found
- * @userdata arbitrary pointer to be passed to callback.
- *
- * Walk the given bus, including any bridged devices
- * on buses under this bus. Call the provided callback
- * on each device found.
- *
- * We check the return of @cb each time. If it returns anything
- * other than 0, we break out.
- *
- */
-void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
- void *userdata)
+static void __pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata, bool locked)
{
struct pci_dev *dev;
struct pci_bus *bus;
@@ -401,7 +388,8 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
int retval;
bus = top;
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
next = top->devices.next;
for (;;) {
if (next == &bus->devices) {
@@ -424,10 +412,37 @@ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
if (retval)
break;
}
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
+}
+
+/**
+ * pci_walk_bus - walk devices on/under bus, calling callback.
+ * @top: bus whose devices should be walked
+ * @cb: callback to be called for each device found
+ * @userdata: arbitrary pointer to be passed to callback
+ *
+ * Walk the given bus, including any bridged devices
+ * on buses under this bus. Call the provided callback
+ * on each device found.
+ *
+ * We check the return of @cb each time. If it returns anything
+ * other than 0, we break out.
+ */
+void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ __pci_walk_bus(top, cb, userdata, false);
}
EXPORT_SYMBOL_GPL(pci_walk_bus);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ __pci_walk_bus(top, cb, userdata, true);
+}
+EXPORT_SYMBOL_GPL(pci_walk_bus_locked);
+
struct pci_bus *pci_bus_get(struct pci_bus *bus)
{
if (bus)
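The pattern introduced here is generic: one internal worker taking a "locked" flag plus thin exported wrappers, with lockdep_assert_held() guarding the pre-locked entry point. A self-contained sketch with hypothetical names:

#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void __do_walk(void (*cb)(void *item), void *item, bool locked)
{
	if (!locked)
		down_read(&example_sem);

	cb(item);			/* the actual traversal work */

	if (!locked)
		up_read(&example_sem);
}

/* For callers that do not hold example_sem. */
void do_walk(void (*cb)(void *item), void *item)
{
	__do_walk(cb, item, false);
}

/* For callers that already hold example_sem. */
void do_walk_locked(void (*cb)(void *item), void *item)
{
	lockdep_assert_held(&example_sem);

	__do_walk(cb, item, true);
}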
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 5368a3715..67956bfeb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1258,6 +1258,7 @@ end:
/**
* pci_set_full_power_state - Put a PCI device into D0 and update its state
* @dev: PCI device to power up
+ * @locked: whether pci_bus_sem is held
*
* Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
* to confirm the state change, restore its BARs if they might be lost and
@@ -1267,7 +1268,7 @@ end:
* to D0, it is more efficient to use pci_power_up() directly instead of this
* function.
*/
-static int pci_set_full_power_state(struct pci_dev *dev)
+static int pci_set_full_power_state(struct pci_dev *dev, bool locked)
{
u16 pmcsr;
int ret;
@@ -1303,7 +1304,7 @@ static int pci_set_full_power_state(struct pci_dev *dev)
}
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
@@ -1332,10 +1333,22 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state, bool locked)
+{
+ if (!bus)
+ return;
+
+ if (locked)
+ pci_walk_bus_locked(bus, __pci_dev_set_current_state, &state);
+ else
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
+}
+
/**
* pci_set_low_power_state - Put a PCI device into a low-power state.
* @dev: PCI device to handle.
* @state: PCI power state (D1, D2, D3hot) to put the device into.
+ * @locked: whether pci_bus_sem is held
*
* Use the device's PCI_PM_CTRL register to put it into a low-power state.
*
@@ -1346,7 +1359,7 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
-static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
+static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
u16 pmcsr;
@@ -1400,29 +1413,12 @@ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
pci_power_name(state));
if (dev->bus->self)
- pcie_aspm_pm_state_change(dev->bus->self);
+ pcie_aspm_pm_state_change(dev->bus->self, locked);
return 0;
}
-/**
- * pci_set_power_state - Set the power state of a PCI device
- * @dev: PCI device to handle.
- * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
- *
- * Transition a device to a new power state, using the platform firmware and/or
- * the device's PCI PM registers.
- *
- * RETURN VALUE:
- * -EINVAL if the requested state is invalid.
- * -EIO if device does not support PCI PM or its PM capabilities register has a
- * wrong version, or device doesn't support the requested state.
- * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
- * 0 if device already is in the requested state.
- * 0 if the transition is to D3 but D3 is not supported.
- * 0 if device's power state has been successfully changed.
- */
-int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+static int __pci_set_power_state(struct pci_dev *dev, pci_power_t state, bool locked)
{
int error;
@@ -1446,7 +1442,7 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
if (state == PCI_D0)
- return pci_set_full_power_state(dev);
+ return pci_set_full_power_state(dev, locked);
/*
* This device is quirked not to be put into D3, so don't put it in
@@ -1460,16 +1456,16 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
* To put the device in D3cold, put it into D3hot in the native
* way, then put it into D3cold using platform ops.
*/
- error = pci_set_low_power_state(dev, PCI_D3hot);
+ error = pci_set_low_power_state(dev, PCI_D3hot, locked);
if (pci_platform_power_transition(dev, PCI_D3cold))
return error;
/* Powering off a bridge may power off the whole hierarchy */
if (dev->current_state == PCI_D3cold)
- pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold, locked);
} else {
- error = pci_set_low_power_state(dev, state);
+ error = pci_set_low_power_state(dev, state, locked);
if (pci_platform_power_transition(dev, state))
return error;
@@ -1477,8 +1473,38 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0;
}
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
+ * 0 if device already is in the requested state.
+ * 0 if the transition is to D3 but D3 is not supported.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ return __pci_set_power_state(dev, state, false);
+}
EXPORT_SYMBOL(pci_set_power_state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{
+ lockdep_assert_held(&pci_bus_sem);
+
+ return __pci_set_power_state(dev, state, true);
+}
+EXPORT_SYMBOL(pci_set_power_state_locked);
+
#define PCI_EXP_SAVE_REGS 7
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
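pci_set_power_state_locked() exists so that code already running under pci_bus_sem, for example a pci_walk_bus_locked() callback, can change device power states without re-taking the semaphore. A hypothetical callback showing the intended pairing:

#include <linux/pci.h>

/* Hypothetical pci_walk_bus_locked() callback: the walk is entered
 * with pci_bus_sem held, so the _locked power-state variant must be
 * used to avoid re-acquiring the semaphore.
 */
static int put_dev_in_d3hot(struct pci_dev *dev, void *data)
{
	pci_set_power_state_locked(dev, PCI_D3hot);
	return 0;		/* non-zero would stop the walk */
}

/* Caller holds pci_bus_sem:
 *	pci_walk_bus_locked(bus, put_dev_in_d3hot, NULL);
 */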
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9950deeb0..88576a22f 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -556,12 +556,12 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
-void pcie_aspm_pm_state_change(struct pci_dev *pdev);
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
-static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
+static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
#endif
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 25736d408..cf4acea66 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -743,10 +743,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
* in pcie_config_aspm_link().
*/
if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
- pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1, 0);
- pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_ASPM_L1, 0);
+ pcie_capability_clear_word(child, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L1);
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPM_L1);
}
val = 0;
@@ -1055,8 +1055,11 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
up_read(&pci_bus_sem);
}
-/* @pdev: the root port or switch downstream port */
-void pcie_aspm_pm_state_change(struct pci_dev *pdev)
+/*
+ * @pdev: the root port or switch downstream port
+ * @locked: whether pci_bus_sem is held
+ */
+void pcie_aspm_pm_state_change(struct pci_dev *pdev, bool locked)
{
struct pcie_link_state *link = pdev->link_state;
@@ -1066,12 +1069,14 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
* Devices changed PM state, we should recheck if latency
* meets all functions' requirement
*/
- down_read(&pci_bus_sem);
+ if (!locked)
+ down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
pcie_update_aspm_capable(link->root);
pcie_config_aspm_path(link);
mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
+ if (!locked)
+ up_read(&pci_bus_sem);
}
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index 088002828..acdbf9e77 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -9,6 +9,7 @@
#define dev_fmt(fmt) "DPC: " fmt
#include <linux/aer.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
@@ -203,7 +204,7 @@ static void dpc_process_rp_pio_error(struct pci_dev *pdev)
/* Get First Error Pointer */
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &dpc_status);
- first_error = (dpc_status & 0x1f00) >> 8;
+ first_error = FIELD_GET(PCI_EXP_DPC_RP_PIO_FEP, dpc_status);
for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) {
if ((status & ~mask) & (1 << i))
@@ -339,7 +340,7 @@ void pci_dpc_init(struct pci_dev *pdev)
/* Quirks may set dpc_rp_log_size if device or firmware is buggy */
if (!pdev->dpc_rp_log_size) {
pdev->dpc_rp_log_size =
- (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8;
+ FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, cap);
if (pdev->dpc_rp_log_size < 4 || pdev->dpc_rp_log_size > 9) {
pci_err(pdev, "RP PIO log size %u is invalid\n",
pdev->dpc_rp_log_size);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 289ba6902..56dce858a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2425,9 +2425,9 @@ static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
dev->clear_retrain_link = 1;
pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link);
static void fixup_rev1_53c810(struct pci_dev *dev)
{
@@ -4011,10 +4011,11 @@ static int nvme_disable_and_flr(struct pci_dev *dev, bool probe)
}
/*
- * Intel DC P3700 NVMe controller will timeout waiting for ready status
- * to change after NVMe enable if the driver starts interacting with the
- * device too soon after FLR. A 250ms delay after FLR has heuristically
- * proven to produce reliably working results for device assignment cases.
+ * Some NVMe controllers such as Intel DC P3700 and Solidigm P44 Pro will
+ * timeout waiting for ready status to change after NVMe enable if the driver
+ * starts interacting with the device too soon after FLR. A 250ms delay after
+ * FLR has heuristically proven to produce reliably working results for device
+ * assignment cases.
*/
static int delay_250ms_after_flr(struct pci_dev *dev, bool probe)
{
@@ -4101,6 +4102,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
{ PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr },
+ { PCI_VENDOR_ID_SOLIDIGM, 0xf1ac, delay_250ms_after_flr },
{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
reset_chelsio_generic_dev },
{ PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
@@ -4474,9 +4476,9 @@ static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
dev_name(&pdev->dev));
- pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_RELAX_EN |
- PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
+ pcie_capability_clear_word(root_port, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
}
/*
@@ -5403,6 +5405,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
+static void quirk_no_flr_snet(struct pci_dev *dev)
+{
+ if (dev->revision == 0x1)
+ quirk_no_flr(dev);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLIDRUN, 0x1000, quirk_no_flr_snet);
+
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
@@ -5808,6 +5818,42 @@ SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */
SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */
SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */
SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */
+SWITCHTEC_QUIRK(0x5000); /* PFX 100XG5 */
+SWITCHTEC_QUIRK(0x5084); /* PFX 84XG5 */
+SWITCHTEC_QUIRK(0x5068); /* PFX 68XG5 */
+SWITCHTEC_QUIRK(0x5052); /* PFX 52XG5 */
+SWITCHTEC_QUIRK(0x5036); /* PFX 36XG5 */
+SWITCHTEC_QUIRK(0x5028); /* PFX 28XG5 */
+SWITCHTEC_QUIRK(0x5100); /* PSX 100XG5 */
+SWITCHTEC_QUIRK(0x5184); /* PSX 84XG5 */
+SWITCHTEC_QUIRK(0x5168); /* PSX 68XG5 */
+SWITCHTEC_QUIRK(0x5152); /* PSX 52XG5 */
+SWITCHTEC_QUIRK(0x5136); /* PSX 36XG5 */
+SWITCHTEC_QUIRK(0x5128); /* PSX 28XG5 */
+SWITCHTEC_QUIRK(0x5200); /* PAX 100XG5 */
+SWITCHTEC_QUIRK(0x5284); /* PAX 84XG5 */
+SWITCHTEC_QUIRK(0x5268); /* PAX 68XG5 */
+SWITCHTEC_QUIRK(0x5252); /* PAX 52XG5 */
+SWITCHTEC_QUIRK(0x5236); /* PAX 36XG5 */
+SWITCHTEC_QUIRK(0x5228); /* PAX 28XG5 */
+SWITCHTEC_QUIRK(0x5300); /* PFXA 100XG5 */
+SWITCHTEC_QUIRK(0x5384); /* PFXA 84XG5 */
+SWITCHTEC_QUIRK(0x5368); /* PFXA 68XG5 */
+SWITCHTEC_QUIRK(0x5352); /* PFXA 52XG5 */
+SWITCHTEC_QUIRK(0x5336); /* PFXA 36XG5 */
+SWITCHTEC_QUIRK(0x5328); /* PFXA 28XG5 */
+SWITCHTEC_QUIRK(0x5400); /* PSXA 100XG5 */
+SWITCHTEC_QUIRK(0x5484); /* PSXA 84XG5 */
+SWITCHTEC_QUIRK(0x5468); /* PSXA 68XG5 */
+SWITCHTEC_QUIRK(0x5452); /* PSXA 52XG5 */
+SWITCHTEC_QUIRK(0x5436); /* PSXA 36XG5 */
+SWITCHTEC_QUIRK(0x5428); /* PSXA 28XG5 */
+SWITCHTEC_QUIRK(0x5500); /* PAXA 100XG5 */
+SWITCHTEC_QUIRK(0x5584); /* PAXA 84XG5 */
+SWITCHTEC_QUIRK(0x5568); /* PAXA 68XG5 */
+SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */
+SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */
+SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
@@ -6057,7 +6103,7 @@ static void dpc_log_size(struct pci_dev *dev)
if (!(val & PCI_EXP_DPC_CAP_RP_EXT))
return;
- if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) {
+ if (FIELD_GET(PCI_EXP_DPC_RP_PIO_LOG_SIZE, val) == 0) {
pci_info(dev, "Overriding RP PIO Log Size to 4\n");
dev->dpc_rp_log_size = 4;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index d05a48263..332af6938 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -372,7 +372,7 @@ static ssize_t field ## _show(struct device *dev, \
if (stdev->gen == SWITCHTEC_GEN3) \
return io_string_show(buf, &si->gen3.field, \
sizeof(si->gen3.field)); \
- else if (stdev->gen == SWITCHTEC_GEN4) \
+ else if (stdev->gen >= SWITCHTEC_GEN4) \
return io_string_show(buf, &si->gen4.field, \
sizeof(si->gen4.field)); \
else \
@@ -663,7 +663,7 @@ static int ioctl_flash_info(struct switchtec_dev *stdev,
if (stdev->gen == SWITCHTEC_GEN3) {
info.flash_length = ioread32(&fi->gen3.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
- } else if (stdev->gen == SWITCHTEC_GEN4) {
+ } else if (stdev->gen >= SWITCHTEC_GEN4) {
info.flash_length = ioread32(&fi->gen4.flash_length);
info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
} else {
@@ -870,7 +870,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev,
ret = flash_part_info_gen3(stdev, &info);
if (ret)
return ret;
- } else if (stdev->gen == SWITCHTEC_GEN4) {
+ } else if (stdev->gen >= SWITCHTEC_GEN4) {
ret = flash_part_info_gen4(stdev, &info);
if (ret)
return ret;
@@ -1606,7 +1606,7 @@ static int switchtec_init_pci(struct switchtec_dev *stdev,
if (stdev->gen == SWITCHTEC_GEN3)
part_id = &stdev->mmio_sys_info->gen3.partition_id;
- else if (stdev->gen == SWITCHTEC_GEN4)
+ else if (stdev->gen >= SWITCHTEC_GEN4)
part_id = &stdev->mmio_sys_info->gen4.partition_id;
else
return -EOPNOTSUPP;
@@ -1740,63 +1740,99 @@ static void switchtec_pci_remove(struct pci_dev *pdev)
}
static const struct pci_device_id switchtec_pci_tbl[] = {
- SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), //PFX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), //PFX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), //PFX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), //PFX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), //PFX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), //PFX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), //PSX 24xG3
- SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), //PSX 32xG3
- SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), //PSX 48xG3
- SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), //PSX 64xG3
- SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), //PSX 80xG3
- SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), //PSX 96xG3
- SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), //PAX 24XG3
- SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), //PAX 32XG3
- SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), //PAX 48XG3
- SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), //PAX 64XG3
- SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), //PAX 80XG3
- SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), //PAX 96XG3
- SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), //PFXL 24XG3
- SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), //PFXL 32XG3
- SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), //PFXL 48XG3
- SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), //PFXL 64XG3
- SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), //PFXL 80XG3
- SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), //PFXL 96XG3
- SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), //PFXI 24XG3
- SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), //PFXI 32XG3
- SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), //PFXI 48XG3
- SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), //PFXI 64XG3
- SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), //PFXI 80XG3
- SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), //PFXI 96XG3
- SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), //PFX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), //PFX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), //PFX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), //PFX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), //PFX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), //PFX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), //PSX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), //PSX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), //PSX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), //PSX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), //PSX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), //PSX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), //PAX 100XG4
- SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), //PAX 84XG4
- SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), //PAX 68XG4
- SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
- SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
- SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
- SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4
- SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4
- SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4
- SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4
- SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3), /* PFX 48xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3), /* PFX 64xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3), /* PFX 80xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3), /* PFX 96xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3), /* PSX 24xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3), /* PSX 32xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3), /* PSX 48xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3), /* PSX 64xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3), /* PSX 80xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3), /* PSX 96xG3 */
+ SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3), /* PAX 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3), /* PAX 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3), /* PAX 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3), /* PAX 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3), /* PAX 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3), /* PAX 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3), /* PFXL 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3), /* PFXL 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3), /* PFXL 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3), /* PFXL 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3), /* PFXL 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3), /* PFXL 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3), /* PFXI 24XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3), /* PFXI 32XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3), /* PFXI 48XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3), /* PFXI 64XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3), /* PFXI 80XG3 */
+ SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3), /* PFXI 96XG3 */
+ SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4), /* PFX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4), /* PFX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4), /* PFX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4), /* PFX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4), /* PFX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4), /* PFX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4), /* PSX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4), /* PSX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4), /* PSX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4), /* PSX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4), /* PSX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4), /* PSX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4), /* PAX 100XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4), /* PAX 84XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4), /* PAX 68XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), /* PAX 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), /* PAX 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), /* PAX 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), /* PFXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), /* PFXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), /* PFXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), /* PSXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), /* PSXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), /* PSXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), /* PAXA 52XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), /* PAXA 36XG4 */
+ SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), /* PAXA 28XG4 */
+ SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5), /* PFX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5), /* PFX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5), /* PFX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5), /* PFX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5), /* PFX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5), /* PFX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5), /* PSX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5), /* PSX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5), /* PSX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5), /* PSX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5), /* PSX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5), /* PSX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5), /* PAX 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5), /* PAX 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5), /* PAX 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5), /* PAX 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5), /* PAX 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5), /* PAX 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5), /* PFXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5), /* PFXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5), /* PFXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5), /* PFXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5), /* PFXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5), /* PFXA 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5), /* PSXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5), /* PSXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5), /* PSXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5), /* PSXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5), /* PSXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5), /* PSXA 28XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5), /* PAXA 100XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5), /* PAXA 84XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5), /* PAXA 68XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */
+ SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index c93286483..211ce84d9 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -11,6 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -47,6 +48,15 @@
#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
#define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE BIT(9)
+enum imx8_pcie_phy_type {
+ IMX8MM,
+};
+
+struct imx8_pcie_phy_drvdata {
+ const char *gpr;
+ enum imx8_pcie_phy_type variant;
+};
+
struct imx8_pcie_phy {
void __iomem *base;
struct clk *clk;
@@ -57,6 +67,7 @@ struct imx8_pcie_phy {
u32 tx_deemph_gen1;
u32 tx_deemph_gen2;
bool clkreq_unused;
+ const struct imx8_pcie_phy_drvdata *drvdata;
};
static int imx8_pcie_phy_power_on(struct phy *phy)
@@ -68,31 +79,17 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
reset_control_assert(imx8_phy->reset);
pad_mode = imx8_phy->refclk_pad_mode;
- /* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
- imx8_phy->clkreq_unused ?
- 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_AUX_EN,
- IMX8MM_GPR_PCIE_AUX_EN);
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_POWER_OFF, 0);
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_SSC_EN, 0);
-
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_REF_CLK_SEL,
- pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
- IMX8MM_GPR_PCIE_REF_CLK_EXT :
- IMX8MM_GPR_PCIE_REF_CLK_PLL);
- usleep_range(100, 200);
-
- /* Do the PHY common block reset */
- regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
- IMX8MM_GPR_PCIE_CMN_RST,
- IMX8MM_GPR_PCIE_CMN_RST);
- usleep_range(200, 500);
+ switch (imx8_phy->drvdata->variant) {
+ case IMX8MM:
+ /* Tune PHY de-emphasis setting to pass PCIe compliance. */
+ if (imx8_phy->tx_deemph_gen1)
+ writel(imx8_phy->tx_deemph_gen1,
+ imx8_phy->base + PCIE_PHY_TRSV_REG5);
+ if (imx8_phy->tx_deemph_gen2)
+ writel(imx8_phy->tx_deemph_gen2,
+ imx8_phy->base + PCIE_PHY_TRSV_REG6);
+ break;
+ }
if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ||
pad_mode == IMX8_PCIE_REFCLK_PAD_UNUSED) {
@@ -111,8 +108,10 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
/* Source clock from SoC internal PLL */
writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
- writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
- imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ if (imx8_phy->drvdata->variant != IMX8MM) {
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ }
val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
writel(val | ANA_AUX_RX_TERM_GND_EN,
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
@@ -120,15 +119,37 @@ static int imx8_pcie_phy_power_on(struct phy *phy)
imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
}
- /* Tune PHY de-emphasis setting to pass PCIe compliance. */
- if (imx8_phy->tx_deemph_gen1)
- writel(imx8_phy->tx_deemph_gen1,
- imx8_phy->base + PCIE_PHY_TRSV_REG5);
- if (imx8_phy->tx_deemph_gen2)
- writel(imx8_phy->tx_deemph_gen2,
- imx8_phy->base + PCIE_PHY_TRSV_REG6);
+ /* Set AUX_EN_OVERRIDE 1'b0, when the CLKREQ# isn't hooked */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
+ imx8_phy->clkreq_unused ?
+ 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
- reset_control_deassert(imx8_phy->reset);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
+ IMX8MM_GPR_PCIE_REF_CLK_EXT :
+ IMX8MM_GPR_PCIE_REF_CLK_PLL);
+ usleep_range(100, 200);
+
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+
+ switch (imx8_phy->drvdata->variant) {
+ case IMX8MM:
+ reset_control_deassert(imx8_phy->reset);
+ usleep_range(200, 500);
+ break;
+ }
/* Polling to check the phy is ready or not. */
ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75,
@@ -160,6 +181,17 @@ static const struct phy_ops imx8_pcie_phy_ops = {
.owner = THIS_MODULE,
};
+static const struct imx8_pcie_phy_drvdata imx8mm_drvdata = {
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ .variant = IMX8MM,
+};
+
+static const struct of_device_id imx8_pcie_phy_of_match[] = {
+ {.compatible = "fsl,imx8mm-pcie-phy", .data = &imx8mm_drvdata, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
+
static int imx8_pcie_phy_probe(struct platform_device *pdev)
{
struct phy_provider *phy_provider;
@@ -172,6 +204,8 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
if (!imx8_phy)
return -ENOMEM;
+ imx8_phy->drvdata = of_device_get_match_data(dev);
+
/* get PHY refclk pad mode */
of_property_read_u32(np, "fsl,refclk-pad-mode",
&imx8_phy->refclk_pad_mode);
@@ -197,7 +231,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
/* Grab GPR config register range */
imx8_phy->iomuxc_gpr =
- syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ syscon_regmap_lookup_by_compatible(imx8_phy->drvdata->gpr);
if (IS_ERR(imx8_phy->iomuxc_gpr)) {
dev_err(dev, "unable to find iomuxc registers\n");
return PTR_ERR(imx8_phy->iomuxc_gpr);
@@ -225,12 +259,6 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
-static const struct of_device_id imx8_pcie_phy_of_match[] = {
- {.compatible = "fsl,imx8mm-pcie-phy",},
- { },
-};
-MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
-
static struct platform_driver imx8_pcie_phy_driver = {
.probe = imx8_pcie_phy_probe,
.driver = {
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index d641b345a..251e1aedd 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -602,7 +602,7 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
u16 val;
fix_idx = 0;
- for (addr = 0; addr < 512; addr++) {
+ for (addr = 0; addr < ARRAY_SIZE(gbe_phy_init); addr++) {
/*
* All PHY register values are defined in full for 3.125Gbps
* SERDES speed. The values required for 1.25 Gbps are almost
@@ -610,11 +610,12 @@ static void comphy_gbe_phy_init(struct mvebu_a3700_comphy_lane *lane,
* comparison to 3.125 Gbps values. These register values are
* stored in "gbe_phy_init_fix" array.
*/
- if (!is_1gbps && gbe_phy_init_fix[fix_idx].addr == addr) {
+ if (!is_1gbps &&
+ fix_idx < ARRAY_SIZE(gbe_phy_init_fix) &&
+ gbe_phy_init_fix[fix_idx].addr == addr) {
/* Use new value */
val = gbe_phy_init_fix[fix_idx].value;
- if (fix_idx < ARRAY_SIZE(gbe_phy_init_fix))
- fix_idx++;
+ fix_idx++;
} else {
val = gbe_phy_init[addr];
}
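The fix bounds both table walks: the outer loop by ARRAY_SIZE(gbe_phy_init) and the fix-up lookup by ARRAY_SIZE(gbe_phy_init_fix), checking the index before the dereference. A generic sketch of that bounded-override lookup (types and names are illustrative):

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

struct reg_fix { u16 addr; u16 value; };	/* illustrative */

static u16 pick_value(const u16 *init, size_t addr,
		      const struct reg_fix *fix, size_t fix_len,
		      size_t *fix_idx, bool use_fix)
{
	/* Check the index before dereferencing the fix table, so the
	 * walk can never read past its end even when it has fewer
	 * entries than the main init table.
	 */
	if (use_fix && *fix_idx < fix_len && fix[*fix_idx].addr == addr)
		return fix[(*fix_idx)++].value;

	return init[addr];
}

/* e.g.  for (addr = 0; addr < ARRAY_SIZE(init_tbl); addr++)
 *		val = pick_value(init_tbl, addr, fix_tbl,
 *				 ARRAY_SIZE(fix_tbl), &fix_idx, !is_1gbps);
 */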
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 1d355b32b..c6aa6bc69 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -39,6 +39,8 @@
#define RK3588_BIFURCATION_LANE_0_1 BIT(0)
#define RK3588_BIFURCATION_LANE_2_3 BIT(1)
#define RK3588_LANE_AGGREGATION BIT(2)
+#define RK3588_PCIE1LN_SEL_EN (GENMASK(1, 0) << 16)
+#define RK3588_PCIE30_PHY_MODE_EN (GENMASK(2, 0) << 16)
struct rockchip_p3phy_ops;
@@ -131,7 +133,7 @@ static const struct rockchip_p3phy_ops rk3568_ops = {
static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
{
u32 reg = 0;
- u8 mode = 0;
+ u8 mode = RK3588_LANE_AGGREGATION; /* default */
int ret;
/* Deassert PCIe PMA output clamp mode */
@@ -139,31 +141,24 @@ static int rockchip_p3phy_rk3588_init(struct rockchip_p3phy_priv *priv)
/* Set bifurcation if needed */
for (int i = 0; i < priv->num_lanes; i++) {
- if (!priv->lanes[i])
- mode |= (BIT(i) << 3);
-
if (priv->lanes[i] > 1)
- mode |= (BIT(i) >> 1);
- }
-
- if (!mode)
- reg = RK3588_LANE_AGGREGATION;
- else {
- if (mode & (BIT(0) | BIT(1)))
- reg |= RK3588_BIFURCATION_LANE_0_1;
-
- if (mode & (BIT(2) | BIT(3)))
- reg |= RK3588_BIFURCATION_LANE_2_3;
+ mode &= ~RK3588_LANE_AGGREGATION;
+ if (priv->lanes[i] == 3)
+ mode |= RK3588_BIFURCATION_LANE_0_1;
+ if (priv->lanes[i] == 4)
+ mode |= RK3588_BIFURCATION_LANE_2_3;
}
- regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0, (0x7<<16) | reg);
+ reg = mode;
+ regmap_write(priv->phy_grf, RK3588_PCIE3PHY_GRF_CMN_CON0,
+ RK3588_PCIE30_PHY_MODE_EN | reg);
/* Set pcie1ln_sel in PHP_GRF_PCIESEL_CON */
if (!IS_ERR(priv->pipe_grf)) {
- reg = (mode & (BIT(6) | BIT(7))) >> 6;
+ reg = mode & (RK3588_BIFURCATION_LANE_0_1 | RK3588_BIFURCATION_LANE_2_3);
if (reg)
regmap_write(priv->pipe_grf, PHP_GRF_PCIESEL_CON,
- (reg << 16) | reg);
+ RK3588_PCIE1LN_SEL_EN | reg);
}
reset_control_deassert(priv->p30phy);
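Rockchip GRF registers carry a write-enable mask in bits 31:16, which is why RK3588_PCIE30_PHY_MODE_EN and RK3588_PCIE1LN_SEL_EN place GENMASK values in the high half: only low-half bits whose mirror bit is set actually get written. A small helper sketch of that encoding (the helper name is made up):

#include <linux/bits.h>
#include <linux/regmap.h>

/* Build a GRF write value: the field mask (bits 15:0) goes in the high
 * half as the write-enable mask, the new field value in the low half.
 */
static u32 grf_field(u32 mask, u32 value)
{
	return (mask << 16) | (value & mask);
}

/* e.g.  regmap_write(grf, CON0, grf_field(GENMASK(2, 0), mode)); */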
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 669c13d6e..bdd44ec3e 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -64,7 +64,6 @@ struct tusb1210 {
struct delayed_work chg_det_work;
struct notifier_block psy_nb;
struct power_supply *psy;
- struct power_supply *charger;
#endif
};
@@ -230,19 +229,24 @@ static const char * const tusb1210_chargers[] = {
static bool tusb1210_get_online(struct tusb1210 *tusb)
{
+ struct power_supply *charger = NULL;
union power_supply_propval val;
- int i;
+ bool online = false;
+ int i, ret;
- for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !tusb->charger; i++)
- tusb->charger = power_supply_get_by_name(tusb1210_chargers[i]);
+ for (i = 0; i < ARRAY_SIZE(tusb1210_chargers) && !charger; i++)
+ charger = power_supply_get_by_name(tusb1210_chargers[i]);
- if (!tusb->charger)
+ if (!charger)
return false;
- if (power_supply_get_property(tusb->charger, POWER_SUPPLY_PROP_ONLINE, &val))
- return false;
+ ret = power_supply_get_property(charger, POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0)
+ online = val.intval;
+
+ power_supply_put(charger);
- return val.intval;
+ return online;
}
static void tusb1210_chg_det_work(struct work_struct *work)
@@ -466,9 +470,6 @@ static void tusb1210_remove_charger_detect(struct tusb1210 *tusb)
cancel_delayed_work_sync(&tusb->chg_det_work);
power_supply_unregister(tusb->psy);
}
-
- if (tusb->charger)
- power_supply_put(tusb->charger);
}
#else
static void tusb1210_probe_charger_detect(struct tusb1210 *tusb) { }
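Instead of caching the power_supply pointer for the device's lifetime, the lookup above is now local: get the supply, read one property, and drop the reference before returning. A self-contained sketch of that get/read/put pattern:

#include <linux/power_supply.h>
#include <linux/types.h>

static bool charger_is_online(const char *const *names, int count)
{
	struct power_supply *psy = NULL;
	union power_supply_propval val;
	bool online = false;
	int i;

	for (i = 0; i < count && !psy; i++)
		psy = power_supply_get_by_name(names[i]);

	if (!psy)
		return false;

	if (!power_supply_get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val))
		online = val.intval;

	/* Drop the reference taken by power_supply_get_by_name(). */
	power_supply_put(psy);

	return online;
}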
diff --git a/drivers/pinctrl/renesas/core.c b/drivers/pinctrl/renesas/core.c
index c91102d3f..1c7f8caf7 100644
--- a/drivers/pinctrl/renesas/core.c
+++ b/drivers/pinctrl/renesas/core.c
@@ -921,9 +921,11 @@ static void __init sh_pfc_check_cfg_reg(const char *drvname,
sh_pfc_err("reg 0x%x: var_field_width declares %u instead of %u bits\n",
cfg_reg->reg, rw, cfg_reg->reg_width);
- if (n != cfg_reg->nr_enum_ids)
+ if (n != cfg_reg->nr_enum_ids) {
sh_pfc_err("reg 0x%x: enum_ids[] has %u instead of %u values\n",
cfg_reg->reg, cfg_reg->nr_enum_ids, n);
+ n = cfg_reg->nr_enum_ids;
+ }
check_enum_ids:
sh_pfc_check_reg_enums(drvname, cfg_reg->reg, cfg_reg->enum_ids, n);
diff --git a/drivers/platform/x86/intel/vbtn.c b/drivers/platform/x86/intel/vbtn.c
index c10c99a31..224139006 100644
--- a/drivers/platform/x86/intel/vbtn.c
+++ b/drivers/platform/x86/intel/vbtn.c
@@ -136,8 +136,6 @@ static int intel_vbtn_input_setup(struct platform_device *device)
priv->switches_dev->id.bustype = BUS_HOST;
if (priv->has_switches) {
- detect_tablet_mode(&device->dev);
-
ret = input_register_device(priv->switches_dev);
if (ret)
return ret;
@@ -316,6 +314,9 @@ static int intel_vbtn_probe(struct platform_device *device)
if (ACPI_FAILURE(status))
dev_err(&device->dev, "Error VBDL failed with ACPI status %d\n", status);
}
+ // Check switches after buttons since VBDL may have side effects.
+ if (has_switches)
+ detect_tablet_mode(&device->dev);
device_init_wakeup(&device->dev, true);
/*
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 11d72a353..399b97b54 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1178,6 +1178,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 dual-boot (CWI506) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI2.D86JHBNR02"),
+ },
+ },
+ {
/* Chuwi Vi8 Plus (CWI519) */
.driver_data = (void *)&chuwi_vi8_plus_data,
.matches = {
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 02813b63f..5666b9cc5 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -360,10 +360,8 @@ int ccw_device_set_online(struct ccw_device *cdev)
spin_lock_irq(cdev->ccwlock);
ret = ccw_device_online(cdev);
- spin_unlock_irq(cdev->ccwlock);
- if (ret == 0)
- wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
- else {
+ if (ret) {
+ spin_unlock_irq(cdev->ccwlock);
CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
"device 0.%x.%04x\n",
ret, cdev->private->dev_id.ssid,
@@ -372,7 +370,12 @@ int ccw_device_set_online(struct ccw_device *cdev)
put_device(&cdev->dev);
return ret;
}
- spin_lock_irq(cdev->ccwlock);
+ /* Wait until a final state is reached */
+ while (!dev_fsm_final_state(cdev)) {
+ spin_unlock_irq(cdev->ccwlock);
+ wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
+ spin_lock_irq(cdev->ccwlock);
+ }
/* Check if online processing was successful */
if ((cdev->private->state != DEV_STATE_ONLINE) &&
(cdev->private->state != DEV_STATE_W4SENSE)) {
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9cde55730..ebcb53580 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -722,8 +722,8 @@ static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
- int dstat)
+static int qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
+ int dstat, int dcc)
{
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
@@ -731,15 +731,18 @@ static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
goto error;
if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
goto error;
+ if (dcc == 1)
+ return -EAGAIN;
if (!(dstat & DEV_STAT_DEV_END))
goto error;
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
- return;
+ return 0;
error:
DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ return -EIO;
}
/* qdio interrupt handler */
@@ -748,7 +751,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
{
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
- int cstat, dstat;
+ int cstat, dstat, rc, dcc;
if (!intparm || !irq_ptr) {
ccw_device_get_schid(cdev, &schid);
@@ -768,10 +771,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
qdio_irq_check_sense(irq_ptr, irb);
cstat = irb->scsw.cmd.cstat;
dstat = irb->scsw.cmd.dstat;
+ dcc = scsw_cmd_is_valid_cc(&irb->scsw) ? irb->scsw.cmd.cc : 0;
+ rc = 0;
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(irq_ptr, cstat, dstat);
+ rc = qdio_establish_handle_irq(irq_ptr, cstat, dstat, dcc);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -785,12 +790,25 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (cstat || dstat)
qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
+ else if (dcc == 1)
+ rc = -EAGAIN;
break;
case QDIO_IRQ_STATE_STOPPED:
break;
default:
WARN_ON_ONCE(1);
}
+
+ if (rc == -EAGAIN) {
+ DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qint retry");
+ rc = ccw_device_start(cdev, irq_ptr->ccw, intparm, 0, 0);
+ if (!rc)
+ return;
+ DBF_ERROR("%4x RETRY ERR", irq_ptr->schid.sch_no);
+ DBF_ERROR("rc:%4x", rc);
+ qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
+ }
+
wake_up(&cdev->private->wait_q);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 450a85781..2116f5ee3 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -1715,7 +1715,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
if (dev_is_sata(device)) {
struct ata_link *link = &device->sata_dev.ap->link;
- rc = ata_wait_after_reset(link, HISI_SAS_WAIT_PHYUP_TIMEOUT,
+ rc = ata_wait_after_reset(link, jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT,
smp_ata_check_ready_type);
} else {
msleep(2000);
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b86ff9fcd..f21396a0b 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -748,8 +748,10 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Save the ELS cmd */
elsiocb->drvrTimeout = cmd;
- lpfc_sli4_resume_rpi(ndlp,
- lpfc_mbx_cmpl_resume_rpi, elsiocb);
+ if (lpfc_sli4_resume_rpi(ndlp,
+ lpfc_mbx_cmpl_resume_rpi,
+ elsiocb))
+ kfree(elsiocb);
goto out;
}
}
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index 7aee4d093..969008071 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -1058,7 +1058,7 @@ qla_edif_app_getstats(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
if (fcport->edif.enable) {
- if (pcnt > app_req.num_ports)
+ if (pcnt >= app_req.num_ports)
break;
app_reply->elem[pcnt].rekey_count =
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index edd296f95..5c5954b78 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -185,37 +185,39 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
+
/**
- * scsi_execute_cmd - insert request and wait for the result
- * @sdev: scsi_device
+ * __scsi_execute - insert request and wait for the result
+ * @sdev: scsi device
* @cmd: scsi command
- * @opf: block layer request cmd_flags
+ * @data_direction: data direction
* @buffer: data buffer
* @bufflen: len of buffer
+ * @sense: optional sense buffer
+ * @sshdr: optional decoded sense header
* @timeout: request timeout in HZ
* @retries: number of times to retry request
- * @args: Optional args. See struct definition for field descriptions
+ * @flags: flags for ->cmd_flags
+ * @rq_flags: flags for ->rq_flags
+ * @resid: optional residual length
*
* Returns the scsi_cmnd result field if a command was executed, or a negative
* Linux error code if we didn't get that far.
*/
-int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
- blk_opf_t opf, void *buffer, unsigned int bufflen,
- int timeout, int retries,
- const struct scsi_exec_args *args)
+int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, struct scsi_sense_hdr *sshdr,
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid)
{
- static const struct scsi_exec_args default_args;
struct request *req;
struct scsi_cmnd *scmd;
int ret;
- if (!args)
- args = &default_args;
- else if (WARN_ON_ONCE(args->sense &&
- args->sense_len != SCSI_SENSE_BUFFERSIZE))
- return -EINVAL;
-
- req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
+ req = scsi_alloc_request(sdev->request_queue,
+ data_direction == DMA_TO_DEVICE ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -230,7 +232,8 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
scmd->allowed = retries;
req->timeout = timeout;
- req->rq_flags |= RQF_QUIET;
+ req->cmd_flags |= flags;
+ req->rq_flags |= rq_flags | RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
@@ -246,21 +249,20 @@ int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
- if (args->resid)
- *args->resid = scmd->resid_len;
- if (args->sense)
- memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- if (args->sshdr)
+ if (resid)
+ *resid = scmd->resid_len;
+ if (sense && scmd->sense_len)
+ memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ if (sshdr)
scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
- args->sshdr);
-
+ sshdr);
ret = scmd->result;
out:
blk_mq_free_request(req);
return ret;
}
-EXPORT_SYMBOL(scsi_execute_cmd);
+EXPORT_SYMBOL(__scsi_execute);
/*
* Wake up the error handler if necessary. Avoid as follows that the error
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index f32236c3f..3ec9b324f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3299,8 +3299,8 @@ static void sd_read_block_zero(struct scsi_disk *sdkp)
put_unaligned_be32(0, &cmd[2]); /* Logical block address 0 */
put_unaligned_be16(1, &cmd[7]); /* Transfer 1 logical block */
- scsi_execute_cmd(sdkp->device, cmd, REQ_OP_DRV_IN, buffer, buf_len,
- SD_TIMEOUT, sdkp->max_retries, NULL);
+ scsi_execute_req(sdkp->device, cmd, DMA_FROM_DEVICE, buffer, buf_len,
+ NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
kfree(buffer);
}
diff --git a/drivers/soundwire/dmi-quirks.c b/drivers/soundwire/dmi-quirks.c
index 9ebdd0cd0..91ab97a45 100644
--- a/drivers/soundwire/dmi-quirks.c
+++ b/drivers/soundwire/dmi-quirks.c
@@ -131,6 +131,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
.driver_data = (void *)intel_rooks_county,
},
{
+ /* quirk used for NUC15 LAPRC710 skew */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
+ },
+ .driver_data = (void *)intel_rooks_county,
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A3E")
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 4104743db..202dce0d2 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -337,14 +337,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
int ret;
ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay-passive property\n", np);
+ if (ret == -EINVAL) {
+ *pdelay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
return ret;
}
ret = of_property_read_u32(np, "polling-delay", delay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay property\n", np);
+ if (ret == -EINVAL) {
+ *delay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
return ret;
}
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index 4ab3803e1..e81de9c30 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -10,6 +10,7 @@
static void quirk_force_power_link(struct tb_switch *sw)
{
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
+ tb_sw_dbg(sw, "forcing power to link controller\n");
}
static void quirk_dp_credit_allocation(struct tb_switch *sw)
@@ -42,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -86,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
@@ -116,6 +131,7 @@ void tb_check_quirks(struct tb_switch *sw)
if (q->device && q->device != sw->device)
continue;
+ tb_sw_dbg(sw, "running %ps\n", q->hook);
q->hook(sw);
}
}
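
tb_check_quirks() above walks a table in which zero-valued IDs act as wildcards and every matching entry's hook runs. A simplified standalone model of that dispatch; the IDs and hook below are invented for illustration:

#include <stdio.h>

struct sw { unsigned short vendor, device; unsigned int quirks; };

struct quirk {
	unsigned short vendor, device;	/* 0 = match any */
	void (*hook)(struct sw *sw);
};

static void quirk_keep_power(struct sw *sw)
{
	sw->quirks |= 1u << 2;
	printf("applied keep-power quirk\n");
}

static const struct quirk quirks[] = {
	{ 0x8087, 0x5781, quirk_keep_power },	/* hypothetical device ID */
};

static void check_quirks(struct sw *sw)
{
	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->vendor && q->vendor != sw->vendor)
			continue;
		if (q->device && q->device != sw->device)
			continue;
		q->hook(sw);	/* run every matching hook */
	}
}

int main(void)
{
	struct sw sw = { 0x8087, 0x5781, 0 };

	check_quirks(&sw);
	return 0;
}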
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 55698a097..d3058ede5 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2880,22 +2880,29 @@ void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
- if (sw->is_unplugged)
- return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
+ /*
+ * Unconfigure downstream port so that wake-on-connect can be
+ * configured after router unplug. No need to unconfigure upstream port
+ * since its router is unplugged.
+ */
up = tb_upstream_port(sw);
- if (tb_switch_is_usb4(up->sw))
- usb4_port_unconfigure(up);
- else
- tb_lc_unconfigure_port(up);
-
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
+
+ if (sw->is_unplugged)
+ return;
+
+ up = tb_upstream_port(sw);
+ if (tb_switch_is_usb4(up->sw))
+ usb4_port_unconfigure(up);
+ else
+ tb_lc_unconfigure_port(up);
}
static void tb_switch_credits_init(struct tb_switch *sw)
@@ -3135,7 +3142,26 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
return tb_lc_set_wake(sw, flags);
}
-int tb_switch_resume(struct tb_switch *sw)
+static void tb_switch_check_wakes(struct tb_switch *sw)
+{
+ if (device_may_wakeup(&sw->dev)) {
+ if (tb_switch_is_usb4(sw))
+ usb4_switch_check_wakes(sw);
+ }
+}
+
+/**
+ * tb_switch_resume() - Resume a switch after sleep
+ * @sw: Switch to resume
+ * @runtime: Is this resume from runtime suspend or system sleep
+ *
+ * Resumes and re-enumerates the router (and all its children) if it is
+ * still plugged in after suspend. Does not enumerate a device router whose
+ * UID changed during suspend. If this is a resume from system sleep,
+ * notifies the PM core about the wakes that occurred during suspend.
+ * Disables all wakes except the USB4 wake of the upstream port, which must
+ * always stay enabled for USB4 routers.
+ */
+int tb_switch_resume(struct tb_switch *sw, bool runtime)
{
struct tb_port *port;
int err;
@@ -3184,6 +3210,9 @@ int tb_switch_resume(struct tb_switch *sw)
if (err)
return err;
+ if (!runtime)
+ tb_switch_check_wakes(sw);
+
/* Disable wakes */
tb_switch_set_wake(sw, 0);
@@ -3213,7 +3242,8 @@ int tb_switch_resume(struct tb_switch *sw)
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
- if (port->remote && tb_switch_resume(port->remote->sw)) {
+ if (port->remote &&
+ tb_switch_resume(port->remote->sw, runtime)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index e1eb092ad..c5e4fa478 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1050,6 +1050,49 @@ err_rpm_put:
pm_runtime_put_autosuspend(&in->sw->dev);
}
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
+}
+
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
struct tb_port *in, *out;
@@ -1066,7 +1109,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1092,6 +1138,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -1581,7 +1628,7 @@ static int tb_resume_noirq(struct tb *tb)
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb->root_switch);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, false);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
@@ -1707,7 +1754,7 @@ static int tb_runtime_resume(struct tb *tb)
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
- tb_switch_resume(tb->root_switch);
+ tb_switch_resume(tb->root_switch, true);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
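
The redrive helpers above keep the host router powered by taking a runtime-PM reference when a monitor is driven directly from the Type-C port, and the port->redrive flag keeps the get/put pair balanced. A simplified userspace model of that bookkeeping; the types and helpers here are stand-ins, not the Thunderbolt API:

#include <stdbool.h>
#include <stdio.h>

struct dev { int pm_refcount; };
struct port { struct dev *dev; bool redrive; };

static void pm_runtime_get(struct dev *d) { d->pm_refcount++; }
static void pm_runtime_put(struct dev *d) { d->pm_refcount--; }

static void enter_redrive(struct port *p)
{
	if (p->redrive)
		return;			/* duplicate event: no extra reference */
	p->redrive = true;
	pm_runtime_get(p->dev);		/* keep the host router powered */
}

static void exit_redrive(struct port *p)
{
	if (!p->redrive)
		return;
	p->redrive = false;
	pm_runtime_put(p->dev);		/* balance the earlier get */
}

int main(void)
{
	struct dev d = { 0 };
	struct port p = { &d, false };

	enter_redrive(&p);	/* monitor plugged into the Type-C port */
	enter_redrive(&p);	/* repeated notification is harmless */
	exit_redrive(&p);
	printf("pm refcount = %d\n", d.pm_refcount);	/* back to 0 */
	return 0;
}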
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index f79cae48a..acf5b8620 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -27,6 +27,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -254,6 +256,7 @@ struct tb_switch {
* DMA paths through this port.
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -280,6 +283,7 @@ struct tb_port {
unsigned int ctl_credits;
unsigned int dma_credits;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -783,7 +787,7 @@ int tb_switch_configure(struct tb_switch *sw);
int tb_switch_add(struct tb_switch *sw);
void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
-int tb_switch_resume(struct tb_switch *sw);
+int tb_switch_resume(struct tb_switch *sw, bool runtime);
int tb_switch_reset(struct tb_switch *sw);
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec);
@@ -1178,6 +1182,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
return NULL;
}
+void usb4_switch_check_wakes(struct tb_switch *sw);
int usb4_switch_setup(struct tb_switch *sw);
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 3c821f5e4..b0394ba6d 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -153,15 +153,18 @@ static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
tx_dwords, rx_data, rx_dwords);
}
-static void usb4_switch_check_wakes(struct tb_switch *sw)
+/**
+ * usb4_switch_check_wakes() - Check for wakes and notify PM core about them
+ * @sw: Router whose wakes to check
+ *
+ * Checks for wakes that occurred during suspend and notifies the PM core
+ * about them.
+ */
+void usb4_switch_check_wakes(struct tb_switch *sw)
{
struct tb_port *port;
bool wakeup = false;
u32 val;
- if (!device_may_wakeup(&sw->dev))
- return;
-
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
@@ -226,8 +229,6 @@ int usb4_switch_setup(struct tb_switch *sw)
u32 val = 0;
int ret;
- usb4_switch_check_wakes(sw);
-
if (!tb_route(sw))
return 0;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 6b6abce6b..d2daf0a72 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2969,6 +2969,9 @@ static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
if (tty->ops->write == NULL)
return -EINVAL;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index d21a4f3ef..8b31017e7 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1094,11 +1094,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istat;
+ u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
- u32 stat = mxs_read(s, REG_STAT);
+ uart_port_lock(&s->port);
+
+ stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@@ -1134,6 +1136,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
+ uart_port_unlock(&s->port);
+
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index fe2e4ec42..daf15d23b 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -210,7 +210,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
- int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@@ -291,25 +290,12 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
- /* We can get stuck in an infinite loop getting char 0 when the
- * line is in a wrong HW state, we break that here.
- * When that happens, I disable the receive side of the driver.
- * Note that what I've been experiencing is a real irq loop where
- * I'm getting flooded regardless of the actual port speed.
- * Something strange is going on with the HW
- */
- if ((++loops) > 1000)
- goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
- flood:
- pmz_interrupt_control(uap, 0);
- pmz_error("pmz: rx irq flood !\n");
- return true;
}
static void pmz_status_handle(struct uart_pmac_port *uap)
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 2a9c40588..7d11511c8 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -755,6 +755,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
u32 sr;
unsigned int size;
+ irqreturn_t ret = IRQ_NONE;
sr = readl_relaxed(port->membase + ofs->isr);
@@ -763,11 +764,14 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
+ ret = IRQ_HANDLED;
}
- if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
+ ret = IRQ_HANDLED;
+ }
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
/* Clear wake up flag and disable wake up interrupt */
@@ -776,6 +780,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
+ ret = IRQ_HANDLED;
}
/*
@@ -790,6 +795,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
}
@@ -797,6 +803,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
spin_lock(&port->lock);
stm32_usart_transmit_chars(port);
spin_unlock(&port->lock);
+ ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */
@@ -806,9 +813,10 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
+ ret = IRQ_HANDLED;
}
- return IRQ_HANDLED;
+ return ret;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
@@ -1013,6 +1021,7 @@ static int stm32_usart_startup(struct uart_port *port)
val |= USART_CR2_SWAP;
writel_relaxed(val, port->membase + ofs->cr2);
}
+ stm32_port->throttled = false;
/* RX FIFO Flush */
if (ofs->rqr != UNDEF_REG)
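
The stm32 change above returns IRQ_HANDLED only when the handler actually serviced a pending condition, so the IRQ core can detect a spurious or stuck shared line. A compact sketch of that accumulation pattern, with a made-up status-register layout:

#include <stdio.h>

typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;

#define SR_TC  (1u << 0)	/* transmission complete */
#define SR_RXF (1u << 1)	/* receive FIFO not empty */

static irqreturn_t uart_irq(unsigned int sr)
{
	irqreturn_t ret = IRQ_NONE;

	if (sr & SR_TC) {
		/* ... disable the TC interrupt, deassert RTS ... */
		ret = IRQ_HANDLED;
	}
	if (sr & SR_RXF) {
		/* ... drain the RX FIFO ... */
		ret = IRQ_HANDLED;
	}
	return ret;	/* IRQ_NONE if no known condition was pending */
}

int main(void)
{
	printf("%d %d\n", uart_irq(0), uart_irq(SR_TC));	/* prints: 0 1 */
	return 0;
}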
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index fdc1a66b1..1f0951be1 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -485,7 +485,6 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
- int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@@ -500,10 +499,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
- used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
- if (used)
- goto out;
-
+ set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b1fb04e52..dea110241 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -60,6 +60,12 @@
#define USB_PING_RESPONSE_TIME 400 /* ns */
#define USB_REDUCE_FRAME_INTR_BINTERVAL 9
+/*
+ * The SET_ADDRESS request timeout is 500 ms when the
+ * USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT quirk flag is set.
+ */
+#define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
+
/* Protect struct usb_device->state and ->children members
* Note: Both are also protected by ->dev.sem, except that ->state can
* change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -4648,7 +4654,12 @@ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
static int hub_set_address(struct usb_device *udev, int devnum)
{
int retval;
+ unsigned int timeout_ms = USB_CTRL_SET_TIMEOUT;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+ if (hub->hdev->quirks & USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT)
+ timeout_ms = USB_SHORT_SET_ADDRESS_REQ_TIMEOUT;
/*
* The host controller will choose the device address,
@@ -4661,11 +4672,11 @@ static int hub_set_address(struct usb_device *udev, int devnum)
if (udev->state != USB_STATE_DEFAULT)
return -EINVAL;
if (hcd->driver->address_device)
- retval = hcd->driver->address_device(hcd, udev);
+ retval = hcd->driver->address_device(hcd, udev, timeout_ms);
else
retval = usb_control_msg(udev, usb_sndaddr0pipe(),
USB_REQ_SET_ADDRESS, 0, devnum, 0,
- NULL, 0, USB_CTRL_SET_TIMEOUT);
+ NULL, 0, timeout_ms);
if (retval == 0) {
update_devnum(udev, devnum);
/* Device now using proper address. */
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index e91fa567d..93a63b7f1 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -409,8 +409,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
- if (port_dev->child)
+ if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
+ usb_unlocked_disable_lpm(port_dev->child);
+ }
}
static const struct dev_pm_ops usb_port_pm_ops = {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 15e9bd180..b4783574b 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -138,6 +138,9 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
case 'o':
flags |= USB_QUIRK_HUB_SLOW_RESET;
break;
+ case 'p':
+ flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
+ break;
/* Ignore unrecognized flag characters */
}
}
@@ -527,6 +530,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+ /* APTIV AUTOMOTIVE HUB */
+ { USB_DEVICE(0x2c48, 0x0132), .driver_info =
+ USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
+
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 79582b102..994a78ad0 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -867,13 +867,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
- struct urb *usb_urb = qtd->urb->priv;
+ struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
+ usb_urb = qtd->urb->priv;
+
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 14601a2d2..b267ed9dc 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -884,7 +884,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
DBG(cdev, "reset ncm\n");
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
@@ -1369,7 +1369,7 @@ static void ncm_disable(struct usb_function *f)
DBG(cdev, "ncm deactivated\n");
- if (ncm->port.in_ep->enabled) {
+ if (ncm->netdev) {
ncm->netdev = NULL;
gether_disconnect(&ncm->port);
}
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index e81865978..be48d5ab1 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
data[1] = UVC_STREAM_EOH | video->fid;
+ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
+ data[1] |= UVC_STREAM_ERR;
+
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index ef08d68b9..2665832f9 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -207,8 +207,7 @@ EXPORT_SYMBOL_GPL(sb800_prefetch);
static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
- struct amd_chipset_info info;
- info.need_pll_quirk = false;
+ struct amd_chipset_info info = { };
spin_lock_irqsave(&amd_lock, flags);
@@ -218,7 +217,6 @@ static void usb_amd_find_chipset_info(void)
spin_unlock_irqrestore(&amd_lock, flags);
return;
}
- memset(&info, 0, sizeof(info));
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index b8b90eec9..48478eb71 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 019dcbe55..62808c987 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1752,6 +1752,8 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
}
command->status = 0;
+ /* set default timeout to 5000 ms */
+ command->timeout_ms = XHCI_CMD_DEFAULT_TIMEOUT;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 239b5edee..4a039e426 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -332,9 +332,10 @@ void xhci_ring_cmd_db(struct xhci_hcd *xhci)
readl(&xhci->dba->doorbell[0]);
}
-static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
+static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci)
{
- return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
+ return mod_delayed_work(system_wq, &xhci->cmd_timer,
+ msecs_to_jiffies(xhci->current_cmd->timeout_ms));
}
static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
@@ -378,7 +379,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
!(xhci->xhc_state & XHCI_STATE_DYING)) {
xhci->current_cmd = cur_cmd;
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
xhci_ring_cmd_db(xhci);
}
}
@@ -1762,7 +1763,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
if (!list_is_singular(&xhci->cmd_list)) {
xhci->current_cmd = list_first_entry(&cmd->cmd_list,
struct xhci_command, cmd_list);
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
} else if (xhci->current_cmd == cmd) {
xhci->current_cmd = NULL;
}
@@ -4339,7 +4340,7 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
/* if there are no other commands queued we start the timeout timer */
if (list_empty(&xhci->cmd_list)) {
xhci->current_cmd = cmd;
- xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
+ xhci_mod_cmd_timer(xhci);
}
list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 565aba6b9..27e01671d 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4170,12 +4170,18 @@ disable_slot:
return 0;
}
-/*
- * Issue an Address Device command and optionally send a corresponding
- * SetAddress request to the device.
+/**
+ * xhci_setup_device - issues an Address Device command to assign a unique
+ * USB bus address.
+ * @hcd: USB host controller data structure.
+ * @udev: USB dev structure representing the connected device.
+ * @setup: Enum specifying setup mode: address only or with context.
+ * @timeout_ms: Max wait time (ms) for the command operation to complete.
+ *
+ * Return: 0 if successful; otherwise, negative error code.
*/
static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
- enum xhci_setup_dev setup)
+ enum xhci_setup_dev setup, unsigned int timeout_ms)
{
const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
unsigned long flags;
@@ -4232,6 +4238,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
}
command->in_ctx = virt_dev->in_ctx;
+ command->timeout_ms = timeout_ms;
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
@@ -4358,14 +4365,16 @@ out:
return ret;
}
-static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev,
+ unsigned int timeout_ms)
{
- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS, timeout_ms);
}
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
- return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
+ return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY,
+ XHCI_CMD_DEFAULT_TIMEOUT);
}
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index fc25a5b09..fa9e87141 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -815,6 +815,8 @@ struct xhci_command {
struct completion *completion;
union xhci_trb *command_trb;
struct list_head cmd_list;
+ /* xHCI command response timeout in milliseconds */
+ unsigned int timeout_ms;
};
/* drop context bitmasks */
@@ -1574,8 +1576,11 @@ struct xhci_td {
unsigned int num_trbs;
};
-/* xHCI command default timeout value */
-#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ)
+/*
+ * xHCI command default timeout value in milliseconds.
+ * USB 3.2 spec, section 9.2.6.1
+ */
+#define XHCI_CMD_DEFAULT_TIMEOUT 5000
/* command descriptor */
struct xhci_cd {
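
The xHCI changes above give every command a timeout_ms field: 5000 ms by default at allocation time, overridable per caller (500 ms for hubs with the short SET_ADDRESS quirk). A rough standalone model of that plumbing; the structures and helpers are simplified stand-ins, not the xHCI API:

#include <stdio.h>
#include <stdlib.h>

#define CMD_DEFAULT_TIMEOUT_MS 5000

struct command { unsigned int timeout_ms; };

static struct command *alloc_command(void)
{
	struct command *cmd = calloc(1, sizeof(*cmd));

	if (cmd)
		cmd->timeout_ms = CMD_DEFAULT_TIMEOUT_MS;	/* default */
	return cmd;
}

static void queue_command(const struct command *cmd)
{
	/* the real driver arms a delayed work item for timeout_ms here */
	printf("armed command timer for %u ms\n", cmd->timeout_ms);
}

static int address_device(unsigned int timeout_ms)
{
	struct command *cmd = alloc_command();

	if (!cmd)
		return -1;
	cmd->timeout_ms = timeout_ms;	/* caller-specific override */
	queue_command(cmd);
	free(cmd);
	return 0;
}

int main(void)
{
	address_device(CMD_DEFAULT_TIMEOUT_MS);	/* normal device */
	address_device(500);			/* short SET_ADDRESS quirk */
	return 0;
}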
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1a3e5a941..b5ee8518f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM061K_LMS 0x0124
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_EM060K_128 0x0128
+#define QUECTEL_PRODUCT_EM060K_129 0x0129
+#define QUECTEL_PRODUCT_EM060K_12a 0x012a
+#define QUECTEL_PRODUCT_EM060K_12b 0x012b
+#define QUECTEL_PRODUCT_EM060K_12c 0x012c
#define QUECTEL_PRODUCT_EG91 0x0191
#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */
+ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
.driver_info = RSVD(4) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05), /* Longsung U8300 */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c), /* Longsung U9300 */
+ .driver_info = RSVD(0) | RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff), /* Fibocom FM135 (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) }, /* Fibocom FM650-CN (ECM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE(0x33f8, 0x0104), /* Rolling RW101-GL (laptop RMNET) */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) }, /* Rolling RW101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff), /* Rolling RW101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 816945913..f64976991 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -875,6 +875,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
{ .compatible = "nxp,ptn5110", },
+ { .compatible = "tcpci", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 61c72e62a..1b00ed5ef 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2523,9 +2523,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
r = vhost_get_avail_idx(vq, &avail_idx);
if (unlikely(r))
return false;
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return false;
+ }
- return vq->avail_idx == vq->last_avail_idx;
+ return true;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
@@ -2562,9 +2572,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
&vq->avail->idx, r);
return false;
}
+
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+ if (vq->avail_idx != vq->last_avail_idx) {
+ /* Since we have updated avail_idx, the following
+ * call to vhost_get_vq_desc() will read available
+ * ring entries. Make sure that read happens after
+ * the avail_idx read.
+ */
+ smp_rmb();
+ return true;
+ }
- return vq->avail_idx != vq->last_avail_idx;
+ return false;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
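
The comments above pair the avail_idx read with smp_rmb() so that reads of the ring entries cannot be speculated ahead of the index read that made them visible. A userspace analogue using C11 atomics, where the acquire load stands in for the index read plus smp_rmb(); the ring layout here is a toy, not the vhost one:

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 16

static unsigned short ring[RING_SIZE];
static _Atomic unsigned short avail_idx;

/* producer: write one entry, then publish it via the index */
static void publish(unsigned short idx, unsigned short val)
{
	ring[idx % RING_SIZE] = val;
	atomic_store_explicit(&avail_idx, idx + 1, memory_order_release);
}

/* consumer: returns 1 and fills *val if a new entry is available */
static int consume(unsigned short *last_idx, unsigned short *val)
{
	unsigned short idx =
		atomic_load_explicit(&avail_idx, memory_order_acquire);

	if (idx == *last_idx)
		return 0;	/* ring is empty */
	/* acquire orders the entry read after the index read */
	*val = ring[*last_idx % RING_SIZE];
	(*last_idx)++;
	return 1;
}

int main(void)
{
	unsigned short last = 0, v;

	publish(0, 42);
	if (consume(&last, &v))
		printf("got %u\n", v);
	return 0;
}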
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 49883c801..3b376345d 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -200,7 +200,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
- unsigned long offset = vmf->address - vmf->vma->vm_start;
+ unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index b0e690f41..9ca99da3a 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode)
{
- unsigned int htotal, vtotal;
+ unsigned int htotal, vtotal, total;
fbmode->xres = vm->hactive;
fbmode->left_margin = vm->hback_porch;
@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
vm->vsync_len;
/* prevent division by zero */
- if (htotal && vtotal) {
- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ total = htotal * vtotal;
+ if (total) {
+ fbmode->refresh = vm->pixelclock / total;
/* a mode must have htotal and vtotal != 0 or it is invalid */
} else {
fbmode->refresh = 0;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 0a1bc7a4d..1e04026f0 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
if (op != VIA_BITBLT_FILL) {
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x18);
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 828ced060..1ef094427 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -489,13 +489,19 @@ EXPORT_SYMBOL_GPL(unregister_virtio_device);
int virtio_device_freeze(struct virtio_device *dev)
{
struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
+ int ret;
virtio_config_disable(dev);
dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
- if (drv && drv->freeze)
- return drv->freeze(dev);
+ if (drv && drv->freeze) {
+ ret = drv->freeze(dev);
+ if (ret) {
+ virtio_config_enable(dev);
+ return ret;
+ }
+ }
return 0;
}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 18cf801ab..23d0372e8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -2475,20 +2475,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
size_t alloc_bytes;
alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
- data = kvmalloc(alloc_bytes, GFP_KERNEL);
+ data = kvzalloc(alloc_bytes, GFP_KERNEL);
if (!data)
return ERR_PTR(-ENOMEM);
- if (total_bytes >= sizeof(*data)) {
+ if (total_bytes >= sizeof(*data))
data->bytes_left = total_bytes - sizeof(*data);
- data->bytes_missing = 0;
- } else {
+ else
data->bytes_missing = sizeof(*data) - total_bytes;
- data->bytes_left = 0;
- }
-
- data->elem_cnt = 0;
- data->elem_missed = 0;
return data;
}
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index c6426080c..1494ce990 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1115,6 +1115,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
if (ret)
return ret;
+ ret = btrfs_record_root_in_trans(trans, node->root);
+ if (ret)
+ return ret;
ret = btrfs_update_delayed_inode(trans, node->root, path, node);
return ret;
}
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index fab7eb76e..58b0f04d7 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -161,8 +161,15 @@ struct dentry *btrfs_get_parent(struct dentry *child)
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto fail;
+ if (ret == 0) {
+ /*
+ * A key with offset -1 was found; there would have to exist an
+ * inode with that number or a root with that id.
+ */
+ ret = -EUCLEAN;
+ goto fail;
+ }
- BUG_ON(ret == 0); /* Key with offset of -1 found */
if (path->slots[0] == 0) {
ret = -ENOENT;
goto fail;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c14d4f70e..80ca7b435 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4154,6 +4154,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
BTRFS_QGROUP_RSV_META_PREALLOC);
trace_qgroup_meta_convert(root, num_bytes);
qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
+ if (!sb_rdonly(fs_info->sb))
+ add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
}
/*
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 9f7ffd9ef..754a9fb01 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1015,7 +1015,15 @@ static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
ret = PTR_ERR(start);
goto out;
}
- BUG_ON(start < p->buf);
+ if (unlikely(start < p->buf)) {
+ btrfs_err(root->fs_info,
+ "send: path ref buffer underflow for key (%llu %u %llu)",
+ found_key->objectid,
+ found_key->type,
+ found_key->offset);
+ ret = -EINVAL;
+ goto out;
+ }
}
p->start = start;
} else {
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index b172091f4..5549c843f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -700,14 +700,6 @@ again:
h->reloc_reserved = reloc_reserved;
}
- /*
- * Now that we have found a transaction to be a part of, convert the
- * qgroup reservation from prealloc to pertrans. A different transaction
- * can't race in and free our pertrans out from under us.
- */
- if (qgroup_reserved)
- btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
-
got_it:
if (!current->journal_info)
current->journal_info = h;
@@ -741,8 +733,15 @@ got_it:
* not just freed.
*/
btrfs_end_transaction(h);
- return ERR_PTR(ret);
+ goto reserve_fail;
}
+ /*
+ * Now that we have found a transaction to be a part of, convert the
+ * qgroup reservation from prealloc to pertrans. A different transaction
+ * can't race in and free our pertrans out from under us.
+ */
+ if (qgroup_reserved)
+ btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
return h;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 03cfb425e..ab5d410d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3381,7 +3381,17 @@ again:
mutex_unlock(&fs_info->reclaim_bgs_lock);
goto error;
}
- BUG_ON(ret == 0); /* Corruption */
+ if (ret == 0) {
+ /*
+ * On the first search we would find chunk tree with
+ * offset -1, which is not possible. On subsequent
+ * loops this would find an existing item on an invalid
+ * offset (one less than the previous one, wrong
+ * alignment and size).
+ */
+ ret = -EUCLEAN;
+ goto error;
+ }
ret = btrfs_previous_item(chunk_root, path, key.objectid,
key.type);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index bc0ca45a5..a843f9643 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2905,7 +2905,10 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
for (i = 0; i <= 13; i++)
seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ?
sg.info.bb_counters[i] : 0);
- seq_puts(seq, " ]\n");
+ seq_puts(seq, " ]");
+ if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info))
+ seq_puts(seq, " Block bitmap corrupted!");
+ seq_puts(seq, "\n");
return 0;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 601e097e1..274542d86 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -6751,6 +6751,10 @@ static int ext4_write_dquot(struct dquot *dquot)
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_commit(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to commit dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -6767,6 +6771,10 @@ static int ext4_acquire_dquot(struct dquot *dquot)
if (IS_ERR(handle))
return PTR_ERR(handle);
ret = dquot_acquire(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to acquire dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
@@ -6786,6 +6794,10 @@ static int ext4_release_dquot(struct dquot *dquot)
return PTR_ERR(handle);
}
ret = dquot_release(dquot);
+ if (ret < 0)
+ ext4_error_err(dquot->dq_sb, -ret,
+ "Failed to release dquot type %d",
+ dquot->dq_id.type);
err = ext4_journal_stop(handle);
if (!ret)
ret = err;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index df9d70588..8a6c7fdc1 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -908,8 +908,22 @@ root_found:
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
- if (IS_ERR(inode))
- goto out_no_root;
+
+ /*
+ * Fix for broken CDs with a corrupt root inode but a correct Joliet
+ * root directory.
+ */
+ if (IS_ERR(inode)) {
+ if (joliet_level && sbi->s_firstdatazone != first_data_zone) {
+ printk(KERN_NOTICE
+ "ISOFS: root inode is unusable. "
+ "Disabling Rock Ridge and switching to Joliet.");
+ sbi->s_rock = 0;
+ inode = NULL;
+ } else {
+ goto out_no_root;
+ }
+ }
/*
* Fix for broken CDs with Rock Ridge and empty ISO root directory but
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index decd64713..760405da8 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
#define S_SHIFT 12
static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
[S_IFREG >> S_SHIFT] = NILFS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = NILFS_FT_DIR,
[S_IFCHR >> S_SHIFT] = NILFS_FT_CHRDEV,
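
The nilfs2 fix above grows the table to (S_IFMT >> S_SHIFT) + 1 entries because the lookup index (mode & S_IFMT) >> S_SHIFT spans 0..15, so 16 slots are needed even though only a few are valid file types; a 15-slot table is overrun by any (possibly corrupted) on-disk mode with all type bits set. A small standalone illustration (the type values are arbitrary):

#include <assert.h>
#include <stdio.h>
#include <sys/stat.h>

#define S_SHIFT 12

static const unsigned char type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
	[S_IFREG >> S_SHIFT] = 1,	/* regular file */
	[S_IFDIR >> S_SHIFT] = 2,	/* directory */
	[S_IFLNK >> S_SHIFT] = 7,	/* symlink */
};

int main(void)
{
	static_assert((S_IFMT >> S_SHIFT) == 15, "index range is 0..15");

	/* worst-case index from a mode with every type bit set */
	unsigned int mode = S_IFMT | 0777;
	unsigned int idx = (mode & S_IFMT) >> S_SHIFT;

	printf("index %u -> type %u\n", idx, (unsigned int)type_by_mode[idx]);
	return 0;
}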
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
index 5254256a2..4ca8ed410 100644
--- a/fs/orangefs/super.c
+++ b/fs/orangefs/super.c
@@ -527,7 +527,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst,
sb->s_fs_info = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL);
if (!ORANGEFS_SB(sb)) {
d = ERR_PTR(-ENOMEM);
- goto free_sb_and_op;
+ goto free_op;
}
ret = orangefs_fill_sb(sb,
diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c
index 2770746bb..abca11772 100644
--- a/fs/pstore/zone.c
+++ b/fs/pstore/zone.c
@@ -973,6 +973,8 @@ static ssize_t psz_kmsg_read(struct pstore_zone *zone,
char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
kmsg_dump_reason_str(record->reason),
record->count);
+ if (!buf)
+ return -ENOMEM;
hlen = strlen(buf);
record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
if (!record->buf) {
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index f4ad343b0..2ca188191 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -386,8 +386,8 @@ smb2_close_cached_fid(struct kref *ref)
if (cfid->is_open) {
rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
cfid->fid.volatile_fid);
- if (rc != -EBUSY && rc != -EAGAIN)
- atomic_dec(&cfid->tcon->num_remote_opens);
+ if (rc) /* should we retry on -EBUSY or -EAGAIN? */
+ cifs_dbg(VFS, "close cached dir rc %d\n", rc);
}
free_cached_dir(cfid);
diff --git a/fs/smb/client/cifs_spnego.h b/fs/smb/client/cifs_spnego.h
index 7f102ffeb..e4d751b0c 100644
--- a/fs/smb/client/cifs_spnego.h
+++ b/fs/smb/client/cifs_spnego.h
@@ -24,7 +24,7 @@ struct cifs_spnego_msg {
uint32_t flags;
uint32_t sesskey_len;
uint32_t secblob_len;
- uint8_t data[1];
+ uint8_t data[];
};
#ifdef __KERNEL__
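
The [1] -> [] conversions in these cifs headers switch the structures to C99 flexible array members, so sizeof() covers only the fixed part and the trailing data is sized explicitly at allocation time. A generic sketch of that allocation pattern, not the cifs wire format:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg {
	unsigned int flags;
	unsigned int data_len;
	unsigned char data[];	/* flexible array member */
};

static struct msg *msg_alloc(const void *payload, unsigned int len)
{
	/* header + payload; no off-by-one from a fake data[1] element */
	struct msg *m = malloc(sizeof(*m) + len);

	if (!m)
		return NULL;
	m->flags = 0;
	m->data_len = len;
	memcpy(m->data, payload, len);
	return m;
}

int main(void)
{
	struct msg *m = msg_alloc("blob", 4);

	if (!m)
		return 1;
	printf("header %zu bytes, payload %u bytes\n", sizeof(*m), m->data_len);
	free(m);
	return 0;
}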
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 0a79771c8..f0a3336ff 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -387,6 +387,7 @@ cifs_alloc_inode(struct super_block *sb)
* server, can not assume caching of file data or metadata.
*/
cifs_set_oplock_level(cifs_inode, 0);
+ cifs_inode->lease_granted = false;
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 97bb18385..9cb457706 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -562,7 +562,7 @@ typedef union smb_com_session_setup_andx {
__u32 Reserved;
__le32 Capabilities; /* see below */
__le16 ByteCount;
- unsigned char SecurityBlob[1]; /* followed by */
+ unsigned char SecurityBlob[]; /* followed by */
/* STRING NativeOS */
/* STRING NativeLanMan */
} __attribute__((packed)) req; /* NTLM request format (with
@@ -582,7 +582,7 @@ typedef union smb_com_session_setup_andx {
__u32 Reserved; /* see below */
__le32 Capabilities;
__le16 ByteCount;
- unsigned char CaseInsensitivePassword[1]; /* followed by: */
+ unsigned char CaseInsensitivePassword[]; /* followed by: */
/* unsigned char * CaseSensitivePassword; */
/* STRING AccountName */
/* STRING PrimaryDomain */
@@ -599,7 +599,7 @@ typedef union smb_com_session_setup_andx {
__le16 Action; /* see below */
__le16 SecurityBlobLength;
__u16 ByteCount;
- unsigned char SecurityBlob[1]; /* followed by */
+ unsigned char SecurityBlob[]; /* followed by */
/* unsigned char * NativeOS; */
/* unsigned char * NativeLanMan; */
/* unsigned char * PrimaryDomain; */
@@ -618,7 +618,7 @@ typedef union smb_com_session_setup_andx {
__le16 PasswordLength;
__u32 Reserved; /* encrypt key len and offset */
__le16 ByteCount;
- unsigned char AccountPassword[1]; /* followed by */
+ unsigned char AccountPassword[]; /* followed by */
/* STRING AccountName */
/* STRING PrimaryDomain */
/* STRING NativeOS */
@@ -632,7 +632,7 @@ typedef union smb_com_session_setup_andx {
__le16 AndXOffset;
__le16 Action; /* see below */
__u16 ByteCount;
- unsigned char NativeOS[1]; /* followed by */
+ unsigned char NativeOS[]; /* followed by */
/* unsigned char * NativeLanMan; */
/* unsigned char * PrimaryDomain; */
} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
@@ -693,7 +693,7 @@ typedef struct smb_com_tconx_req {
__le16 Flags; /* see below */
__le16 PasswordLength;
__le16 ByteCount;
- unsigned char Password[1]; /* followed by */
+ unsigned char Password[]; /* followed by */
/* STRING Path *//* \\server\share name */
/* STRING Service */
} __attribute__((packed)) TCONX_REQ;
@@ -705,7 +705,7 @@ typedef struct smb_com_tconx_rsp {
__le16 AndXOffset;
__le16 OptionalSupport; /* see below */
__u16 ByteCount;
- unsigned char Service[1]; /* always ASCII, not Unicode */
+ unsigned char Service[]; /* always ASCII, not Unicode */
/* STRING NativeFileSystem */
} __attribute__((packed)) TCONX_RSP;
@@ -718,7 +718,7 @@ typedef struct smb_com_tconx_rsp_ext {
__le32 MaximalShareAccessRights;
__le32 GuestMaximalShareAccessRights;
__u16 ByteCount;
- unsigned char Service[1]; /* always ASCII, not Unicode */
+ unsigned char Service[]; /* always ASCII, not Unicode */
/* STRING NativeFileSystem */
} __attribute__((packed)) TCONX_RSP_EXT;
@@ -755,14 +755,14 @@ typedef struct smb_com_echo_req {
struct smb_hdr hdr;
__le16 EchoCount;
__le16 ByteCount;
- char Data[1];
+ char Data[];
} __attribute__((packed)) ECHO_REQ;
typedef struct smb_com_echo_rsp {
struct smb_hdr hdr;
__le16 SequenceNumber;
__le16 ByteCount;
- char Data[1];
+ char Data[];
} __attribute__((packed)) ECHO_RSP;
typedef struct smb_com_logoff_andx_req {
@@ -862,7 +862,7 @@ typedef struct smb_com_open_req { /* also handles create */
__le32 ImpersonationLevel;
__u8 SecurityFlags;
__le16 ByteCount;
- char fileName[1];
+ char fileName[];
} __attribute__((packed)) OPEN_REQ;
/* open response: oplock levels */
@@ -882,7 +882,7 @@ typedef struct smb_com_open_rsp {
__u8 OplockLevel;
__u16 Fid;
__le32 CreateAction;
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -939,7 +939,7 @@ typedef struct smb_com_openx_req {
__le32 Timeout;
__le32 Reserved;
__le16 ByteCount; /* file name follows */
- char fileName[1];
+ char fileName[];
} __attribute__((packed)) OPENX_REQ;
typedef struct smb_com_openx_rsp {
@@ -1087,7 +1087,7 @@ typedef struct smb_com_lock_req {
__le16 NumberOfUnlocks;
__le16 NumberOfLocks;
__le16 ByteCount;
- LOCKING_ANDX_RANGE Locks[1];
+ LOCKING_ANDX_RANGE Locks[];
} __attribute__((packed)) LOCK_REQ;
/* lock type */
@@ -1116,7 +1116,7 @@ typedef struct smb_com_rename_req {
__le16 SearchAttributes; /* target file attributes */
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII or Unicode */
- unsigned char OldFileName[1];
+ unsigned char OldFileName[];
/* followed by __u8 BufferFormat2 */
/* followed by NewFileName */
} __attribute__((packed)) RENAME_REQ;
@@ -1136,7 +1136,7 @@ typedef struct smb_com_copy_req {
__le16 Flags;
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII or Unicode */
- unsigned char OldFileName[1];
+ unsigned char OldFileName[];
/* followed by __u8 BufferFormat2 */
/* followed by NewFileName string */
} __attribute__((packed)) COPY_REQ;
@@ -1146,7 +1146,7 @@ typedef struct smb_com_copy_rsp {
__le16 CopyCount; /* number of files copied */
__u16 ByteCount; /* may be zero */
__u8 BufferFormat; /* 0x04 - only present if errored file follows */
- unsigned char ErrorFileName[1]; /* only present if error in copy */
+ unsigned char ErrorFileName[]; /* only present if error in copy */
} __attribute__((packed)) COPY_RSP;
#define CREATE_HARD_LINK 0x103
@@ -1160,7 +1160,7 @@ typedef struct smb_com_nt_rename_req { /* A5 - also used for create hardlink */
__le32 ClusterCount;
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII or Unicode */
- unsigned char OldFileName[1];
+ unsigned char OldFileName[];
/* followed by __u8 BufferFormat2 */
/* followed by NewFileName */
} __attribute__((packed)) NT_RENAME_REQ;
@@ -1175,7 +1175,7 @@ typedef struct smb_com_delete_file_req {
__le16 SearchAttributes;
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
- unsigned char fileName[1];
+ unsigned char fileName[];
} __attribute__((packed)) DELETE_FILE_REQ;
typedef struct smb_com_delete_file_rsp {
@@ -1187,7 +1187,7 @@ typedef struct smb_com_delete_directory_req {
struct smb_hdr hdr; /* wct = 0 */
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
- unsigned char DirName[1];
+ unsigned char DirName[];
} __attribute__((packed)) DELETE_DIRECTORY_REQ;
typedef struct smb_com_delete_directory_rsp {
@@ -1199,7 +1199,7 @@ typedef struct smb_com_create_directory_req {
struct smb_hdr hdr; /* wct = 0 */
__le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
- unsigned char DirName[1];
+ unsigned char DirName[];
} __attribute__((packed)) CREATE_DIRECTORY_REQ;
typedef struct smb_com_create_directory_rsp {
@@ -1211,7 +1211,7 @@ typedef struct smb_com_query_information_req {
struct smb_hdr hdr; /* wct = 0 */
__le16 ByteCount; /* 1 + namelen + 1 */
__u8 BufferFormat; /* 4 = ASCII */
- unsigned char FileName[1];
+ unsigned char FileName[];
} __attribute__((packed)) QUERY_INFORMATION_REQ;
typedef struct smb_com_query_information_rsp {
@@ -1231,7 +1231,7 @@ typedef struct smb_com_setattr_req {
__le16 reserved[5]; /* must be zero */
__u16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
- unsigned char fileName[1];
+ unsigned char fileName[];
} __attribute__((packed)) SETATTR_REQ;
typedef struct smb_com_setattr_rsp {
@@ -1313,7 +1313,7 @@ typedef struct smb_com_transaction_ioctl_req {
__u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
__le16 ByteCount;
__u8 Pad[3];
- __u8 Data[1];
+ __u8 Data[];
} __attribute__((packed)) TRANSACT_IOCTL_REQ;
typedef struct smb_com_transaction_compr_ioctl_req {
@@ -1431,8 +1431,8 @@ typedef struct smb_com_transaction_change_notify_req {
__u8 WatchTree; /* 1 = Monitor subdirectories */
__u8 Reserved2;
__le16 ByteCount;
-/* __u8 Pad[3];*/
-/* __u8 Data[1];*/
+/* __u8 Pad[3];*/
+/* __u8 Data[];*/
} __attribute__((packed)) TRANSACT_CHANGE_NOTIFY_REQ;
/* BB eventually change to use generic ntransact rsp struct
@@ -1521,7 +1521,7 @@ struct cifs_quota_data {
__u64 space_used;
__u64 soft_limit;
__u64 hard_limit;
- char sid[1]; /* variable size? */
+ char sid[]; /* variable size? */
} __attribute__((packed));
/* quota sub commands */
@@ -1673,7 +1673,7 @@ typedef struct smb_com_transaction2_qpi_req {
__u8 Pad;
__le16 InformationLevel;
__u32 Reserved4;
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) TRANSACTION2_QPI_REQ;
typedef struct smb_com_transaction2_qpi_rsp {
@@ -1706,7 +1706,7 @@ typedef struct smb_com_transaction2_spi_req {
__u16 Pad1;
__le16 InformationLevel;
__u32 Reserved4;
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) TRANSACTION2_SPI_REQ;
typedef struct smb_com_transaction2_spi_rsp {
@@ -1813,7 +1813,7 @@ typedef struct smb_com_transaction2_ffirst_req {
__le16 SearchFlags;
__le16 InformationLevel;
__le32 SearchStorageType;
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) TRANSACTION2_FFIRST_REQ;
typedef struct smb_com_transaction2_ffirst_rsp {
@@ -2024,7 +2024,7 @@ typedef struct smb_com_transaction2_get_dfs_refer_req {
perhaps?) followed by one byte pad - doesn't
seem to matter though */
__le16 MaxReferralLevel;
- char RequestFileName[1];
+ char RequestFileName[];
} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
#define DFS_VERSION cpu_to_le16(0x0003)
@@ -2053,7 +2053,7 @@ struct get_dfs_referral_rsp {
__le16 PathConsumed;
__le16 NumberOfReferrals;
__le32 DFSFlags;
- REFERRAL3 referrals[1]; /* array of level 3 dfs_referral structures */
+ REFERRAL3 referrals[]; /* array of level 3 dfs_referral structures */
/* followed by the strings pointed to by the referral structures */
} __packed;
@@ -2270,7 +2270,7 @@ typedef struct {
/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
/******************************************************************************/
typedef struct { /* data block encoding of response to level 263 QPathInfo */
- struct_group(common_attributes,
+ struct_group_attr(common_attributes, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -2292,7 +2292,10 @@ typedef struct { /* data block encoding of response to level 263 QPathInfo */
__le32 Mode;
__le32 AlignmentRequirement;
__le32 FileNameLength;
- char FileName[1];
+ union {
+ char __pad;
+ DECLARE_FLEX_ARRAY(char, FileName);
+ };
} __attribute__((packed)) FILE_ALL_INFO; /* level 0x107 QPathInfo */
typedef struct {
@@ -2330,7 +2333,7 @@ typedef struct {
} __attribute__((packed)) FILE_UNIX_BASIC_INFO; /* level 0x200 QPathInfo */
typedef struct {
- char LinkDest[1];
+ DECLARE_FLEX_ARRAY(char, LinkDest);
} __attribute__((packed)) FILE_UNIX_LINK_INFO; /* level 0x201 QPathInfo */
/* The following three structures are needed only for
@@ -2380,7 +2383,7 @@ struct file_end_of_file_info {
} __attribute__((packed)); /* size info, level 0x104 for set, 0x106 for query */
struct file_alt_name_info {
- __u8 alt_name[1];
+ DECLARE_FLEX_ARRAY(__u8, alt_name);
} __attribute__((packed)); /* level 0x0108 */
struct file_stream_info {
@@ -2490,7 +2493,10 @@ typedef struct {
__le32 NextEntryOffset;
__u32 ResumeKey; /* as with FileIndex - no need to convert */
FILE_UNIX_BASIC_INFO basic;
- char FileName[1];
+ union {
+ char __pad;
+ DECLARE_FLEX_ARRAY(char, FileName);
+ };
} __attribute__((packed)) FILE_UNIX_INFO; /* level 0x202 */
typedef struct {
@@ -2504,7 +2510,7 @@ typedef struct {
__le64 AllocationSize;
__le32 ExtFileAttributes;
__le32 FileNameLength;
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF resp data */
typedef struct {
@@ -2519,7 +2525,7 @@ typedef struct {
__le32 ExtFileAttributes;
__le32 FileNameLength;
__le32 EaSize; /* length of the xattrs */
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
typedef struct {
@@ -2536,7 +2542,7 @@ typedef struct {
__le32 EaSize; /* EA size */
__le32 Reserved;
__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
typedef struct {
@@ -2554,7 +2560,7 @@ typedef struct {
__u8 ShortNameLength;
__u8 Reserved;
__u8 ShortName[24];
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
typedef struct {
@@ -2569,7 +2575,7 @@ typedef struct {
__le32 AllocationSize;
__le16 Attributes; /* verify not u32 */
__u8 FileNameLength;
- char FileName[1];
+ char FileName[];
} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
@@ -2579,16 +2585,6 @@ struct win_dev {
__le64 minor;
} __attribute__((packed));
-struct gea {
- unsigned char name_len;
- char name[1];
-} __attribute__((packed));
-
-struct gealist {
- unsigned long list_len;
- struct gea list[1];
-} __attribute__((packed));
-
struct fea {
unsigned char EA_flags;
__u8 name_len;
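
The cifspdu.h hunks above convert the trailing one-element arrays (FileName[1],
DirName[1], and friends) into C99 flexible array members; the readdir.c and
client/smb2pdu.c hunks further down adjust their size arithmetic to match. A
minimal userspace sketch of why that arithmetic changes, using hypothetical
stand-in structs rather than the real SMB definitions:

#include <stdio.h>

struct old_style {			/* trailing one-element array */
	unsigned int name_len;
	char name[1];
} __attribute__((packed));

struct new_style {			/* C99 flexible array member */
	unsigned int name_len;
	char name[];
} __attribute__((packed));

int main(void)
{
	/* sizeof() of the old form counts the placeholder byte, the new form
	 * does not; so "sizeof(X) - 1" header sizes drop the "- 1", and code
	 * that relied on the implicit byte now adds "+ 1" explicitly. */
	printf("old=%zu new=%zu\n",
	       sizeof(struct old_style), sizeof(struct new_style));
	return 0;
}
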
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 4d5302b58..ca39d0107 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -676,6 +676,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
/* set the port that we got earlier */
cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
+ if (ctx->uid_specified && !ctx->forceuid_specified) {
+ ctx->override_uid = 1;
+ pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
+ }
+
+ if (ctx->gid_specified && !ctx->forcegid_specified) {
+ ctx->override_gid = 1;
+ pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
+ }
+
if (ctx->override_uid && !ctx->uid_specified) {
ctx->override_uid = 0;
pr_notice("ignoring forceuid mount option specified with no uid= option\n");
@@ -923,12 +933,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->override_uid = 0;
else
ctx->override_uid = 1;
+ ctx->forceuid_specified = true;
break;
case Opt_forcegid:
if (result.negated)
ctx->override_gid = 0;
else
ctx->override_gid = 1;
+ ctx->forcegid_specified = true;
break;
case Opt_perm:
if (result.negated)
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index 26093f54d..319a91b7f 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -154,6 +154,8 @@ enum cifs_param {
};
struct smb3_fs_context {
+ bool forceuid_specified;
+ bool forcegid_specified;
bool uid_specified;
bool cruid_specified;
bool gid_specified;
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 5990bdbae..9a1f1913f 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -497,7 +497,7 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
FIND_FILE_STANDARD_INFO *pfData;
pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
- new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
+ new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 +
pfData->FileNameLength;
} else {
u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
@@ -515,9 +515,9 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
new_entry, end_of_smb, old_entry);
return NULL;
} else if (((level == SMB_FIND_FILE_INFO_STANDARD) &&
- (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb))
+ (new_entry + sizeof(FIND_FILE_STANDARD_INFO) + 1 > end_of_smb))
|| ((level != SMB_FIND_FILE_INFO_STANDARD) &&
- (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
+ (new_entry + sizeof(FILE_DIRECTORY_INFO) + 1 > end_of_smb))) {
cifs_dbg(VFS, "search entry %p extends after end of SMB %p\n",
new_entry, end_of_smb);
return NULL;
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index cc425a616..e15bf116c 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -5073,10 +5073,10 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
- info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
+ info_buf_size = sizeof(FILE_DIRECTORY_INFO);
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
- info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
+ info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO);
break;
case SMB_FIND_FILE_POSIX_INFO:
/* note that posix payload are variable size */
diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
index 8d011fede..2823526b6 100644
--- a/fs/smb/client/smb2pdu.h
+++ b/fs/smb/client/smb2pdu.h
@@ -339,7 +339,7 @@ struct smb2_file_reparse_point_info {
} __packed;
struct smb2_file_network_open_info {
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
@@ -373,7 +373,7 @@ struct smb2_file_id_extd_directory_info {
__le32 EaSize; /* EA size */
__le32 ReparsePointTag; /* valid if FILE_ATTR_REPARSE_POINT set in FileAttributes */
__le64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit */
- char FileName[1];
+ char FileName[];
} __packed; /* level 60 */
extern char smb2_padding[7];
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index df44acaec..338b34c99 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -931,12 +931,15 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
}
+ spin_unlock(&server->mid_lock);
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
+ goto sync_mid_done;
}
spin_unlock(&server->mid_lock);
+sync_mid_done:
release_mid(mid);
return rc;
}
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index a3936ff53..25383b11d 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -699,7 +699,7 @@ struct smb2_close_rsp {
__le16 StructureSize; /* 60 */
__le16 Flags;
__le32 Reserved;
- struct_group(network_open_info,
+ struct_group_attr(network_open_info, __packed,
__le64 CreationTime;
__le64 LastAccessTime;
__le64 LastWriteTime;
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 11b201e6e..63b01f7d9 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
int rc;
bool is_chained = false;
- if (conn->ops->allocate_rsp_buf(work))
- return;
-
if (conn->ops->is_transform_hdr &&
conn->ops->is_transform_hdr(work->request_buf)) {
rc = conn->ops->decrypt_req(work);
- if (rc < 0) {
- conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
- goto send;
- }
-
+ if (rc < 0)
+ return;
work->encrypted = true;
}
+ if (conn->ops->allocate_rsp_buf(work))
+ return;
+
rc = conn->ops->init_rsp_hdr(work);
if (rc) {
/* either uid or tid is not correct */
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index c02b1772c..34d884254 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -534,6 +534,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (cmd == SMB2_QUERY_INFO_HE) {
struct smb2_query_info_req *req;
+ if (get_rfc1002_len(work->request_buf) <
+ offsetof(struct smb2_query_info_req, OutputBufferLength))
+ return -EINVAL;
+
req = smb2_get_msg(work->request_buf);
if ((req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index fe2c80ea2..a4c99ec38 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -746,10 +746,15 @@ retry:
goto out4;
}
+ /*
+ * explicitly handle file overwrite case, for compatibility with
+ * filesystems that may not support rename flags (e.g: fuse)
+ */
if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
err = -EEXIST;
goto out4;
}
+ flags &= ~(RENAME_NOREPLACE);
if (old_child == trap) {
err = -EINVAL;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index a12ac0356..f21e73d10 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -450,6 +450,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
kn = kernfs_find_and_get(kobj->sd, attr->name);
if (kn)
kernfs_break_active_protection(kn);
+ else
+ kobject_put(kobj);
return kn;
}
EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index 9925cfe57..17c7d7677 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -82,9 +82,6 @@ static inline sysv_zone_t *block_end(struct buffer_head *bh)
return (sysv_zone_t*)((char*)bh->b_data + bh->b_size);
}
-/*
- * Requires read_lock(&pointers_lock) or write_lock(&pointers_lock)
- */
static Indirect *get_branch(struct inode *inode,
int depth,
int offsets[],
@@ -104,15 +101,18 @@ static Indirect *get_branch(struct inode *inode,
bh = sb_bread(sb, block);
if (!bh)
goto failure;
+ read_lock(&pointers_lock);
if (!verify_chain(chain, p))
goto changed;
add_chain(++p, bh, (sysv_zone_t*)bh->b_data + *++offsets);
+ read_unlock(&pointers_lock);
if (!p->key)
goto no_block;
}
return NULL;
changed:
+ read_unlock(&pointers_lock);
brelse(bh);
*err = -EAGAIN;
goto no_block;
@@ -218,9 +218,7 @@ static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *b
goto out;
reread:
- read_lock(&pointers_lock);
partial = get_branch(inode, depth, offsets, chain, &err);
- read_unlock(&pointers_lock);
/* Simplest case - block found, no allocation needed */
if (!partial) {
@@ -290,9 +288,9 @@ static Indirect *find_shared(struct inode *inode,
*top = 0;
for (k = depth; k > 1 && !offsets[k-1]; k--)
;
+ partial = get_branch(inode, k, offsets, chain, &err);
write_lock(&pointers_lock);
- partial = get_branch(inode, k, offsets, chain, &err);
if (!partial)
partial = chain + k-1;
/*
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index ca73940e2..4195444ec 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -287,7 +287,12 @@ int __init xbc_init(const char *buf, size_t size, const char **emsg, int *epos);
int __init xbc_get_info(int *node_size, size_t *data_size);
/* XBC cleanup data structures */
-void __init xbc_exit(void);
+void __init _xbc_exit(bool early);
+
+static inline void xbc_exit(void)
+{
+ _xbc_exit(false);
+}
/* XBC embedded bootconfig data in kernel */
#ifdef CONFIG_BOOT_CONFIG_EMBED
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index b79097b90..5d6a5f309 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -659,4 +659,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
}
+#define DMA_FENCE_WARN(f, fmt, args...) \
+ do { \
+ struct dma_fence *__ff = (f); \
+ pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+ ##args); \
+ } while (0)
+
#endif /* __LINUX_DMA_FENCE_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index a541f0c4f..d7eef2158 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -594,6 +594,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
}
/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+ }
+}
+
+/**
* eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
* @skb: Buffer to pad
*
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 2b665c32f..2e09c269b 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -126,7 +126,7 @@ do { \
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
# define lockdep_hrtimer_enter(__hrtimer) false
-# define lockdep_hrtimer_exit(__context) do { } while (0)
+# define lockdep_hrtimer_exit(__context) do { (void)(__context); } while (0)
# define lockdep_posixtimer_enter() do { } while (0)
# define lockdep_posixtimer_exit() do { } while (0)
# define lockdep_irq_work_enter(__work) do { } while (0)
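
The irqflags.h hunk makes the !LOCKDEP stub evaluate its argument, so a local
context variable that exists only for the lockdep enter/exit pair is no longer
flagged as unused when lockdep is compiled out. A small sketch of the idea with
hypothetical macros (ctx_exit_empty/ctx_exit_voided are not kernel names):

#include <stdio.h>

#define ctx_exit_empty(ctx)	do { } while (0)
#define ctx_exit_voided(ctx)	do { (void)(ctx); } while (0)

int main(void)
{
	int context = 42;	/* exists only for the enter/exit pairing */

	/* With the empty variant, -Wall flags 'context' as unused; casting
	 * the argument to void marks it used while still emitting no code. */
	ctx_exit_voided(context);

	puts("ok");
	return 0;
}
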
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f5d89a4b8..4da7411da 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1383,6 +1383,7 @@ int pci_load_and_free_saved_state(struct pci_dev *dev,
struct pci_saved_state **state);
int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
void pci_pme_active(struct pci_dev *dev, bool enable);
@@ -1553,6 +1554,8 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
void *userdata);
+void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *),
+ void *userdata);
int pci_cfg_space_size(struct pci_dev *dev);
unsigned char pci_bus_max_busnr(struct pci_bus *bus);
void pci_setup_bridge(struct pci_bus *bus);
@@ -1884,6 +1887,8 @@ static inline int pci_save_state(struct pci_dev *dev) { return 0; }
static inline void pci_restore_state(struct pci_dev *dev) { }
static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ return 0; }
+static inline int pci_set_power_state_locked(struct pci_dev *dev, pci_power_t state)
+{ return 0; }
static inline int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{ return 0; }
static inline pci_power_t pci_choose_state(struct pci_dev *dev,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 73cc1e7dd..9e9794d03 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -158,6 +158,8 @@
#define PCI_VENDOR_ID_LOONGSON 0x0014
+#define PCI_VENDOR_ID_SOLIDIGM 0x025e
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h
index 5d868505a..6d92b68ef 100644
--- a/include/linux/randomize_kstack.h
+++ b/include/linux/randomize_kstack.h
@@ -80,7 +80,7 @@ DECLARE_PER_CPU(u32, kstack_offset);
if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \
&randomize_kstack_offset)) { \
u32 offset = raw_cpu_read(kstack_offset); \
- offset ^= (rand); \
+ offset = ror32(offset, 5) ^ (rand); \
raw_cpu_write(kstack_offset, offset); \
} \
} while (0)
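
The randomize_kstack change folds each new random value into the running offset
with a rotate-then-XOR instead of a bare XOR, so repeated or low-entropy inputs
no longer simply cancel out of the accumulator. A userspace sketch of the
difference; ror32() is re-implemented locally here rather than taken from the
kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Local model of the kernel's ror32(): rotate a 32-bit word right by shift. */
static uint32_t ror32(uint32_t word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((32 - shift) & 31));
}

int main(void)
{
	uint32_t plain = 0, mixed = 0;

	/* Feed the same few low bits twice: plain XOR cancels back to zero,
	 * while rotating the accumulator first keeps the earlier contribution
	 * spread across the word. */
	for (int i = 0; i < 2; i++) {
		uint32_t rand = 0x3f;
		plain ^= rand;
		mixed = ror32(mixed, 5) ^ rand;
	}
	printf("plain=%08x mixed=%08x\n", (unsigned)plain, (unsigned)mixed);
	return 0;
}
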
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 319698087..6858cae98 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -205,9 +205,9 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t);
do { \
int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
\
- if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
+ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \
likely(!___rttq_nesting)) { \
- rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
+ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \
} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
!READ_ONCE((t)->trc_reader_special.b.blocked)) { \
rcu_tasks_trace_qs_blkd(t); \
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c4a8520dc..d5f888fe0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2603,6 +2603,8 @@ static inline void skb_put_u8(struct sk_buff *skb, u8 val)
void *skb_push(struct sk_buff *skb, unsigned int len);
static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->data -= len;
skb->len += len;
return skb->data;
@@ -2611,6 +2613,8 @@ static inline void *__skb_push(struct sk_buff *skb, unsigned int len)
void *skb_pull(struct sk_buff *skb, unsigned int len);
static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
skb->len -= len;
if (unlikely(skb->len < skb->data_len)) {
#if defined(CONFIG_DEBUG_NET)
@@ -2634,6 +2638,8 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta);
static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
+ DEBUG_NET_WARN_ON_ONCE(len > INT_MAX);
+
if (likely(len <= skb_headlen(skb)))
return true;
if (unlikely(len > skb->len))
@@ -2796,6 +2802,11 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
skb->inner_network_header += offset;
}
+static inline bool skb_inner_network_header_was_set(const struct sk_buff *skb)
+{
+ return skb->inner_network_header > 0;
+}
+
static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
return skb->head + skb->inner_mac_header;
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 8ada7dc80..8f9bee0e2 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -186,7 +186,7 @@ struct rpc_wait_queue {
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
- unsigned short qlen; /* total # tasks waiting in queue */
+ unsigned int qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
const char * name;
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 48fabe365..8d8fac162 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -41,6 +41,7 @@ enum {
enum switchtec_gen {
SWITCHTEC_GEN3,
SWITCHTEC_GEN4,
+ SWITCHTEC_GEN5,
};
struct mrpc_regs {
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index 46040d663..79c3bbaa7 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -135,10 +135,11 @@ static inline void u64_stats_inc(u64_stats_t *p)
p->v++;
}
-static inline void u64_stats_init(struct u64_stats_sync *syncp)
-{
- seqcount_init(&syncp->seq);
-}
+#define u64_stats_init(syncp) \
+ do { \
+ struct u64_stats_sync *__s = (syncp); \
+ seqcount_init(&__s->seq); \
+ } while (0)
static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
{
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 5a89928ea..cd667acf6 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -371,8 +371,9 @@ struct hc_driver {
* or bandwidth constraints.
*/
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
+ /* Set the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
+ unsigned int timeout_ms);
/* prepares the hardware to send commands to the device */
int (*enable_device)(struct usb_hcd *, struct usb_device *udev);
/* Notifies the HCD after a hub descriptor is fetched.
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index eeb7c2157..59409c1fc 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -72,4 +72,7 @@
/* device has endpoints that should be ignored */
#define USB_QUIRK_ENDPOINT_IGNORE BIT(15)
+/* short SET_ADDRESS request timeout */
+#define USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT BIT(16)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 86eb2aba1..5bcc63ead 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -437,6 +437,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
refcount_inc(&ifp->refcnt);
}
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+ return refcount_inc_not_zero(&ifp->refcnt);
+}
/*
* compute link-local solicited-node multicast address
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 0920b669b..e7d71a516 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -54,7 +54,7 @@ struct unix_sock {
struct mutex iolock, bindlock;
struct sock *peer;
struct list_head link;
- atomic_long_t inflight;
+ unsigned long inflight;
spinlock_t lock;
unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0
@@ -79,6 +79,9 @@ enum unix_socket_lock_class {
U_LOCK_NORMAL,
U_LOCK_SECOND, /* for double locking, see unix_state_double_lock(). */
U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+ U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
+ * candidates to close a small race window.
+ */
};
static inline void unix_state_lock_nested(struct sock *sk,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bcc5a4cd2..5aaf7d7f3 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -565,6 +565,15 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk,
return skb;
}
+static inline int bt_copy_from_sockptr(void *dst, size_t dst_size,
+ sockptr_t src, size_t src_size)
+{
+ if (dst_size > src_size)
+ return -EINVAL;
+
+ return copy_from_sockptr(dst, src, dst_size);
+}
+
int bt_to_errno(u16 code);
__u8 bt_status(int err);
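
The new bt_copy_from_sockptr() helper rejects option buffers shorter than the
destination and copies at most the destination size; the l2cap_sock.c and sco.c
hunks below convert their setsockopt paths to it. A userspace model of that
bounds check, with bt_copy_model() as a local stand-in and memcpy() playing the
role of copy_from_sockptr():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Refuse undersized source data, otherwise copy exactly dst_size bytes. */
static int bt_copy_model(void *dst, size_t dst_size,
			 const void *src, size_t src_size)
{
	if (dst_size > src_size)
		return -EINVAL;
	memcpy(dst, src, dst_size);
	return 0;
}

int main(void)
{
	unsigned int opt = 0;
	unsigned char too_short[2]  = { 1, 2 };
	unsigned char big_enough[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	/* Undersized option data is rejected instead of being over-read. */
	printf("short: %d\n",
	       bt_copy_model(&opt, sizeof(opt), too_short, sizeof(too_short)));

	/* Oversized option data is truncated to the destination's size. */
	printf("big:   %d opt=0x%x\n",
	       bt_copy_model(&opt, sizeof(opt), big_enough, sizeof(big_enough)),
	       opt);
	return 0;
}
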
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ee369670e..f96b61d97 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -969,6 +969,14 @@ struct dsa_switch_ops {
void (*port_disable)(struct dsa_switch *ds, int port);
/*
+ * Compatibility between device trees defining multiple CPU ports and
+ * drivers which are not OK to use by default the numerically smallest
+ * CPU port of a switch for its local ports. This can return NULL,
+ * meaning "don't know/don't care".
+ */
+ struct dsa_port *(*preferred_default_local_cpu_port)(struct dsa_switch *ds);
+
+ /*
* Port's MAC EEE settings
*/
int (*set_mac_eee)(struct dsa_switch *ds, int port,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index bca80522f..f9906b73e 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -351,6 +351,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
return pskb_network_may_pull(skb, nhlen);
}
+/* Variant of pskb_inet_may_pull().
+ */
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+ int nhlen = 0, maclen = ETH_HLEN;
+ __be16 type = skb->protocol;
+
+ /* Essentially this is skb_protocol(skb, true)
+ * And we get MAC len.
+ */
+ if (eth_type_vlan(type))
+ type = __vlan_get_protocol(skb, type, &maclen);
+
+ switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ }
+ /* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+ * a base network header in skb->head.
+ */
+ if (!pskb_may_pull(skb, maclen + nhlen))
+ return false;
+
+ skb_set_network_header(skb, maclen);
+ return true;
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 65c93959c..dd578d193 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -302,6 +302,7 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
+ bool rx_uses_md_dst;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 4a767b3d2..df7775afb 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -335,7 +335,7 @@ int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);
-static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
__be16 proto;
@@ -351,6 +351,16 @@ static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
return 0;
}
+static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
+{
+ if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+ return false;
+
+ *inner_proto = __nf_flow_pppoe_proto(skb);
+
+ return true;
+}
+
#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \
diff --git a/include/net/sock.h b/include/net/sock.h
index 60577751e..77298c748 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1483,33 +1483,36 @@ sk_memory_allocated(const struct sock *sk)
/* 1 MB per cpu, in page units */
#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
+
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+ int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+ if (val)
+ atomic_long_add(val, proto->memory_allocated);
+}
static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
{
- int local_reserve;
+ struct proto *proto = sk->sk_prot;
- preempt_disable();
- local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
- if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
- __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
- atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
- }
- preempt_enable();
+ val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+ if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+ proto_memory_pcpu_drain(proto);
}
#define SK_ALLOC_PERCPU_COUNTER_BATCH 16
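
The sock.h rework accumulates memory charges in the per-CPU counter and folds
them into the shared counter only once they cross the new sysctl_mem_pcpu_rsv
reserve in either direction. A single-threaded sketch of that batch-and-drain
pattern; plain longs stand in for the per-CPU and atomic counters, and the
reserve value is an arbitrary illustration:

#include <stdio.h>

static long global_allocated;		/* stand-in for proto->memory_allocated */
static long pcpu_fw_alloc;		/* stand-in for one CPU's counter */
static const long reserve = 256;	/* stand-in for sysctl_mem_pcpu_rsv */

static void drain(void)
{
	long val = pcpu_fw_alloc;

	pcpu_fw_alloc = 0;
	if (val)
		global_allocated += val;
}

static void memory_allocated_add(long val)
{
	pcpu_fw_alloc += val;
	if (pcpu_fw_alloc >= reserve || pcpu_fw_alloc <= -reserve)
		drain();
}

int main(void)
{
	for (int i = 0; i < 300; i++)
		memory_allocated_add(1);	/* small charges stay local... */
	printf("global=%ld pcpu=%ld\n", global_allocated, pcpu_fw_alloc);

	memory_allocated_add(-300);		/* ...a big uncharge drains */
	printf("global=%ld pcpu=%ld\n", global_allocated, pcpu_fw_alloc);
	return 0;
}
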
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a64713fe5..1504d3137 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -480,51 +480,28 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-
-/* Optional arguments to scsi_execute_cmd */
-struct scsi_exec_args {
- unsigned char *sense; /* sense buffer */
- unsigned int sense_len; /* sense buffer len */
- struct scsi_sense_hdr *sshdr; /* decoded sense header */
- blk_mq_req_flags_t req_flags; /* BLK_MQ_REQ flags */
- int *resid; /* residual length */
-};
-
-int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
- blk_opf_t opf, void *buffer, unsigned int bufflen,
- int timeout, int retries,
- const struct scsi_exec_args *args);
-
+extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+ int data_direction, void *buffer, unsigned bufflen,
+ unsigned char *sense, struct scsi_sense_hdr *sshdr,
+ int timeout, int retries, blk_opf_t flags,
+ req_flags_t rq_flags, int *resid);
/* Make sure any sense buffer is the correct size. */
-#define scsi_execute(_sdev, _cmd, _data_dir, _buffer, _bufflen, _sense, \
- _sshdr, _timeout, _retries, _flags, _rq_flags, \
- _resid) \
+#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
+ sshdr, timeout, retries, flags, rq_flags, resid) \
({ \
- scsi_execute_cmd(_sdev, _cmd, (_data_dir == DMA_TO_DEVICE ? \
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN) | _flags, \
- _buffer, _bufflen, _timeout, _retries, \
- &(struct scsi_exec_args) { \
- .sense = _sense, \
- .sshdr = _sshdr, \
- .req_flags = _rq_flags & RQF_PM ? \
- BLK_MQ_REQ_PM : 0, \
- .resid = _resid, \
- }); \
+ BUILD_BUG_ON((sense) != NULL && \
+ sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
+ __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
+ sense, sshdr, timeout, retries, flags, rq_flags, \
+ resid); \
})
-
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
int retries, int *resid)
{
- return scsi_execute_cmd(sdev, cmd,
- data_direction == DMA_TO_DEVICE ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, buffer,
- bufflen, timeout, retries,
- &(struct scsi_exec_args) {
- .sshdr = sshdr,
- .resid = resid,
- });
+ return scsi_execute(sdev, cmd, data_direction, buffer,
+ bufflen, NULL, sshdr, timeout, retries, 0, 0, resid);
}
extern void sdev_disable_disk_events(struct scsi_device *sdev);
extern void sdev_enable_disk_events(struct scsi_device *sdev);
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index 3f121eed3..894d9fc8b 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -587,7 +587,7 @@ TRACE_EVENT(rpcgss_context,
__field(unsigned int, timeout)
__field(u32, window_size)
__field(int, len)
- __string(acceptor, data)
+ __string_len(acceptor, data, len)
),
TP_fast_assign(
@@ -596,7 +596,7 @@ TRACE_EVENT(rpcgss_context,
__entry->timeout = timeout;
__entry->window_size = window_size;
__entry->len = len;
- strncpy(__get_str(acceptor), data, len);
+ __assign_str(acceptor, data);
),
TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 7ad931a32..1ce8a9134 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -602,6 +602,7 @@
#define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */
#define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */
+#define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */
#define KEY_BUTTONCONFIG 0x240 /* AL Button Configuration */
#define KEY_TASKMANAGER 0x241 /* AL Task/Project Manager */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 57b8e2ffb..332515503 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -1043,6 +1043,7 @@
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
+#define PCI_EXP_DPC_RP_PIO_FEP 0x1f00 /* RP PIO First Err Ptr */
#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
diff --git a/init/Kconfig b/init/Kconfig
index b63dce670..537f01eba 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1920,11 +1920,11 @@ config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
+ depends on !CFI_CLANG
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
- select CONSTRUCTORS
help
Enables Rust support in the kernel.
diff --git a/init/main.c b/init/main.c
index ccde19e72..2c3397935 100644
--- a/init/main.c
+++ b/init/main.c
@@ -633,6 +633,8 @@ static void __init setup_command_line(char *command_line)
if (!saved_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
+ len = xlen + strlen(command_line) + 1;
+
static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
if (!static_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 415248c1f..958c3b619 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1978,6 +1978,13 @@ static void io_init_req_drain(struct io_kiocb *req)
}
}
+static __cold int io_init_fail_req(struct io_kiocb *req, int err)
+{
+ /* ensure per-opcode data is cleared if we fail before prep */
+ memset(&req->cmd.data, 0, sizeof(req->cmd.data));
+ return err;
+}
+
static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
const struct io_uring_sqe *sqe)
__must_hold(&ctx->uring_lock)
@@ -1998,29 +2005,29 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(opcode >= IORING_OP_LAST)) {
req->opcode = 0;
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
}
def = &io_op_defs[opcode];
if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
/* enforce forwards compatibility on users */
if (sqe_flags & ~SQE_VALID_FLAGS)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (sqe_flags & IOSQE_BUFFER_SELECT) {
if (!def->buffer_select)
- return -EOPNOTSUPP;
+ return io_init_fail_req(req, -EOPNOTSUPP);
req->buf_index = READ_ONCE(sqe->buf_group);
}
if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
ctx->drain_disabled = true;
if (sqe_flags & IOSQE_IO_DRAIN) {
if (ctx->drain_disabled)
- return -EOPNOTSUPP;
+ return io_init_fail_req(req, -EOPNOTSUPP);
io_init_req_drain(req);
}
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
- return -EACCES;
+ return io_init_fail_req(req, -EACCES);
/* knock it to the slow queue path, will be drained there */
if (ctx->drain_active)
req->flags |= REQ_F_FORCE_ASYNC;
@@ -2033,9 +2040,9 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
}
if (!def->ioprio && sqe->ioprio)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
if (def->needs_file) {
struct io_submit_state *state = &ctx->submit_state;
@@ -2059,12 +2066,12 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
req->creds = xa_load(&ctx->personalities, personality);
if (!req->creds)
- return -EINVAL;
+ return io_init_fail_req(req, -EINVAL);
get_cred(req->creds);
ret = security_uring_override_creds(req->creds);
if (ret) {
put_cred(req->creds);
- return ret;
+ return io_init_fail_req(req, ret);
}
req->flags |= REQ_F_CREDS;
}
@@ -2419,6 +2426,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return 0;
} while (ret > 0);
+ if (uts) {
+ struct timespec64 ts;
+
+ if (get_timespec64(&ts, uts))
+ return -EFAULT;
+ timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+ }
+
if (sig) {
#ifdef CONFIG_COMPAT
if (in_compat_syscall())
@@ -2432,14 +2447,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- if (uts) {
- struct timespec64 ts;
-
- if (get_timespec64(&ts, uts))
- return -EFAULT;
- timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
- }
-
init_waitqueue_func_entry(&iowq.wq, io_wake_function);
iowq.wq.private = current;
INIT_LIST_HEAD(&iowq.wq.entry);
diff --git a/io_uring/net.c b/io_uring/net.c
index b1b564c04..48404bd33 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -1229,6 +1229,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) {
kmsg = req->async_data;
+ kmsg->msg.msg_control_user = sr->msg_control;
} else {
ret = io_sendmsg_copy_hdr(req, &iomsg);
if (ret)
diff --git a/kernel/bounds.c b/kernel/bounds.c
index c5a9fcd2d..29b2cd00d 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -19,7 +19,7 @@ int main(void)
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
#ifdef CONFIG_SMP
- DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+ DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
#ifdef CONFIG_LRU_GEN
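
The kernel/bounds.c change sizes NR_CPUS_BITS with order_base_2() instead of
bits_per(); for a power-of-two CONFIG_NR_CPUS the two differ by one bit, and
order_base_2() is exactly what is needed to index CPUs 0..NR_CPUS-1. A
userspace sketch with local re-implementations of the two helpers (assumed
semantics, mirroring include/linux/log2.h):

#include <stdio.h>

static unsigned int ilog2_u(unsigned long n)	/* floor(log2(n)), n > 0 */
{
	unsigned int r = 0;

	while (n >>= 1)
		r++;
	return r;
}

static unsigned int order_base_2_u(unsigned long n)	/* ceil(log2(n)) */
{
	return n <= 1 ? 0 : ilog2_u(n - 1) + 1;
}

static unsigned int bits_per_u(unsigned long n)	/* bits to store n itself */
{
	return n <= 1 ? 1 : ilog2_u(n) + 1;
}

int main(void)
{
	/* For CONFIG_NR_CPUS = 512, bits_per() yields 10 while 9 bits are
	 * enough to hold any CPU number in 0..511. */
	unsigned long nr_cpus = 512;

	printf("bits_per=%u order_base_2=%u\n",
	       bits_per_u(nr_cpus), order_base_2_u(nr_cpus));
	return 0;
}
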
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e6f010194..e0e09b700 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2788,7 +2788,8 @@ enum cpu_mitigations {
};
static enum cpu_mitigations cpu_mitigations __ro_after_init =
- CPU_MITIGATIONS_AUTO;
+ IS_ENABLED(CONFIG_CPU_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
+ CPU_MITIGATIONS_OFF;
static int __init mitigations_parse_cmdline(char *arg)
{
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 63859a101..d4215739e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -296,7 +296,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
} else {
ret = page_address(page);
if (dma_set_decrypted(dev, ret, size))
- goto out_free_pages;
+ goto out_leak_pages;
}
memset(ret, 0, size);
@@ -317,6 +317,8 @@ out_encrypt_pages:
out_free_pages:
__dma_direct_free_pages(dev, page, size);
return NULL;
+out_leak_pages:
+ return NULL;
}
void dma_direct_free(struct device *dev, size_t size,
@@ -379,12 +381,11 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
ret = page_address(page);
if (dma_set_decrypted(dev, ret, size))
- goto out_free_pages;
+ goto out_leak_pages;
memset(ret, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
-out_free_pages:
- __dma_direct_free_pages(dev, page, size);
+out_leak_pages:
return NULL;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 856179280..7e9a59192 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -662,6 +662,15 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
} else if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~(VM_LOCKED | VM_LOCKONFAULT);
+ /*
+ * Copy/update hugetlb private vma information.
+ */
+ if (is_vm_hugetlb_page(tmp))
+ hugetlb_dup_vma_private(tmp);
+
+ if (tmp->vm_ops && tmp->vm_ops->open)
+ tmp->vm_ops->open(tmp);
+
file = tmp->vm_file;
if (file) {
struct address_space *mapping = file->f_mapping;
@@ -678,12 +687,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
i_mmap_unlock_write(mapping);
}
- /*
- * Copy/update hugetlb private vma information.
- */
- if (is_vm_hugetlb_page(tmp))
- hugetlb_dup_vma_private(tmp);
-
/* Link the vma into the MT */
mas.index = tmp->vm_start;
mas.last = tmp->vm_end - 1;
@@ -695,9 +698,6 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
if (!(tmp->vm_flags & VM_WIPEONFORK))
retval = copy_page_range(tmp, mpnt);
- if (tmp->vm_ops && tmp->vm_ops->open)
- tmp->vm_ops->open(tmp);
-
if (retval)
goto loop_out;
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index dbfddfa86..5b5ee060a 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1567,10 +1567,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
jump_label_lock();
preempt_disable();
- /* Ensure it is not in reserved area nor out of text */
- if (!(core_kernel_text((unsigned long) p->addr) ||
- is_module_text_address((unsigned long) p->addr)) ||
- in_gate_area_no_mm((unsigned long) p->addr) ||
+ /* Ensure the address is in a text area, and find a module if exists. */
+ *probed_mod = NULL;
+ if (!core_kernel_text((unsigned long) p->addr)) {
+ *probed_mod = __module_text_address((unsigned long) p->addr);
+ if (!(*probed_mod)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ /* Ensure it is not in reserved area. */
+ if (in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
@@ -1580,8 +1587,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
goto out;
}
- /* Check if 'p' is probing a module. */
- *probed_mod = __module_text_address((unsigned long) p->addr);
+ /* Get module refcount and reject __init functions for loaded modules. */
if (*probed_mod) {
/*
* We must hold a refcount of the probed module while updating
diff --git a/kernel/panic.c b/kernel/panic.c
index 63e94f3bd..e6c2bf04a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -441,6 +441,14 @@ void panic(const char *fmt, ...)
/* Do not scroll important messages printed above */
suppress_printk = 1;
+
+ /*
+ * The final messages may not have been printed if in a context that
+ * defers printing (such as NMI) and irq_work is not available.
+ * Explicitly flush the kernel log buffer one last time.
+ */
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
local_irq_enable();
for (i = 0; ; i += PANIC_TIMER_STEP) {
touch_softlockup_watchdog();
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index a718067de..3aae526cc 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -106,6 +106,12 @@ static void s2idle_enter(void)
swait_event_exclusive(s2idle_wait_head,
s2idle_state == S2IDLE_STATE_WAKE);
+ /*
+ * Kick all CPUs to ensure that they resume their timers and restore
+ * consistent system state.
+ */
+ wake_up_all_idle_cpus();
+
cpus_read_unlock();
raw_spin_lock_irq(&s2idle_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 431a922e5..337162e0c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1543,7 +1543,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
- local_inc(&cpu_buffer->pages_touched);
/*
* Just make sure we have seen our old_write and synchronize
* with any interrupts that come in.
@@ -1580,8 +1579,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_set(&next_page->page->commit, 0);
- /* Again, either we update tail_page or an interrupt does */
- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
+ /* Either we update tail_page or an interrupt does */
+ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
+ local_inc(&cpu_buffer->pages_touched);
}
}
@@ -4431,7 +4431,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
cpu_buffer = iter->cpu_buffer;
reader = cpu_buffer->reader_page;
head_page = cpu_buffer->head_page;
- commit_page = cpu_buffer->commit_page;
+ commit_page = READ_ONCE(cpu_buffer->commit_page);
commit_ts = commit_page->page->time_stamp;
/*
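
The ring_buffer.c hunk above increments pages_touched only when this path
actually wins the tail-page swap: try_cmpxchg() reports whether the exchange
happened (and refreshes the expected value when it did not). A userspace model
of that pattern using the GCC/Clang __atomic builtins, with try_cmpxchg_long()
as a local stand-in:

#include <stdbool.h>
#include <stdio.h>

/* Succeeds only if *ptr still equals *oldp; on failure *oldp is updated. */
static bool try_cmpxchg_long(long *ptr, long *oldp, long new_val)
{
	return __atomic_compare_exchange_n(ptr, oldp, new_val, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	long tail = 1, expected = 1, pages_touched = 0;

	/* The first updater wins the swap and accounts for the new page... */
	if (try_cmpxchg_long(&tail, &expected, 2))
		pages_touched++;

	/* ...a racer with a stale expectation fails and does not double count. */
	expected = 1;
	if (try_cmpxchg_long(&tail, &expected, 2))
		pages_touched++;

	printf("tail=%ld pages_touched=%ld\n", tail, pages_touched);
	return 0;
}
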
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index a6d2f99f8..24859d964 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1669,6 +1669,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PERF_EVENTS
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
@@ -1683,6 +1684,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
+#endif
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
@@ -2127,10 +2129,12 @@ static const struct file_operations ftrace_event_format_fops = {
.release = seq_release,
};
+#ifdef CONFIG_PERF_EVENTS
static const struct file_operations ftrace_event_id_fops = {
.read = event_id_read,
.llseek = default_llseek,
};
+#endif
static const struct file_operations ftrace_event_filter_fops = {
.open = tracing_open_file_tr,
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index c59d26068..884155443 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -61,9 +61,12 @@ static inline void * __init xbc_alloc_mem(size_t size)
return memblock_alloc(size, SMP_CACHE_BYTES);
}
-static inline void __init xbc_free_mem(void *addr, size_t size)
+static inline void __init xbc_free_mem(void *addr, size_t size, bool early)
{
- memblock_free(addr, size);
+ if (early)
+ memblock_free(addr, size);
+ else if (addr)
+ memblock_free_late(__pa(addr), size);
}
#else /* !__KERNEL__ */
@@ -73,7 +76,7 @@ static inline void *xbc_alloc_mem(size_t size)
return malloc(size);
}
-static inline void xbc_free_mem(void *addr, size_t size)
+static inline void xbc_free_mem(void *addr, size_t size, bool early)
{
free(addr);
}
@@ -904,13 +907,13 @@ static int __init xbc_parse_tree(void)
* If you need to reuse xbc_init() with new boot config, you can
* use this.
*/
-void __init xbc_exit(void)
+void __init _xbc_exit(bool early)
{
- xbc_free_mem(xbc_data, xbc_data_size);
+ xbc_free_mem(xbc_data, xbc_data_size, early);
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_free_mem(xbc_nodes, sizeof(struct xbc_node) * XBC_NODE_MAX, early);
xbc_nodes = NULL;
brace_index = 0;
}
@@ -963,7 +966,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
if (!xbc_nodes) {
if (emsg)
*emsg = "Failed to allocate bootconfig nodes";
- xbc_exit();
+ _xbc_exit(true);
return -ENOMEM;
}
memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
@@ -977,7 +980,7 @@ int __init xbc_init(const char *data, size_t size, const char **emsg, int *epos)
*epos = xbc_err_pos;
if (emsg)
*emsg = xbc_err_msg;
- xbc_exit();
+ _xbc_exit(true);
} else
ret = xbc_node_num;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 79e894cf8..77eb944b7 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -466,10 +466,10 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
- * contexts and I/O.
+ * contexts, I/O, nolockdep.
*/
alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
if (page)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5b846ed5d..be58ce999 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -84,11 +84,23 @@ static int __page_handle_poison(struct page *page)
{
int ret;
- zone_pcp_disable(page_zone(page));
+ /*
+ * zone_pcp_disable() can't be used here. It will
+ * hold pcp_batch_high_lock and dissolve_free_huge_page() might hold
+ * cpu_hotplug_lock via static_key_slow_dec() when hugetlb vmemmap
+ * optimization is enabled. This will break current lock dependency
+ * chain and leads to deadlock.
+ * Disabling pcp before dissolving the page was a deterministic
+ * approach because we made sure that those pages cannot end up in any
+ * PCP list. Draining PCP lists expels those pages to the buddy system,
+ * but nothing guarantees that those pages do not get back to a PCP
+ * queue if we need to refill those.
+ */
ret = dissolve_free_huge_page(page);
- if (!ret)
+ if (!ret) {
+ drain_all_pages(page_zone(page));
ret = take_page_off_buddy(page);
- zone_pcp_enable(page_zone(page));
+ }
return ret;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6b4c25a92..0bffac238 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -103,7 +103,7 @@ again:
s->ax25_dev = NULL;
if (sk->sk_socket) {
netdev_put(ax25_dev->dev,
- &ax25_dev->dev_tracker);
+ &s->dev_tracker);
ax25_dev_put(ax25_dev);
}
ax25_cb_del(s);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5d8cee747..4fc66cd95 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -3948,7 +3948,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
spin_lock_bh(&bat_priv->tt.commit_lock);
- while (true) {
+ while (timeout) {
table_size = batadv_tt_local_table_transmit_size(bat_priv);
if (packet_size_max >= table_size)
break;
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 4468647df..cf69e973b 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -105,8 +105,10 @@ void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
if (hdev->req_status == HCI_REQ_PEND) {
hdev->req_result = result;
hdev->req_status = HCI_REQ_DONE;
- if (skb)
+ if (skb) {
+ kfree_skb(hdev->req_skb);
hdev->req_skb = skb_get(skb);
+ }
wake_up_interruptible(&hdev->req_wait_q);
}
}
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 947ca580b..e3c7029ec 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -457,7 +457,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
u32 opt;
BT_DBG("sk %p", sk);
@@ -504,7 +505,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mode 0x%2.2x", chan->mode);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
err = -EFAULT;
@@ -554,7 +555,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = chan->conn->hcon->handle;
memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
err = -EFAULT;
@@ -745,7 +746,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
struct l2cap_options opts;
- int len, err = 0;
+ int err = 0;
u32 opt;
BT_DBG("sk %p", sk);
@@ -772,11 +773,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
opts.max_tx = chan->max_tx;
opts.txwin_size = chan->tx_win;
- len = min_t(unsigned int, sizeof(opts), optlen);
- if (copy_from_sockptr(&opts, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opts, sizeof(opts), optval, optlen);
+ if (err)
break;
- }
if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) {
err = -EINVAL;
@@ -819,10 +818,9 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
break;
case L2CAP_LM:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt & L2CAP_LM_FIPS) {
err = -EINVAL;
@@ -903,7 +901,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
struct bt_security sec;
struct bt_power pwr;
struct l2cap_conn *conn;
- int len, err = 0;
+ int err = 0;
u32 opt;
u16 mtu;
u8 mode;
@@ -929,11 +927,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
sec.level = BT_SECURITY_LOW;
- len = min_t(unsigned int, sizeof(sec), optlen);
- if (copy_from_sockptr(&sec, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&sec, sizeof(sec), optval, optlen);
+ if (err)
break;
- }
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_FIPS) {
@@ -978,10 +974,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt) {
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -993,10 +988,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_FLUSHABLE:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt > BT_FLUSHABLE_ON) {
err = -EINVAL;
@@ -1028,11 +1022,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
- len = min_t(unsigned int, sizeof(pwr), optlen);
- if (copy_from_sockptr(&pwr, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&pwr, sizeof(pwr), optval, optlen);
+ if (err)
break;
- }
if (pwr.force_active)
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -1041,10 +1033,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_CHANNEL_POLICY:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) {
err = -EINVAL;
@@ -1089,10 +1080,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&mtu, sizeof(mtu), optval, optlen);
+ if (err)
break;
- }
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
sk->sk_state == BT_CONNECTED)
@@ -1120,10 +1110,9 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&mode, sizeof(mode), optval, optlen);
+ if (err)
break;
- }
BT_DBG("mode %u", mode);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 716f6dc49..76dac5a90 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2680,7 +2680,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+ /* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0) {
mgmt_pending_free(cmd);
goto failed;
@@ -2774,8 +2778,11 @@ update_class:
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
- mgmt_class_complete);
+ /* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -2841,8 +2848,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
- mgmt_class_complete);
+ /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be UP/running, so use
+ * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+ */
+ err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
+ mgmt_class_complete);
if (err < 0)
mgmt_pending_free(cmd);
@@ -5530,8 +5540,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
- mgmt_remove_adv_monitor_complete);
+ err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
+ mgmt_remove_adv_monitor_complete);
if (err) {
mgmt_pending_remove(cmd);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 6d4168cfe..4a6bf60f3 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -831,7 +831,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int len, err = 0;
+ int err = 0;
struct bt_voice voice;
u32 opt;
struct bt_codecs *codecs;
@@ -850,10 +850,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
@@ -870,11 +869,10 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
voice.setting = sco_pi(sk)->setting;
- len = min_t(unsigned int, sizeof(voice), optlen);
- if (copy_from_sockptr(&voice, optval, len)) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&voice, sizeof(voice), optval,
+ optlen);
+ if (err)
break;
- }
/* Explicitly check for these values */
if (voice.setting != BT_VOICE_TRANSPARENT &&
@@ -897,10 +895,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
case BT_PKT_STATUS:
- if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
- err = -EFAULT;
+ err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen);
+ if (err)
break;
- }
if (opt)
sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
@@ -941,9 +938,9 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(buffer, optval, optlen)) {
+ err = bt_copy_from_sockptr(buffer, optlen, optval, optlen);
+ if (err) {
hci_dev_put(hdev);
- err = -EFAULT;
break;
}
@@ -974,7 +971,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sock *sk = sock->sk;
struct sco_options opts;
struct sco_conninfo cinfo;
- int len, err = 0;
+ int err = 0;
+ size_t len;
BT_DBG("sk %p", sk);
@@ -996,7 +994,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
BT_DBG("mtu %u", opts.mtu);
- len = min_t(unsigned int, len, sizeof(opts));
+ len = min(len, sizeof(opts));
if (copy_to_user(optval, (char *)&opts, len))
err = -EFAULT;
@@ -1014,7 +1012,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
- len = min_t(unsigned int, len, sizeof(cinfo));
+ len = min(len, sizeof(cinfo));
if (copy_to_user(optval, (char *)&cinfo, len))
err = -EFAULT;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6bb272894..b94a17839 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,7 +30,7 @@ br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -65,6 +65,8 @@ static int br_pass_frame_up(struct sk_buff *skb)
br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
BR_MCAST_DIR_TX);
+ BR_INPUT_SKB_CB(skb)->promisc = promisc;
+
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
@@ -82,6 +84,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
+ bool promisc;
u16 vid = 0;
u8 state;
@@ -120,7 +123,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
if (p->flags & BR_LEARNING)
br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);
- local_rcv = !!(br->dev->flags & IFF_PROMISC);
+ promisc = !!(br->dev->flags & IFF_PROMISC);
+ local_rcv = promisc;
+
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
/* by definition the broadcast is also a multicast address */
if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
@@ -183,7 +188,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
unsigned long now = jiffies;
if (test_bit(BR_FDB_LOCAL, &dst->flags))
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, false);
if (now != dst->used)
dst->used = now;
@@ -196,7 +201,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
}
if (local_rcv)
- return br_pass_frame_up(skb);
+ return br_pass_frame_up(skb, promisc);
out:
return 0;
@@ -368,6 +373,8 @@ static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
goto forward;
}
+ BR_INPUT_SKB_CB(skb)->promisc = false;
+
/* The else clause should be hit when nf_hook():
* - returns < 0 (drop/error)
* - returns = 0 (stolen/nf_queue)
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index bff48d576..9ac70c27d 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -600,11 +600,17 @@ static unsigned int br_nf_local_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
struct nf_conntrack *nfct = skb_nfct(skb);
const struct nf_ct_hook *ct_hook;
struct nf_conn *ct;
int ret;
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index d087fd4c7..d38eff277 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -650,7 +650,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
{
u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
- return br_info_notify(event, br, port, filter);
+ br_info_notify(event, br, port, filter);
}
/*
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 51d010f64..940de9516 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -559,6 +559,7 @@ struct br_input_skb_cb {
#endif
u8 proxyarp_replied:1;
u8 src_port_isolated:1;
+ u8 promisc:1;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_filtered:1;
#endif
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index c7c27ada6..e60c38670 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -294,18 +294,24 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
static unsigned int nf_ct_bridge_in(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- enum ip_conntrack_info ctinfo;
+ bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
+ struct nf_conntrack *nfct = skb_nfct(skb);
struct nf_conn *ct;
- if (skb->pkt_type == PACKET_HOST)
+ if (promisc) {
+ nf_reset_ct(skb);
+ return NF_ACCEPT;
+ }
+
+ if (!nfct || skb->pkt_type == PACKET_HOST)
return NF_ACCEPT;
/* nf_conntrack_confirm() cannot handle concurrent clones,
* this happens for broad/multicast frames with e.g. macvlan on top
* of the bridge device.
*/
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
+ ct = container_of(nfct, struct nf_conn, ct_general);
+ if (nf_ct_is_confirmed(ct) || nf_ct_is_template(ct))
return NF_ACCEPT;
/* let inet prerouting call conntrack again */
diff --git a/net/core/sock.c b/net/core/sock.c
index c8803b95e..550af616f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -279,6 +279,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5b1ce656b..d281d5343 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -29,6 +29,7 @@ static int int_3600 = 3600;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -349,6 +350,14 @@ static struct ctl_table net_core_table[] = {
.extra1 = &min_rcvbuf,
},
{
+ .procname = "mem_pcpu_rsv",
+ .data = &sysctl_mem_pcpu_rsv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_mem_pcpu_rsv,
+ },
+ {
.procname = "dev_weight",
.data = &weight_p,
.maxlen = sizeof(int),
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 5417f7b11..98f864879 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -425,6 +425,24 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
return 0;
}
+static struct dsa_port *
+dsa_switch_preferred_default_local_cpu_port(struct dsa_switch *ds)
+{
+ struct dsa_port *cpu_dp;
+
+ if (!ds->ops->preferred_default_local_cpu_port)
+ return NULL;
+
+ cpu_dp = ds->ops->preferred_default_local_cpu_port(ds);
+ if (!cpu_dp)
+ return NULL;
+
+ if (WARN_ON(!dsa_port_is_cpu(cpu_dp) || cpu_dp->ds != ds))
+ return NULL;
+
+ return cpu_dp;
+}
+
/* Perform initial assignment of CPU ports to user ports and DSA links in the
* fabric, giving preference to CPU ports local to each switch. Default to
* using the first CPU port in the switch tree if the port does not have a CPU
@@ -432,12 +450,16 @@ static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
*/
static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
{
- struct dsa_port *cpu_dp, *dp;
+ struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
list_for_each_entry(cpu_dp, &dst->ports, list) {
if (!dsa_port_is_cpu(cpu_dp))
continue;
+ preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
+ if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
+ continue;
+
/* Prefer a local CPU port */
dsa_switch_for_each_port(dp, cpu_dp->ds) {
/* Prefer the first local CPU port found */
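The new ds->ops->preferred_default_local_cpu_port callback (the dsa_switch_ops field itself is added by a companion include/net/dsa.h hunk not shown here) lets a driver nominate which of its CPU ports should win the default assignment. A hypothetical driver-side sketch, with the port index purely illustrative:

/* Hypothetical driver callback: always nominate port 5, assumed to be a
 * CPU port on this switch, as the preferred default.
 */
static struct dsa_port *
example_preferred_default_local_cpu_port(struct dsa_switch *ds)
{
	return dsa_to_port(ds, 5);
}

static const struct dsa_switch_ops example_switch_ops = {
	/* ... the driver's other ops ... */
	.preferred_default_local_cpu_port = example_preferred_default_local_cpu_port,
};

dsa_switch_preferred_default_local_cpu_port() above then sanity-checks the returned port (it must be a CPU port belonging to the same switch) before the tree setup honours the preference.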
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index e02daa74e..5ba7b460c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);
- if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
- dev->dev_addr))) {
- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
- } else {
- skb->pkt_type = PACKET_OTHERHOST;
- }
- }
+ eth_skb_pkt_type(skb, dev);
/*
* Some variants of DSA tagging don't have an ethertype field
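The classification block removed above is factored into a shared eth_skb_pkt_type() helper so that it can be reused outside eth_type_trans(). A sketch mirroring the deleted logic (the actual inline is added elsewhere in this series, expected in include/linux/etherdevice.h):

static inline void eth_skb_pkt_type(struct sk_buff *skb,
				    const struct net_device *dev)
{
	const struct ethhdr *eth = eth_hdr(skb);

	/* Frames not addressed to this interface are classified further. */
	if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
				skb->pkt_type = PACKET_BROADCAST;
			else
				skb->pkt_type = PACKET_MULTICAST;
		} else {
			skb->pkt_type = PACKET_OTHERHOST;
		}
	}
}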
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 2b09ef707..31051b327 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -92,6 +92,7 @@
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
+#include <net/addrconf.h>
/*
* Build xmit assembly blocks
@@ -1029,6 +1030,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
struct icmp_ext_hdr *ext_hdr, _ext_hdr;
struct icmp_ext_echo_iio *iio, _iio;
struct net *net = dev_net(skb->dev);
+ struct inet6_dev *in6_dev;
+ struct in_device *in_dev;
struct net_device *dev;
char buff[IFNAMSIZ];
u16 ident_len;
@@ -1112,10 +1115,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
/* Fill bits in reply message */
if (dev->flags & IFF_UP)
status |= ICMP_EXT_ECHOREPLY_ACTIVE;
- if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
+
+ in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && rcu_access_pointer(in_dev->ifa_list))
status |= ICMP_EXT_ECHOREPLY_IPV4;
- if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
+
+ in6_dev = __in6_dev_get(dev);
+ if (in6_dev && !list_empty(&in6_dev->addr_list))
status |= ICMP_EXT_ECHOREPLY_IPV6;
+
dev_put(dev);
icmphdr->un.echo.sequence |= htons(status);
return true;
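The crash addressed here comes from dereferencing dev->ip6_ptr on a device with no IPv6 configuration. __in6_dev_get() returns that RCU-protected pointer and may legitimately be NULL, hence the added checks on both the IPv4 and IPv6 sides. For reference, a sketch of the helper as defined in include/net/addrconf.h:

static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev)
{
	/* May be NULL, e.g. when IPv6 is disabled on the device. */
	return rcu_dereference_rtnl(dev->ip6_ptr);
}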
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index b150c9929..14365b20f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -966,6 +966,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1266,6 +1268,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
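The added check rejects a tmp.size that does not fit in the remaining optlen, and the u64 casts ensure the sum is computed at full width so a huge, attacker-controlled size cannot wrap the addition on 32-bit builds. A standalone, userspace-style illustration of the wraparound being guarded against (not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t len  = 64;              /* length the caller really passed */
	uint32_t size = UINT32_MAX - 8;  /* attacker-controlled tmp.size    */
	uint32_t hdr  = 32;              /* stand-in for sizeof(tmp)        */

	/* 32-bit addition wraps to a small value, so the bogus size
	 * sneaks past the comparison (prints 0, i.e. not rejected).
	 */
	printf("narrow check rejects: %d\n", len < size + hdr);

	/* Widening first, as the patch does, keeps the comparison honest
	 * (prints 1).
	 */
	printf("wide check rejects:   %d\n",
	       (uint64_t)len < (uint64_t)size + hdr);
	return 0;
}

The same guard is repeated below for ip_tables, ip6_tables and both compat paths.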
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 1f365e28e..a6208efcf 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1120,6 +1120,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1506,6 +1508,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 474f391fa..6c0f1e347 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -926,13 +926,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->n_redirects;
-#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (log_martians &&
+ if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);
-#endif
}
out_put_peer:
inet_putpeer(peer);
@@ -2170,6 +2168,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
int err = -EINVAL;
u32 tag = 0;
+ if (!in_dev)
+ return -EINVAL;
+
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 2a78c7818..39fae7581 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1141,16 +1141,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_controllen) {
err = udp_cmsg_send(sk, msg, &ipc.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip_cmsg_send(sk, msg, &ipc,
sk->sk_family == AF_INET6);
+ connected = 0;
+ }
if (unlikely(err < 0)) {
kfree(ipc.opt);
return err;
}
if (ipc.opt)
free = 1;
- connected = 0;
}
if (!ipc.opt) {
struct ip_options_rcu *inet_opt;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 164837369..3866deaad 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2050,9 +2050,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
if (ipv6_addr_equal(&ifp->addr, addr)) {
if (!dev || ifp->idev->dev == dev ||
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
- result = ifp;
- in6_ifa_hold(ifp);
- break;
+ if (in6_ifa_hold_safe(ifp)) {
+ result = ifp;
+ break;
+ }
}
}
}
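in6_ifa_hold_safe() is introduced by a companion include/net/addrconf.h hunk (not shown in this excerpt): unlike in6_ifa_hold(), it only takes a reference while the refcount is still non-zero, closing the race with ipv6_del_addr() freeing the entry in the same hash bucket. A sketch consistent with its use above:

/* Returns false if the address is already on its way to being freed,
 * in which case the lookup must not hand it out.
 */
static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
{
	return refcount_inc_not_zero(&ifp->refcnt);
}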
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index e60637485..821362643 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1376,7 +1376,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
struct nl_info *info, struct netlink_ext_ack *extack)
{
struct fib6_table *table = rt->fib6_table;
- struct fib6_node *fn, *pn = NULL;
+ struct fib6_node *fn;
+#ifdef CONFIG_IPV6_SUBTREES
+ struct fib6_node *pn = NULL;
+#endif
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
@@ -1400,9 +1403,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
goto out;
}
+#ifdef CONFIG_IPV6_SUBTREES
pn = fn;
-#ifdef CONFIG_IPV6_SUBTREES
if (rt->fib6_src.plen) {
struct fib6_node *sn;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 37a2b3301..b844e519d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
@@ -1515,6 +1517,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
return -ENOMEM;
if (tmp.num_counters == 0)
return -EINVAL;
+ if ((u64)len < (u64)tmp.size + sizeof(tmp))
+ return -EINVAL;
tmp.name[sizeof(tmp.name)-1] = 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1775e9b9b..504ea27d0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1493,9 +1493,11 @@ do_udp_sendmsg:
ipc6.opt = opt;
err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
- if (err > 0)
+ if (err > 0) {
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
&ipc6);
+ connected = false;
+ }
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
@@ -1507,7 +1509,6 @@ do_udp_sendmsg:
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
- connected = false;
}
if (!opt) {
opt = txopt_get(np);
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 1482259de..40334d4d8 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -26,6 +26,9 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
__be16 mpls_protocol;
unsigned int mpls_hlen;
+ if (!skb_inner_network_header_was_set(skb))
+ goto out;
+
skb_reset_network_header(skb);
mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index a0921adc3..1e689c714 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
if (sctph->source != cp->vport || payload_csum ||
skb->ip_summed == CHECKSUM_PARTIAL) {
sctph->source = cp->vport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
(skb->ip_summed == CHECKSUM_PARTIAL &&
!(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
sctph->dest = cp->dport;
- sctp_nat_csum(skb, sctph, sctphoff);
+ if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+ sctp_nat_csum(skb, sctph, sctphoff);
} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 9505f9d18..6eef15648 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -21,7 +21,8 @@ nf_flow_offload_inet_hook(void *priv, struct sk_buff *skb,
proto = veth->h_vlan_encapsulated_proto;
break;
case htons(ETH_P_PPP_SES):
- proto = nf_flow_pppoe_proto(skb);
+ if (!nf_flow_pppoe_proto(skb, &proto))
+ return NF_ACCEPT;
break;
default:
proto = skb->protocol;
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index 6feaac9ab..22bc0e3d8 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -156,7 +156,7 @@ static void nf_flow_tuple_encap(struct sk_buff *skb,
tuple->encap[i].proto = skb->protocol;
break;
case htons(ETH_P_PPP_SES):
- phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+ phdr = (struct pppoe_hdr *)skb_network_header(skb);
tuple->encap[i].id = ntohs(phdr->sid);
tuple->encap[i].proto = skb->protocol;
break;
@@ -267,10 +267,11 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
return NF_STOLEN;
}
-static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
+static bool nf_flow_skb_encap_protocol(struct sk_buff *skb, __be16 proto,
u32 *offset)
{
struct vlan_ethhdr *veth;
+ __be16 inner_proto;
switch (skb->protocol) {
case htons(ETH_P_8021Q):
@@ -281,7 +282,8 @@ static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
}
break;
case htons(ETH_P_PPP_SES):
- if (nf_flow_pppoe_proto(skb) == proto) {
+ if (nf_flow_pppoe_proto(skb, &inner_proto) &&
+ inner_proto == proto) {
*offset += PPPOE_SES_HLEN;
return true;
}
@@ -310,7 +312,7 @@ static void nf_flow_encap_pop(struct sk_buff *skb,
skb_reset_network_header(skb);
break;
case htons(ETH_P_PPP_SES):
- skb->protocol = nf_flow_pppoe_proto(skb);
+ skb->protocol = __nf_flow_pppoe_proto(skb);
skb_pull(skb, PPPOE_SES_HLEN);
skb_reset_network_header(skb);
break;
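These call sites assume a reworked nf_flow_pppoe_proto() that confirms the PPPoE session header is actually present before reporting the encapsulated protocol; the companion include/net/netfilter/nf_flow_table.h hunk is not part of this excerpt. A sketch consistent with the callers above, where __nf_flow_pppoe_proto() is the pre-existing unchecked accessor:

static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
	/* Make sure the full PPPoE session header is linear before the
	 * unchecked accessor reads past the Ethernet header.
	 */
	if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
		return false;

	*inner_proto = __nf_flow_pppoe_proto(skb);
	return true;
}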
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 8d38cd504..1c4b7a8ec 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1192,6 +1192,24 @@ static void nf_tables_table_disable(struct net *net, struct nft_table *table)
#define __NFT_TABLE_F_UPDATE (__NFT_TABLE_F_WAS_DORMANT | \
__NFT_TABLE_F_WAS_AWAKEN)
+static bool nft_table_pending_update(const struct nft_ctx *ctx)
+{
+ struct nftables_pernet *nft_net = nft_pernet(ctx->net);
+ struct nft_trans *trans;
+
+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ return true;
+
+ list_for_each_entry(trans, &nft_net->commit_list, list) {
+ if (trans->ctx.table == ctx->table &&
+ trans->msg_type == NFT_MSG_DELCHAIN &&
+ nft_is_base_chain(trans->ctx.chain))
+ return true;
+ }
+
+ return false;
+}
+
static int nf_tables_updtable(struct nft_ctx *ctx)
{
struct nft_trans *trans;
@@ -1215,7 +1233,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
return -EOPNOTSUPP;
/* No dormant off/on/off/on games in single transaction */
- if (ctx->table->flags & __NFT_TABLE_F_UPDATE)
+ if (nft_table_pending_update(ctx))
return -EINVAL;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
@@ -2873,7 +2891,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
{
const struct nft_expr_type *type, *candidate = NULL;
- list_for_each_entry(type, &nf_tables_expressions, list) {
+ list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
if (!nla_strcmp(nla, type->name)) {
if (!type->family && !candidate)
candidate = type;
@@ -2905,9 +2923,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
if (nla == NULL)
return ERR_PTR(-EINVAL);
+ rcu_read_lock();
type = __nft_expr_type_get(family, nla);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -7153,7 +7175,7 @@ static const struct nft_object_type *__nft_obj_type_get(u32 objtype, u8 family)
{
const struct nft_object_type *type;
- list_for_each_entry(type, &nf_tables_objects, list) {
+ list_for_each_entry_rcu(type, &nf_tables_objects, list) {
if (type->family != NFPROTO_UNSPEC &&
type->family != family)
continue;
@@ -7169,9 +7191,13 @@ nft_obj_type_get(struct net *net, u32 objtype, u8 family)
{
const struct nft_object_type *type;
+ rcu_read_lock();
type = __nft_obj_type_get(objtype, family);
- if (type != NULL && try_module_get(type->owner))
+ if (type != NULL && try_module_get(type->owner)) {
+ rcu_read_unlock();
return type;
+ }
+ rcu_read_unlock();
lockdep_nfnl_nft_mutex_not_held();
#ifdef CONFIG_MODULES
@@ -9902,10 +9928,11 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
struct nft_trans *trans, *next;
LIST_HEAD(set_update_list);
struct nft_trans_elem *te;
+ int err = 0;
if (action == NFNL_ABORT_VALIDATE &&
nf_tables_validate(net) < 0)
- return -EAGAIN;
+ err = -EAGAIN;
list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list,
list) {
@@ -10076,12 +10103,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
nf_tables_abort_release(trans);
}
- if (action == NFNL_ABORT_AUTOLOAD)
- nf_tables_module_autoload(net);
- else
- nf_tables_module_autoload_cleanup(net);
-
- return 0;
+ return err;
}
static int nf_tables_abort(struct net *net, struct sk_buff *skb,
@@ -10095,6 +10117,16 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb,
ret = __nf_tables_abort(net, action);
nft_gc_seq_end(nft_net, gc_seq);
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+ /* module autoload needs to happen after GC sequence update because it
+ * temporarily releases and re-acquires the mutex.
+ */
+ if (action == NFNL_ABORT_AUTOLOAD)
+ nf_tables_module_autoload(net);
+ else
+ nf_tables_module_autoload_cleanup(net);
+
mutex_unlock(&nft_net->commit_mutex);
return ret;
@@ -10892,9 +10924,10 @@ static void __net_exit nf_tables_exit_net(struct net *net)
gc_seq = nft_gc_seq_begin(nft_net);
- if (!list_empty(&nft_net->commit_list) ||
- !list_empty(&nft_net->module_list))
- __nf_tables_abort(net, NFNL_ABORT_NONE);
+ WARN_ON_ONCE(!list_empty(&nft_net->commit_list));
+
+ if (!list_empty(&nft_net->module_list))
+ nf_tables_module_autoload_cleanup(net);
__nft_release_tables(net);
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index 274b6f7e6..d170758a1 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
return;
if (n > 1) {
- nf_unregister_net_hook(ctx->net, &found->ops);
+ if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
+ nf_unregister_net_hook(ctx->net, &found->ops);
+
list_del_rcu(&found->list);
kfree_rcu(found, rcu);
return;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 58eca2616..2299ced93 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1994,6 +1994,8 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
rules_fx = rules_f0;
nft_pipapo_for_each_field(f, i, m) {
+ bool last = i == m->field_count - 1;
+
if (!pipapo_match_field(f, start, rules_fx,
match_start, match_end))
break;
@@ -2006,16 +2008,18 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
match_start += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
match_end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
- }
- if (i == m->field_count) {
- priv->dirty = true;
- pipapo_drop(m, rulemap);
- return;
+ if (last && f->mt[rulemap[i].to].e == e) {
+ priv->dirty = true;
+ pipapo_drop(m, rulemap);
+ return;
+ }
}
first_rule += rules_f0;
}
+
+ WARN_ON_ONCE(1); /* elem_priv not found */
}
/**
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 0591cfb28..2302bae1e 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1711,8 +1711,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (ct_info.timeout[0]) {
if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
ct_info.timeout))
- pr_info_ratelimited("Failed to associated timeout "
- "policy `%s'\n", ct_info.timeout);
+ OVS_NLERR(log,
+ "Failed to associated timeout policy '%s'",
+ ct_info.timeout);
else
ct_info.nf_ct_timeout = rcu_dereference(
nf_ct_timeout_find(ct_info.ct)->timeout);
@@ -1919,9 +1920,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
struct hlist_head *head = &info->limits[i];
struct ovs_ct_limit *ct_limit;
+ struct hlist_node *next;
- hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
- lockdep_ovsl_is_held())
+ hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
kfree_rcu(ct_limit, rcu);
}
kfree(info->limits);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 25fb2fd18..21b8bf23e 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -802,6 +802,16 @@ static void smc_pnet_create_pnetids_list(struct net *net)
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
struct net_device *dev;
+ /* Newly created netns do not have devices.
+ * Do not even acquire rtnl.
+ */
+ if (list_empty(&net->dev_base_head))
+ return;
+
+ /* Note: This might not be needed, because smc_pnet_netdev_event()
+ * also calls smc_pnet_add_base_pnetid() when handling the
+ * NETDEV_UP event.
+ */
rtnl_lock();
for_each_netdev(net, dev)
smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e1af94393..f28e2956f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -968,11 +968,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
- u = unix_sk(sk);
+ u = unix_sk(sk);
+ u->inflight = 0;
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
- atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */
@@ -2675,9 +2675,13 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
WRITE_ONCE(u->oob_skb, NULL);
consume_skb(skb);
}
- } else if (!(flags & MSG_PEEK)) {
+ } else if (flags & MSG_PEEK) {
+ skb = NULL;
+ } else {
skb_unlink(skb, &sk->sk_receive_queue);
- consume_skb(skb);
+ WRITE_ONCE(u->oob_skb, NULL);
+ if (!WARN_ON_ONCE(skb_unref(skb)))
+ kfree_skb(skb);
skb = skb_peek(&sk->sk_receive_queue);
}
}
@@ -2751,18 +2755,16 @@ redo:
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
+again:
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
if (skb) {
skb = manage_oob(skb, sk, flags, copied);
- if (!skb) {
+ if (!skb && copied) {
unix_state_unlock(sk);
- if (copied)
- break;
- goto redo;
+ break;
}
}
#endif
-again:
if (skb == NULL) {
if (copied >= target)
goto unlock;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 9bfffe2a7..d2fc79539 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
static void dec_inflight(struct unix_sock *usk)
{
- atomic_long_dec(&usk->inflight);
+ usk->inflight--;
}
static void inc_inflight(struct unix_sock *usk)
{
- atomic_long_inc(&usk->inflight);
+ usk->inflight++;
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
- atomic_long_inc(&u->inflight);
+ u->inflight++;
+
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
@@ -234,20 +235,34 @@ void unix_gc(void)
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
+ *
+ * Embryos, though never candidates themselves, affect which
+ * candidates are reachable by the garbage collector. Before
+ * being added to a listener's queue, an embryo may already
+ * receive data carrying SCM_RIGHTS, potentially making the
+ * passed socket a candidate that is not yet reachable by the
+ * collector. It becomes reachable once the embryo is
+ * enqueued. Therefore, we must ensure that no SCM-laden
+ * embryo appears in a (candidate) listener's queue between
+ * consecutive scan_children() calls.
*/
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+ struct sock *sk = &u->sk;
long total_refs;
- long inflight_refs;
- total_refs = file_count(u->sk.sk_socket->file);
- inflight_refs = atomic_long_read(&u->inflight);
+ total_refs = file_count(sk->sk_socket->file);
- BUG_ON(inflight_refs < 1);
- BUG_ON(total_refs < inflight_refs);
- if (total_refs == inflight_refs) {
+ BUG_ON(!u->inflight);
+ BUG_ON(total_refs < u->inflight);
+ if (total_refs == u->inflight) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+
+ if (sk->sk_state == TCP_LISTEN) {
+ unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
+ unix_state_unlock(sk);
+ }
}
}
@@ -271,7 +286,7 @@ void unix_gc(void)
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
- if (atomic_long_read(&u->inflight) > 0) {
+ if (u->inflight) {
list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
diff --git a/net/unix/scm.c b/net/unix/scm.c
index d1048b4c2..4eff7da9f 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -52,12 +52,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
if (s) {
struct unix_sock *u = unix_sk(s);
- if (atomic_long_inc_return(&u->inflight) == 1) {
+ if (!u->inflight) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
+ u->inflight++;
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
}
@@ -74,10 +75,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
if (s) {
struct unix_sock *u = unix_sk(s);
- BUG_ON(!atomic_long_read(&u->inflight));
+ BUG_ON(!u->inflight);
BUG_ON(list_empty(&u->link));
- if (atomic_long_dec_and_test(&u->inflight))
+ u->inflight--;
+ if (!u->inflight)
list_del_init(&u->link);
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
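Throughout this series u->inflight changes from an atomic_long_t to a plain counter; the premise, visible both in garbage.c and here, is that every update already runs with unix_gc_lock held, so the atomic operations only added overhead. A schematic of the pattern the plain counter now relies on (surrounding code elided):

spin_lock(&unix_gc_lock);

if (!u->inflight) {
	/* First in-flight reference: track the socket for the GC. */
	BUG_ON(!list_empty(&u->link));
	list_add_tail(&u->link, &gc_inflight_list);
} else {
	BUG_ON(list_empty(&u->link));
}
u->inflight++;	/* plain increment is safe: unix_gc_lock is held */

spin_unlock(&unix_gc_lock);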
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 5c8e02d56..e3bdfc517 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -1127,6 +1127,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
struct xsk_queue **q;
int entries;
+ if (optlen < sizeof(entries))
+ return -EINVAL;
if (copy_from_sockptr(&entries, optval, sizeof(entries)))
return -EFAULT;
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
index 91764bfb1..f2efa86a7 100644
--- a/rust/macros/lib.rs
+++ b/rust/macros/lib.rs
@@ -27,18 +27,6 @@ use proc_macro::TokenStream;
/// author: b"Rust for Linux Contributors",
/// description: b"My very own kernel module!",
/// license: b"GPL",
-/// params: {
-/// my_i32: i32 {
-/// default: 42,
-/// permissions: 0o000,
-/// description: b"Example of i32",
-/// },
-/// writeable_i32: i32 {
-/// default: 42,
-/// permissions: 0o644,
-/// description: b"Example of i32",
-/// },
-/// },
/// }
///
/// struct MyModule;
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
index c5c2ce113..d20c47d21 100644
--- a/scripts/gcc-plugins/stackleak_plugin.c
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -467,6 +467,8 @@ static bool stackleak_gate(void)
return false;
if (STRING_EQUAL(section, ".entry.text"))
return false;
+ if (STRING_EQUAL(section, ".head.text"))
+ return false;
}
return track_frame_size >= 0;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index f8b644cb9..875312568 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -771,10 +771,14 @@ static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
} else {
unsigned int dbc_interval;
- if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
- dbc_interval = s->ctx_data.tx.dbc_interval;
- else
- dbc_interval = *data_blocks;
+ if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
+ if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
+ dbc_interval = s->ctx_data.tx.dbc_interval;
+ else
+ dbc_interval = *data_blocks;
+ } else {
+ dbc_interval = payload_length / sizeof(__be32);
+ }
lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
}
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index 1f957c946..cf9ab3472 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -37,6 +37,9 @@
* the value of current SYT_INTERVAL; e.g. initial value is not zero.
* @CIP_UNAWARE_SYT: For outgoing packet, the value in SYT field of CIP is 0xffff.
* For incoming packet, the value in SYT field of CIP is not handled.
+ * @CIP_DBC_IS_PAYLOAD_QUADLETS: Available for incoming packets, and only effective with the
+ * CIP_DBC_IS_END_EVENT flag. The value of the dbc field is the number of accumulated quadlets
+ * in the CIP payload, instead of the number of accumulated data blocks.
*/
enum cip_flags {
CIP_NONBLOCKING = 0x00,
@@ -51,6 +54,7 @@ enum cip_flags {
CIP_NO_HEADER = 0x100,
CIP_UNALIGHED_DBC = 0x200,
CIP_UNAWARE_SYT = 0x400,
+ CIP_DBC_IS_PAYLOAD_QUADLETS = 0x800,
};
/**
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e8cf38dc8..77c40063d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10122,6 +10122,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1d05, 0x115c, "TongFang GMxTGxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d05, 0x121b, "TongFang GMxAGxx", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1d05, 0x1387, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 985012f20..d1e6e4208 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -224,6 +224,17 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_SDW_PCH_DMIC |
RT711_JD2_100K),
},
+ {
+ /* NUC15 LAPRC710 SKUs */
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "LAPRC710"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_SDW_PCH_DMIC |
+ RT711_JD2_100K),
+ },
/* TigerLake-SDCA devices */
{
.callback = sof_sdw_quirk_cb,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a409fbed8..6a4101dc1 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1020,6 +1020,9 @@ int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
if (!snd_soc_is_matching_component(platform, component))
continue;
+ if (snd_soc_component_is_dummy(component) && component->num_dai)
+ continue;
+
snd_soc_rtd_add_component(rtd, component);
}
}
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index 9ccb21a4f..64a718c76 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -12,7 +12,7 @@ snd-usb-audio-objs := card.o \
mixer.o \
mixer_quirks.o \
mixer_scarlett.o \
- mixer_scarlett_gen2.o \
+ mixer_scarlett2.o \
mixer_us16x08.o \
mixer_s1810c.o \
pcm.o \
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 1f32e3ae3..c8d48566e 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -33,7 +33,7 @@
#include "mixer.h"
#include "mixer_quirks.h"
#include "mixer_scarlett.h"
-#include "mixer_scarlett_gen2.h"
+#include "mixer_scarlett2.h"
#include "mixer_us16x08.h"
#include "mixer_s1810c.h"
#include "helper.h"
@@ -3447,8 +3447,13 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x1235, 0x8213): /* Focusrite Scarlett 8i6 3rd Gen */
case USB_ID(0x1235, 0x8214): /* Focusrite Scarlett 18i8 3rd Gen */
case USB_ID(0x1235, 0x8215): /* Focusrite Scarlett 18i20 3rd Gen */
+ case USB_ID(0x1235, 0x8206): /* Focusrite Clarett 2Pre USB */
+ case USB_ID(0x1235, 0x8207): /* Focusrite Clarett 4Pre USB */
+ case USB_ID(0x1235, 0x8208): /* Focusrite Clarett 8Pre USB */
+ case USB_ID(0x1235, 0x820a): /* Focusrite Clarett+ 2Pre */
+ case USB_ID(0x1235, 0x820b): /* Focusrite Clarett+ 4Pre */
case USB_ID(0x1235, 0x820c): /* Focusrite Clarett+ 8Pre */
- err = snd_scarlett_gen2_init(mixer);
+ err = snd_scarlett2_init(mixer);
break;
case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett2.c
index 1bcb05c73..bcb8b7617 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Focusrite Scarlett Gen 2/3 and Clarett+ Driver for ALSA
+ * Focusrite Scarlett 2 Protocol Driver for ALSA
+ * (including Scarlett 2nd Gen, 3rd Gen, Clarett USB, and Clarett+
+ * series products)
*
* Supported models:
* - 6i6/18i8/18i20 Gen 2
* - Solo/2i2/4i4/8i6/18i8/18i20 Gen 3
- * - Clarett+ 8Pre
+ * - Clarett 2Pre/4Pre/8Pre USB
+ * - Clarett+ 2Pre/4Pre/8Pre
*
- * Copyright (c) 2018-2022 by Geoffrey D. Bennett <g at b4.vu>
+ * Copyright (c) 2018-2023 by Geoffrey D. Bennett <g at b4.vu>
* Copyright (c) 2020-2021 by Vladimir Sadovnikov <sadko4u@gmail.com>
* Copyright (c) 2022 by Christian Colglazier <christian@cacolglazier.com>
*
@@ -56,6 +59,15 @@
* Support for Clarett+ 8Pre added in Aug 2022 by Christian
* Colglazier.
*
+ * Support for Clarett 8Pre USB added in Sep 2023 (thanks to Philippe
+ * Perrot for confirmation).
+ *
+ * Support for Clarett+ 4Pre and 2Pre added in Sep 2023 (thanks to
+ * Gregory Rozzo for donating a 4Pre, and David Sherwood and Patrice
+ * Peterson for usbmon output).
+ *
+ * Support for Clarett 2Pre and 4Pre USB added in Oct 2023.
+ *
* This ALSA mixer gives access to (model-dependent):
* - input, output, mixer-matrix muxes
* - mixer-matrix gain stages
@@ -139,14 +151,14 @@
#include "mixer.h"
#include "helper.h"
-#include "mixer_scarlett_gen2.h"
-
-/* device_setup value to enable */
-#define SCARLETT2_ENABLE 0x01
+#include "mixer_scarlett2.h"
/* device_setup value to allow turning MSD mode back on */
#define SCARLETT2_MSD_ENABLE 0x02
+/* device_setup value to disable this mixer driver */
+#define SCARLETT2_DISABLE 0x04
+
/* some gui mixers can't handle negative ctl values */
#define SCARLETT2_VOLUME_BIAS 127
@@ -317,8 +329,6 @@ struct scarlett2_mux_entry {
};
struct scarlett2_device_info {
- u32 usb_id; /* USB device identifier */
-
/* Gen 3 devices have an internal MSD mode switch that needs
* to be disabled in order to access the full functionality of
* the device.
@@ -389,6 +399,7 @@ struct scarlett2_data {
struct mutex data_mutex; /* lock access to this data */
struct delayed_work work;
const struct scarlett2_device_info *info;
+ const char *series_name;
__u8 bInterfaceNumber;
__u8 bEndpointAddress;
__u16 wMaxPacketSize;
@@ -440,8 +451,6 @@ struct scarlett2_data {
/*** Model-specific data ***/
static const struct scarlett2_device_info s6i6_gen2_info = {
- .usb_id = USB_ID(0x1235, 0x8203),
-
.config_set = SCARLETT2_CONFIG_SET_GEN_2,
.level_input_count = 2,
.pad_input_count = 2,
@@ -486,8 +495,6 @@ static const struct scarlett2_device_info s6i6_gen2_info = {
};
static const struct scarlett2_device_info s18i8_gen2_info = {
- .usb_id = USB_ID(0x1235, 0x8204),
-
.config_set = SCARLETT2_CONFIG_SET_GEN_2,
.level_input_count = 2,
.pad_input_count = 4,
@@ -535,8 +542,6 @@ static const struct scarlett2_device_info s18i8_gen2_info = {
};
static const struct scarlett2_device_info s18i20_gen2_info = {
- .usb_id = USB_ID(0x1235, 0x8201),
-
.config_set = SCARLETT2_CONFIG_SET_GEN_2,
.line_out_hw_vol = 1,
@@ -589,8 +594,6 @@ static const struct scarlett2_device_info s18i20_gen2_info = {
};
static const struct scarlett2_device_info solo_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8211),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
.level_input_count = 1,
@@ -602,8 +605,6 @@ static const struct scarlett2_device_info solo_gen3_info = {
};
static const struct scarlett2_device_info s2i2_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8210),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_NO_MIXER,
.level_input_count = 2,
@@ -614,8 +615,6 @@ static const struct scarlett2_device_info s2i2_gen3_info = {
};
static const struct scarlett2_device_info s4i4_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8212),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_GEN_3,
.level_input_count = 2,
@@ -660,8 +659,6 @@ static const struct scarlett2_device_info s4i4_gen3_info = {
};
static const struct scarlett2_device_info s8i6_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8213),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_GEN_3,
.level_input_count = 2,
@@ -713,8 +710,6 @@ static const struct scarlett2_device_info s8i6_gen3_info = {
};
static const struct scarlett2_device_info s18i8_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8214),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_GEN_3,
.line_out_hw_vol = 1,
@@ -783,8 +778,6 @@ static const struct scarlett2_device_info s18i8_gen3_info = {
};
static const struct scarlett2_device_info s18i20_gen3_info = {
- .usb_id = USB_ID(0x1235, 0x8215),
-
.has_msd_mode = 1,
.config_set = SCARLETT2_CONFIG_SET_GEN_3,
.line_out_hw_vol = 1,
@@ -847,9 +840,96 @@ static const struct scarlett2_device_info s18i20_gen3_info = {
} },
};
-static const struct scarlett2_device_info clarett_8pre_info = {
- .usb_id = USB_ID(0x1235, 0x820c),
+static const struct scarlett2_device_info clarett_2pre_info = {
+ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
+ .line_out_hw_vol = 1,
+ .level_input_count = 2,
+ .air_input_count = 2,
+
+ .line_out_descrs = {
+ "Monitor L",
+ "Monitor R",
+ "Headphones L",
+ "Headphones R",
+ },
+
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 2, 4 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 0 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 4, 12 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 8 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 2 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 4 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 26 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info clarett_4pre_info = {
+ .config_set = SCARLETT2_CONFIG_SET_CLARETT,
+ .line_out_hw_vol = 1,
+ .level_input_count = 2,
+ .air_input_count = 4,
+
+ .line_out_descrs = {
+ "Monitor L",
+ "Monitor R",
+ "Headphones 1 L",
+ "Headphones 1 R",
+ "Headphones 2 L",
+ "Headphones 2 R",
+ },
+ .port_count = {
+ [SCARLETT2_PORT_TYPE_NONE] = { 1, 0 },
+ [SCARLETT2_PORT_TYPE_ANALOGUE] = { 8, 6 },
+ [SCARLETT2_PORT_TYPE_SPDIF] = { 2, 2 },
+ [SCARLETT2_PORT_TYPE_ADAT] = { 8, 0 },
+ [SCARLETT2_PORT_TYPE_MIX] = { 10, 18 },
+ [SCARLETT2_PORT_TYPE_PCM] = { 8, 18 },
+ },
+
+ .mux_assignment = { {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 18 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 14 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_MIX, 0, 18 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 8 },
+ { 0, 0, 0 },
+ }, {
+ { SCARLETT2_PORT_TYPE_PCM, 0, 12 },
+ { SCARLETT2_PORT_TYPE_ANALOGUE, 0, 6 },
+ { SCARLETT2_PORT_TYPE_SPDIF, 0, 2 },
+ { SCARLETT2_PORT_TYPE_NONE, 0, 24 },
+ { 0, 0, 0 },
+ } },
+};
+
+static const struct scarlett2_device_info clarett_8pre_info = {
.config_set = SCARLETT2_CONFIG_SET_CLARETT,
.line_out_hw_vol = 1,
.level_input_count = 2,
@@ -902,25 +982,36 @@ static const struct scarlett2_device_info clarett_8pre_info = {
} },
};
-static const struct scarlett2_device_info *scarlett2_devices[] = {
+struct scarlett2_device_entry {
+ const u32 usb_id; /* USB device identifier */
+ const struct scarlett2_device_info *info;
+ const char *series_name;
+};
+
+static const struct scarlett2_device_entry scarlett2_devices[] = {
/* Supported Gen 2 devices */
- &s6i6_gen2_info,
- &s18i8_gen2_info,
- &s18i20_gen2_info,
+ { USB_ID(0x1235, 0x8203), &s6i6_gen2_info, "Scarlett Gen 2" },
+ { USB_ID(0x1235, 0x8204), &s18i8_gen2_info, "Scarlett Gen 2" },
+ { USB_ID(0x1235, 0x8201), &s18i20_gen2_info, "Scarlett Gen 2" },
/* Supported Gen 3 devices */
- &solo_gen3_info,
- &s2i2_gen3_info,
- &s4i4_gen3_info,
- &s8i6_gen3_info,
- &s18i8_gen3_info,
- &s18i20_gen3_info,
-
- /* Supported Clarett+ devices */
- &clarett_8pre_info,
+ { USB_ID(0x1235, 0x8211), &solo_gen3_info, "Scarlett Gen 3" },
+ { USB_ID(0x1235, 0x8210), &s2i2_gen3_info, "Scarlett Gen 3" },
+ { USB_ID(0x1235, 0x8212), &s4i4_gen3_info, "Scarlett Gen 3" },
+ { USB_ID(0x1235, 0x8213), &s8i6_gen3_info, "Scarlett Gen 3" },
+ { USB_ID(0x1235, 0x8214), &s18i8_gen3_info, "Scarlett Gen 3" },
+ { USB_ID(0x1235, 0x8215), &s18i20_gen3_info, "Scarlett Gen 3" },
+
+ /* Supported Clarett USB/Clarett+ devices */
+ { USB_ID(0x1235, 0x8206), &clarett_2pre_info, "Clarett USB" },
+ { USB_ID(0x1235, 0x8207), &clarett_4pre_info, "Clarett USB" },
+ { USB_ID(0x1235, 0x8208), &clarett_8pre_info, "Clarett USB" },
+ { USB_ID(0x1235, 0x820a), &clarett_2pre_info, "Clarett+" },
+ { USB_ID(0x1235, 0x820b), &clarett_4pre_info, "Clarett+" },
+ { USB_ID(0x1235, 0x820c), &clarett_8pre_info, "Clarett+" },
/* End of list */
- NULL
+ { 0, NULL },
};
/* get the starting port index number for a given port type/direction */
@@ -1112,7 +1203,7 @@ static const struct scarlett2_config
[SCARLETT2_CONFIG_TALKBACK_MAP] = {
.offset = 0xb0, .size = 16, .activate = 10 },
-/* Clarett+ 8Pre */
+/* Clarett USB and Clarett+ devices: 2Pre, 4Pre, 8Pre */
}, {
[SCARLETT2_CONFIG_DIM_MUTE] = {
.offset = 0x31, .size = 8, .activate = 2 },
@@ -1217,8 +1308,8 @@ static int scarlett2_usb(
if (err != req_buf_size) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2/3 USB request result cmd %x was %d\n",
- cmd, err);
+ "%s USB request result cmd %x was %d\n",
+ private->series_name, cmd, err);
err = -EINVAL;
goto unlock;
}
@@ -1234,9 +1325,8 @@ static int scarlett2_usb(
if (err != resp_buf_size) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2/3 USB response result cmd %x was %d "
- "expected %zu\n",
- cmd, err, resp_buf_size);
+ "%s USB response result cmd %x was %d expected %zu\n",
+ private->series_name, cmd, err, resp_buf_size);
err = -EINVAL;
goto unlock;
}
@@ -1252,9 +1342,10 @@ static int scarlett2_usb(
resp->pad) {
usb_audio_err(
mixer->chip,
- "Scarlett Gen 2/3 USB invalid response; "
+ "%s USB invalid response; "
"cmd tx/rx %d/%d seq %d/%d size %d/%d "
"error %d pad %d\n",
+ private->series_name,
le32_to_cpu(req->cmd), le32_to_cpu(resp->cmd),
le16_to_cpu(req->seq), le16_to_cpu(resp->seq),
resp_size, le16_to_cpu(resp->size),
@@ -3810,7 +3901,7 @@ static int scarlett2_find_fc_interface(struct usb_device *dev,
/* Initialise private data */
static int scarlett2_init_private(struct usb_mixer_interface *mixer,
- const struct scarlett2_device_info *info)
+ const struct scarlett2_device_entry *entry)
{
struct scarlett2_data *private =
kzalloc(sizeof(struct scarlett2_data), GFP_KERNEL);
@@ -3826,7 +3917,8 @@ static int scarlett2_init_private(struct usb_mixer_interface *mixer,
mixer->private_free = scarlett2_private_free;
mixer->private_suspend = scarlett2_private_suspend;
- private->info = info;
+ private->info = entry->info;
+ private->series_name = entry->series_name;
scarlett2_count_mux_io(private);
private->scarlett2_seq = 0;
private->mixer = mixer;
@@ -4147,19 +4239,28 @@ static int scarlett2_init_notify(struct usb_mixer_interface *mixer)
return usb_submit_urb(mixer->urb, GFP_KERNEL);
}
-static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
+static const struct scarlett2_device_entry *get_scarlett2_device_entry(
+ struct usb_mixer_interface *mixer)
{
- const struct scarlett2_device_info **info = scarlett2_devices;
- int err;
+ const struct scarlett2_device_entry *entry = scarlett2_devices;
- /* Find device in scarlett2_devices */
- while (*info && (*info)->usb_id != mixer->chip->usb_id)
- info++;
- if (!*info)
- return -EINVAL;
+ /* Find entry in scarlett2_devices */
+ while (entry->usb_id && entry->usb_id != mixer->chip->usb_id)
+ entry++;
+ if (!entry->usb_id)
+ return NULL;
+
+ return entry;
+}
+
+static int snd_scarlett2_controls_create(
+ struct usb_mixer_interface *mixer,
+ const struct scarlett2_device_entry *entry)
+{
+ int err;
/* Initialise private data */
- err = scarlett2_init_private(mixer, *info);
+ err = scarlett2_init_private(mixer, entry);
if (err < 0)
return err;
@@ -4240,34 +4341,50 @@ static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer)
+int snd_scarlett2_init(struct usb_mixer_interface *mixer)
{
struct snd_usb_audio *chip = mixer->chip;
+ const struct scarlett2_device_entry *entry;
int err;
/* only use UAC_VERSION_2 */
if (!mixer->protocol)
return 0;
- if (!(chip->setup & SCARLETT2_ENABLE)) {
+ /* find entry in scarlett2_devices */
+ entry = get_scarlett2_device_entry(mixer);
+ if (!entry) {
+ usb_audio_err(mixer->chip,
+ "%s: missing device entry for %04x:%04x\n",
+ __func__,
+ USB_ID_VENDOR(chip->usb_id),
+ USB_ID_PRODUCT(chip->usb_id));
+ return 0;
+ }
+
+ if (chip->setup & SCARLETT2_DISABLE) {
usb_audio_info(chip,
- "Focusrite Scarlett Gen 2/3 Mixer Driver disabled; "
- "use options snd_usb_audio vid=0x%04x pid=0x%04x "
- "device_setup=1 to enable and report any issues "
- "to g@b4.vu",
+ "Focusrite %s Mixer Driver disabled "
+ "by modprobe options (snd_usb_audio "
+ "vid=0x%04x pid=0x%04x device_setup=%d)\n",
+ entry->series_name,
USB_ID_VENDOR(chip->usb_id),
- USB_ID_PRODUCT(chip->usb_id));
+ USB_ID_PRODUCT(chip->usb_id),
+ SCARLETT2_DISABLE);
return 0;
}
usb_audio_info(chip,
- "Focusrite Scarlett Gen 2/3 Mixer Driver enabled pid=0x%04x",
+ "Focusrite %s Mixer Driver enabled (pid=0x%04x); "
+ "report any issues to g@b4.vu",
+ entry->series_name,
USB_ID_PRODUCT(chip->usb_id));
- err = snd_scarlett_gen2_controls_create(mixer);
+ err = snd_scarlett2_controls_create(mixer, entry);
if (err < 0)
usb_audio_err(mixer->chip,
- "Error initialising Scarlett Mixer Driver: %d",
+ "Error initialising %s Mixer Driver: %d",
+ entry->series_name,
err);
return err;
diff --git a/sound/usb/mixer_scarlett2.h b/sound/usb/mixer_scarlett2.h
new file mode 100644
index 000000000..d209362cf
--- /dev/null
+++ b/sound/usb/mixer_scarlett2.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __USB_MIXER_SCARLETT2_H
+#define __USB_MIXER_SCARLETT2_H
+
+int snd_scarlett2_init(struct usb_mixer_interface *mixer);
+
+#endif /* __USB_MIXER_SCARLETT2_H */
diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h
deleted file mode 100644
index 668c6b0cb..000000000
--- a/sound/usb/mixer_scarlett_gen2.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __USB_MIXER_SCARLETT_GEN2_H
-#define __USB_MIXER_SCARLETT_GEN2_H
-
-int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer);
-
-#endif /* __USB_MIXER_SCARLETT_GEN2_H */
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 6a00a6eec..c5c5082cb 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -376,7 +376,7 @@ int build_channel_array(const char *device_dir, int buffer_idx,
goto error_close_dir;
}
- seekdir(dp, 0);
+ rewinddir(dp);
while (ent = readdir(dp), ent) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 61b637f29..b871923c7 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -233,10 +233,10 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist)
static void perf_evlist__id_hash(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
int hash;
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
+ struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
sid->id = id;
sid->evsel = evsel;
@@ -254,21 +254,27 @@ void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ if (!SID(evsel, cpu_map_idx, thread))
+ return;
+
+ perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
evsel->id[evsel->ids++] = id;
}
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd)
+ int cpu_map_idx, int thread, int fd)
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
u64 id;
int ret;
+ if (!SID(evsel, cpu_map_idx, thread))
+ return -1;
+
ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
if (!ret)
goto add;
@@ -297,7 +303,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
id = read_data[id_idx];
add:
- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
return 0;
}
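
Both perf_evlist__id_add() and perf_evlist__id_add_fd() now bail out when SID(evsel, cpu_map_idx, thread) returns NULL instead of handing a missing sample-id slot on to perf_evlist__id_hash(), which would dereference it. A generic sketch of that guard, using hypothetical types and a hypothetical lookup rather than libperf's internals:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical per-(cpu, thread) slot table standing in for the evsel's sample ids. */
struct sample_id { uint64_t id; };

struct slots {
	struct sample_id *sid;	/* may be NULL if never allocated */
	int ncpus, nthreads;
};

static struct sample_id *slot_lookup(struct slots *s, int cpu_idx, int thread)
{
	if (!s->sid || cpu_idx < 0 || cpu_idx >= s->ncpus ||
	    thread < 0 || thread >= s->nthreads)
		return NULL;
	return &s->sid[cpu_idx * s->nthreads + thread];
}

static int id_add(struct slots *s, int cpu_idx, int thread, uint64_t id)
{
	struct sample_id *sid = slot_lookup(s, cpu_idx, thread);

	/* Guard before use: a missing table or out-of-range index
	 * returns an error instead of dereferencing NULL. */
	if (!sid)
		return -1;

	sid->id = id;
	return 0;
}

int main(void)
{
	struct sample_id storage[4] = { { 0 } };
	struct slots s = { storage, 2, 2 };

	printf("in range:  %d\n", id_add(&s, 1, 1, 42));	/* 0  */
	printf("bad index: %d\n", id_add(&s, 3, 0, 43));	/* -1 */
	return 0;
}
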
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index 850f07070..cf77db752 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -127,11 +127,11 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist);
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id);
+ int cpu_map_idx, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd);
+ int cpu_map_idx, int thread, int fd);
void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
index 5fd9e5940..ebda9c366 100644
--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -1241,6 +1241,7 @@ unsigned int get_pkg_num(int cpu)
retval = fscanf(fp, "%d\n", &pkg);
if (retval != 1)
errx(1, "%s: failed to parse", pathname);
+ fclose(fp);
return pkg;
}
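
get_pkg_num() opened the per-CPU topology file with fopen() but returned without closing it, leaking one FILE * per call; the fix adds fclose(fp) after the fscanf(). A standalone sketch of the read-one-value-and-close pattern (the path mirrors the sysfs layout but is only illustrative):

#include <err.h>
#include <stdio.h>

/* Read a single integer from a file, closing the stream on every path. */
static int read_int_from(const char *pathname)
{
	FILE *fp = fopen(pathname, "r");
	int value;

	if (!fp)
		err(1, "%s: open failed", pathname);

	if (fscanf(fp, "%d\n", &value) != 1) {
		fclose(fp);
		errx(1, "%s: failed to parse", pathname);
	}

	fclose(fp);	/* without this, each call leaks a FILE * */
	return value;
}

int main(void)
{
	printf("package id: %d\n",
	       read_int_from("/sys/devices/system/cpu/cpu0/topology/physical_package_id"));
	return 0;
}
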
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
index e6c381498..449e45bd6 100755
--- a/tools/testing/ktest/ktest.pl
+++ b/tools/testing/ktest/ktest.pl
@@ -836,6 +836,7 @@ sub set_value {
if ($lvalue =~ /^(TEST|BISECT|CONFIG_BISECT)_TYPE(\[.*\])?$/ &&
$prvalue !~ /^(config_|)bisect$/ &&
$prvalue !~ /^build$/ &&
+ $prvalue !~ /^make_warnings_file$/ &&
$buildonly) {
# Note if a test is something other than build, then we
diff --git a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
index b1ede6249..b7c8f29c0 100644
--- a/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
+++ b/tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -lt 3 ]; then
fail "at least fork, exec and exit events should be recorded"
fi
@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
yield
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
if [ $count -ne 0 ]; then
fail "any of scheduler events should not be recorded"
fi
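
The selftest now pipes only the first 100 lines of the trace file through the awk/sort/wc pipeline instead of cat'ing the whole thing, presumably to bound how much trace data each check has to process; 100 lines are plenty to observe fork, exec and exit. The same bounded-read idea expressed in C (file name and limit are illustrative):

#include <stdio.h>

int main(void)
{
	FILE *fp = fopen("trace", "r");	/* illustrative input file */
	char line[4096];
	int lines = 0;

	if (!fp) {
		perror("fopen");
		return 1;
	}

	/* Stop after 100 lines rather than consuming the whole file,
	 * mirroring the `head -n 100 trace | ...` change in the test. */
	while (lines < 100 && fgets(line, sizeof(line), fp)) {
		lines++;		/* count every line read, like head -n 100 */
		if (line[0] != '#')	/* then drop comment lines, like grep -v ^# */
			fputs(line, stdout);
	}

	fclose(fp);
	return 0;
}
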
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 0ba500056..193a984f5 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
diff = end.tv_usec - start.tv_usec;
diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
- if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
printf("Diff too high: %lld..", diff);
return -1;
}
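
check_diff() computes diff as a 64-bit value (it is printed with %lld), so passing it to abs() silently truncates it to int; llabs() keeps the full width. A short demonstration of the difference, with a value chosen just to make the truncation visible:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* A difference larger than INT_MAX, e.g. ~5000 seconds in microseconds. */
	long long diff = 5000LL * 1000000LL;	/* 5,000,000,000 */

	/* abs() takes an int; the cast makes explicit the implicit truncation
	 * that happened in the original abs(diff) call. */
	printf("abs():   %d\n", abs((int)diff));	/* 705032704: wrong magnitude */
	printf("llabs(): %lld\n", llabs(diff));		/* 5000000000: as expected */
	return 0;
}
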