author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-13 05:04:46 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-13 05:04:46 +0000
commit     ab7a1d20a3def1448bdcc128d77ebf50a672e4a5
tree       09a6c24baf5e8a0cafc6ebb9fb94b7062e31f7ec
parent     Adding debian version 6.10.3-1.
Merging upstream version 6.10.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--  Documentation/admin-guide/mm/transhuge.rst | 11
-rw-r--r--  Documentation/netlink/specs/ethtool.yaml | 2
-rw-r--r--  Documentation/networking/ethtool-netlink.rst | 1
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/kernel/perf_callchain.c | 3
-rw-r--r--  arch/arm/mm/proc.c | 20
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 1
-rw-r--r--  arch/arm64/kernel/jump_label.c | 11
-rw-r--r--  arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi | 81
-rw-r--r--  arch/riscv/kernel/sbi-ipi.c | 2
-rw-r--r--  arch/riscv/mm/fault.c | 17
-rw-r--r--  arch/riscv/mm/init.c | 15
-rw-r--r--  arch/riscv/purgatory/entry.S | 2
-rw-r--r--  arch/s390/kernel/fpu.c | 2
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 21
-rw-r--r--  arch/x86/events/intel/core.c | 162
-rw-r--r--  drivers/bluetooth/btintel.c | 3
-rw-r--r--  drivers/gpu/drm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 16
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 1
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 29
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 5
-rw-r--r--  drivers/gpu/drm/drm_client.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 44
-rw-r--r--  drivers/gpu/drm/v3d/v3d_submit.c | 121
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_submit.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmw_surface_cache.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 127
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 40
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 502
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 27
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 33
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 174
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 280
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c | 40
-rw-r--r--  drivers/hid/amd-sfh-hid/amd_sfh_client.c | 18
-rw-r--r--  drivers/hid/wacom_wac.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 184
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 14
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 33
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 8
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 34
-rw-r--r--  drivers/net/phy/realtek.c | 7
-rw-r--r--  drivers/net/usb/sr9700.c | 11
-rw-r--r--  drivers/net/wan/fsl_qmc_hdlc.c | 31
-rw-r--r--  drivers/net/wireless/ath/ath12k/pci.c | 3
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 4
-rw-r--r--  drivers/perf/fsl_imx9_ddr_perf.c | 6
-rw-r--r--  drivers/perf/riscv_pmu_sbi.c | 2
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 2
-rw-r--r--  fs/btrfs/block-group.c | 13
-rw-r--r--  fs/btrfs/extent-tree.c | 3
-rw-r--r--  fs/btrfs/free-space-cache.c | 4
-rw-r--r--  fs/btrfs/inode.c | 16
-rw-r--r--  fs/btrfs/space-info.c | 5
-rw-r--r--  fs/btrfs/space-info.h | 1
-rw-r--r--  fs/ceph/caps.c | 35
-rw-r--r--  fs/ceph/super.h | 7
-rw-r--r--  fs/ext4/inode.c | 76
-rw-r--r--  fs/f2fs/segment.c | 4
-rw-r--r--  fs/file.c | 1
-rw-r--r--  include/linux/cpuhotplug.h | 1
-rw-r--r--  include/linux/huge_mm.h | 12
-rw-r--r--  include/linux/migrate.h | 7
-rw-r--r--  include/trace/events/btrfs.h | 8
-rw-r--r--  include/trace/events/mptcp.h | 2
-rw-r--r--  init/Kconfig | 1
-rw-r--r--  io_uring/poll.c | 1
-rw-r--r--  mm/huge_memory.c | 20
-rw-r--r--  mm/khugepaged.c | 33
-rw-r--r--  mm/memory.c | 11
-rw-r--r--  mm/migrate.c | 94
-rw-r--r--  net/bluetooth/hci_core.c | 7
-rw-r--r--  net/bluetooth/hci_event.c | 5
-rw-r--r--  net/bluetooth/hci_sync.c | 21
-rw-r--r--  net/core/rtnetlink.c | 2
-rw-r--r--  net/ethtool/ioctl.c | 5
-rw-r--r--  net/ethtool/rss.c | 8
-rw-r--r--  net/ipv4/netfilter/iptable_nat.c | 18
-rw-r--r--  net/ipv4/tcp_input.c | 23
-rw-r--r--  net/ipv6/ndisc.c | 34
-rw-r--r--  net/ipv6/netfilter/ip6table_nat.c | 14
-rw-r--r--  net/iucv/af_iucv.c | 4
-rw-r--r--  net/mac80211/cfg.c | 7
-rw-r--r--  net/mac80211/tx.c | 5
-rw-r--r--  net/mac80211/util.c | 2
-rw-r--r--  net/mptcp/mib.c | 2
-rw-r--r--  net/mptcp/mib.h | 2
-rw-r--r--  net/mptcp/options.c | 2
-rw-r--r--  net/mptcp/pm.c | 12
-rw-r--r--  net/mptcp/pm_netlink.c | 46
-rw-r--r--  net/mptcp/pm_userspace.c | 18
-rw-r--r--  net/mptcp/protocol.c | 18
-rw-r--r--  net/mptcp/protocol.h | 4
-rw-r--r--  net/mptcp/subflow.c | 26
-rw-r--r--  net/sched/act_ct.c | 4
-rw-r--r--  net/wireless/scan.c | 11
-rw-r--r--  net/wireless/sme.c | 1
-rw-r--r--  sound/core/seq/seq_ump_convert.c | 37
-rw-r--r--  sound/firewire/amdtp-stream.c | 38
-rw-r--r--  sound/firewire/amdtp-stream.h | 1
-rw-r--r--  sound/pci/hda/hda_controller.h | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 10
-rw-r--r--  sound/pci/hda/patch_conexant.c | 54
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/usb/stream.c | 4
-rw-r--r--  tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json | 2
-rw-r--r--  tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json | 2
-rw-r--r--  tools/perf/util/callchain.c | 2
-rw-r--r--  tools/testing/selftests/net/mptcp/mptcp_connect.c | 8
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_join.sh | 74
143 files changed, 2028 insertions, 1273 deletions
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index d414d3f559..1f901de208 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -202,12 +202,11 @@ PMD-mappable transparent hugepage::
cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
-khugepaged will be automatically started when one or more hugepage
-sizes are enabled (either by directly setting "always" or "madvise",
-or by setting "inherit" while the top-level enabled is set to "always"
-or "madvise"), and it'll be automatically shutdown when the last
-hugepage size is disabled (either by directly setting "never", or by
-setting "inherit" while the top-level enabled is set to "never").
+khugepaged will be automatically started when PMD-sized THP is enabled
+(either of the per-size anon control or the top-level control are set
+to "always" or "madvise"), and it'll be automatically shutdown when
+PMD-sized THP is disabled (when both the per-size anon control and the
+top-level control are "never")
Khugepaged controls
-------------------
diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
index 4510e8d1ad..238145c318 100644
--- a/Documentation/netlink/specs/ethtool.yaml
+++ b/Documentation/netlink/specs/ethtool.yaml
@@ -1634,6 +1634,7 @@ operations:
request:
attributes:
- header
+ - context
reply:
attributes:
- header
@@ -1642,7 +1643,6 @@ operations:
- indir
- hkey
- input_xfrm
- dump: *rss-get-op
-
name: plca-get-cfg
doc: Get PLCA params.
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index 160bfb0ae8..0d8c487be3 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1800,6 +1800,7 @@ Kernel response contents:
===================================== ====== ==========================
``ETHTOOL_A_RSS_HEADER`` nested reply header
+ ``ETHTOOL_A_RSS_CONTEXT`` u32 context number
``ETHTOOL_A_RSS_HFUNC`` u32 RSS hash func
``ETHTOOL_A_RSS_INDIR`` binary Indir table bytes
``ETHTOOL_A_RSS_HKEY`` binary Hash key bytes
diff --git a/Makefile b/Makefile
index c0af6d8aeb..aec5cc0bab 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 10
-SUBLEVEL = 3
+SUBLEVEL = 4
EXTRAVERSION =
NAME = Baby Opossum Posse
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 7147edbe56..1d230ac9d0 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -85,8 +85,7 @@ static bool
callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
- perf_callchain_store(entry, pc);
- return true;
+ return perf_callchain_store(entry, pc) == 0;
}
void
diff --git a/arch/arm/mm/proc.c b/arch/arm/mm/proc.c
index bdbbf65d1b..2027845efe 100644
--- a/arch/arm/mm/proc.c
+++ b/arch/arm/mm/proc.c
@@ -17,7 +17,7 @@ void cpu_arm7tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_init);
void cpu_arm7tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm7tdmi_proc_fin);
-void cpu_arm7tdmi_reset(void);
+void cpu_arm7tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm7tdmi_reset);
int cpu_arm7tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm7tdmi_do_idle);
@@ -32,7 +32,7 @@ void cpu_arm720_proc_init(void);
__ADDRESSABLE(cpu_arm720_proc_init);
void cpu_arm720_proc_fin(void);
__ADDRESSABLE(cpu_arm720_proc_fin);
-void cpu_arm720_reset(void);
+void cpu_arm720_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm720_reset);
int cpu_arm720_do_idle(void);
__ADDRESSABLE(cpu_arm720_do_idle);
@@ -49,7 +49,7 @@ void cpu_arm740_proc_init(void);
__ADDRESSABLE(cpu_arm740_proc_init);
void cpu_arm740_proc_fin(void);
__ADDRESSABLE(cpu_arm740_proc_fin);
-void cpu_arm740_reset(void);
+void cpu_arm740_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm740_reset);
int cpu_arm740_do_idle(void);
__ADDRESSABLE(cpu_arm740_do_idle);
@@ -64,7 +64,7 @@ void cpu_arm9tdmi_proc_init(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_init);
void cpu_arm9tdmi_proc_fin(void);
__ADDRESSABLE(cpu_arm9tdmi_proc_fin);
-void cpu_arm9tdmi_reset(void);
+void cpu_arm9tdmi_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm9tdmi_reset);
int cpu_arm9tdmi_do_idle(void);
__ADDRESSABLE(cpu_arm9tdmi_do_idle);
@@ -79,7 +79,7 @@ void cpu_arm920_proc_init(void);
__ADDRESSABLE(cpu_arm920_proc_init);
void cpu_arm920_proc_fin(void);
__ADDRESSABLE(cpu_arm920_proc_fin);
-void cpu_arm920_reset(void);
+void cpu_arm920_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm920_reset);
int cpu_arm920_do_idle(void);
__ADDRESSABLE(cpu_arm920_do_idle);
@@ -102,7 +102,7 @@ void cpu_arm922_proc_init(void);
__ADDRESSABLE(cpu_arm922_proc_init);
void cpu_arm922_proc_fin(void);
__ADDRESSABLE(cpu_arm922_proc_fin);
-void cpu_arm922_reset(void);
+void cpu_arm922_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm922_reset);
int cpu_arm922_do_idle(void);
__ADDRESSABLE(cpu_arm922_do_idle);
@@ -119,7 +119,7 @@ void cpu_arm925_proc_init(void);
__ADDRESSABLE(cpu_arm925_proc_init);
void cpu_arm925_proc_fin(void);
__ADDRESSABLE(cpu_arm925_proc_fin);
-void cpu_arm925_reset(void);
+void cpu_arm925_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm925_reset);
int cpu_arm925_do_idle(void);
__ADDRESSABLE(cpu_arm925_do_idle);
@@ -159,7 +159,7 @@ void cpu_arm940_proc_init(void);
__ADDRESSABLE(cpu_arm940_proc_init);
void cpu_arm940_proc_fin(void);
__ADDRESSABLE(cpu_arm940_proc_fin);
-void cpu_arm940_reset(void);
+void cpu_arm940_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm940_reset);
int cpu_arm940_do_idle(void);
__ADDRESSABLE(cpu_arm940_do_idle);
@@ -174,7 +174,7 @@ void cpu_arm946_proc_init(void);
__ADDRESSABLE(cpu_arm946_proc_init);
void cpu_arm946_proc_fin(void);
__ADDRESSABLE(cpu_arm946_proc_fin);
-void cpu_arm946_reset(void);
+void cpu_arm946_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_arm946_reset);
int cpu_arm946_do_idle(void);
__ADDRESSABLE(cpu_arm946_do_idle);
@@ -429,7 +429,7 @@ void cpu_v7_proc_init(void);
__ADDRESSABLE(cpu_v7_proc_init);
void cpu_v7_proc_fin(void);
__ADDRESSABLE(cpu_v7_proc_fin);
-void cpu_v7_reset(void);
+void cpu_v7_reset(unsigned long addr, bool hvc);
__ADDRESSABLE(cpu_v7_reset);
int cpu_v7_do_idle(void);
__ADDRESSABLE(cpu_v7_do_idle);
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 4e753908b8..a0a5bbae72 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <asm/insn.h>
+#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
#define JUMP_TABLE_ENTRY(key, label) \
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index faf88ec9c4..f63ea915d6 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -7,11 +7,12 @@
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
+#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>
-void arch_jump_label_transform(struct jump_entry *entry,
- enum jump_label_type type)
+bool arch_jump_label_transform_queue(struct jump_entry *entry,
+ enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
}
aarch64_insn_patch_text_nosync(addr, insn);
+ return true;
+}
+
+void arch_jump_label_transform_apply(void)
+{
+ kick_all_cpus_sync();
}
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
index c0be84a6e8..cc7747c5f2 100644
--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -99,8 +99,8 @@
rtc0: rtc@1fe07800 {
compatible = "loongson,ls2k1000-rtc";
reg = <0 0x1fe07800 0 0x78>;
- interrupt-parent = <&liointc0>;
- interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
};
uart0: serial@1fe00000 {
@@ -108,7 +108,7 @@
reg = <0 0x1fe00000 0 0x8>;
clock-frequency = <125000000>;
interrupt-parent = <&liointc0>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
no-loopback-test;
};
@@ -117,7 +117,6 @@
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
- #interrupt-cells = <2>;
reg = <0 0x1a000000 0 0x02000000>,
<0xfe 0x00000000 0 0x20000000>;
@@ -132,8 +131,8 @@
"pciclass0c03";
reg = <0x1800 0x0 0x0 0x0 0x0>;
- interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
- <13 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
+ <13 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&liointc0>;
phy-mode = "rgmii-id";
@@ -156,8 +155,8 @@
"loongson, pci-gmac";
reg = <0x1900 0x0 0x0 0x0 0x0>;
- interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
- <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
+ <15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq", "eth_lpi";
interrupt-parent = <&liointc0>;
phy-mode = "rgmii-id";
@@ -179,7 +178,7 @@
"pciclass0c03";
reg = <0x2100 0x0 0x0 0x0 0x0>;
- interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
};
@@ -190,7 +189,7 @@
"pciclass0c03";
reg = <0x2200 0x0 0x0 0x0 0x0>;
- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
};
@@ -201,97 +200,121 @@
"pciclass0106";
reg = <0x4000 0x0 0x0 0x0 0x0>;
- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc0>;
};
- pci_bridge@9,0 {
+ pcie@9,0 {
compatible = "pci0014,7a19.0",
"pci0014,7a19",
"pciclass060400",
"pciclass0604";
reg = <0x4800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
- pci_bridge@a,0 {
+ pcie@a,0 {
compatible = "pci0014,7a09.0",
"pci0014,7a09",
"pciclass060400",
"pciclass0604";
reg = <0x5000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
- pci_bridge@b,0 {
+ pcie@b,0 {
compatible = "pci0014,7a09.0",
"pci0014,7a09",
"pciclass060400",
"pciclass0604";
reg = <0x5800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
- pci_bridge@c,0 {
+ pcie@c,0 {
compatible = "pci0014,7a09.0",
"pci0014,7a09",
"pciclass060400",
"pciclass0604";
reg = <0x6000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
- pci_bridge@d,0 {
+ pcie@d,0 {
compatible = "pci0014,7a19.0",
"pci0014,7a19",
"pciclass060400",
"pciclass0604";
reg = <0x6800 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
- pci_bridge@e,0 {
+ pcie@e,0 {
compatible = "pci0014,7a09.0",
"pci0014,7a09",
"pciclass060400",
"pciclass0604";
reg = <0x7000 0x0 0x0 0x0 0x0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
#interrupt-cells = <1>;
- interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
interrupt-parent = <&liointc1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
external-facing;
};
diff --git a/arch/riscv/kernel/sbi-ipi.c b/arch/riscv/kernel/sbi-ipi.c
index 1026e22955..0cc5559c08 100644
--- a/arch/riscv/kernel/sbi-ipi.c
+++ b/arch/riscv/kernel/sbi-ipi.c
@@ -71,7 +71,7 @@ void __init sbi_ipi_init(void)
* the masking/unmasking of virtual IPIs is done
* via generic IPI-Mux
*/
- cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
"irqchip/sbi-ipi:starting",
sbi_ipi_starting_cpu, NULL);
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 5224f37338..a9f2b4af8f 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+
if (fault & VM_FAULT_OOM) {
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
- if (!user_mode(regs)) {
- no_context(regs, addr);
- return;
- }
pagefault_out_of_memory();
return;
} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
- if (!user_mode(regs)) {
- no_context(regs, addr);
- return;
- }
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
return;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
+ return;
}
+
BUG();
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index e3405e4b99..7e25606f85 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -233,8 +233,6 @@ static void __init setup_bootmem(void)
*/
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- phys_ram_end = memblock_end_of_DRAM();
-
/*
* Make sure we align the start of the memory on a PMD boundary so that
* at worst, we map the linear mapping with PMD mappings.
@@ -250,6 +248,16 @@ static void __init setup_bootmem(void)
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
/*
+ * The size of the linear page mapping may restrict the amount of
+ * usable RAM.
+ */
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
+ memblock_cap_memory_range(phys_ram_base,
+ max_mapped_addr - phys_ram_base);
+ }
+
+ /*
* Reserve physical address space that would be mapped to virtual
* addresses greater than (void *)(-PAGE_SIZE) because:
* - This memory would overlap with ERR_PTR
@@ -265,6 +273,7 @@ static void __init setup_bootmem(void)
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
}
+ phys_ram_end = memblock_end_of_DRAM();
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
@@ -1289,8 +1298,6 @@ static void __init create_linear_mapping_page_table(void)
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
- if (end >= __pa(PAGE_OFFSET) + memory_limit)
- end = __pa(PAGE_OFFSET) + memory_limit;
create_linear_mapping_range(start, end, 0);
}
diff --git a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S
index 5bcf3af903..0e6ca6d5ae 100644
--- a/arch/riscv/purgatory/entry.S
+++ b/arch/riscv/purgatory/entry.S
@@ -7,6 +7,7 @@
* Author: Li Zhengyu (lizhengyu3@huawei.com)
*
*/
+#include <asm/asm.h>
#include <linux/linkage.h>
.text
@@ -34,6 +35,7 @@ SYM_CODE_END(purgatory_start)
.data
+.align LGREG
SYM_DATA(riscv_kernel_entry, .quad 0)
.end
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index fa90bbdc5e..6f2e879202 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -113,7 +113,7 @@ void load_fpu_state(struct fpu *state, int flags)
int mask;
if (flags & KERNEL_FPC)
- fpu_lfpc(&state->fpc);
+ fpu_lfpc_safe(&state->fpc);
if (!cpu_has_vx()) {
if (flags & KERNEL_VXR_V0V7)
load_fp_regs_vx(state->vxrs);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index ffd07ed7b4..9d0805d6dc 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -20,8 +20,8 @@ struct addr_marker {
};
enum address_markers_idx {
- IDENTITY_BEFORE_NR = 0,
- IDENTITY_BEFORE_END_NR,
+ LOWCORE_START_NR = 0,
+ LOWCORE_END_NR,
AMODE31_START_NR,
AMODE31_END_NR,
KERNEL_START_NR,
@@ -30,8 +30,8 @@ enum address_markers_idx {
KFENCE_START_NR,
KFENCE_END_NR,
#endif
- IDENTITY_AFTER_NR,
- IDENTITY_AFTER_END_NR,
+ IDENTITY_START_NR,
+ IDENTITY_END_NR,
VMEMMAP_NR,
VMEMMAP_END_NR,
VMALLOC_NR,
@@ -49,8 +49,10 @@ enum address_markers_idx {
};
static struct addr_marker address_markers[] = {
- [IDENTITY_BEFORE_NR] = {0, "Identity Mapping Start"},
- [IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
+ [LOWCORE_START_NR] = {0, "Lowcore Start"},
+ [LOWCORE_END_NR] = {0, "Lowcore End"},
+ [IDENTITY_START_NR] = {0, "Identity Mapping Start"},
+ [IDENTITY_END_NR] = {0, "Identity Mapping End"},
[AMODE31_START_NR] = {0, "Amode31 Area Start"},
[AMODE31_END_NR] = {0, "Amode31 Area End"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
@@ -59,8 +61,6 @@ static struct addr_marker address_markers[] = {
[KFENCE_START_NR] = {0, "KFence Pool Start"},
[KFENCE_END_NR] = {0, "KFence Pool End"},
#endif
- [IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
- [IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
[VMEMMAP_NR] = {0, "vmemmap Area Start"},
[VMEMMAP_END_NR] = {0, "vmemmap Area End"},
[VMALLOC_NR] = {0, "vmalloc Area Start"},
@@ -290,7 +290,10 @@ static int pt_dump_init(void)
*/
max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
- address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
+ address_markers[LOWCORE_START_NR].start_address = 0;
+ address_markers[LOWCORE_END_NR].start_address = sizeof(struct lowcore);
+ address_markers[IDENTITY_START_NR].start_address = __identity_base;
+ address_markers[IDENTITY_END_NR].start_address = __identity_base + ident_map_size;
address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 38c1b1f1de..101a21fe9c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4698,8 +4698,8 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
static inline bool intel_pmu_broken_perf_cap(void)
{
/* The Perf Metric (Bit 15) is always cleared */
- if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) ||
- (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L))
+ if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
+ boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
return true;
return false;
@@ -6238,19 +6238,19 @@ __init int intel_pmu_init(void)
/*
* Install the hw-cache-events table:
*/
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_CORE_YONAH:
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_CORE_YONAH:
pr_cont("Core events, ");
name = "core";
break;
- case INTEL_FAM6_CORE2_MEROM:
+ case INTEL_CORE2_MEROM:
x86_add_quirk(intel_clovertown_quirk);
fallthrough;
- case INTEL_FAM6_CORE2_MEROM_L:
- case INTEL_FAM6_CORE2_PENRYN:
- case INTEL_FAM6_CORE2_DUNNINGTON:
+ case INTEL_CORE2_MEROM_L:
+ case INTEL_CORE2_PENRYN:
+ case INTEL_CORE2_DUNNINGTON:
memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6262,9 +6262,9 @@ __init int intel_pmu_init(void)
name = "core2";
break;
- case INTEL_FAM6_NEHALEM:
- case INTEL_FAM6_NEHALEM_EP:
- case INTEL_FAM6_NEHALEM_EX:
+ case INTEL_NEHALEM:
+ case INTEL_NEHALEM_EP:
+ case INTEL_NEHALEM_EX:
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -6296,11 +6296,11 @@ __init int intel_pmu_init(void)
name = "nehalem";
break;
- case INTEL_FAM6_ATOM_BONNELL:
- case INTEL_FAM6_ATOM_BONNELL_MID:
- case INTEL_FAM6_ATOM_SALTWELL:
- case INTEL_FAM6_ATOM_SALTWELL_MID:
- case INTEL_FAM6_ATOM_SALTWELL_TABLET:
+ case INTEL_ATOM_BONNELL:
+ case INTEL_ATOM_BONNELL_MID:
+ case INTEL_ATOM_SALTWELL:
+ case INTEL_ATOM_SALTWELL_MID:
+ case INTEL_ATOM_SALTWELL_TABLET:
memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6313,11 +6313,11 @@ __init int intel_pmu_init(void)
name = "bonnell";
break;
- case INTEL_FAM6_ATOM_SILVERMONT:
- case INTEL_FAM6_ATOM_SILVERMONT_D:
- case INTEL_FAM6_ATOM_SILVERMONT_MID:
- case INTEL_FAM6_ATOM_AIRMONT:
- case INTEL_FAM6_ATOM_AIRMONT_MID:
+ case INTEL_ATOM_SILVERMONT:
+ case INTEL_ATOM_SILVERMONT_D:
+ case INTEL_ATOM_SILVERMONT_MID:
+ case INTEL_ATOM_AIRMONT:
+ case INTEL_ATOM_AIRMONT_MID:
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -6335,8 +6335,8 @@ __init int intel_pmu_init(void)
name = "silvermont";
break;
- case INTEL_FAM6_ATOM_GOLDMONT:
- case INTEL_FAM6_ATOM_GOLDMONT_D:
+ case INTEL_ATOM_GOLDMONT:
+ case INTEL_ATOM_GOLDMONT_D:
memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -6362,7 +6362,7 @@ __init int intel_pmu_init(void)
name = "goldmont";
break;
- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_ATOM_GOLDMONT_PLUS:
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -6391,9 +6391,9 @@ __init int intel_pmu_init(void)
name = "goldmont_plus";
break;
- case INTEL_FAM6_ATOM_TREMONT_D:
- case INTEL_FAM6_ATOM_TREMONT:
- case INTEL_FAM6_ATOM_TREMONT_L:
+ case INTEL_ATOM_TREMONT_D:
+ case INTEL_ATOM_TREMONT:
+ case INTEL_ATOM_TREMONT_L:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6420,7 +6420,7 @@ __init int intel_pmu_init(void)
name = "Tremont";
break;
- case INTEL_FAM6_ATOM_GRACEMONT:
+ case INTEL_ATOM_GRACEMONT:
intel_pmu_init_grt(NULL);
intel_pmu_pebs_data_source_grt();
x86_pmu.pebs_latency_data = adl_latency_data_small;
@@ -6432,8 +6432,8 @@ __init int intel_pmu_init(void)
name = "gracemont";
break;
- case INTEL_FAM6_ATOM_CRESTMONT:
- case INTEL_FAM6_ATOM_CRESTMONT_X:
+ case INTEL_ATOM_CRESTMONT:
+ case INTEL_ATOM_CRESTMONT_X:
intel_pmu_init_grt(NULL);
x86_pmu.extra_regs = intel_cmt_extra_regs;
intel_pmu_pebs_data_source_cmt();
@@ -6446,9 +6446,9 @@ __init int intel_pmu_init(void)
name = "crestmont";
break;
- case INTEL_FAM6_WESTMERE:
- case INTEL_FAM6_WESTMERE_EP:
- case INTEL_FAM6_WESTMERE_EX:
+ case INTEL_WESTMERE:
+ case INTEL_WESTMERE_EP:
+ case INTEL_WESTMERE_EX:
memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -6477,8 +6477,8 @@ __init int intel_pmu_init(void)
name = "westmere";
break;
- case INTEL_FAM6_SANDYBRIDGE:
- case INTEL_FAM6_SANDYBRIDGE_X:
+ case INTEL_SANDYBRIDGE:
+ case INTEL_SANDYBRIDGE_X:
x86_add_quirk(intel_sandybridge_quirk);
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -6491,7 +6491,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
+ if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -6517,8 +6517,8 @@ __init int intel_pmu_init(void)
name = "sandybridge";
break;
- case INTEL_FAM6_IVYBRIDGE:
- case INTEL_FAM6_IVYBRIDGE_X:
+ case INTEL_IVYBRIDGE:
+ case INTEL_IVYBRIDGE_X:
x86_add_quirk(intel_ht_bug);
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
@@ -6534,7 +6534,7 @@ __init int intel_pmu_init(void)
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
x86_pmu.pebs_prec_dist = true;
- if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
+ if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
x86_pmu.extra_regs = intel_snbep_extra_regs;
else
x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -6556,10 +6556,10 @@ __init int intel_pmu_init(void)
break;
- case INTEL_FAM6_HASWELL:
- case INTEL_FAM6_HASWELL_X:
- case INTEL_FAM6_HASWELL_L:
- case INTEL_FAM6_HASWELL_G:
+ case INTEL_HASWELL:
+ case INTEL_HASWELL_X:
+ case INTEL_HASWELL_L:
+ case INTEL_HASWELL_G:
x86_add_quirk(intel_ht_bug);
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
@@ -6589,10 +6589,10 @@ __init int intel_pmu_init(void)
name = "haswell";
break;
- case INTEL_FAM6_BROADWELL:
- case INTEL_FAM6_BROADWELL_D:
- case INTEL_FAM6_BROADWELL_G:
- case INTEL_FAM6_BROADWELL_X:
+ case INTEL_BROADWELL:
+ case INTEL_BROADWELL_D:
+ case INTEL_BROADWELL_G:
+ case INTEL_BROADWELL_X:
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6631,8 +6631,8 @@ __init int intel_pmu_init(void)
name = "broadwell";
break;
- case INTEL_FAM6_XEON_PHI_KNL:
- case INTEL_FAM6_XEON_PHI_KNM:
+ case INTEL_XEON_PHI_KNL:
+ case INTEL_XEON_PHI_KNM:
memcpy(hw_cache_event_ids,
slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs,
@@ -6651,15 +6651,15 @@ __init int intel_pmu_init(void)
name = "knights-landing";
break;
- case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_SKYLAKE_X:
pmem = true;
fallthrough;
- case INTEL_FAM6_SKYLAKE_L:
- case INTEL_FAM6_SKYLAKE:
- case INTEL_FAM6_KABYLAKE_L:
- case INTEL_FAM6_KABYLAKE:
- case INTEL_FAM6_COMETLAKE_L:
- case INTEL_FAM6_COMETLAKE:
+ case INTEL_SKYLAKE_L:
+ case INTEL_SKYLAKE:
+ case INTEL_KABYLAKE_L:
+ case INTEL_KABYLAKE:
+ case INTEL_COMETLAKE_L:
+ case INTEL_COMETLAKE:
x86_add_quirk(intel_pebs_isolation_quirk);
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -6708,16 +6708,16 @@ __init int intel_pmu_init(void)
name = "skylake";
break;
- case INTEL_FAM6_ICELAKE_X:
- case INTEL_FAM6_ICELAKE_D:
+ case INTEL_ICELAKE_X:
+ case INTEL_ICELAKE_D:
x86_pmu.pebs_ept = 1;
pmem = true;
fallthrough;
- case INTEL_FAM6_ICELAKE_L:
- case INTEL_FAM6_ICELAKE:
- case INTEL_FAM6_TIGERLAKE_L:
- case INTEL_FAM6_TIGERLAKE:
- case INTEL_FAM6_ROCKETLAKE:
+ case INTEL_ICELAKE_L:
+ case INTEL_ICELAKE:
+ case INTEL_TIGERLAKE_L:
+ case INTEL_TIGERLAKE:
+ case INTEL_ROCKETLAKE:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -6752,16 +6752,22 @@ __init int intel_pmu_init(void)
name = "icelake";
break;
- case INTEL_FAM6_SAPPHIRERAPIDS_X:
- case INTEL_FAM6_EMERALDRAPIDS_X:
+ case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
x86_pmu.extra_regs = intel_glc_extra_regs;
- fallthrough;
- case INTEL_FAM6_GRANITERAPIDS_X:
- case INTEL_FAM6_GRANITERAPIDS_D:
+ pr_cont("Sapphire Rapids events, ");
+ name = "sapphire_rapids";
+ goto glc_common;
+
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_GRANITERAPIDS_D:
+ x86_pmu.extra_regs = intel_rwc_extra_regs;
+ pr_cont("Granite Rapids events, ");
+ name = "granite_rapids";
+
+ glc_common:
intel_pmu_init_glc(NULL);
- if (!x86_pmu.extra_regs)
- x86_pmu.extra_regs = intel_rwc_extra_regs;
x86_pmu.pebs_ept = 1;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = glc_get_event_constraints;
@@ -6772,15 +6778,13 @@ __init int intel_pmu_init(void)
td_attr = glc_td_events_attrs;
tsx_attr = glc_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(true);
- pr_cont("Sapphire Rapids events, ");
- name = "sapphire_rapids";
break;
- case INTEL_FAM6_ALDERLAKE:
- case INTEL_FAM6_ALDERLAKE_L:
- case INTEL_FAM6_RAPTORLAKE:
- case INTEL_FAM6_RAPTORLAKE_P:
- case INTEL_FAM6_RAPTORLAKE_S:
+ case INTEL_ALDERLAKE:
+ case INTEL_ALDERLAKE_L:
+ case INTEL_RAPTORLAKE:
+ case INTEL_RAPTORLAKE_P:
+ case INTEL_RAPTORLAKE_S:
/*
* Alder Lake has 2 types of CPU, core and atom.
*
@@ -6838,8 +6842,8 @@ __init int intel_pmu_init(void)
name = "alderlake_hybrid";
break;
- case INTEL_FAM6_METEORLAKE:
- case INTEL_FAM6_METEORLAKE_L:
+ case INTEL_METEORLAKE:
+ case INTEL_METEORLAKE_L:
intel_pmu_init_hybrid(hybrid_big_small);
x86_pmu.pebs_latency_data = mtl_latency_data_small;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 7ecc67deec..93900c3734 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -3012,6 +3012,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
btintel_set_dsm_reset_method(hdev, &ver_tlv);
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
+ if (err)
+ goto exit_error;
+
btintel_register_devcoredump_support(hdev);
btintel_print_fseq_info(hdev);
break;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 359b68adaf..79628ff837 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -253,6 +253,7 @@ config DRM_EXEC
config DRM_GPUVM
tristate
depends on DRM
+ select DRM_EXEC
help
GPU-VM representation providing helpers to manage a GPUs virtual
address space
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ec888fc6ea..13eb2bc69e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1763,7 +1763,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- int r;
+ int i, r;
addr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1778,13 +1778,13 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
return -EINVAL;
- if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
- (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
- if (r)
- return r;
- }
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
+ return r;
return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 1e92594169..e6c7f0d64e 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -158,7 +158,14 @@ void ast_dp_launch(struct drm_device *dev)
ASTDP_HOST_EDID_READ_DONE);
}
+bool ast_dp_power_is_on(struct ast_device *ast)
+{
+ u8 vgacre3;
+
+ vgacre3 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xe3);
+ return !(vgacre3 & AST_DP_PHY_SLEEP);
+}
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index f8c49ba68e..af2368f6f0 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -391,6 +391,11 @@ static int ast_drm_freeze(struct drm_device *dev)
static int ast_drm_thaw(struct drm_device *dev)
{
+ struct ast_device *ast = to_ast_device(dev);
+
+ ast_enable_vga(ast->ioregs);
+ ast_open_key(ast->ioregs);
+ ast_enable_mmio(dev->dev, ast->ioregs);
ast_post_gpu(dev);
return drm_mode_config_helper_resume(dev);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index ba3d869739..47bab5596c 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -472,6 +472,7 @@ void ast_init_3rdtx(struct drm_device *dev);
bool ast_astdp_is_connected(struct ast_device *ast);
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
void ast_dp_launch(struct drm_device *dev);
+bool ast_dp_power_is_on(struct ast_device *ast);
void ast_dp_power_on_off(struct drm_device *dev, bool no);
void ast_dp_set_on_off(struct drm_device *dev, bool no);
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6695af7076..88f830a7d2 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -28,6 +28,7 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/delay.h>
#include <linux/export.h>
#include <linux/pci.h>
@@ -1641,11 +1642,35 @@ static int ast_astdp_connector_helper_detect_ctx(struct drm_connector *connector
struct drm_modeset_acquire_ctx *ctx,
bool force)
{
+ struct drm_device *dev = connector->dev;
struct ast_device *ast = to_ast_device(connector->dev);
+ enum drm_connector_status status = connector_status_disconnected;
+ struct drm_connector_state *connector_state = connector->state;
+ bool is_active = false;
+
+ mutex_lock(&ast->modeset_lock);
+
+ if (connector_state && connector_state->crtc) {
+ struct drm_crtc_state *crtc_state = connector_state->crtc->state;
+
+ if (crtc_state && crtc_state->active)
+ is_active = true;
+ }
+
+ if (!is_active && !ast_dp_power_is_on(ast)) {
+ ast_dp_power_on_off(dev, true);
+ msleep(50);
+ }
if (ast_astdp_is_connected(ast))
- return connector_status_connected;
- return connector_status_disconnected;
+ status = connector_status_connected;
+
+ if (!is_active && status == connector_status_disconnected)
+ ast_dp_power_on_off(dev, false);
+
+ mutex_unlock(&ast->modeset_lock);
+
+ return status;
}
static const struct drm_connector_helper_funcs ast_astdp_connector_helper_funcs = {
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index fc16fddee5..02b1235c6d 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1066,7 +1066,10 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
break;
}
- if (async_flip && prop != config->prop_fb_id) {
+ if (async_flip &&
+ prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 2803ac111b..bfedcbf516 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -355,7 +355,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
err_drm_gem_vmap_unlocked:
drm_gem_unlock(gem);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap_local);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 90998b0373..292d163036 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -1658,7 +1658,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
}
static int
-skl_ddi_calculate_wrpll(int clock /* in Hz */,
+skl_ddi_calculate_wrpll(int clock,
int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
@@ -1683,7 +1683,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
};
unsigned int dco, d, i;
unsigned int p0, p1, p2;
- u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
@@ -1808,7 +1808,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
struct skl_wrpll_params wrpll_params = {};
int ret;
- ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
i915->display.dpll.ref_clks.nssc, &wrpll_params);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
index a568a457e5..f590d7f48b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h
@@ -251,7 +251,7 @@
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
(TRANS_HDCP(dev_priv) ? \
TRANS_HDCP2_STREAM_STATUS(trans) : \
- PIPE_HDCP2_STREAM_STATUS(pipe))
+ PIPE_HDCP2_STREAM_STATUS(port))
#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0b1cd4c7a5..025a79fe59 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2749,26 +2749,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
}
static int
-gen12_configure_all_contexts(struct i915_perf_stream *stream,
- const struct i915_oa_config *oa_config,
- struct i915_active *active)
-{
- struct flex regs[] = {
- {
- GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
- CTX_R_PWR_CLK_STATE,
- },
- };
-
- if (stream->engine->class != RENDER_CLASS)
- return 0;
-
- return oa_configure_all_contexts(stream,
- regs, ARRAY_SIZE(regs),
- active);
-}
-
-static int
lrc_configure_all_contexts(struct i915_perf_stream *stream,
const struct i915_oa_config *oa_config,
struct i915_active *active)
@@ -2874,7 +2854,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
{
struct drm_i915_private *i915 = stream->perf->i915;
struct intel_uncore *uncore = stream->uncore;
- struct i915_oa_config *oa_config = stream->oa_config;
bool periodic = stream->periodic;
u32 period_exponent = stream->period_exponent;
u32 sqcnt1;
@@ -2919,15 +2898,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
/*
- * Update all contexts prior writing the mux configurations as we need
- * to make sure all slices/subslices are ON before writing to NOA
- * registers.
- */
- ret = gen12_configure_all_contexts(stream, oa_config, active);
- if (ret)
- return ret;
-
- /*
* For Gen12, performance counters are context
* saved/restored. Only enable it for the context that
* requested this.
@@ -2980,9 +2950,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
}
- /* Reset all contexts' slices/subslices configurations. */
- gen12_configure_all_contexts(stream, NULL, NULL);
-
/* disable the context save/restore or OAR counters */
if (stream->ctx)
gen12_configure_oar_context(stream, NULL);
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b58ab595fa..cd95446d68 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
if (ret) {
- nouveau_bo_ref(NULL, &nvbo);
+ drm_gem_object_release(&nvbo->bo.base);
+ kfree(nvbo);
obj = ERR_PTR(-ENOMEM);
goto unlock;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index ee02cd833c..84a36fe7c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -1803,6 +1803,7 @@ nouveau_uvmm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct nouveau_bo *nvbo = nouveau_gem_object(vm_bo->obj);
+ nouveau_bo_placement_set(nvbo, nvbo->valid_domains, 0);
return nouveau_bo_validate(nvbo, true, false);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a2c516fe6d..1d535abedc 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -556,6 +556,10 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo);
void v3d_mmu_remove_ptes(struct v3d_bo *bo);
/* v3d_sched.c */
+void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ unsigned int count);
+void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+ unsigned int count);
void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 7cd8c335cd..30d5366d62 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -73,24 +73,44 @@ v3d_sched_job_free(struct drm_sched_job *sched_job)
v3d_job_cleanup(job);
}
+void
+v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
+ unsigned int count)
+{
+ if (query_info->queries) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(query_info->queries[i].syncobj);
+
+ kvfree(query_info->queries);
+ }
+}
+
+void
+v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
+ unsigned int count)
+{
+ if (query_info->queries) {
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ drm_syncobj_put(query_info->queries[i].syncobj);
+
+ kvfree(query_info->queries);
+ }
+}
+
static void
v3d_cpu_job_free(struct drm_sched_job *sched_job)
{
struct v3d_cpu_job *job = to_cpu_job(sched_job);
- struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query;
- struct v3d_performance_query_info *performance_query = &job->performance_query;
- if (timestamp_query->queries) {
- for (int i = 0; i < timestamp_query->count; i++)
- drm_syncobj_put(timestamp_query->queries[i].syncobj);
- kvfree(timestamp_query->queries);
- }
+ v3d_timestamp_query_info_free(&job->timestamp_query,
+ job->timestamp_query.count);
- if (performance_query->queries) {
- for (int i = 0; i < performance_query->count; i++)
- drm_syncobj_put(performance_query->queries[i].syncobj);
- kvfree(performance_query->queries);
- }
+ v3d_performance_query_info_free(&job->performance_query,
+ job->performance_query.count);
v3d_job_cleanup(&job->base);
}
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 88f63d526b..4cdfabbf49 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -452,6 +452,8 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_timestamp_query timestamp;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -480,26 +482,34 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv,
offsets = u64_to_user_ptr(timestamp.offsets);
syncs = u64_to_user_ptr(timestamp.syncs);
- for (int i = 0; i < timestamp.count; i++) {
+ for (i = 0; i < timestamp.count; i++) {
u32 offset, sync;
if (copy_from_user(&offset, offsets++, sizeof(offset))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->timestamp_query.queries[i].offset = offset;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ if (!job->timestamp_query.queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
job->timestamp_query.count = timestamp.count;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
}
static int
@@ -509,6 +519,8 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
{
u32 __user *syncs;
struct drm_v3d_reset_timestamp_query reset;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -533,21 +545,29 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(reset.syncs);
- for (int i = 0; i < reset.count; i++) {
+ for (i = 0; i < reset.count; i++) {
u32 sync;
job->timestamp_query.queries[i].offset = reset.offset + 8 * i;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ if (!job->timestamp_query.queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
job->timestamp_query.count = reset.count;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
}
/* Get data for the copy timestamp query results job submission. */
@@ -558,7 +578,8 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
{
u32 __user *offsets, *syncs;
struct drm_v3d_copy_timestamp_query copy;
- int i;
+ unsigned int i;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -591,18 +612,22 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
u32 offset, sync;
if (copy_from_user(&offset, offsets++, sizeof(offset))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->timestamp_query.queries[i].offset = offset;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->timestamp_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ if (!job->timestamp_query.queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
job->timestamp_query.count = copy.count;
@@ -613,6 +638,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
+
+error:
+ v3d_timestamp_query_info_free(&job->timestamp_query, i);
+ return err;
}
static int
@@ -623,6 +652,8 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
u32 __user *syncs;
u64 __user *kperfmon_ids;
struct drm_v3d_reset_performance_query reset;
+ unsigned int i, j;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -637,6 +668,9 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
if (copy_from_user(&reset, ext, sizeof(reset)))
return -EFAULT;
+ if (reset.nperfmons > V3D_MAX_PERFMONS)
+ return -EINVAL;
+
job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY;
job->performance_query.queries = kvmalloc_array(reset.count,
@@ -648,39 +682,47 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(reset.syncs);
kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids);
- for (int i = 0; i < reset.count; i++) {
+ for (i = 0; i < reset.count; i++) {
u32 sync;
u64 ids;
u32 __user *ids_pointer;
u32 id;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
ids_pointer = u64_to_user_ptr(ids);
- for (int j = 0; j < reset.nperfmons; j++) {
+ for (j = 0; j < reset.nperfmons; j++) {
if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->performance_query.queries[i].kperfmon_ids[j] = id;
}
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ if (!job->performance_query.queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
job->performance_query.count = reset.count;
job->performance_query.nperfmons = reset.nperfmons;
return 0;
+
+error:
+ v3d_performance_query_info_free(&job->performance_query, i);
+ return err;
}
static int
@@ -691,6 +733,8 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
u32 __user *syncs;
u64 __user *kperfmon_ids;
struct drm_v3d_copy_performance_query copy;
+ unsigned int i, j;
+ int err;
if (!job) {
DRM_DEBUG("CPU job extension was attached to a GPU job.\n");
@@ -708,6 +752,9 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
if (copy.pad)
return -EINVAL;
+ if (copy.nperfmons > V3D_MAX_PERFMONS)
+ return -EINVAL;
+
job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY;
job->performance_query.queries = kvmalloc_array(copy.count,
@@ -719,34 +766,38 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
syncs = u64_to_user_ptr(copy.syncs);
kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids);
- for (int i = 0; i < copy.count; i++) {
+ for (i = 0; i < copy.count; i++) {
u32 sync;
u64 ids;
u32 __user *ids_pointer;
u32 id;
if (copy_from_user(&sync, syncs++, sizeof(sync))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
- job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
-
if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
ids_pointer = u64_to_user_ptr(ids);
- for (int j = 0; j < copy.nperfmons; j++) {
+ for (j = 0; j < copy.nperfmons; j++) {
if (copy_from_user(&id, ids_pointer++, sizeof(id))) {
- kvfree(job->performance_query.queries);
- return -EFAULT;
+ err = -EFAULT;
+ goto error;
}
job->performance_query.queries[i].kperfmon_ids[j] = id;
}
+
+ job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync);
+ if (!job->performance_query.queries[i].syncobj) {
+ err = -ENOENT;
+ goto error;
+ }
}
job->performance_query.count = copy.count;
job->performance_query.nperfmons = copy.nperfmons;
@@ -759,6 +810,10 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv,
job->copy.stride = copy.stride;
return 0;
+
+error:
+ v3d_performance_query_info_free(&job->performance_query, i);
+ return err;
}
/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 1c7c7f61a2..7d34cf83f5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -48,7 +48,7 @@ struct virtio_gpu_submit {
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
struct dma_fence *in_fence)
{
- u32 context = submit->fence_ctx + submit->ring_idx;
+ u64 context = submit->fence_ctx + submit->ring_idx;
if (dma_fence_match_context(in_fence, context))
return 0;
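The one-character type change matters because dma_fence context numbers are 64-bit; summing into a u32 truncates, and the later context comparison can then spuriously fail or match. A small standalone sketch with an illustrative context value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t fence_ctx = 0x100000000ULL;  /* hypothetical allocation */
        uint32_t ring_idx = 1;

        /* Old code: u32 wraps to 1, losing the upper 32 bits. */
        uint32_t truncated = (uint32_t)(fence_ctx + ring_idx);
        /* Fixed code: full 64-bit context, 0x100000001. */
        uint64_t correct = fence_ctx + ring_idx;

        printf("truncated=%u correct=%llu\n",
               truncated, (unsigned long long)correct);
        return 0;
}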
diff --git a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
index b0d87c5f58..1ac3cb151b 100644
--- a/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
+++ b/drivers/gpu/drm/vmwgfx/vmw_surface_cache.h
@@ -1,6 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 2021 VMware, Inc.
- * SPDX-License-Identifier: GPL-2.0 OR MIT
+ *
+ * Copyright (c) 2021-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -31,6 +33,10 @@
#include <drm/vmwgfx_drm.h>
+#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) ((svga3d_flags) >> 32)
+#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
+ ((svga3d_flags) & ((uint64_t)U32_MAX))
+
static inline u32 clamped_umul32(u32 a, u32 b)
{
uint64_t tmp = (uint64_t) a*b;
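The new SVGA3D_FLAGS_UPPER_32()/LOWER_32() macros split a 64-bit flags word into the two 32-bit halves the device commands expect. A standalone sketch of the same macros, with UINT32_MAX substituted for the kernel's U32_MAX so it builds outside the tree:

#include <stdint.h>
#include <stdio.h>

#define SVGA3D_FLAGS_UPPER_32(f) ((uint64_t)(f) >> 32)
#define SVGA3D_FLAGS_LOWER_32(f) ((uint64_t)(f) & UINT32_MAX)

int main(void)
{
        uint64_t flags = 0xDEADBEEF00C0FFEEULL;  /* illustrative value */

        printf("upper=%08llx lower=%08llx\n",
               (unsigned long long)SVGA3D_FLAGS_UPPER_32(flags),
               (unsigned long long)SVGA3D_FLAGS_LOWER_32(flags));
        return 0;
}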
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 00144632c6..f42ebc4a7c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,15 +28,39 @@
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-
+#include "vmwgfx_resource_priv.h"
#include <drm/ttm/ttm_placement.h>
static void vmw_bo_release(struct vmw_bo *vbo)
{
+ struct vmw_resource *res;
+
WARN_ON(vbo->tbo.base.funcs &&
kref_read(&vbo->tbo.base.refcount) != 0);
vmw_bo_unmap(vbo);
+
+ xa_destroy(&vbo->detached_resources);
+ WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ WARN_ON(vbo != res->guest_memory_bo);
+ WARN_ON(!res->guest_memory_bo);
+ if (res->guest_memory_bo) {
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void)vmw_resource_reserve(res, false, true);
+ vmw_resource_mob_detach(res);
+ if (res->coherent)
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ vmw_resource_unreserve(res, false, false, false, NULL,
+ 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+ vmw_surface_unreference(&vbo->dumb_surface);
+ }
drm_gem_object_release(&vbo->tbo.base);
}
@@ -326,6 +350,11 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
*/
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
+ return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
+}
+
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
+{
struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
@@ -335,9 +364,10 @@ void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
if (virtual)
return virtual;
- ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
+ ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
if (ret)
- DRM_ERROR("Buffer object map failed: %d.\n", ret);
+ DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
+ ret, bo->base.size, size);
return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
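vmw_bo_map_and_cache() now delegates to a size-aware variant: return the cached mapping if one exists, otherwise create and remember it, optionally for less than the whole object. A minimal sketch of that idiom, with malloc() standing in for ttm_bo_kmap() and illustrative types:

#include <stdlib.h>

struct obj {
        size_t size;
        void *virtual;  /* cached mapping, NULL until first use */
};

static void *obj_map_and_cache_size(struct obj *o, size_t size)
{
        if (o->virtual)                 /* already mapped: reuse */
                return o->virtual;
        o->virtual = malloc(size);      /* stand-in for ttm_bo_kmap() */
        return o->virtual;
}

static void *obj_map_and_cache(struct obj *o)
{
        /* The whole-object map is just the sized map at full size. */
        return obj_map_and_cache_size(o, o->size);
}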
@@ -390,6 +420,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
+ xa_init(&vmw_bo->detached_resources);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
@@ -654,52 +685,6 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
dma_fence_put(&fence->base);
}
-
-/**
- * vmw_dumb_create - Create a dumb kms buffer
- *
- * @file_priv: Pointer to a struct drm_file identifying the caller.
- * @dev: Pointer to the drm device.
- * @args: Pointer to a struct drm_mode_create_dumb structure
- * Return: Zero on success, negative error code on failure.
- *
- * This is a driver callback for the core drm create_dumb functionality.
- * Note that this is very similar to the vmw_bo_alloc ioctl, except
- * that the arguments have a different format.
- */
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_bo *vbo;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
- int ret;
-
- switch (cpp) {
- case 1: /* DRM_FORMAT_C8 */
- case 2: /* DRM_FORMAT_RGB565 */
- case 4: /* DRM_FORMAT_XRGB8888 */
- break;
- default:
- /*
- * Dumb buffers don't allow anything else.
- * This is tested via IGT's dumb_buffers
- */
- return -EINVAL;
- }
-
- args->pitch = args->width * cpp;
- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
-
- ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- args->size, &args->handle,
- &vbo);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->tbo.base);
- return ret;
-}
-
/**
* vmw_bo_swap_notify - swapout notify callback.
*
@@ -853,3 +838,43 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
vmw_bo_placement_set(bo, domain, domain);
}
+
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
+}
+
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
+{
+ xa_erase(&vbo->detached_resources, (unsigned long)res);
+}
+
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
+{
+ unsigned long index;
+ struct vmw_resource *res = NULL;
+ struct vmw_surface *surf = NULL;
+ struct rb_node *rb_itr = vbo->res_tree.rb_node;
+
+ if (vbo->is_dumb && vbo->dumb_surface) {
+ res = &vbo->dumb_surface->res;
+ goto out;
+ }
+
+ xa_for_each(&vbo->detached_resources, index, res) {
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+ for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
+ rb_itr = rb_next(rb_itr)) {
+ res = rb_entry(rb_itr, struct vmw_resource, mob_node);
+ if (res->func->res_type == vmw_res_surface)
+ goto out;
+ }
+
+out:
+ if (res)
+ surf = vmw_res_to_srf(res);
+ return surf;
+}
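vmw_bo_surface() probes three places in priority order and returns the first surface it finds: the dumb-buffer surface, then detached resources, then resources still attached in the tree. A rough standalone sketch of that first-hit-wins order, using plain arrays in place of the xarray and rb-tree:

#include <stddef.h>

struct surface;

struct bo {
        struct surface *dumb_surface;   /* 1: dumb-buffer surface */
        struct surface *detached[4];    /* 2: detached resources  */
        struct surface *attached[4];    /* 3: attached res tree   */
};

static struct surface *bo_surface(struct bo *bo)
{
        if (bo->dumb_surface)
                return bo->dumb_surface;
        for (int i = 0; i < 4; i++)
                if (bo->detached[i])
                        return bo->detached[i];
        for (int i = 0; i < 4; i++)
                if (bo->attached[i])
                        return bo->attached[i];
        return NULL;
}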
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index f349642e61..62b4342d5f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2023-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -35,11 +36,13 @@
#include <linux/rbtree_types.h>
#include <linux/types.h>
+#include <linux/xarray.h>
struct vmw_bo_dirty;
struct vmw_fence_obj;
struct vmw_private;
struct vmw_resource;
+struct vmw_surface;
enum vmw_bo_domain {
VMW_BO_DOMAIN_SYS = BIT(0),
@@ -85,11 +88,15 @@ struct vmw_bo {
struct rb_root res_tree;
u32 res_prios[TTM_MAX_BO_PRIORITY];
+ struct xarray detached_resources;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
struct vmw_bo_dirty *dirty;
+
+ bool is_dumb;
+ struct vmw_surface *dumb_surface;
};
void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
@@ -124,15 +131,21 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size);
void vmw_bo_unmap(struct vmw_bo *vbo);
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res);
+struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo);
+
int vmw_user_bo_lookup(struct drm_file *filp,
u32 handle,
struct vmw_bo **out);
+
/**
* vmw_bo_adjust_prio - Adjust the buffer object eviction priority
* according to attached resources
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index a1ce41e1c4..32f50e5958 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -763,6 +764,26 @@ extern int vmw_gmr_bind(struct vmw_private *dev_priv,
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
+ * User handles
+ */
+struct vmw_user_object {
+ struct vmw_surface *surface;
+ struct vmw_bo *buffer;
+};
+
+int vmw_user_object_lookup(struct vmw_private *dev_priv, struct drm_file *filp,
+ u32 handle, struct vmw_user_object *uo);
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo);
+void vmw_user_object_unref(struct vmw_user_object *uo);
+bool vmw_user_object_is_null(struct vmw_user_object *uo);
+struct vmw_surface *vmw_user_object_surface(struct vmw_user_object *uo);
+struct vmw_bo *vmw_user_object_buffer(struct vmw_user_object *uo);
+void *vmw_user_object_map(struct vmw_user_object *uo);
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size);
+void vmw_user_object_unmap(struct vmw_user_object *uo);
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo);
+
+/**
* Resource utilities - vmwgfx_resource.c
*/
struct vmw_user_resource_conv;
@@ -776,11 +797,6 @@ extern int vmw_resource_validate(struct vmw_resource *res, bool intr,
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
-extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
- struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -1057,9 +1073,6 @@ int vmw_kms_suspend(struct drm_device *dev);
int vmw_kms_resume(struct drm_device *dev);
void vmw_kms_lost_device(struct drm_device *dev);
-int vmw_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);
@@ -1176,6 +1189,15 @@ extern int vmw_gb_surface_reference_ext_ioctl(struct drm_device *dev,
int vmw_gb_surface_define(struct vmw_private *dev_priv,
const struct vmw_surface_metadata *req,
struct vmw_surface **srf_out);
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle);
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/*
* Shader management - vmwgfx_shader.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 5efc6a766f..588d50abab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -32,7 +32,6 @@
#define VMW_FENCE_WRAP (1 << 31)
struct vmw_fence_manager {
- int num_fence_objects;
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
@@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
-
struct vmw_fence_manager *fman = fman_from_fence(fence);
- spin_lock(&fman->lock);
- list_del_init(&fence->head);
- --fman->num_fence_objects;
- spin_unlock(&fman->lock);
+ if (!list_empty(&fence->head)) {
+ spin_lock(&fman->lock);
+ list_del_init(&fence->head);
+ spin_unlock(&fman->lock);
+ }
fence->destroy(fence);
}
@@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
.release = vmw_fence_obj_destroy,
};
-
/*
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
@@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
goto out_unlock;
}
list_add_tail(&fence->head, &fman->fence_list);
- ++fman->num_fence_objects;
out_unlock:
spin_unlock(&fman->lock);
@@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
u32 passed_seqno)
{
u32 goal_seqno;
- struct vmw_fence_obj *fence;
+ struct vmw_fence_obj *fence, *next_fence;
if (likely(!fman->seqno_valid))
return false;
@@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
return false;
fman->seqno_valid = false;
- list_for_each_entry(fence, &fman->fence_list, head) {
+ list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (!list_empty(&fence->seq_passed_actions)) {
fman->seqno_valid = true;
vmw_fence_goal_write(fman->dev_priv,
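Switching to list_for_each_entry_safe() caches the successor before the loop body runs, so a fence may be unlinked mid-walk (as signalling can now cause) without derailing the traversal. A self-contained sketch of the underlying mechanism; the list primitives here are re-implementations for illustration, not the kernel's:

#include <stddef.h>

struct node { struct node *next, *prev; };

static void node_del_init(struct node *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
        e->next = e->prev = e;          /* mark detached, like list_del_init() */
}

/* Walk a circular list anchored at 'head'; 'n' holds the successor so
 * the body may unlink 'pos' without breaking the walk. */
static void drain_all(struct node *head)
{
        struct node *pos, *n;

        for (pos = head->next, n = pos->next; pos != head;
             pos = n, n = pos->next)
                node_del_init(pos);     /* safe: next pointer already saved */
}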
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 00c4ff6841..288ed0bb75 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -193,13 +194,16 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- if (vps->surf) {
- if (vps->surf_mapped)
- return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- return vps->surf->snooper.image;
- } else if (vps->bo)
- return vmw_bo_map_and_cache(vps->bo);
- return NULL;
+ struct vmw_surface *surf;
+
+ if (vmw_user_object_is_null(&vps->uo))
+ return NULL;
+
+ surf = vmw_user_object_surface(&vps->uo);
+ if (surf && !vmw_user_object_is_mapped(&vps->uo))
+ return surf->snooper.image;
+
+ return vmw_user_object_map(&vps->uo);
}
static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
@@ -536,22 +540,16 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane)
* vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
- * @unreference: true if we also want to unreference the display.
*/
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference)
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps)
{
- if (vps->surf) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+
+ if (surf) {
if (vps->pinned) {
- vmw_resource_unpin(&vps->surf->res);
+ vmw_resource_unpin(&surf->res);
vps->pinned--;
}
-
- if (unreference) {
- if (vps->pinned)
- DRM_ERROR("Surface still pinned\n");
- vmw_surface_unreference(&vps->surf);
- }
}
}
@@ -572,7 +570,7 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- vmw_du_plane_unpin_surf(vps, false);
+ vmw_du_plane_unpin_surf(vps);
}
@@ -661,25 +659,14 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
+ if (!vmw_user_object_is_null(&vps->uo))
+ vmw_user_object_unmap(&vps->uo);
vmw_du_cursor_plane_unmap_cm(vps);
vmw_du_put_cursor_mob(vcp, vps);
- vmw_du_plane_unpin_surf(vps, false);
-
- if (vps->surf) {
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
- }
+ vmw_du_plane_unpin_surf(vps);
+ vmw_user_object_unref(&vps->uo);
}
@@ -698,64 +685,48 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_state->fb;
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct vmw_bo *bo = NULL;
int ret = 0;
- if (vps->surf) {
- if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.guest_memory_bo);
- vps->surf_mapped = false;
- }
- vmw_surface_unreference(&vps->surf);
- vps->surf = NULL;
- }
-
- if (vps->bo) {
- vmw_bo_unreference(&vps->bo);
- vps->bo = NULL;
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ vmw_user_object_unmap(&vps->uo);
+ vmw_user_object_unref(&vps->uo);
}
if (fb) {
if (vmw_framebuffer_to_vfb(fb)->bo) {
- vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
- vmw_bo_reference(vps->bo);
+ vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer;
+ vps->uo.surface = NULL;
} else {
- vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
- vmw_surface_reference(vps->surf);
+ memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo));
}
+ vmw_user_object_ref(&vps->uo);
}
- if (!vps->surf && vps->bo) {
- const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
+ struct ttm_operation_ctx ctx = {false, false};
- /*
- * Not using vmw_bo_map_and_cache() helper here as we need to
- * reserve the ttm_buffer_object first which
- * vmw_bo_map_and_cache() omits.
- */
- ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
-
- if (unlikely(ret != 0))
+ ret = ttm_bo_reserve(&bo->tbo, true, false, NULL);
+ if (ret != 0)
return -ENOMEM;
- ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
-
- ttm_bo_unreserve(&vps->bo->tbo);
-
- if (unlikely(ret != 0))
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret != 0)
return -ENOMEM;
- } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
- WARN_ON(vps->surf->snooper.image);
- ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
- NULL);
- if (unlikely(ret != 0))
- return -ENOMEM;
- vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
- ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
- vps->surf_mapped = true;
+ vmw_bo_pin_reserved(bo, true);
+ if (vmw_framebuffer_to_vfb(fb)->bo) {
+ const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
+
+ (void)vmw_bo_map_and_cache_size(bo, size);
+ } else {
+ vmw_bo_map_and_cache(bo);
+ }
+ ttm_bo_unreserve(&bo->tbo);
}
- if (vps->surf || vps->bo) {
+ if (!vmw_user_object_is_null(&vps->uo)) {
vmw_du_get_cursor_mob(vcp, vps);
vmw_du_cursor_plane_map_cm(vps);
}
@@ -777,14 +748,17 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
+ struct vmw_bo *old_bo = NULL;
+ struct vmw_bo *new_bo = NULL;
s32 hotspot_x, hotspot_y;
+ int ret;
hotspot_x = du->hotspot_x + new_state->hotspot_x;
hotspot_y = du->hotspot_y + new_state->hotspot_y;
- du->cursor_surface = vps->surf;
+ du->cursor_surface = vmw_user_object_surface(&vps->uo);
- if (!vps->surf && !vps->bo) {
+ if (vmw_user_object_is_null(&vps->uo)) {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return;
}
@@ -792,10 +766,26 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
vps->cursor.hotspot_x = hotspot_x;
vps->cursor.hotspot_y = hotspot_y;
- if (vps->surf) {
+ if (du->cursor_surface)
du->cursor_age = du->cursor_surface->snooper.age;
+
+ if (!vmw_user_object_is_null(&old_vps->uo)) {
+ old_bo = vmw_user_object_buffer(&old_vps->uo);
+ ret = ttm_bo_reserve(&old_bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
}
+ if (!vmw_user_object_is_null(&vps->uo)) {
+ new_bo = vmw_user_object_buffer(&vps->uo);
+ if (old_bo != new_bo) {
+ ret = ttm_bo_reserve(&new_bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
+ } else {
+ new_bo = NULL;
+ }
+ }
if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
/*
* If it hasn't changed, avoid making the device do extra
@@ -813,6 +803,11 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
+ if (old_bo)
+ ttm_bo_unreserve(&old_bo->tbo);
+ if (new_bo)
+ ttm_bo_unreserve(&new_bo->tbo);
+
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -913,7 +908,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
}
if (!vmw_framebuffer_to_vfb(fb)->bo) {
- surface = vmw_framebuffer_to_vfbs(fb)->surface;
+ surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo);
WARN_ON(!surface);
@@ -1074,12 +1069,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
memset(&vps->cursor, 0, sizeof(vps->cursor));
/* Each ref counted resource needs to be acquired again */
- if (vps->surf)
- (void) vmw_surface_reference(vps->surf);
-
- if (vps->bo)
- (void) vmw_bo_reference(vps->bo);
-
+ vmw_user_object_ref(&vps->uo);
state = &vps->base;
__drm_atomic_helper_plane_duplicate_state(plane, state);
@@ -1128,11 +1118,7 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
/* Should have been freed by cleanup_fb */
- if (vps->surf)
- vmw_surface_unreference(&vps->surf);
-
- if (vps->bo)
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
drm_atomic_helper_plane_destroy_state(plane, state);
}
@@ -1227,7 +1213,7 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
vmw_framebuffer_to_vfbs(framebuffer);
drm_framebuffer_cleanup(framebuffer);
- vmw_surface_unreference(&vfbs->surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
}
@@ -1272,29 +1258,41 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
return -ENOSYS;
}
+static int vmw_framebuffer_surface_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(fb);
+ struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo);
+
+ return drm_gem_handle_create(file_priv, &bo->tbo.base, handle);
+}
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
+ .create_handle = vmw_framebuffer_surface_create_handle,
.destroy = vmw_framebuffer_surface_destroy,
.dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct vmw_surface *surface,
+ struct vmw_user_object *uo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
- *mode_cmd,
- bool is_bo_proxy)
+ *mode_cmd)
{
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
+ struct vmw_surface *surface;
int ret;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
+ surface = vmw_user_object_surface(uo);
+
/*
* Sanity checks.
*/
@@ -1357,8 +1355,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
}
drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
- vfbs->surface = vmw_surface_reference(surface);
- vfbs->is_bo_proxy = is_bo_proxy;
+ memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
+ vmw_user_object_ref(&vfbs->uo);
*out = &vfbs->base;
@@ -1370,7 +1368,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
return 0;
out_err2:
- vmw_surface_unreference(&surface);
+ vmw_user_object_unref(&vfbs->uo);
kfree(vfbs);
out_err1:
return ret;
@@ -1386,7 +1384,6 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
{
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
-
return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
@@ -1407,86 +1404,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = drm_atomic_helper_dirtyfb,
};
-/**
- * vmw_create_bo_proxy - create a proxy surface for the buffer object
- *
- * @dev: DRM device
- * @mode_cmd: parameters for the new surface
- * @bo_mob: MOB backing the buffer object
- * @srf_out: newly created surface
- *
- * When the content FB is a buffer object, we create a surface as a proxy to the
- * same buffer. This way we can do a surface copy rather than a surface DMA.
- * This is a more efficient approach
- *
- * RETURNS:
- * 0 on success, error code otherwise
- */
-static int vmw_create_bo_proxy(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_bo *bo_mob,
- struct vmw_surface **srf_out)
-{
- struct vmw_surface_metadata metadata = {0};
- uint32_t format;
- struct vmw_resource *res;
- unsigned int bytes_pp;
- int ret;
-
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- format = SVGA3D_X8R8G8B8;
- bytes_pp = 4;
- break;
-
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB1555:
- format = SVGA3D_R5G6B5;
- bytes_pp = 2;
- break;
-
- case 8:
- format = SVGA3D_P8;
- bytes_pp = 1;
- break;
-
- default:
- DRM_ERROR("Invalid framebuffer format %p4cc\n",
- &mode_cmd->pixel_format);
- return -EINVAL;
- }
-
- metadata.format = format;
- metadata.mip_levels[0] = 1;
- metadata.num_sizes = 1;
- metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
- metadata.base_size.height = mode_cmd->height;
- metadata.base_size.depth = 1;
- metadata.scanout = true;
-
- ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
- if (ret) {
- DRM_ERROR("Failed to allocate proxy content buffer\n");
- return ret;
- }
-
- res = &(*srf_out)->res;
-
- /* Reserve and switch the backing mob. */
- mutex_lock(&res->dev_priv->cmdbuf_mutex);
- (void) vmw_resource_reserve(res, false, true);
- vmw_user_bo_unref(&res->guest_memory_bo);
- res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
- res->guest_memory_offset = 0;
- vmw_resource_unreserve(res, false, false, false, NULL, 0);
- mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-
- return 0;
-}
-
-
-
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct vmw_bo *bo,
struct vmw_framebuffer **out,
@@ -1565,55 +1482,24 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
* vmw_kms_new_framebuffer - Create a new framebuffer.
*
* @dev_priv: Pointer to device private struct.
- * @bo: Pointer to buffer object to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @surface: Pointer to a surface to wrap the kms framebuffer around.
- * Either @bo or @surface must be NULL.
- * @only_2d: No presents will occur to this buffer object based framebuffer.
- * This helps the code to do some important optimizations.
+ * @uo: Pointer to user object to wrap the kms framebuffer around.
+ * Either the buffer or surface inside the user object must be NULL.
* @mode_cmd: Frame-buffer metadata.
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct vmw_framebuffer *vfb = NULL;
- bool is_bo_proxy = false;
int ret;
- /*
- * We cannot use the SurfaceDMA command in an non-accelerated VM,
- * therefore, wrap the buffer object in a surface so we can use the
- * SurfaceCopy command.
- */
- if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
- bo && only_2d &&
- mode_cmd->width > 64 && /* Don't create a proxy for cursor */
- dev_priv->active_display_unit == vmw_du_screen_target) {
- ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
- bo, &surface);
- if (ret)
- return ERR_PTR(ret);
-
- is_bo_proxy = true;
- }
-
/* Create the new framebuffer depending one what we have */
- if (surface) {
- ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
- mode_cmd,
- is_bo_proxy);
- /*
- * vmw_create_bo_proxy() adds a reference that is no longer
- * needed
- */
- if (is_bo_proxy)
- vmw_surface_unreference(&surface);
- } else if (bo) {
- ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
+ if (vmw_user_object_surface(uo)) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb,
+ mode_cmd);
+ } else if (uo->buffer) {
+ ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb,
mode_cmd);
} else {
BUG();
@@ -1635,14 +1521,12 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
- struct vmw_surface *surface = NULL;
- struct vmw_bo *bo = NULL;
+ struct vmw_user_object uo = {0};
int ret;
/* returns either a bo or surface */
- ret = vmw_user_lookup_handle(dev_priv, file_priv,
- mode_cmd->handles[0],
- &surface, &bo);
+ ret = vmw_user_object_lookup(dev_priv, file_priv, mode_cmd->handles[0],
+ &uo);
if (ret) {
DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
mode_cmd->handles[0], mode_cmd->handles[0]);
@@ -1650,7 +1534,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- if (!bo &&
+ if (vmw_user_object_surface(&uo) &&
!vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
DRM_ERROR("Surface size cannot exceed %dx%d\n",
dev_priv->texture_max_width,
@@ -1659,20 +1543,15 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
}
- vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
- !(dev_priv->capabilities & SVGA_CAP_3D),
- mode_cmd);
+ vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd);
if (IS_ERR(vfb)) {
ret = PTR_ERR(vfb);
goto err_out;
}
err_out:
- /* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
- vmw_user_bo_unref(&bo);
- if (surface)
- vmw_surface_unreference(&surface);
+ /* vmw_user_object_lookup takes one ref so does new_fb */
+ vmw_user_object_unref(&uo);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -2585,72 +2464,6 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_update_proxy - Helper function to update a proxy surface from
- * its backing MOB.
- *
- * @res: Pointer to the surface resource
- * @clips: Clip rects in framebuffer (surface) space.
- * @num_clips: Number of clips in @clips.
- * @increment: Integer with which to increment the clip counter when looping.
- * Used to skip a predetermined number of clip rects.
- *
- * This function makes sure the proxy surface is updated from its backing MOB
- * using the region given by @clips. The surface resource @res and its backing
- * MOB needs to be reserved and validated on call.
- */
-int vmw_kms_update_proxy(struct vmw_resource *res,
- const struct drm_clip_rect *clips,
- unsigned num_clips,
- int increment)
-{
- struct vmw_private *dev_priv = res->dev_priv;
- struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
- SVGA3dBox *box;
- size_t copy_size = 0;
- int i;
-
- if (!clips)
- return 0;
-
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd)
- return -ENOMEM;
-
- for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
- box = &cmd->body.box;
-
- cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.image.sid = res->id;
- cmd->body.image.face = 0;
- cmd->body.image.mipmap = 0;
-
- if (clips->x1 > size->width || clips->x2 > size->width ||
- clips->y1 > size->height || clips->y2 > size->height) {
- DRM_ERROR("Invalid clips outsize of framebuffer.\n");
- return -EINVAL;
- }
-
- box->x = clips->x1;
- box->y = clips->y1;
- box->z = 0;
- box->w = clips->x2 - clips->x1;
- box->h = clips->y2 - clips->y1;
- box->d = 1;
-
- copy_size += sizeof(*cmd);
- }
-
- vmw_cmd_commit(dev_priv, copy_size);
-
- return 0;
-}
-
-/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
@@ -2784,8 +2597,9 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
+ struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo);
- ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ ret = vmw_validation_add_resource(&val_ctx, &surf->res,
0, VMW_RES_DIRTY_NONE, NULL,
NULL);
}
@@ -2941,3 +2755,93 @@ int vmw_connector_get_modes(struct drm_connector *connector)
return num_modes;
}
+
+struct vmw_user_object *vmw_user_object_ref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_ref(uo->buffer);
+ else if (uo->surface)
+ vmw_surface_reference(uo->surface);
+ return uo;
+}
+
+void vmw_user_object_unref(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ vmw_user_bo_unref(&uo->buffer);
+ else if (uo->surface)
+ vmw_surface_unreference(&uo->surface);
+}
+
+struct vmw_bo *
+vmw_user_object_buffer(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer;
+ else if (uo->surface)
+ return uo->surface->res.guest_memory_bo;
+ return NULL;
+}
+
+struct vmw_surface *
+vmw_user_object_surface(struct vmw_user_object *uo)
+{
+ if (uo->buffer)
+ return uo->buffer->dumb_surface;
+ return uo->surface;
+}
+
+void *vmw_user_object_map(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache(bo);
+}
+
+void *vmw_user_object_map_size(struct vmw_user_object *uo, size_t size)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+
+ WARN_ON(!bo);
+ return vmw_bo_map_and_cache_size(bo, size);
+}
+
+void vmw_user_object_unmap(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo = vmw_user_object_buffer(uo);
+ int ret;
+
+ WARN_ON(!bo);
+
+ /* Fence the mob creation so we are guaranteed to have the mob */
+ ret = ttm_bo_reserve(&bo->tbo, false, false, NULL);
+ if (ret != 0)
+ return;
+
+ vmw_bo_unmap(bo);
+ vmw_bo_pin_reserved(bo, false);
+
+ ttm_bo_unreserve(&bo->tbo);
+}
+
+bool vmw_user_object_is_mapped(struct vmw_user_object *uo)
+{
+ struct vmw_bo *bo;
+
+ if (!uo || vmw_user_object_is_null(uo))
+ return false;
+
+ bo = vmw_user_object_buffer(uo);
+
+ if (WARN_ON(!bo))
+ return false;
+
+ WARN_ON(bo->map.bo && !bo->map.virtual);
+ return bo->map.virtual;
+}
+
+bool vmw_user_object_is_null(struct vmw_user_object *uo)
+{
+ return !uo->buffer && !uo->surface;
+}
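The vmw_user_object helpers wrap the "handle is either a buffer or a surface" duality so callers stop branching on which one they were handed; the buffer member takes precedence, and a surface can still yield its backing buffer. A minimal sketch of those semantics with illustrative types:

#include <stdbool.h>
#include <stddef.h>

struct buffer { int refs; };
struct surface { int refs; struct buffer *backing; };

struct user_object {
        struct surface *surface;
        struct buffer *buffer;  /* takes precedence when set */
};

static bool uo_is_null(const struct user_object *uo)
{
        return !uo->buffer && !uo->surface;
}

static struct buffer *uo_buffer(struct user_object *uo)
{
        if (uo->buffer)
                return uo->buffer;
        if (uo->surface)
                return uo->surface->backing;  /* surface's guest memory */
        return NULL;
}

static void uo_ref(struct user_object *uo)
{
        /* Reference whichever side is live, as vmw_user_object_ref() does. */
        if (uo->buffer)
                uo->buffer->refs++;
        else if (uo->surface)
                uo->surface->refs++;
}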
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index bf24f2f0dc..6141fadf81 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -221,11 +222,9 @@ struct vmw_framebuffer {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
- struct vmw_surface *surface;
- bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
+ struct vmw_user_object uo;
};
-
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
struct vmw_bo *buffer;
@@ -277,8 +276,7 @@ struct vmw_cursor_plane_state {
*/
struct vmw_plane_state {
struct drm_plane_state base;
- struct vmw_surface *surf;
- struct vmw_bo *bo;
+ struct vmw_user_object uo;
int content_fb_type;
unsigned long bo_size;
@@ -457,9 +455,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_bo *bo,
- struct vmw_surface *surface,
- bool only_2d,
+ struct vmw_user_object *uo,
const struct drm_mode_fb_cmd2 *mode_cmd);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv);
@@ -486,8 +482,7 @@ void vmw_du_plane_reset(struct drm_plane *plane);
struct drm_plane_state *vmw_du_plane_duplicate_state(struct drm_plane *plane);
void vmw_du_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
-void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
- bool unreference);
+void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps);
int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5befc2719a..39949e0a49 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -147,8 +148,9 @@ static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
struct vmw_bo *buf;
int ret;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
if (!buf)
return 0;
@@ -169,8 +171,10 @@ static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_bo *buf;
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+ buf = vfb->bo ?
+ vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_user_object_buffer(&vmw_framebuffer_to_vfbs(&vfb->base)->uo);
+
if (WARN_ON(!buf))
return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index c45b4724e4..e20f64b67b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
{
struct vmw_escape_video_flush *flush;
size_t fifo_size;
- bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
+ bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
int i, num_items;
SVGAGuestPtr ptr;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index c99cad4449..598b90ac75 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2013 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2013-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -31,6 +32,7 @@
*/
#include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
#include "ttm_object.h"
#include <linux/dma-buf.h>
@@ -88,13 +90,35 @@ int vmw_prime_handle_to_fd(struct drm_device *dev,
uint32_t handle, uint32_t flags,
int *prime_fd)
{
+ struct vmw_private *vmw = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_bo *vbo;
int ret;
+ int surf_handle;
- if (handle > VMWGFX_NUM_MOB)
+ if (handle > VMWGFX_NUM_MOB) {
ret = ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
- else
- ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
+ } else {
+ ret = vmw_user_bo_lookup(file_priv, handle, &vbo);
+ if (ret)
+ return ret;
+ if (vbo && vbo->is_dumb) {
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv, handle,
+ flags, prime_fd);
+ } else {
+ surf_handle = vmw_lookup_surface_handle_for_buffer(vmw,
+ vbo,
+ handle);
+ if (surf_handle > 0)
+ ret = ttm_prime_handle_to_fd(tfile, surf_handle,
+ flags, prime_fd);
+ else
+ ret = drm_gem_prime_handle_to_fd(dev, file_priv,
+ handle, flags,
+ prime_fd);
+ }
+ vmw_user_bo_unref(&vbo);
+ }
return ret;
}
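The export path now routes by handle class: legacy surface handles above the MOB range go through TTM prime, dumb buffers stay plain GEM exports, and other buffers are first checked for an associated surface handle. A rough sketch of that dispatch; the cutoff, callees, and inputs are all illustrative, with the lookups folded into explicit parameters:

#include <stdbool.h>
#include <stdio.h>

#define NUM_MOB 16384   /* illustrative cutoff for legacy handles */

static int ttm_prime_export(unsigned int h) { printf("ttm %u\n", h); return 0; }
static int gem_prime_export(unsigned int h) { printf("gem %u\n", h); return 0; }

static int prime_handle_to_fd(unsigned int handle, bool is_dumb,
                              int surf_handle)
{
        if (handle > NUM_MOB)                   /* legacy surface handle */
                return ttm_prime_export(handle);
        if (is_dumb)                            /* dumb buffers stay GEM */
                return gem_prime_export(handle);
        if (surf_handle > 0)                    /* buffer backs a surface */
                return ttm_prime_export(surf_handle);
        return gem_prime_export(handle);        /* plain buffer object */
}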
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 848dba0998..a73af8a355 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -58,6 +59,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
rb_link_node(&res->mob_node, parent, new);
rb_insert_color(&res->mob_node, &gbo->res_tree);
+ vmw_bo_del_detached_resource(gbo, res);
vmw_bo_prio_add(gbo, res->used_prio);
}
@@ -287,28 +289,35 @@ out_bad_resource:
*
* The pointers pointed to by out_surf and out_buf need to be NULL.
*/
-int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+int vmw_user_object_lookup(struct vmw_private *dev_priv,
struct drm_file *filp,
- uint32_t handle,
- struct vmw_surface **out_surf,
- struct vmw_bo **out_buf)
+ u32 handle,
+ struct vmw_user_object *uo)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
int ret;
- BUG_ON(*out_surf || *out_buf);
+ WARN_ON(uo->surface || uo->buffer);
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
user_surface_converter,
&res);
if (!ret) {
- *out_surf = vmw_res_to_srf(res);
+ uo->surface = vmw_res_to_srf(res);
return 0;
}
- *out_surf = NULL;
- ret = vmw_user_bo_lookup(filp, handle, out_buf);
+ uo->surface = NULL;
+ ret = vmw_user_bo_lookup(filp, handle, &uo->buffer);
+ if (!ret && !uo->buffer->is_dumb) {
+ uo->surface = vmw_lookup_surface_for_buffer(dev_priv,
+ uo->buffer,
+ handle);
+ if (uo->surface)
+ vmw_user_bo_unref(&uo->buffer);
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index df0039a8ef..0f4bfd9848 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -240,7 +241,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct vmw_connector_state *vmw_conn_state;
int x, y;
- sou->buffer = vps->bo;
+ sou->buffer = vmw_user_object_buffer(&vps->uo);
conn_state = sou->base.connector.state;
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
@@ -376,10 +377,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
struct drm_crtc *crtc = plane->state->crtc ?
plane->state->crtc : old_state->crtc;
+ struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo);
- if (vps->bo)
- vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
- vmw_bo_unreference(&vps->bo);
+ if (bo)
+ vmw_bo_unpin(vmw_priv(crtc->dev), bo, false);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
vmw_du_plane_cleanup_fb(plane, old_state);
@@ -411,9 +413,10 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
.bo_type = ttm_bo_type_device,
.pin = true
};
+ struct vmw_bo *bo = NULL;
if (!new_fb) {
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
return 0;
@@ -422,17 +425,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
- if (vps->bo) {
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo) {
if (vps->bo_size == bo_params.size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo,
- true);
+ return vmw_bo_pin_in_vram(dev_priv, bo, true);
}
- vmw_bo_unreference(&vps->bo);
+ vmw_user_object_unref(&vps->uo);
vps->bo_size = 0;
}
@@ -442,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
+ ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer);
vmw_overlay_resume_all(dev_priv);
if (ret)
return ret;
@@ -453,7 +456,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* TTM already thinks the buffer is pinned, but make sure the
* pin_count is upped.
*/
- return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
+ return vmw_bo_pin_in_vram(dev_priv, vps->uo.buffer, true);
}
static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
@@ -580,6 +583,7 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
{
struct vmw_kms_sou_dirty_cmd *blit = cmd;
struct vmw_framebuffer_surface *vfbs;
+ struct vmw_surface *surf = NULL;
vfbs = container_of(update->vfb, typeof(*vfbs), base);
@@ -587,7 +591,8 @@ static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
num_hits;
- blit->body.srcImage.sid = vfbs->surface->res.id;
+ surf = vmw_user_object_surface(&vfbs->uo);
+ blit->body.srcImage.sid = surf->res.id;
blit->body.destScreenId = update->du->unit;
/* Update the source and destination bounding box later in post_clip */
@@ -1104,7 +1109,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index a04e073631..5453f7cf0e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2014-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -29,6 +30,7 @@
#include "vmwgfx_kms.h"
#include "vmwgfx_vkms.h"
#include "vmw_surface_cache.h"
+#include <linux/fsnotify.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -735,7 +737,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
int ret;
if (!srf)
- srf = &vfbs->surface->res;
+ srf = &vmw_user_object_surface(&vfbs->uo)->res;
ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
NULL, NULL);
@@ -746,12 +748,6 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (ret)
goto out_unref;
- if (vfbs->is_bo_proxy) {
- ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
- if (ret)
- goto out_finish;
- }
-
sdirty.base.fifo_commit = vmw_kms_stdu_surface_fifo_commit;
sdirty.base.clip = vmw_kms_stdu_surface_clip;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
@@ -765,7 +761,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
dest_x, dest_y, num_clips, inc,
&sdirty.base);
-out_finish:
+
vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
NULL);
@@ -877,6 +873,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+/*
+ * Trigger a modeset if the X,Y position of the Screen Target changes.
+ * This is needed when the multi-monitor topology is cycled: the original
+ * Screen Target keeps the same mode, but its relative X,Y position in the
+ * topology changes.
+ */
+static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *conn_state;
+ struct vmw_screen_target_display_unit *du;
+ struct drm_crtc_state *new_crtc_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ du = vmw_connector_to_stdu(conn);
+
+ if (!conn_state->crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (du->base.gui_x != du->base.set_gui_x ||
+ du->base.gui_y != du->base.set_gui_y)
+ new_crtc_state->mode_changed = true;
+
+ return 0;
+}
+
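The check flags the CRTC state as mode_changed when the unit's current topology position differs from the last one programmed, forcing a full modeset even though the mode itself is unchanged. A minimal sketch of that comparison with illustrative types:

#include <stdbool.h>

struct unit { int gui_x, gui_y, set_gui_x, set_gui_y; };
struct crtc_state { bool mode_changed; };

static void check_position(const struct unit *du, struct crtc_state *cs)
{
        /* Same mode, new X,Y in the topology: still needs a modeset. */
        if (du->gui_x != du->set_gui_x || du->gui_y != du->set_gui_y)
                cs->mode_changed = true;
}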
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
@@ -891,7 +913,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
.get_modes = vmw_connector_get_modes,
- .mode_valid = vmw_stdu_connector_mode_valid
+ .mode_valid = vmw_stdu_connector_mode_valid,
+ .atomic_check = vmw_stdu_connector_atomic_check,
};
@@ -918,9 +941,8 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->surf)
+ if (vmw_user_object_surface(&vps->uo))
WARN_ON(!vps->pinned);
-
vmw_du_plane_cleanup_fb(plane, old_state);
vps->content_fb_type = SAME_AS_DISPLAY;
@@ -928,7 +950,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
}
-
/**
* vmw_stdu_primary_plane_prepare_fb - Readies the display surface
*
@@ -952,13 +973,15 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
enum stdu_content_type new_content_type;
struct vmw_framebuffer_surface *new_vfbs;
uint32_t hdisplay = new_state->crtc_w, vdisplay = new_state->crtc_h;
+ struct drm_plane_state *old_state = plane->state;
+ struct drm_rect rect;
int ret;
/* No FB to prepare */
if (!new_fb) {
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
return 0;
@@ -968,8 +991,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
new_vfbs = (vfb->bo) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
if (new_vfbs &&
- new_vfbs->surface->metadata.base_size.width == hdisplay &&
- new_vfbs->surface->metadata.base_size.height == vdisplay)
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.width == hdisplay &&
+ vmw_user_object_surface(&new_vfbs->uo)->metadata.base_size.height == vdisplay)
new_content_type = SAME_AS_DISPLAY;
else if (vfb->bo)
new_content_type = SEPARATE_BO;
@@ -1007,29 +1030,29 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
metadata.num_sizes = 1;
metadata.scanout = true;
} else {
- metadata = new_vfbs->surface->metadata;
+ metadata = vmw_user_object_surface(&new_vfbs->uo)->metadata;
}
metadata.base_size.width = hdisplay;
metadata.base_size.height = vdisplay;
metadata.base_size.depth = 1;
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
struct drm_vmw_size cur_base_size =
- vps->surf->metadata.base_size;
+ vmw_user_object_surface(&vps->uo)->metadata.base_size;
if (cur_base_size.width != metadata.base_size.width ||
cur_base_size.height != metadata.base_size.height ||
- vps->surf->metadata.format != metadata.format) {
+ vmw_user_object_surface(&vps->uo)->metadata.format != metadata.format) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
}
- if (!vps->surf) {
+ if (!vmw_user_object_surface(&vps->uo)) {
ret = vmw_gb_surface_define(dev_priv, &metadata,
- &vps->surf);
+ &vps->uo.surface);
if (ret != 0) {
DRM_ERROR("Couldn't allocate STDU surface.\n");
return ret;
@@ -1042,18 +1065,19 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
* The only time we add a reference in prepare_fb is if the
* state object doesn't have a reference to begin with
*/
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
WARN_ON(vps->pinned != 0);
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
}
- vps->surf = vmw_surface_reference(new_vfbs->surface);
+ memcpy(&vps->uo, &new_vfbs->uo, sizeof(vps->uo));
+ vmw_user_object_ref(&vps->uo);
}
- if (vps->surf) {
+ if (vmw_user_object_surface(&vps->uo)) {
/* Pin new surface before flipping */
- ret = vmw_resource_pin(&vps->surf->res, false);
+ ret = vmw_resource_pin(&vmw_user_object_surface(&vps->uo)->res, false);
if (ret)
goto out_srf_unref;
@@ -1063,6 +1087,34 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
vps->content_fb_type = new_content_type;
/*
+ * The drm fb code will do blits via the vmap interface, which doesn't
+ * trigger vmw_bo page dirty tracking due to being kernel side (and thus
+ * doesn't require mmap'ing) so we have to update the surface's dirty
+ * regions by hand but we want to be careful to not overwrite the
+ * resource if it has been written to by the gpu (res_dirty).
+ */
+ if (vps->uo.buffer && vps->uo.buffer->is_dumb) {
+ struct vmw_surface *surf = vmw_user_object_surface(&vps->uo);
+ struct vmw_resource *res = &surf->res;
+
+ if (!res->res_dirty && drm_atomic_helper_damage_merged(old_state,
+ new_state,
+ &rect)) {
+ /*
+ * At some point it might be useful to actually translate
+ * (rect.x1, rect.y1) => start, and (rect.x2, rect.y2) => end,
+ * but currently the fb code will just report the entire fb
+ * dirty so in practice it doesn't matter.
+ */
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
+ pgoff_t end = __KERNEL_DIV_ROUND_UP(res->guest_memory_offset +
+ res->guest_memory_size,
+ PAGE_SIZE);
+ vmw_resource_dirty_update(res, start, end);
+ }
+ }
+
+ /*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
*/
@@ -1072,7 +1124,7 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
return 0;
out_srf_unref:
- vmw_surface_unreference(&vps->surf);
+ vmw_user_object_unref(&vps->uo);
return ret;
}
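The hand-rolled dirty update above converts the resource's byte range into page indices: shift for the inclusive start page, round up for the exclusive end page. A standalone sketch of the arithmetic with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        uint64_t offset = 8192, size = 5000;   /* illustrative range */

        uint64_t start = offset >> PAGE_SHIFT;                  /* page 2 */
        uint64_t end = DIV_ROUND_UP(offset + size, PAGE_SIZE);  /* page 4 */

        printf("dirty pages [%llu, %llu)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}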
@@ -1214,14 +1266,8 @@ static uint32_t
vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_update);
return size;
@@ -1230,14 +1276,8 @@ vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
- struct vmw_framebuffer_surface *vfbs;
uint32_t size = 0;
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- if (vfbs->is_bo_proxy)
- size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
-
size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
num_hits + sizeof(struct vmw_stdu_update);
@@ -1245,47 +1285,6 @@ static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
}
static uint32_t
-vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
-{
- struct vmw_framebuffer_surface *vfbs;
- struct drm_plane_state *state = update->plane->state;
- struct drm_plane_state *old_state = update->old_state;
- struct vmw_stdu_update_gb_image *cmd_update = cmd;
- struct drm_atomic_helper_damage_iter iter;
- struct drm_rect clip;
- uint32_t copy_size = 0;
-
- vfbs = container_of(update->vfb, typeof(*vfbs), base);
-
- /*
- * proxy surface is special where a buffer object type fb is wrapped
- * in a surface and need an update gb image command to sync with device.
- */
- drm_atomic_helper_damage_iter_init(&iter, old_state, state);
- drm_atomic_for_each_plane_damage(&iter, &clip) {
- SVGA3dBox *box = &cmd_update->body.box;
-
- cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
- cmd_update->header.size = sizeof(cmd_update->body);
- cmd_update->body.image.sid = vfbs->surface->res.id;
- cmd_update->body.image.face = 0;
- cmd_update->body.image.mipmap = 0;
-
- box->x = clip.x1;
- box->y = clip.y1;
- box->z = 0;
- box->w = drm_rect_width(&clip);
- box->h = drm_rect_height(&clip);
- box->d = 1;
-
- copy_size += sizeof(*cmd_update);
- cmd_update++;
- }
-
- return copy_size;
-}
-
-static uint32_t
vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
uint32_t num_hits)
{
@@ -1299,7 +1298,7 @@ vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
num_hits;
- cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.src.sid = vmw_user_object_surface(&vfbs->uo)->res.id;
cmd_copy->body.dest.sid = stdu->display_srf->res.id;
return sizeof(*cmd_copy);
@@ -1370,10 +1369,7 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
srf_update.mutex = &dev_priv->cmdbuf_mutex;
srf_update.intr = true;
- if (vfbs->is_bo_proxy)
- srf_update.post_prepare = vmw_stdu_surface_update_proxy;
-
- if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ if (vmw_user_object_surface(&vfbs->uo)->res.id != stdu->display_srf->res.id) {
srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
srf_update.pre_clip = vmw_stdu_surface_populate_copy;
srf_update.clip = vmw_stdu_surface_populate_clip;
@@ -1417,7 +1413,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- stdu->display_srf = vps->surf;
+ stdu->display_srf = vmw_user_object_surface(&vps->uo);
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e7a744dfce..8ae6a761c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -36,9 +37,6 @@
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
-#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
-#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
- (svga3d_flags & ((uint64_t)U32_MAX))
/**
* struct vmw_user_surface - User-space visible surface resource
@@ -686,6 +684,14 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
+
+ /*
+ * Dumb buffers own the resource and they'll unref the
+ * resource themselves
+ */
+ if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb)
+ return;
+
vmw_resource_unreference(&res);
}
@@ -812,7 +818,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
}
res->guest_memory_size = cur_bo_offset;
- if (metadata->scanout &&
+ if (!file_priv->atomic &&
+ metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT &&
@@ -864,6 +871,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
vmw_resource_unreference(&res);
goto out_unlock;
}
+ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
}
tmp = vmw_resource_reference(&srf->res);
@@ -892,6 +900,113 @@ out_unlock:
return ret;
}
+static struct vmw_user_surface *
+vmw_lookup_user_surface_for_buffer(struct vmw_private *vmw, struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf = NULL;
+ struct vmw_surface *surf;
+ struct ttm_base_object *base;
+
+ surf = vmw_bo_surface(bo);
+ if (surf) {
+ rcu_read_lock();
+ user_srf = container_of(surf, struct vmw_user_surface, srf);
+ base = &user_srf->prime.base;
+ if (base && !kref_get_unless_zero(&base->refcount)) {
+ drm_dbg_driver(&vmw->drm,
+ "%s: referencing a stale surface handle %d\n",
+ __func__, handle);
+ base = NULL;
+ user_srf = NULL;
+ }
+ rcu_read_unlock();
+ }
+
+ return user_srf;
+}
+
+struct vmw_surface *vmw_lookup_surface_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ struct vmw_surface *surf = NULL;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ surf = vmw_surface_reference(&user_srf->srf);
+ base = &user_srf->prime.base;
+ ttm_base_object_unref(&base);
+ }
+ return surf;
+}
+
+u32 vmw_lookup_surface_handle_for_buffer(struct vmw_private *vmw,
+ struct vmw_bo *bo,
+ u32 handle)
+{
+ struct vmw_user_surface *user_srf =
+ vmw_lookup_user_surface_for_buffer(vmw, bo, handle);
+ int surf_handle = 0;
+ struct ttm_base_object *base;
+
+ if (user_srf) {
+ base = &user_srf->prime.base;
+ surf_handle = (u32)base->handle;
+ ttm_base_object_unref(&base);
+ }
+ return surf_handle;
+}
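The lookup above is only safe because it refuses to hand out a surface whose last reference is already gone: under rcu_read_lock(), kref_get_unless_zero() either pins the object or proves it is mid-teardown, which is when the "stale surface handle" message fires. A userspace model of that guarantee, with C11 atomics standing in for kref (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct obj { atomic_int refcount; };

    /* Take a reference only if the count has not already hit zero,
     * which is the contract of kref_get_unless_zero() under RCU. */
    static bool get_unless_zero(struct obj *o)
    {
        int c = atomic_load(&o->refcount);

        while (c != 0) {
            /* try to move c -> c + 1; retry if a racer changed c */
            if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
                return true;
        }
        return false; /* object is already being torn down */
    }

    int main(void)
    {
        struct obj live = { .refcount = 1 };
        struct obj dead = { .refcount = 0 };

        printf("live: %d\n", get_unless_zero(&live)); /* 1: usable */
        printf("dead: %d\n", get_unless_zero(&dead)); /* 0: stale */
        return 0;
    }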
+
+static int vmw_buffer_prime_to_surface_base(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ u32 fd, u32 *handle,
+ struct ttm_base_object **base_p)
+{
+ struct ttm_base_object *base;
+ struct vmw_bo *bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_user_surface *user_srf;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to find user buffer for fd = %u.\n", fd);
+ return ret;
+ }
+
+ ret = vmw_user_bo_lookup(file_priv, *handle, &bo);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Wasn't able to lookup user buffer for handle = %u.\n", *handle);
+ return ret;
+ }
+
+ user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle);
+ if (WARN_ON(!user_srf)) {
+ drm_warn(&dev_priv->drm,
+ "User surface fd %d (handle %d) is null.\n", fd, *handle);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ base = &user_srf->prime.base;
+ ret = ttm_ref_object_add(tfile, base, NULL, false);
+ if (ret) {
+ drm_warn(&dev_priv->drm,
+ "Couldn't add an object ref for the buffer (%d).\n", *handle);
+ goto out;
+ }
+
+ *base_p = base;
+out:
+ vmw_user_bo_unref(&bo);
+
+ return ret;
+}
static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
@@ -901,15 +1016,19 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
struct ttm_base_object **base_p)
{
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_user_surface *user_srf;
+ struct vmw_user_surface *user_srf = NULL;
uint32_t handle;
struct ttm_base_object *base;
int ret;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
- if (unlikely(ret != 0))
- return ret;
+ if (ret)
+ return vmw_buffer_prime_to_surface_base(dev_priv,
+ file_priv,
+ u_handle,
+ &handle,
+ base_p);
} else {
handle = u_handle;
}
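The rewritten reference path above resolves a PRIME fd twice: first as a surface handle, and only when that fails as a GEM buffer whose attached dumb surface gets referenced instead. A compact model of that try-then-fall-back shape (both lookups are stand-ins, not driver API):

    #include <errno.h>
    #include <stdio.h>

    static int lookup_as_surface(int fd) { return fd == 1 ? 0 : -EINVAL; }
    static int lookup_as_buffer(int fd)  { return fd == 2 ? 0 : -EINVAL; }

    /* Primary path first; only its failure selects the fallback,
     * so surface fds keep their old, cheaper resolution. */
    static int resolve(int fd)
    {
        int ret = lookup_as_surface(fd);

        if (ret) /* fd may instead name a buffer-backed surface */
            ret = lookup_as_buffer(fd);
        return ret;
    }

    int main(void)
    {
        printf("fd 1 -> %d, fd 2 -> %d, fd 3 -> %d\n",
               resolve(1), resolve(2), resolve(3));
        return 0;
    }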
@@ -1503,7 +1622,12 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->guest_memory_bo);
if (ret == 0) {
- if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
+ if (res->guest_memory_bo->is_dumb) {
+ VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
+ vmw_user_bo_unref(&res->guest_memory_bo);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL;
@@ -1560,6 +1684,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
rep->handle = user_srf->prime.base.handle;
rep->backup_size = res->guest_memory_size;
if (res->guest_memory_bo) {
+ vmw_bo_add_detached_resource(res->guest_memory_bo, res);
rep->buffer_map_handle =
drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
rep->buffer_size = res->guest_memory_bo->tbo.base.size;
@@ -2100,3 +2225,140 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
out_unlock:
return ret;
}
+
+static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
+ int bpp)
+{
+ switch (bpp) {
+ case 8: /* DRM_FORMAT_C8 */
+ return SVGA3D_P8;
+ case 16: /* DRM_FORMAT_RGB565 */
+ return SVGA3D_R5G6B5;
+ case 32: /* DRM_FORMAT_XRGB8888 */
+ if (has_sm4_context(vmw))
+ return SVGA3D_B8G8R8X8_UNORM;
+ return SVGA3D_X8R8G8B8;
+ default:
+ drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
+ return SVGA3D_X8R8G8B8;
+ }
+}
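The helper above is the device-side half of a fixed bpp-to-format mapping; the DRM fourcc names in its comments are the userspace-visible half. A small sketch pairing the two (names only, purely illustrative):

    #include <stdio.h>

    /* Mirror of the bpp -> format switch above, using the DRM fourcc
     * names its comments reference; SVGA3D_* are the device-side
     * equivalents the driver selects. */
    static const char *bpp_to_drm_format(int bpp)
    {
        switch (bpp) {
        case 8:  return "DRM_FORMAT_C8";       /* -> SVGA3D_P8 */
        case 16: return "DRM_FORMAT_RGB565";   /* -> SVGA3D_R5G6B5 */
        case 32: return "DRM_FORMAT_XRGB8888"; /* -> SVGA3D_X8R8G8B8 */
        default: return "unsupported";         /* warn, fall back */
        }
    }

    int main(void)
    {
        int bpps[] = { 8, 16, 32, 24 };

        for (unsigned i = 0; i < sizeof(bpps) / sizeof(bpps[0]); i++)
            printf("%2d bpp -> %s\n", bpps[i], bpp_to_drm_format(bpps[i]));
        return 0;
    }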
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ * Return: Zero on success, negative error code on failure.
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_bo_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_bo *vbo = NULL;
+ struct vmw_resource *res = NULL;
+ union drm_vmw_gb_surface_create_ext_arg arg = { 0 };
+ struct drm_vmw_gb_surface_create_ext_req *req = &arg.req;
+ int ret;
+ struct drm_vmw_size drm_size = {
+ .width = args->width,
+ .height = args->height,
+ .depth = 1,
+ };
+ SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp);
+ const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
+ SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
+ SVGA3D_SURFACE_HINT_RENDERTARGET |
+ SVGA3D_SURFACE_SCREENTARGET |
+ SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+ SVGA3D_SURFACE_BIND_RENDER_TARGET;
+
+ /*
+ * Without mob support we're just going to use a raw memory buffer
+ * because we wouldn't be able to support full surface coherency
+ * without mobs
+ */
+ if (!dev_priv->has_mob) {
+ int cpp = DIV_ROUND_UP(args->bpp, 8);
+
+ switch (cpp) {
+ case 1: /* DRM_FORMAT_C8 */
+ case 2: /* DRM_FORMAT_RGB565 */
+ case 4: /* DRM_FORMAT_XRGB8888 */
+ break;
+ default:
+ /*
+ * Dumb buffers don't allow anything else.
+ * This is tested via IGT's dumb_buffers
+ */
+ return -EINVAL;
+ }
+
+ args->pitch = args->width * cpp;
+ args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+
+ ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
+ args->size, &args->handle,
+ &vbo);
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->tbo.base);
+ return ret;
+ }
+
+ req->version = drm_vmw_gb_surface_v1;
+ req->multisample_pattern = SVGA3D_MS_PATTERN_NONE;
+ req->quality_level = SVGA3D_MS_QUALITY_NONE;
+ req->buffer_byte_stride = 0;
+ req->must_be_zero = 0;
+ req->base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(flags);
+ req->svga3d_flags_upper_32_bits = SVGA3D_FLAGS_UPPER_32(flags);
+ req->base.format = (uint32_t)format;
+ req->base.drm_surface_flags = drm_vmw_surface_flag_scanout;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
+ req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
+ req->base.base_size.width = args->width;
+ req->base.base_size.height = args->height;
+ req->base.base_size.depth = 1;
+ req->base.array_size = 0;
+ req->base.mip_levels = 1;
+ req->base.multisample_count = 0;
+ req->base.buffer_handle = SVGA3D_INVALID_ID;
+ req->base.autogen_filter = SVGA3D_TEX_FILTER_NONE;
+ ret = vmw_gb_surface_define_ext_ioctl(dev, &arg, file_priv);
+ if (ret) {
+ drm_warn(dev, "Unable to create a dumb buffer\n");
+ return ret;
+ }
+
+ args->handle = arg.rep.buffer_handle;
+ args->size = arg.rep.buffer_size;
+ args->pitch = vmw_surface_calculate_pitch(desc, &drm_size);
+
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg.rep.handle,
+ user_surface_converter,
+ &res);
+ if (ret) {
+ drm_err(dev, "Created resource handle doesn't exist!\n");
+ goto err;
+ }
+
+ vbo = res->guest_memory_bo;
+ vbo->is_dumb = true;
+ vbo->dumb_surface = vmw_res_to_srf(res);
+
+err:
+ if (res)
+ vmw_resource_unreference(&res);
+ if (ret)
+ ttm_ref_object_base_unref(tfile, arg.rep.handle);
+
+ return ret;
+}
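From userspace none of this machinery is visible: a generic KMS client issues DRM_IOCTL_MODE_CREATE_DUMB and consumes only handle, pitch and size, while vmwgfx (with MOBs) now backs the buffer with a coherent surface. A minimal libdrm sketch, assuming /dev/dri/card0 is the vmwgfx node and libdrm headers are available:

    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Ask the driver's create_dumb callback for a 640x480 XRGB8888
     * buffer; the surface backing is an internal driver detail. */
    int main(void)
    {
        struct drm_mode_create_dumb creq = {
            .width = 640, .height = 480, .bpp = 32,
        };
        int fd = open("/dev/dri/card0", O_RDWR); /* node path is an assumption */

        if (fd < 0 || drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
            return 1;

        printf("handle=%u pitch=%u size=%llu\n",
               creq.handle, creq.pitch, (unsigned long long)creq.size);
        return 0;
    }

Build with -ldrm; the ioctl fills in the driver-chosen pitch and size, which on the MOB path come from vmw_surface_calculate_pitch() above.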
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index 7e93a45948..ac002048d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -76,7 +76,7 @@ done:
return ret;
}
-static int
+static void
compute_crc(struct drm_crtc *crtc,
struct vmw_surface *surf,
u32 *crc)
@@ -102,8 +102,6 @@ compute_crc(struct drm_crtc *crtc,
}
vmw_bo_unmap(bo);
-
- return 0;
}
static void
@@ -117,7 +115,6 @@ crc_generate_worker(struct work_struct *work)
u64 frame_start, frame_end;
u32 crc32 = 0;
struct vmw_surface *surf = 0;
- int ret;
spin_lock_irq(&du->vkms.crc_state_lock);
crc_pending = du->vkms.crc_pending;
@@ -131,22 +128,24 @@ crc_generate_worker(struct work_struct *work)
return;
spin_lock_irq(&du->vkms.crc_state_lock);
- surf = du->vkms.surface;
+ surf = vmw_surface_reference(du->vkms.surface);
spin_unlock_irq(&du->vkms.crc_state_lock);
- if (vmw_surface_sync(vmw, surf)) {
- drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n");
- return;
- }
+ if (surf) {
+ if (vmw_surface_sync(vmw, surf)) {
+ drm_warn(
+ crtc->dev,
+ "CRC worker wasn't able to sync the crc surface!\n");
+ return;
+ }
- ret = compute_crc(crtc, surf, &crc32);
- if (ret)
- return;
+ compute_crc(crtc, surf, &crc32);
+ vmw_surface_unreference(&surf);
+ }
spin_lock_irq(&du->vkms.crc_state_lock);
frame_start = du->vkms.frame_start;
frame_end = du->vkms.frame_end;
- crc_pending = du->vkms.crc_pending;
du->vkms.frame_start = 0;
du->vkms.frame_end = 0;
du->vkms.crc_pending = false;
@@ -165,7 +164,7 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer);
struct drm_crtc *crtc = &du->crtc;
struct vmw_private *vmw = vmw_priv(crtc->dev);
- struct vmw_surface *surf = NULL;
+ bool has_surface = false;
u64 ret_overrun;
bool locked, ret;
@@ -180,10 +179,10 @@ vmw_vkms_vblank_simulate(struct hrtimer *timer)
WARN_ON(!ret);
if (!locked)
return HRTIMER_RESTART;
- surf = du->vkms.surface;
+ has_surface = du->vkms.surface != NULL;
vmw_vkms_unlock(crtc);
- if (du->vkms.crc_enabled && surf) {
+ if (du->vkms.crc_enabled && has_surface) {
u64 frame = drm_crtc_accurate_vblank_count(crtc);
spin_lock(&du->vkms.crc_state_lock);
@@ -337,6 +336,8 @@ vmw_vkms_crtc_cleanup(struct drm_crtc *crtc)
{
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
WARN_ON(work_pending(&du->vkms.crc_generator_work));
hrtimer_cancel(&du->vkms.timer);
}
@@ -498,9 +499,12 @@ vmw_vkms_set_crc_surface(struct drm_crtc *crtc,
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
struct vmw_private *vmw = vmw_priv(crtc->dev);
- if (vmw->vkms_enabled) {
+ if (vmw->vkms_enabled && du->vkms.surface != surf) {
WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET);
- du->vkms.surface = surf;
+ if (du->vkms.surface)
+ vmw_surface_unreference(&du->vkms.surface);
+ if (surf)
+ du->vkms.surface = vmw_surface_reference(surf);
}
}
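The vkms fix above is the classic cached-pointer swap: drop the old reference, take one on the new object, and do nothing when the surface is unchanged, so the CRC worker can later take its own reference under the lock without racing teardown. A userspace model of that swap discipline (surf_ref/surf_unref are stand-ins for the driver helpers):

    #include <stdio.h>
    #include <stdlib.h>

    struct surf { int refs; };

    static struct surf *surf_ref(struct surf *s) { s->refs++; return s; }
    static void surf_unref(struct surf **s)
    {
        if (--(*s)->refs == 0)
            free(*s);
        *s = NULL;
    }

    /* Swap the cached pointer: old reference out, new reference in,
     * and a no-op when nothing changed. */
    static void set_cached(struct surf **cache, struct surf *new)
    {
        if (*cache == new)
            return;
        if (*cache)
            surf_unref(cache);
        if (new)
            *cache = surf_ref(new);
    }

    int main(void)
    {
        struct surf *a = calloc(1, sizeof(*a));
        struct surf *cache = NULL;

        a->refs = 1;
        set_cached(&cache, a);    /* cache holds its own reference */
        set_cached(&cache, a);    /* unchanged: no ref churn */
        set_cached(&cache, NULL); /* reference released */
        surf_unref(&a);           /* last reference gone, freed */
        printf("done\n");
        return 0;
    }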
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index bdb578e089..4b59687ff5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
mp2_ops->start(privdata, info);
cl_data->sensor_sts[i] = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
+
+ if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
+ cl_data->is_any_sensor_enabled = true;
+ }
+
+ if (!cl_data->is_any_sensor_enabled ||
+ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
+ cl_data->is_any_sensor_enabled);
+ rc = -EOPNOTSUPP;
+ goto cleanup;
}
for (i = 0; i < cl_data->num_hid_devices; i++) {
cl_data->cur_hid_dev = i;
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
- cl_data->is_any_sensor_enabled = true;
rc = amdtp_hid_probe(i, cl_data);
if (rc)
goto cleanup;
@@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
}
- if (!cl_data->is_any_sensor_enabled ||
- (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
- dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
- rc = -EOPNOTSUPP;
- goto cleanup;
- }
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
return 0;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index a44367aef6..20de97ce0f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -714,13 +714,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
case 0x8e2: /* IntuosHT2 pen */
case 0x022:
case 0x200: /* Pro Pen 3 */
- case 0x04200: /* Pro Pen 3 */
case 0x10842: /* MobileStudio Pro Pro Pen slim */
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
case 0x16802: /* Cintiq 13HD Pro Pen */
case 0x18802: /* DTH2242 Pen */
case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
- case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
+ case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */
tool_type = BTN_TOOL_PEN;
break;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 43952689bf..23627c973e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7491,8 +7491,8 @@ static int bnxt_get_avail_msix(struct bnxt *bp, int num);
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
+ int rx_rings, old_rx_rings, rc;
int cp = bp->cp_nr_rings;
- int rx_rings, rc;
int ulp_msix = 0;
bool sh = false;
int tx_cp;
@@ -7526,6 +7526,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.grp = bp->rx_nr_rings;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
@@ -7580,7 +7581,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
bnxt_set_dflt_rss_indir_tbl(bp, NULL);
if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 99a75a5907..caaa101579 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -765,18 +765,17 @@ static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
}
/**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
* @ring: Rx ring to use
*
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Sets XSK buff pool pointer on Rx ring.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- return ice_get_xp_from_qid(vsi, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -801,7 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
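Across the ice series, the pool pointer is published with WRITE_ONCE() on the configuration path and sampled once with READ_ONCE() at NAPI entry, then passed down by value, so a single poll never mixes old and new pools. The kernel macros are volatile accesses rather than atomics; C11 relaxed atomics are a rough userspace analogue of the same single-store/single-load intent:

    #include <stdatomic.h>
    #include <stdio.h>

    struct pool { int id; };
    struct ring { _Atomic(struct pool *) xsk_pool; };

    /* config path: publish the pool with one store (WRITE_ONCE) */
    static void cfg_set_pool(struct ring *r, struct pool *p)
    {
        atomic_store_explicit(&r->xsk_pool, p, memory_order_relaxed);
    }

    /* datapath: one snapshot per poll (READ_ONCE), passed down */
    static void napi_poll(struct ring *r)
    {
        struct pool *pool = atomic_load_explicit(&r->xsk_pool,
                                                 memory_order_relaxed);

        if (pool)
            printf("zero-copy clean with pool %d\n", pool->id);
        else
            printf("regular (non-XSK) path\n");
    }

    int main(void)
    {
        struct pool p = { .id = 7 };
        struct ring r = { NULL };

        napi_poll(&r);
        cfg_set_pool(&r, &p);
        napi_poll(&r);
        return 0;
    }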
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5d396c1a77..1facf179a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -536,7 +536,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return err;
}
- ring->xsk_pool = ice_xsk_pool(ring);
+ ice_rx_xsk_pool(ring);
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
@@ -597,7 +597,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
if (!ok) {
u16 pf_q = ring->vsi->rxq_map[ring->q_index];
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 55a42aad92..9b075dd488 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2949,7 +2949,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_pool)
+ if (READ_ONCE(rx_ring->xsk_pool))
napi_schedule(&rx_ring->q_vector->napi);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8bb743f78f..8d25b69812 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- rx_ring->xdp_prog = NULL;
+ WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
@@ -1521,10 +1521,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd;
- if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring);
+ if (xsk_pool)
+ wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1550,6 +1551,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned;
/* A dedicated path for zero-copy allows making a single
@@ -1557,7 +1559,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
cleaned = rx_ring->xsk_pool ?
- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a65955eb23..240a7bec24 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi)) {
- synchronize_rcu();
+ if (ice_is_xdp_ena_vsi(vsi))
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -112,25 +110,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* ice_qvec_cfg_msix - Enable IRQ for given queue vector
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
+ * @qid: queue index
*/
static void
-ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
+ int q, _qid = qid;
ice_cfg_itr(hw, q_vector);
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
- q_vector->tx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
+ _qid++;
+ }
+
+ _qid = qid;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
- q_vector->rx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
+ _qid++;
+ }
ice_flush(hw);
}
@@ -164,6 +166,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
int timeout = 50;
+ int fail = 0;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
@@ -180,15 +183,17 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
usleep_range(1000, 2000);
}
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
ice_qvec_dis_irq(vsi, rx_ring, q_vector);
ice_qvec_toggle_napi(vsi, q_vector, false);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -196,17 +201,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
&txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
}
- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
- if (err)
- return err;
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
- return 0;
+ return fail;
}
/**
@@ -219,40 +222,48 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
int err;
err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector);
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ /* make sure NAPI sees updated ice_{t,r}x_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
clear_bit(ICE_CFG_BUSY, vsi->state);
- return 0;
+ return fail;
}
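ice_qp_dis() and ice_qp_ena() above now run every step and merely record the first error instead of returning early, so a mid-sequence failure can no longer leave the queue pair half torn down or half configured. The shape of that pattern in isolation (the steps are dummies):

    #include <stdio.h>

    static int step(int rc) { return rc; }

    /* Run all steps unconditionally; remember only the first error. */
    static int bring_up(void)
    {
        int fail = 0, err;

        err = step(0);   /* cfg Tx */
        if (!fail)
            fail = err;
        err = step(-5);  /* cfg Rx fails... */
        if (!fail)
            fail = err;
        err = step(0);   /* ...but IRQs are still configured */
        if (!fail)
            fail = err;
        return fail;     /* first error wins: -5 */
    }

    int main(void)
    {
        printf("bring_up() = %d\n", bring_up());
        return 0;
    }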
/**
@@ -459,6 +470,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
/**
* __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -467,7 +479,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*
* Returns true if all allocations were successful, false if any fail.
*/
-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
@@ -479,8 +492,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp = ice_xdp_buf(rx_ring, ntu);
if (ntu + count >= rx_ring->count) {
- nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
- rx_desc,
+ nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
rx_ring->count - ntu);
if (nb_buffs_extra != rx_ring->count - ntu) {
ntu += nb_buffs_extra;
@@ -493,7 +505,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, 0);
}
- nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+ nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
ntu += nb_buffs;
if (ntu == rx_ring->count)
@@ -509,6 +521,7 @@ exit:
/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Wrapper for internal allocation routine; figure out how many tail
@@ -516,7 +529,8 @@ exit:
*
* Returns true if all calls to internal alloc routine succeeded
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
u16 leftover, i, tail_bumps;
@@ -525,9 +539,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
- if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
return false;
- return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
+ return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}
/**
@@ -596,8 +610,10 @@ out:
/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
*/
-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -648,7 +664,7 @@ skip:
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ xsk_tx_completed(xsk_pool, xsk_frames);
return completed_frames;
}
@@ -657,6 +673,7 @@ skip:
* ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
* @xdp: XDP buffer to xmit
* @xdp_ring: XDP ring to produce descriptor onto
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* note that this function works directly on xdp_buff, no need to convert
* it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -666,7 +683,8 @@ skip:
* was not enough space on XDP ring
*/
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
- struct ice_tx_ring *xdp_ring)
+ struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
@@ -680,7 +698,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
- free_space += ice_clean_xdp_irq_zc(xdp_ring);
+ free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
if (unlikely(!free_space))
goto busy;
@@ -700,7 +718,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
dma_addr_t dma;
dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
tx_buf->xdp = xdp;
tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -742,12 +760,14 @@ busy:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
int err, result = ICE_XDP_PASS;
u32 act;
@@ -758,7 +778,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (!err)
return ICE_XDP_REDIR;
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
result = ICE_XDP_EXIT;
else
result = ICE_XDP_CONSUMED;
@@ -769,7 +789,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -821,14 +841,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
* @budget: NAPI budget
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
struct xdp_buff *first = NULL;
@@ -891,7 +913,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
+ xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
@@ -940,7 +963,8 @@ construct_skb:
rx_ring->next_to_clean = ntc;
entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
- failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -963,17 +987,19 @@ construct_skb:
/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @desc: AF_XDP descriptor to pull the DMA address and length from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
unsigned int *total_bytes)
{
struct ice_tx_desc *tx_desc;
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -986,10 +1012,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
/**
* ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs,
unsigned int *total_bytes)
{
u16 ntu = xdp_ring->next_to_use;
@@ -999,8 +1028,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1016,60 +1045,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
/**
* ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
 * @nb_pkts: count of packets to be sent
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
- u32 nb_pkts, unsigned int *total_bytes)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs, u32 nb_pkts,
+ unsigned int *total_bytes)
{
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
leftover = nb_pkts & (PKTS_PER_BATCH - 1);
for (i = 0; i < batched; i += PKTS_PER_BATCH)
- ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
- ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
- struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
int budget;
- ice_clean_xdp_irq_zc(xdp_ring);
+ ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
+ !netif_running(xdp_ring->vsi->netdev))
+ return true;
budget = ICE_DESC_UNUSED(xdp_ring);
budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
- ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
+ &total_bytes);
xdp_ring->next_to_use = 0;
}
- ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
- &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
+ nb_pkts - nb_processed, &total_bytes);
ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
- if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
return nb_pkts < budget;
}
@@ -1091,7 +1129,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_VSI_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
@@ -1102,7 +1140,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
ring = vsi->rx_rings[queue_id]->xdp_ring;
- if (!ring->xsk_pool)
+ if (!READ_ONCE(ring->xsk_pool))
return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 6fa181f080..45adeb5132 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -20,16 +20,20 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool)
{
return false;
}
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
int __always_unused budget)
{
return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
u16 __always_unused count)
{
return false;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87b655b839..33069880c8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6310,21 +6310,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- switch (qopt->cmd) {
- case TAPRIO_CMD_REPLACE:
- break;
- case TAPRIO_CMD_DESTROY:
- return igc_tsn_clear_schedule(adapter);
- case TAPRIO_CMD_STATS:
- igc_taprio_stats(adapter->netdev, &qopt->stats);
- return 0;
- case TAPRIO_CMD_QUEUE_STATS:
- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-
if (qopt->base_time < 0)
return -ERANGE;
@@ -6433,7 +6418,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
- err = igc_save_qbv_schedule(adapter, qopt);
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = igc_save_qbv_schedule(adapter, qopt);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ err = igc_tsn_clear_schedule(adapter);
+ break;
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 9adf4301c9..a40b631188 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
struct mvpp2_port *port;
- int i;
+ int i, j;
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
if (port->priv->percpu_pools) {
- for (i = 0; i < port->nrxqs; i++)
- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
port->tx_fc & en);
} else {
mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
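The mvpp2 change fixes a reused loop counter: the inner per-rxq loop clobbered the outer per-port index i, so the port walk terminated after the first percpu-pools port. The bug in miniature:

    #include <stdio.h>

    int main(void)
    {
        int ports = 3, rxqs = 5, visited = 0;

        for (int i = 0; i < ports; i++) {  /* outer: ports */
            for (i = 0; i < rxqs; i++)     /* BUG: should be j */
                ;
            visited++;                     /* i is now 5 >= ports */
        }
        printf("ports visited: %d (expected 3)\n", visited); /* 1 */
        return 0;
    }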
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index fadfa8b50b..8c4e3ecef5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -920,6 +920,7 @@ err_rule:
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ *attr = *old_attr;
kfree(old_attr);
err_attr:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 6e00afe467..797db853de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
+ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3320f12ba2..58eb96a688 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1409,7 +1409,12 @@ static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ if (err) {
+ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
+ goto out;
+ }
+
mlx5_toggle_port_link(mdev);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 979c49ae6b..b43ca0b762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -207,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct devlink *devlink = priv_to_devlink(dev);
/* if this is the driver that initiated the fw reset, devlink completed the reload */
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
@@ -218,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
mlx5_load_one(dev, true);
- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ devl_lock(devlink);
+ devlink_remote_reload_actions_performed(devlink, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ devl_unlock(devlink);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec2..e2230c8f18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -48,6 +48,7 @@ static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
struct irq_affinity_desc auto_desc = {};
+ struct mlx5_irq *irq;
u32 irq_index;
int err;
@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
- return mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
- NULL);
+ irq = mlx5_irq_alloc(pool, irq_index,
+ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+ NULL);
+ if (IS_ERR(irq))
+ xa_erase(&pool->irqs, irq_index);
+ return irq;
}
/* Looking for the IRQ with the smallest refcount that fits req_mask.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index d0871c46b8..cf8045b926 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -1538,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
goto unlock;
for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
+ if (ldev->pf[i].netdev == slave) {
port = i;
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 459a836a5d..3e55a6c6a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -2140,7 +2140,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
/* Panic tear down fw command will stop the PCI bus communication
* with the HCA, so the health poll is no longer needed.
*/
- mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2175,6 +2174,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ mlx5_drain_health_wq(dev);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index b2986175d9..b706f14865 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -112,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
struct mlx5_core_dev *mdev = sf_dev->mdev;
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
mlx5_unload_one(mdev, false);
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 7b9e048845..b6e89fc5a4 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4347,7 +4347,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
- goto err_stop_0;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
opts[1] = rtl8169_tx_vlan_tag(skb);
@@ -4403,11 +4404,6 @@ err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-
-err_stop_0:
- netif_stop_queue(dev);
- dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
}
static unsigned int rtl_last_frag_len(struct sk_buff *skb)
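The r8169 rework restores the conventional ring-full contract: stop the queue and return NETDEV_TX_BUSY so the core requeues the skb, and stop counting it as tx_dropped, since a requeued packet is not lost. A userspace model of that contract (the BUSY value mirrors the kernel enum, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    #define NETDEV_TX_OK   0x00
    #define NETDEV_TX_BUSY 0x10

    struct dev { bool queue_stopped; unsigned long tx_dropped; };

    /* Ring full: stop the queue and hand the skb back untouched;
     * tx_dropped stays at zero because nothing was dropped. */
    static int start_xmit(struct dev *d, int slots_avail)
    {
        if (slots_avail == 0) {
            d->queue_stopped = true; /* netif_stop_queue() */
            return NETDEV_TX_BUSY;   /* core retries this skb */
        }
        return NETDEV_TX_OK;
    }

    int main(void)
    {
        struct dev d = { 0 };

        printf("ret=0x%02x stopped=%d dropped=%lu\n",
               start_xmit(&d, 0), d.queue_stopped, d.tx_dropped);
        return 0;
    }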
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index c29809cd92..fa510f4e26 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2219,9 +2219,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
napi_enable(&lp->napi_rx);
napi_enable(&lp->napi_tx);
+ axienet_setoptions(ndev, lp->options);
}
/**
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ebafedde0a..0803b6e83c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1389,6 +1389,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
const struct device *dev_walker;
int ret;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
dev_walker = &phydev->mdio.dev;
do {
of_node = dev_walker->of_node;
@@ -1438,28 +1440,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
#define MII_KSZ9131_AUTO_MDIX 0x1C
#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
+#define MII_KSZ9131_DIG_AXAN_STS 0x14
+#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
+#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
static int ksz9131_mdix_update(struct phy_device *phydev)
{
int ret;
- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
- if (ret < 0)
- return ret;
-
- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix_ctrl = ETH_TP_MDI;
- else
- phydev->mdix_ctrl = ETH_TP_MDI_X;
+ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
+ phydev->mdix = phydev->mdix_ctrl;
} else {
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- }
+ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
+ if (ret < 0)
+ return ret;
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix = ETH_TP_MDI;
- else
- phydev->mdix = ETH_TP_MDI_X;
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ }
+ }
return 0;
}
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 7ab41f95da..ffa07c3f04 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1351,6 +1351,13 @@ static struct phy_driver realtek_drvs[] = {
.handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc960),
+ .name = "RTL8366S Gigabit Ethernet",
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 0a662e42ed..cb7d2f798f 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
int rc = 0;
+ int err;
if (phy_id) {
netdev_dbg(netdev, "Only internal phy supported\n");
@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (loc == MII_BMSR) {
u8 value;
- sr_read_reg(dev, SR_NSR, &value);
+ err = sr_read_reg(dev, SR_NSR, &value);
+ if (err < 0)
+ return err;
+
if (value & NSR_LINKST)
rc = 1;
}
- sr_share_read_word(dev, 1, loc, &res);
+ err = sr_share_read_word(dev, 1, loc, &res);
+ if (err < 0)
+ return err;
+
if (rc == 1)
res = le16_to_cpu(res) | BMSR_LSTATUS;
else
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index c5e7ca793c..8fcfbde31a 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -18,6 +18,7 @@
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -37,7 +38,7 @@ struct qmc_hdlc {
struct qmc_chan *qmc_chan;
struct net_device *netdev;
struct framer *framer;
- spinlock_t carrier_lock; /* Protect carrier detection */
+ struct mutex carrier_lock; /* Protect carrier detection */
struct notifier_block nb;
bool is_crc32;
spinlock_t tx_lock; /* Protect tx descriptors */
@@ -60,7 +61,7 @@ static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
if (!qmc_hdlc->framer)
return 0;
- guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+ guard(mutex)(&qmc_hdlc->carrier_lock);
ret = framer_get_status(qmc_hdlc->framer, &framer_status);
if (ret) {
@@ -249,6 +250,7 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
struct qmc_hdlc_desc *desc = context;
struct net_device *netdev;
struct qmc_hdlc *qmc_hdlc;
+ size_t crc_size;
int ret;
netdev = desc->netdev;
@@ -267,15 +269,26 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
netdev->stats.rx_crc_errors++;
kfree_skb(desc->skb);
- } else {
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
+ goto re_queue;
+ }
- skb_put(desc->skb, length);
- desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
- netif_rx(desc->skb);
+ /* Discard the CRC */
+ crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
+ if (length < crc_size) {
+ netdev->stats.rx_length_errors++;
+ kfree_skb(desc->skb);
+ goto re_queue;
}
+ length -= crc_size;
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+re_queue:
/* Re-queue a transfer using the same descriptor */
ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
if (ret) {
@@ -706,7 +719,7 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
qmc_hdlc->dev = dev;
spin_lock_init(&qmc_hdlc->tx_lock);
- spin_lock_init(&qmc_hdlc->carrier_lock);
+ mutex_init(&qmc_hdlc->carrier_lock);
qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
if (IS_ERR(qmc_hdlc->qmc_chan))
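
Note: two independent fixes land in fsl_qmc_hdlc here. carrier_lock becomes a mutex because framer_get_status() may sleep, which a spinlock held with IRQs off cannot tolerate; and the receive path now validates the frame length before stripping the trailing CRC. A sketch of the CRC trim, with a hypothetical helper:

/* Sketch (hypothetical helper): reject runt frames shorter than
 * their own CRC before trimming it off the payload length.
 */
static int hdlc_strip_crc(size_t *length, bool is_crc32)
{
	size_t crc_size = is_crc32 ? 4 : 2;	/* CRC32 vs CRC16 */

	if (*length < crc_size)
		return -EINVAL;	/* caller counts rx_length_errors */

	*length -= crc_size;
	return 0;
}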
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 16af046c33..55fde0d331 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -472,7 +472,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
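
Note: using test_and_clear_bit() makes the ath12k disable path idempotent; only the caller that actually clears the flag proceeds to tear down the IRQ groups, so a repeated or concurrent call returns early. The shape of the pattern:

/* Sketch: exactly one caller wins the flag and performs the teardown. */
static void ext_irq_disable_once(struct ath12k_base *ab)
{
	int i;

	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;	/* already disabled elsewhere */

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		/* ... disable each IRQ group exactly once ... */
	}
}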
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b1d0a1b391..9d3c249207 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -485,7 +485,9 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
- pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC, status),
+
+ /* Attention and Power Indicator Control bits are supported */
+ pcie_write_cmd_nowait(ctrl, FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC);
pci_config_pm_runtime_put(pdev);
return 0;
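
Note: the old call shifted the raw status into the Attention Indicator field only, so the Power Indicator field covered by the write mask was cleared rather than set from the user-supplied value. FIELD_PREP() over the combined mask spreads the status across both fields; PCI_EXP_SLTCTL_AIC and PCI_EXP_SLTCTL_PIC are adjacent, so the combined mask stays contiguous:

/* Sketch: place the raw indicator status across both fields (bits 9:6). */
u16 mask = PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC;

pcie_write_cmd_nowait(ctrl,
		      FIELD_PREP(PCI_EXP_SLTCTL_AIC | PCI_EXP_SLTCTL_PIC, status),
		      mask);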
diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
index 72c2d3074c..98af97750a 100644
--- a/drivers/perf/fsl_imx9_ddr_perf.c
+++ b/drivers/perf/fsl_imx9_ddr_perf.c
@@ -476,12 +476,12 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
- if (flags & PERF_EF_START)
- ddr_perf_event_start(event, flags);
-
/* read trans, write trans, read beat */
ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+ if (flags & PERF_EF_START)
+ ddr_perf_event_start(event, flags);
+
return 0;
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 4e842dcedf..11c7c85047 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -412,7 +412,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
* but not in the user access mode as we want to use the other counters
* that support sampling/filtering.
*/
- if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
+ if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
cmask = 1;
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 945b1b15a0..f8242b8dda 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
if (ret == -ENOPROTOOPT) {
dev_dbg(ec_dev->dev,
"GET_NEXT_EVENT returned invalid version error.\n");
+ mutex_lock(&ec_dev->lock);
ret = cros_ec_get_host_command_version_mask(ec_dev,
EC_CMD_GET_NEXT_EVENT,
&ver_mask);
+ mutex_unlock(&ec_dev->lock);
if (ret < 0 || ver_mask == 0)
/*
* Do not change the MKBP supported version if we can't
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 60066822b5..350cd1201c 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1216,8 +1216,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- block_group->space_info->bytes_zone_unusable -=
- block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
+ -block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
@@ -1389,7 +1389,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- sinfo->bytes_zone_unusable -= cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -3034,9 +3035,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable =
- (cache->alloc_offset - cache->used) +
+ (cache->alloc_offset - cache->used - cache->pinned -
+ cache->reserved) +
(cache->length - cache->zone_capacity);
- sinfo->bytes_zone_unusable += cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3774c191e3..b75e14f399 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2806,7 +2806,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- space_info->bytes_zone_unusable += len;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
+ len);
readonly = true;
}
spin_unlock(&cache->lock);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index dabc3d0793..d674f21065 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2723,8 +2723,10 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
* If the block group is read-only, we should account freed space into
* bytes_readonly.
*/
- if (!block_group->ro)
+ if (!block_group->ro) {
block_group->zone_unusable += to_unusable;
+ WARN_ON(block_group->zone_unusable > block_group->length);
+ }
spin_unlock(&ctl->tree_lock);
if (!used) {
spin_lock(&block_group->lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3a2b902b2d..39d22693e4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -737,8 +737,9 @@ out:
return ret;
}
-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
- u64 end,
+static noinline int cow_file_range_inline(struct btrfs_inode *inode,
+ struct page *locked_page,
+ u64 offset, u64 end,
size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
@@ -762,7 +763,10 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
return ret;
}
- extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
+ if (ret == 0)
+ locked_page = NULL;
+
+ extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
clear_flags,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
@@ -1037,10 +1041,10 @@ again:
* extent for the subpage case.
*/
if (total_in < actual_end)
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, NULL, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
else
- ret = cow_file_range_inline(inode, start, end, total_compressed,
+ ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
compress_type, folios[0], false);
if (ret <= 0) {
if (ret < 0)
@@ -1359,7 +1363,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (!no_inline) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, locked_page, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
if (ret <= 0) {
/*
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index ae8c564425..8f194eefd3 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -311,7 +311,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
found->bytes_readonly += block_group->bytes_super;
- found->bytes_zone_unusable += block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
if (block_group->length > 0)
found->full = 0;
btrfs_try_granting_tickets(info, found);
@@ -573,8 +573,7 @@ again:
spin_lock(&cache->lock);
avail = cache->length - cache->used - cache->pinned -
- cache->reserved - cache->delalloc_bytes -
- cache->bytes_super - cache->zone_unusable;
+ cache->reserved - cache->bytes_super - cache->zone_unusable;
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
cache->start, cache->length, cache->used, cache->pinned,
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index a733458fd1..3e304300fc 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -207,6 +207,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
+DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c4941ba245..68df96800e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2016,6 +2016,8 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
* CHECK_CAPS_AUTHONLY - we should only check the auth cap
* CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
* further delay.
+ * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without
+ * further delay.
*/
void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
@@ -2097,7 +2099,7 @@ retry:
}
doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
- "flushing %s issued %s revoking %s retain %s %s%s%s\n",
+ "flushing %s issued %s revoking %s retain %s %s%s%s%s\n",
inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
@@ -2105,7 +2107,8 @@ retry:
ceph_cap_string(retain),
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
(flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
- (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
+ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "",
+ (flags & CHECK_CAPS_FLUSH_FORCE) ? " FLUSH_FORCE" : "");
/*
	 * If we no longer need to hold onto our old caps, and we may
@@ -2180,6 +2183,11 @@ retry:
queue_writeback = true;
}
+ if (flags & CHECK_CAPS_FLUSH_FORCE) {
+ doutc(cl, "force to flush caps\n");
+ goto ack;
+ }
+
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
@@ -3504,6 +3512,8 @@ static void handle_cap_grant(struct inode *inode,
bool queue_invalidate = false;
bool deleted_inode = false;
bool fill_inline = false;
+ bool revoke_wait = false;
+ int flags = 0;
/*
* If there is at least one crypto block then we'll trust
@@ -3699,16 +3709,18 @@ static void handle_cap_grant(struct inode *inode,
ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
ceph_cap_string(revoking));
if (S_ISREG(inode->i_mode) &&
- (revoking & used & CEPH_CAP_FILE_BUFFER))
+ (revoking & used & CEPH_CAP_FILE_BUFFER)) {
writeback = true; /* initiate writeback; will delay ack */
- else if (queue_invalidate &&
+ revoke_wait = true;
+ } else if (queue_invalidate &&
revoking == CEPH_CAP_FILE_CACHE &&
- (newcaps & CEPH_CAP_FILE_LAZYIO) == 0)
- ; /* do nothing yet, invalidation will be queued */
- else if (cap == ci->i_auth_cap)
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) {
+ revoke_wait = true; /* do nothing yet, invalidation will be queued */
+ } else if (cap == ci->i_auth_cap) {
check_caps = 1; /* check auth cap only */
- else
+ } else {
check_caps = 2; /* check all caps */
+ }
/* If there is new caps, try to wake up the waiters */
if (~cap->issued & newcaps)
wake = true;
@@ -3735,8 +3747,9 @@ static void handle_cap_grant(struct inode *inode,
BUG_ON(cap->issued & ~cap->implemented);
/* don't let check_caps skip sending a response to MDS for revoke msgs */
- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ if (!revoke_wait && le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
cap->mds_wanted = 0;
+ flags |= CHECK_CAPS_FLUSH_FORCE;
if (cap == ci->i_auth_cap)
check_caps = 1; /* check auth cap only */
else
@@ -3792,9 +3805,9 @@ static void handle_cap_grant(struct inode *inode,
mutex_unlock(&session->s_mutex);
if (check_caps == 1)
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
else if (check_caps == 2)
- ceph_check_caps(ci, CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
}
/*
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b63b4cd9b5..6e817bf133 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -200,9 +200,10 @@ struct ceph_cap {
struct list_head caps_item;
};
-#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
-#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
-#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
+#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
+#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */
struct ceph_cap_flush {
u64 tid;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 4bae9ccf5f..4b0d64a76e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -453,6 +453,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
#endif /* ES_AGGRESSIVE_TEST */
+static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
+ struct ext4_map_blocks *map)
+{
+ unsigned int status;
+ int retval;
+
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
+ else
+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
+
+ if (retval <= 0)
+ return retval;
+
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
+ }
+
+ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+ map->m_pblk, status);
+ return retval;
+}
+
/*
* The ext4_map_blocks() function tries to look up the requested blocks,
* and returns if the blocks are already mapped.
@@ -1708,6 +1737,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
if (ext4_es_is_hole(&es))
goto add_delayed;
+found:
/*
* Delayed extent could be allocated by fallocate.
* So we need to check it.
@@ -1744,36 +1774,34 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode))
retval = 0;
- else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
- retval = ext4_ind_map_blocks(NULL, inode, map, 0);
- if (retval < 0) {
- up_read(&EXT4_I(inode)->i_data_sem);
+ retval = ext4_map_query_blocks(NULL, inode, map);
+ up_read(&EXT4_I(inode)->i_data_sem);
+ if (retval)
return retval;
- }
- if (retval > 0) {
- unsigned int status;
- if (unlikely(retval != map->m_len)) {
- ext4_warning(inode->i_sb,
- "ES len assertion failed for inode "
- "%lu: retval %d != map->m_len %d",
- inode->i_ino, retval, map->m_len);
- WARN_ON(1);
+add_delayed:
+ down_write(&EXT4_I(inode)->i_data_sem);
+ /*
+ * Page fault path (ext4_page_mkwrite does not take i_rwsem)
+ * and fallocate path (no folio lock) can race. Make sure we
+ * look up the extent status tree here again while i_data_sem
+ * is held in write mode, before inserting a new da entry in
+ * the extent status tree.
+ */
+ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
+ if (!ext4_es_is_hole(&es)) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto found;
+ }
+ } else if (!ext4_has_inline_data(inode)) {
+ retval = ext4_map_query_blocks(NULL, inode, map);
+ if (retval) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ return retval;
}
-
- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
- map->m_pblk, status);
- up_read(&EXT4_I(inode)->i_data_sem);
- return retval;
}
- up_read(&EXT4_I(inode)->i_data_sem);
-add_delayed:
- down_write(&EXT4_I(inode)->i_data_sem);
retval = ext4_insert_delayed_block(inode, map->m_lblk);
up_write(&EXT4_I(inode)->i_data_sem);
if (retval)
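
Note: the restructured ext4_da_map_blocks() rechecks the extent status tree after upgrading from i_data_sem read to write, because ext4_page_mkwrite() (no i_rwsem) and fallocate (no folio lock) can race; an entry inserted by the other path must be reused instead of adding a duplicate delayed extent. The recheck-under-the-stronger-lock pattern in isolation:

/* Sketch: re-validate shared state after taking the write lock;
 * a racing path may have inserted the extent meanwhile.
 */
down_write(&EXT4_I(inode)->i_data_sem);
if (ext4_es_lookup_extent(inode, iblock, NULL, &es) &&
    !ext4_es_is_hole(&es)) {
	up_write(&EXT4_I(inode)->i_data_sem);
	goto found;	/* mapped by the racing path: reuse it */
}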
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 259e235bec..6018257852 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3483,7 +3483,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
if (page_private_gcing(fio->page)) {
if (fio->sbi->am.atgc_enabled &&
(fio->io_type == FS_DATA_IO) &&
- (fio->sbi->gc_mode != GC_URGENT_HIGH))
+ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
+ __is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !is_inode_flag_set(inode, FI_OPU_WRITE))
return CURSEG_ALL_DATA_ATGC;
else
return CURSEG_COLD_DATA;
diff --git a/fs/file.c b/fs/file.c
index a3b72aa64f..a11e59b5d6 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -1248,6 +1248,7 @@ __releases(&files->file_lock)
* tables and this condition does not arise without those.
*/
fdt = files_fdtable(files);
+ fd = array_index_nospec(fd, fdt->max_fds);
tofree = fdt->fd[fd];
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
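
Note: array_index_nospec() clamps the user-controlled fd under speculative execution, so a mispredicted bounds check cannot be exploited to read out-of-bounds fdtable memory (Spectre v1). The pattern in isolation:

/* Sketch: architectural check first, then the speculative clamp,
 * then the table access.
 */
if (fd >= fdt->max_fds)
	return -EBADF;				/* architectural bound */
fd = array_index_nospec(fd, fdt->max_fds);	/* speculation-safe bound */
tofree = fdt->fd[fd];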
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 7a5785f405..0a8fd4a3d0 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_IRQ_LOONGARCH_STARTING,
CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_IRQ_RISCV_IMSIC_STARTING,
+ CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c73ad77fa3..d73c7d89d2 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -132,18 +132,6 @@ static inline bool hugepage_global_always(void)
(1<<TRANSPARENT_HUGEPAGE_FLAG);
}
-static inline bool hugepage_flags_enabled(void)
-{
- /*
- * We cover both the anon and the file-backed case here; we must return
- * true if globally enabled, even when all anon sizes are set to never.
- * So we don't need to look at huge_anon_orders_inherit.
- */
- return hugepage_global_enabled() ||
- huge_anon_orders_always ||
- huge_anon_orders_madvise;
-}
-
static inline int highest_order(unsigned long orders)
{
return fls_long(orders) - 1;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 2ce13e8a30..9438cc7c2a 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -142,9 +142,16 @@ const struct movable_operations *page_movable_ops(struct page *page)
}
#ifdef CONFIG_NUMA_BALANCING
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node);
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int node);
#else
+static inline int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
+{
+ return -EAGAIN; /* can't migrate now */
+}
static inline int migrate_misplaced_folio(struct folio *folio,
struct vm_area_struct *vma, int node)
{
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index c978fa2893..9b38d015c5 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -2394,6 +2394,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
TP_ARGS(fs_info, sinfo, old, diff)
);
+DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,
+
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
+
+ TP_ARGS(fs_info, sinfo, old, diff)
+);
+
DECLARE_EVENT_CLASS(btrfs_raid56_bio,
TP_PROTO(const struct btrfs_raid_bio *rbio,
diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h
index 09e72215b9..085b749cdd 100644
--- a/include/trace/events/mptcp.h
+++ b/include/trace/events/mptcp.h
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
struct sock *ssk;
__entry->active = mptcp_subflow_active(subflow);
- __entry->backup = subflow->backup;
+ __entry->backup = subflow->backup || subflow->request_bkup;
if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
__entry->free = sk_stream_memory_free(subflow->tcp_sock);
diff --git a/init/Kconfig b/init/Kconfig
index febdea2afc..d8a971b804 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1906,6 +1906,7 @@ config RUST
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !RANDSTRUCT
+ depends on !SHADOW_CALL_STACK
depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
help
Enables Rust support in the kernel.
diff --git a/io_uring/poll.c b/io_uring/poll.c
index 0a8e029446..1f63b60e85 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -347,6 +347,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts)
v &= IO_POLL_REF_MASK;
} while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
+ io_napi_add(req);
return IOU_POLL_NO_ACTION;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 374a0d54b0..5f32a196a6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -517,6 +517,13 @@ static ssize_t thpsize_enabled_store(struct kobject *kobj,
} else
ret = -EINVAL;
+ if (ret > 0) {
+ int err;
+
+ err = start_stop_khugepaged();
+ if (err)
+ ret = err;
+ }
return ret;
}
@@ -1659,7 +1666,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int nid = NUMA_NO_NODE;
int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
- bool migrated = false, writable = false;
+ bool writable = false;
int flags = 0;
vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
@@ -1695,16 +1702,17 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
if (node_is_toptier(nid))
last_cpupid = folio_last_cpupid(folio);
target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
- if (target_nid == NUMA_NO_NODE) {
- folio_put(folio);
+ if (target_nid == NUMA_NO_NODE)
+ goto out_map;
+ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
+ flags |= TNF_MIGRATE_FAIL;
goto out_map;
}
-
+ /* The folio is isolated and isolation code holds a folio reference. */
spin_unlock(vmf->ptl);
writable = false;
- migrated = migrate_misplaced_folio(folio, vma, target_nid);
- if (migrated) {
+ if (!migrate_misplaced_folio(folio, vma, target_nid)) {
flags |= TNF_MIGRATED;
nid = target_nid;
} else {
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 774a97e6e2..92ecd59fff 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -416,6 +416,26 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
test_bit(MMF_DISABLE_THP, &mm->flags);
}
+static bool hugepage_pmd_enabled(void)
+{
+ /*
+ * We cover both the anon and the file-backed case here; file-backed
+ * hugepages, when configured in, are determined by the global control.
+ * Anon pmd-sized hugepages are determined by the pmd-size control.
+ */
+ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+ hugepage_global_enabled())
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_always))
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
+ return true;
+ if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
+ hugepage_global_enabled())
+ return true;
+ return false;
+}
+
void __khugepaged_enter(struct mm_struct *mm)
{
struct khugepaged_mm_slot *mm_slot;
@@ -452,7 +472,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- hugepage_flags_enabled()) {
+ hugepage_pmd_enabled()) {
if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
@@ -2465,8 +2485,7 @@ breakouterloop_mmap_lock:
static int khugepaged_has_work(void)
{
- return !list_empty(&khugepaged_scan.mm_head) &&
- hugepage_flags_enabled();
+ return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
}
static int khugepaged_wait_event(void)
@@ -2539,7 +2558,7 @@ static void khugepaged_wait_work(void)
return;
}
- if (hugepage_flags_enabled())
+ if (hugepage_pmd_enabled())
wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
@@ -2570,7 +2589,7 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!hugepage_flags_enabled()) {
+ if (!hugepage_pmd_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
@@ -2620,7 +2639,7 @@ int start_stop_khugepaged(void)
int err = 0;
mutex_lock(&khugepaged_mutex);
- if (hugepage_flags_enabled()) {
+ if (hugepage_pmd_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
@@ -2646,7 +2665,7 @@ fail:
void khugepaged_min_free_kbytes_update(void)
{
mutex_lock(&khugepaged_mutex);
- if (hugepage_flags_enabled() && khugepaged_thread)
+ if (hugepage_pmd_enabled() && khugepaged_thread)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
diff --git a/mm/memory.c b/mm/memory.c
index f81760c938..755ffe082e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5067,8 +5067,6 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
- folio_get(folio);
-
	/* Record the current PID accessing VMA */
vma_set_access_pid_bit(vma);
@@ -5205,16 +5203,19 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
else
last_cpupid = folio_last_cpupid(folio);
target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
- if (target_nid == NUMA_NO_NODE) {
- folio_put(folio);
+ if (target_nid == NUMA_NO_NODE)
+ goto out_map;
+ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
+ flags |= TNF_MIGRATE_FAIL;
goto out_map;
}
+ /* The folio is isolated and isolation code holds a folio reference. */
pte_unmap_unlock(vmf->pte, vmf->ptl);
writable = false;
ignore_writable = true;
/* Migrate to the requested node */
- if (migrate_misplaced_folio(folio, vma, target_nid)) {
+ if (!migrate_misplaced_folio(folio, vma, target_nid)) {
nid = target_nid;
flags |= TNF_MIGRATED;
} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index a8c6f466e3..9dabeb90f7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2557,16 +2557,44 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
return __folio_alloc_node(gfp, order, nid);
}
-static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
+/*
+ * Prepare for calling migrate_misplaced_folio() by isolating the folio if
+ * permitted. Must be called with the PTL still held.
+ */
+int migrate_misplaced_folio_prepare(struct folio *folio,
+ struct vm_area_struct *vma, int node)
{
int nr_pages = folio_nr_pages(folio);
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ if (folio_is_file_lru(folio)) {
+ /*
+ * Do not migrate file folios that are mapped in multiple
+ * processes with execute permissions as they are probably
+ * shared libraries.
+ *
+ * See folio_likely_mapped_shared() on possible imprecision
+ * when we cannot easily detect if a folio is shared.
+ */
+ if ((vma->vm_flags & VM_EXEC) &&
+ folio_likely_mapped_shared(folio))
+ return -EACCES;
+
+ /*
+ * Do not migrate dirty folios as not all filesystems can move
+ * dirty folios in MIGRATE_ASYNC mode which is a waste of
+ * cycles.
+ */
+ if (folio_test_dirty(folio))
+ return -EAGAIN;
+ }
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
int z;
if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
- return 0;
+ return -EAGAIN;
for (z = pgdat->nr_zones - 1; z >= 0; z--) {
if (managed_zone(pgdat->node_zones + z))
break;
@@ -2577,78 +2605,42 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
* further.
*/
if (z < 0)
- return 0;
+ return -EAGAIN;
wakeup_kswapd(pgdat->node_zones + z, 0,
folio_order(folio), ZONE_MOVABLE);
- return 0;
+ return -EAGAIN;
}
if (!folio_isolate_lru(folio))
- return 0;
+ return -EAGAIN;
node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
nr_pages);
-
- /*
- * Isolating the folio has taken another reference, so the
- * caller's reference can be safely dropped without the folio
- * disappearing underneath us during migration.
- */
- folio_put(folio);
- return 1;
+ return 0;
}
/*
* Attempt to migrate a misplaced folio to the specified destination
- * node. Caller is expected to have an elevated reference count on
- * the folio that will be dropped by this function before returning.
+ * node. Caller is expected to have isolated the folio by calling
+ * migrate_misplaced_folio_prepare(), which will result in an
+ * elevated reference count on the folio. This function will un-isolate
+ * the folio, dropping that reference before returning.
*/
int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int node)
{
pg_data_t *pgdat = NODE_DATA(node);
- int isolated;
int nr_remaining;
unsigned int nr_succeeded;
LIST_HEAD(migratepages);
- int nr_pages = folio_nr_pages(folio);
-
- /*
- * Don't migrate file folios that are mapped in multiple processes
- * with execute permissions as they are probably shared libraries.
- *
- * See folio_likely_mapped_shared() on possible imprecision when we
- * cannot easily detect if a folio is shared.
- */
- if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
- (vma->vm_flags & VM_EXEC))
- goto out;
-
- /*
- * Also do not migrate dirty folios as not all filesystems can move
- * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
- */
- if (folio_is_file_lru(folio) && folio_test_dirty(folio))
- goto out;
-
- isolated = numamigrate_isolate_folio(pgdat, folio);
- if (!isolated)
- goto out;
list_add(&folio->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
NULL, node, MIGRATE_ASYNC,
MR_NUMA_MISPLACED, &nr_succeeded);
- if (nr_remaining) {
- if (!list_empty(&migratepages)) {
- list_del(&folio->lru);
- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
- folio_is_file_lru(folio), -nr_pages);
- folio_putback_lru(folio);
- }
- isolated = 0;
- }
+ if (nr_remaining && !list_empty(&migratepages))
+ putback_movable_pages(&migratepages);
if (nr_succeeded) {
count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
@@ -2656,11 +2648,7 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
nr_succeeded);
}
BUG_ON(!list_empty(&migratepages));
- return isolated;
-
-out:
- folio_put(folio);
- return 0;
+ return nr_remaining ? -EAGAIN : 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_NUMA */
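
Note: the NUMA-balancing migration API is now two-step. migrate_misplaced_folio_prepare() isolates the folio while the PTL is still held, returning -EACCES or -EAGAIN for the old bail-out conditions, and migrate_misplaced_folio() consumes the isolation reference, returning 0 on success instead of a boolean. Callers follow this shape:

/* Sketch of the new calling convention (PTL held for the prepare step). */
if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
	flags |= TNF_MIGRATE_FAIL;	/* could not isolate */
	goto out_map;
}
/* Isolation holds its own folio reference; safe to drop the PTL. */
spin_unlock(vmf->ptl);
if (!migrate_misplaced_folio(folio, vma, target_nid))
	flags |= TNF_MIGRATED;		/* 0 now means success */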
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 7ae118a6d9..6ecb110bf4 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -120,13 +120,6 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
case DISCOVERY_STARTING:
break;
case DISCOVERY_FINDING:
- /* If discovery was not started then it was initiated by the
- * MGMT interface so no MGMT event shall be generated either
- */
- if (old_state != DISCOVERY_STARTING) {
- hdev->discovery.state = old_state;
- return;
- }
mgmt_discovering(hdev, 1);
break;
case DISCOVERY_RESOLVING:
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4611a67d7d..a78f6d706c 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1722,9 +1722,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
switch (enable) {
case LE_SCAN_ENABLE:
hci_dev_set_flag(hdev, HCI_LE_SCAN);
- if (hdev->le_scan_type == LE_SCAN_ACTIVE)
+ if (hdev->le_scan_type == LE_SCAN_ACTIVE) {
clear_pending_adv_report(hdev);
- hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ hci_discovery_set_state(hdev, DISCOVERY_FINDING);
+ }
break;
case LE_SCAN_DISABLE:
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index bb70408855..2f26147fdf 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -2929,6 +2929,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
*/
filter_policy = hci_update_accept_list_sync(hdev);
+ /* If suspended and filter_policy set to 0x00 (no acceptlist) then
+ * passive scanning cannot be started since that would require the host
+ * to be woken up to process the reports.
+ */
+ if (hdev->suspended && !filter_policy) {
+ /* Check if accept list is empty then there is no need to scan
+ * while suspended.
+ */
+ if (list_empty(&hdev->le_accept_list))
+ return 0;
+
+ /* If there are devices in the accept_list, it means some devices
+ * could not be programmed, which in the non-suspended case means
+ * filter_policy needs to be set to 0x00 so the host does the
+ * filtering. Since this handles the suspended case, we can ignore
+ * the host-filtering requirement so that devices on the accept
+ * list are still able to wake up the system.
+ */
+ filter_policy = 0x01;
+ }
+
/* When the controller is using random resolvable addresses and
* with that having LE privacy enabled, then controllers with
* Extended Scanner Filter Policies support can now enable support
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4668d67180..5e589f0a62 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3288,7 +3288,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
- dev = rtnl_dev_get(net, tb);
+ dev = rtnl_dev_get(tgt_net, tb);
else if (tb[IFLA_GROUP])
err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
else
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 223dcd25d8..fcc3dbef8b 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1277,11 +1277,11 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
u32 rss_cfg_offset = offsetof(struct ethtool_rxfh, rss_config[0]);
const struct ethtool_ops *ops = dev->ethtool_ops;
u32 dev_indir_size = 0, dev_key_size = 0, i;
+ u32 user_indir_len = 0, indir_bytes = 0;
struct ethtool_rxfh_param rxfh_dev = {};
struct netlink_ext_ack *extack = NULL;
struct ethtool_rxnfc rx_rings;
struct ethtool_rxfh rxfh;
- u32 indir_bytes = 0;
u8 *rss_config;
int ret;
@@ -1342,6 +1342,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
*/
if (rxfh.indir_size &&
rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) {
+ user_indir_len = indir_bytes;
rxfh_dev.indir = (u32 *)rss_config;
rxfh_dev.indir_size = dev_indir_size;
ret = ethtool_copy_validate_indir(rxfh_dev.indir,
@@ -1368,7 +1369,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
rxfh_dev.key_size = dev_key_size;
rxfh_dev.key = rss_config + indir_bytes;
if (copy_from_user(rxfh_dev.key,
- useraddr + rss_cfg_offset + indir_bytes,
+ useraddr + rss_cfg_offset + user_indir_len,
rxfh.key_size)) {
ret = -EFAULT;
goto out;
diff --git a/net/ethtool/rss.c b/net/ethtool/rss.c
index 71679137ef..5c4c4505ab 100644
--- a/net/ethtool/rss.c
+++ b/net/ethtool/rss.c
@@ -111,7 +111,8 @@ rss_reply_size(const struct ethnl_req_info *req_base,
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
int len;
- len = nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
+ len = nla_total_size(sizeof(u32)) + /* _RSS_CONTEXT */
+ nla_total_size(sizeof(u32)) + /* _RSS_HFUNC */
nla_total_size(sizeof(u32)) + /* _RSS_INPUT_XFRM */
nla_total_size(sizeof(u32) * data->indir_size) + /* _RSS_INDIR */
nla_total_size(data->hkey_size); /* _RSS_HKEY */
@@ -124,6 +125,11 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
const struct rss_reply_data *data = RSS_REPDATA(reply_base);
+ struct rss_req_info *request = RSS_REQINFO(req_base);
+
+ if (request->rss_context &&
+ nla_put_u32(skb, ETHTOOL_A_RSS_CONTEXT, request->rss_context))
+ return -EMSGSIZE;
if ((data->hfunc &&
nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 4d42d0756f..a5db7c67d6 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {
static int __init iptable_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv4_table,
- iptable_nat_table_init);
+ int ret;
+ /* net->gen->ptr[iptable_nat_net_id] must be allocated
+ * before calling iptable_nat_table_init().
+ */
+ ret = register_pernet_subsys(&iptable_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&iptable_nat_net_ops);
- if (ret < 0) {
- xt_unregister_template(&nf_nat_ipv4_table);
- return ret;
- }
+ ret = xt_register_template(&nf_nat_ipv4_table,
+ iptable_nat_table_init);
+ if (ret < 0)
+ unregister_pernet_subsys(&iptable_nat_net_ops);
return ret;
}
static void __exit iptable_nat_exit(void)
{
- unregister_pernet_subsys(&iptable_nat_net_ops);
xt_unregister_template(&nf_nat_ipv4_table);
+ unregister_pernet_subsys(&iptable_nat_net_ops);
}
module_init(iptable_nat_init);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 570e87ad9a..ecd5211085 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -754,8 +754,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
* <prev RTT . ><current RTT .. ><next RTT .... >
*/
- if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
u64 rcvwin, grow;
int rcvbuf;
@@ -771,12 +770,22 @@ void tcp_rcv_space_adjust(struct sock *sk)
rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
- if (rcvbuf > sk->sk_rcvbuf) {
- WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+ if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+ if (rcvbuf > sk->sk_rcvbuf) {
+ WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
- /* Make the window clamp follow along. */
- WRITE_ONCE(tp->window_clamp,
- tcp_win_from_space(sk, rcvbuf));
+ /* Make the window clamp follow along. */
+ WRITE_ONCE(tp->window_clamp,
+ tcp_win_from_space(sk, rcvbuf));
+ }
+ } else {
+ /* Make the window clamp follow along while being bounded
+ * by SO_RCVBUF.
+ */
+ int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));
+
+ if (clamp > tp->window_clamp)
+ WRITE_ONCE(tp->window_clamp, clamp);
}
}
tp->rcvq_space.space = copied;
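
Note: with SO_RCVBUF locked by the application, auto-tuning may no longer grow sk_rcvbuf, but the window clamp can still follow the measured receive space as long as it stays within what the locked buffer can back:

/* Sketch: clamp growth is bounded by the user-locked buffer. */
int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));

if (clamp > tp->window_clamp)
	WRITE_ONCE(tp->window_clamp, clamp);	/* paired with lockless readers */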
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index d914b23256..0282d15725 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
return NULL;
memset(ndopts, 0, sizeof(*ndopts));
while (opt_len) {
+ bool unknown = false;
int l;
if (opt_len < sizeof(struct nd_opt_hdr))
return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
break;
#endif
default:
- if (ndisc_is_useropt(dev, nd_opt)) {
- ndopts->nd_useropts_end = nd_opt;
- if (!ndopts->nd_useropts)
- ndopts->nd_useropts = nd_opt;
- } else {
- /*
- * Unknown options must be silently ignored,
- * to accommodate future extension to the
- * protocol.
- */
- ND_PRINTK(2, notice,
- "%s: ignored unsupported option; type=%d, len=%d\n",
- __func__,
- nd_opt->nd_opt_type,
- nd_opt->nd_opt_len);
- }
+ unknown = true;
+ }
+ if (ndisc_is_useropt(dev, nd_opt)) {
+ ndopts->nd_useropts_end = nd_opt;
+ if (!ndopts->nd_useropts)
+ ndopts->nd_useropts = nd_opt;
+ } else if (unknown) {
+ /*
+ * Unknown options must be silently ignored,
+ * to accommodate future extension to the
+ * protocol.
+ */
+ ND_PRINTK(2, notice,
+ "%s: ignored unsupported option; type=%d, len=%d\n",
+ __func__,
+ nd_opt->nd_opt_type,
+ nd_opt->nd_opt_len);
}
next_opt:
opt_len -= l;
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 52cf104e34..e119d4f090 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {
static int __init ip6table_nat_init(void)
{
- int ret = xt_register_template(&nf_nat_ipv6_table,
- ip6table_nat_table_init);
+ int ret;
+ /* net->gen->ptr[ip6table_nat_net_id] must be allocated
+ * before calling ip6table_nat_table_init().
+ */
+ ret = register_pernet_subsys(&ip6table_nat_net_ops);
if (ret < 0)
return ret;
- ret = register_pernet_subsys(&ip6table_nat_net_ops);
+ ret = xt_register_template(&nf_nat_ipv6_table,
+ ip6table_nat_table_init);
if (ret)
- xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
return ret;
}
static void __exit ip6table_nat_exit(void)
{
- unregister_pernet_subsys(&ip6table_nat_net_ops);
xt_unregister_template(&nf_nat_ipv6_table);
+ unregister_pernet_subsys(&ip6table_nat_net_ops);
}
module_init(ip6table_nat_init);
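
Note: both the IPv4 and IPv6 NAT table modules now register in dependency order. The pernet subsystem allocates the per-netns pointer that the template init callback dereferences, so it must be registered first, and module exit unwinds in the reverse order. The generic shape, with hypothetical names:

/* Sketch (hypothetical names): register in dependency order,
 * unwind in reverse on failure and on module exit.
 */
static int __init nat_mod_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nat_net_ops);	/* allocates per-net state */
	if (ret < 0)
		return ret;

	ret = xt_register_template(&nat_table, nat_table_init);
	if (ret < 0)
		unregister_pernet_subsys(&nat_net_ops);	/* unwind */
	return ret;
}

static void __exit nat_mod_exit(void)
{
	xt_unregister_template(&nat_table);		/* reverse order */
	unregister_pernet_subsys(&nat_net_ops);
}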
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c3b0b610b0..c00323fa9e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
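
Note: xchg() turns the path reset into an ownership claim; whichever context atomically swaps iucv->path to NULL is the one that severs and frees it, so two racing teardown paths cannot double-free. The idiom in isolation:

/* Sketch: atomically claim the pointer; only the winner frees it. */
struct iucv_path *path = xchg(&iucv->path, NULL);

if (path) {
	/* This context owns the path now: sever and free it. */
}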
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 83ad6c9709..87a7b569cc 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -114,7 +114,7 @@ static int ieee80211_set_mon_options(struct ieee80211_sub_if_data *sdata,
/* apply all changes now - no failures allowed */
- if (monitor_sdata)
+ if (monitor_sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
ieee80211_set_mu_mimo_follow(monitor_sdata, params);
if (params->flags) {
@@ -3038,6 +3038,9 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return -EOPNOTSUPP;
+
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
if (!sdata)
@@ -3100,7 +3103,7 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy,
if (has_monitor) {
sdata = wiphy_dereference(local->hw.wiphy,
local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
sdata->deflink.user_power_level = local->user_power_level;
if (txp_type != sdata->vif.bss_conf.txpower_type)
update_txp_type = true;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 72a9ba8bc5..edba4a3184 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1768,7 +1768,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
sdata = rcu_dereference(local->monitor_sdata);
- if (sdata) {
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
@@ -3957,7 +3957,8 @@ begin:
break;
}
tx.sdata = rcu_dereference(local->monitor_sdata);
- if (tx.sdata) {
+ if (tx.sdata &&
+ ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
vif = &tx.sdata->vif;
info->hw_queue =
vif->hw_queue[skb_get_queue_mapping(skb)];
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 771c05640a..c11dbe82ae 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -776,7 +776,7 @@ static void __iterate_interfaces(struct ieee80211_local *local,
sdata = rcu_dereference_check(local->monitor_sdata,
lockdep_is_held(&local->iflist_mtx) ||
lockdep_is_held(&local->hw.wiphy->mtx));
- if (sdata &&
+ if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
sdata->flags & IEEE80211_SDATA_IN_DRIVER))
iterator(data, sdata->vif.addr, &sdata->vif);
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index c30405e768..7884217f33 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -19,7 +19,9 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPTCPRetrans", MPTCP_MIB_RETRANSSEGS),
SNMP_MIB_ITEM("MPJoinNoTokenFound", MPTCP_MIB_JOINNOTOKEN),
SNMP_MIB_ITEM("MPJoinSynRx", MPTCP_MIB_JOINSYNRX),
+ SNMP_MIB_ITEM("MPJoinSynBackupRx", MPTCP_MIB_JOINSYNBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckRx", MPTCP_MIB_JOINSYNACKRX),
+ SNMP_MIB_ITEM("MPJoinSynAckBackupRx", MPTCP_MIB_JOINSYNACKBACKUPRX),
SNMP_MIB_ITEM("MPJoinSynAckHMacFailure", MPTCP_MIB_JOINSYNACKMAC),
SNMP_MIB_ITEM("MPJoinAckRx", MPTCP_MIB_JOINACKRX),
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 2704afd0df..66aa67f49d 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -14,7 +14,9 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_RETRANSSEGS, /* Segments retransmitted at the MPTCP-level */
MPTCP_MIB_JOINNOTOKEN, /* Received MP_JOIN but the token was not found */
MPTCP_MIB_JOINSYNRX, /* Received a SYN + MP_JOIN */
+ MPTCP_MIB_JOINSYNBACKUPRX, /* Received a SYN + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKRX, /* Received a SYN/ACK + MP_JOIN */
+ MPTCP_MIB_JOINSYNACKBACKUPRX, /* Received a SYN/ACK + MP_JOIN + backup flag */
MPTCP_MIB_JOINSYNACKMAC, /* HMAC was wrong on SYN/ACK + MP_JOIN */
MPTCP_MIB_JOINACKRX, /* Received an ACK + MP_JOIN */
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 8e8dcfbc29..8a68382a4f 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -909,7 +909,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
return true;
} else if (subflow_req->mp_join) {
opts->suboptions = OPTION_MPTCP_MPJ_SYNACK;
- opts->backup = subflow_req->backup;
+ opts->backup = subflow_req->request_bkup;
opts->join_id = subflow_req->local_id;
opts->thmac = subflow_req->thmac;
opts->nonce = subflow_req->local_nonce;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 55406720c6..23bb89c94e 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -426,6 +426,18 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
return mptcp_pm_nl_get_local_id(msk, &skc_local);
}
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc)
+{
+ struct mptcp_addr_info skc_local;
+
+ mptcp_local_address((struct sock_common *)skc, &skc_local);
+
+ if (mptcp_pm_is_userspace(msk))
+ return mptcp_userspace_pm_is_backup(msk, &skc_local);
+
+ return mptcp_pm_nl_is_backup(msk, &skc_local);
+}
+
int mptcp_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id,
u8 *flags, int *ifindex)
{
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index ea9e5817b9..37954a0b08 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -471,7 +471,6 @@ static void __mptcp_pm_send_ack(struct mptcp_sock *msk, struct mptcp_subflow_con
slow = lock_sock_fast(ssk);
if (prio) {
subflow->send_mp_prio = 1;
- subflow->backup = backup;
subflow->request_bkup = backup;
}
@@ -1102,6 +1101,24 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc
return ret;
}
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc)
+{
+ struct pm_nl_pernet *pernet = pm_nl_get_pernet_from_msk(msk);
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, entry->addr.port)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return backup;
+}
+
#define MPTCP_PM_CMD_GRP_OFFSET 0
#define MPTCP_PM_EV_GRP_OFFSET 1
@@ -1401,6 +1418,7 @@ static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
ret = remove_anno_list_by_saddr(msk, addr);
if (ret || force) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= ret;
mptcp_pm_remove_addr(msk, &list);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1534,16 +1552,25 @@ void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
{
struct mptcp_rm_list alist = { .nr = 0 };
struct mptcp_pm_addr_entry *entry;
+ int anno_nr = 0;
list_for_each_entry(entry, rm_list, list) {
- if ((remove_anno_list_by_saddr(msk, &entry->addr) ||
- lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) &&
- alist.nr < MPTCP_RM_IDS_MAX)
- alist.ids[alist.nr++] = entry->addr.id;
+ if (alist.nr >= MPTCP_RM_IDS_MAX)
+ break;
+
+ /* only delete if either announced or matching a subflow */
+ if (remove_anno_list_by_saddr(msk, &entry->addr))
+ anno_nr++;
+ else if (!lookup_subflow_by_saddr(&msk->conn_list,
+ &entry->addr))
+ continue;
+
+ alist.ids[alist.nr++] = entry->addr.id;
}
if (alist.nr) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= anno_nr;
mptcp_pm_remove_addr(msk, &alist);
spin_unlock_bh(&msk->pm.lock);
}
@@ -1556,17 +1583,18 @@ static void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
struct mptcp_pm_addr_entry *entry;
list_for_each_entry(entry, rm_list, list) {
- if (lookup_subflow_by_saddr(&msk->conn_list, &entry->addr) &&
- slist.nr < MPTCP_RM_IDS_MAX)
+ if (slist.nr < MPTCP_RM_IDS_MAX &&
+ lookup_subflow_by_saddr(&msk->conn_list, &entry->addr))
slist.ids[slist.nr++] = entry->addr.id;
- if (remove_anno_list_by_saddr(msk, &entry->addr) &&
- alist.nr < MPTCP_RM_IDS_MAX)
+ if (alist.nr < MPTCP_RM_IDS_MAX &&
+ remove_anno_list_by_saddr(msk, &entry->addr))
alist.ids[alist.nr++] = entry->addr.id;
}
if (alist.nr) {
spin_lock_bh(&msk->pm.lock);
+ msk->pm.add_addr_signaled -= alist.nr;
mptcp_pm_remove_addr(msk, &alist);
spin_unlock_bh(&msk->pm.lock);
}
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index f0a4590506..8eaa9fbe3e 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -165,6 +165,24 @@ int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk,
return mptcp_userspace_pm_append_new_local_addr(msk, &new_entry, true);
}
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk,
+ struct mptcp_addr_info *skc)
+{
+ struct mptcp_pm_addr_entry *entry;
+ bool backup = false;
+
+ spin_lock_bh(&msk->pm.lock);
+ list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) {
+ if (mptcp_addresses_equal(&entry->addr, skc, false)) {
+ backup = !!(entry->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
+ break;
+ }
+ }
+ spin_unlock_bh(&msk->pm.lock);
+
+ return backup;
+}
+
int mptcp_pm_nl_announce_doit(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index bb7dca8aa2..ff8292d0cf 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -350,8 +350,10 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
skb_orphan(skb);
/* try to fetch required memory from subflow */
- if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
+ if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
goto drop;
+ }
has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
@@ -844,10 +846,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
sk_rbuf = ssk_rbuf;
	/* over limit? can't append more skbs to msk; also, no need to wake up */
- if (__mptcp_rmem(sk) > sk_rbuf) {
- MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED);
+ if (__mptcp_rmem(sk) > sk_rbuf)
return;
- }
/* Wake-up the reader only for in-sequence data */
mptcp_data_lock(sk);
@@ -1422,13 +1422,15 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
mptcp_for_each_subflow(msk, subflow) {
+ bool backup = subflow->backup || subflow->request_bkup;
+
trace_mptcp_subflow_get_send(subflow);
ssk = mptcp_subflow_tcp_sock(subflow);
if (!mptcp_subflow_active(subflow))
continue;
tout = max(tout, mptcp_timeout_from_subflow(subflow));
- nr_active += !subflow->backup;
+ nr_active += !backup;
pace = subflow->avg_pacing_rate;
if (unlikely(!pace)) {
/* init pacing rate from socket */
@@ -1439,9 +1441,9 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
- if (linger_time < send_info[subflow->backup].linger_time) {
- send_info[subflow->backup].ssk = ssk;
- send_info[subflow->backup].linger_time = linger_time;
+ if (linger_time < send_info[backup].linger_time) {
+ send_info[backup].ssk = ssk;
+ send_info[backup].linger_time = linger_time;
}
}
__mptcp_set_timeout(sk, tout);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 7aa47e2dd5..8357046732 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -443,6 +443,7 @@ struct mptcp_subflow_request_sock {
u16 mp_capable : 1,
mp_join : 1,
backup : 1,
+ request_bkup : 1,
csum_reqd : 1,
allow_join_id0 : 1;
u8 local_id;
@@ -1103,6 +1104,9 @@ bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_pm_is_backup(struct mptcp_sock *msk, struct sock_common *skc);
+bool mptcp_pm_nl_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
+bool mptcp_userspace_pm_is_backup(struct mptcp_sock *msk, struct mptcp_addr_info *skc);
int mptcp_pm_dump_addr(struct sk_buff *msg, struct netlink_callback *cb);
int mptcp_pm_nl_dump_addr(struct sk_buff *msg,
struct netlink_callback *cb);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 612c38570a..c330946384 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -100,6 +100,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
return NULL;
}
subflow_req->local_id = local_id;
+ subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
return msk;
}
@@ -168,6 +169,9 @@ static int subflow_check_req(struct request_sock *req,
return 0;
} else if (opt_mp_join) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
+
+ if (mp_opt.backup)
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
}
if (opt_mp_capable && listener->request_mptcp) {
@@ -577,6 +581,9 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->mp_join = 1;
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
+ if (subflow->backup)
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
+
if (subflow_use_different_dport(msk, sk)) {
pr_debug("synack inet_dport=%d %d",
ntohs(inet_sk(sk)->inet_dport),
@@ -614,6 +621,8 @@ static int subflow_chk_local_id(struct sock *sk)
return err;
subflow_set_local_id(subflow, err);
+ subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
+
return 0;
}
@@ -1221,14 +1230,22 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
- u32 incr;
+ struct tcp_sock *tp = tcp_sk(ssk);
+ u32 offset, incr, avail_len;
- incr = limit >= skb->len ? skb->len + fin : limit;
+ offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
+ if (WARN_ON_ONCE(offset > skb->len))
+ goto out;
- pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
- subflow->map_subflow_seq);
+ avail_len = skb->len - offset;
+ incr = limit >= avail_len ? avail_len + fin : limit;
+
+ pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
+ offset, subflow->map_subflow_seq);
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
tcp_sk(ssk)->copied_seq += incr;
+
+out:
if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
sk_eat_skb(ssk, skb);
if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
@@ -2005,6 +2022,7 @@ static void subflow_ulp_clone(const struct request_sock *req,
new_ctx->fully_established = 1;
new_ctx->remote_key_valid = 1;
new_ctx->backup = subflow_req->backup;
+ new_ctx->request_bkup = subflow_req->request_bkup;
WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
new_ctx->token = subflow_req->token;
new_ctx->thmac = subflow_req->thmac;
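The rewritten discard path derives the still-unread span from copied_seq - seq and leans on before() for wraparound-safe sequence comparison. A userspace sketch of that arithmetic; before() matches the kernel's definition in include/net/tcp.h, while the scenario values are made up to straddle the 32-bit wrap.

#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's before(): signed subtraction makes the
 * comparison robust across 32-bit sequence-number wraparound. */
static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	uint32_t seq        = 0xfffffff0u; /* skb starts just before wrap */
	uint32_t end_seq    = 0x00000010u; /* 32 bytes, ends after wrap */
	uint32_t copied_seq = 0xfffffff8u; /* 8 bytes already consumed */

	uint32_t offset    = copied_seq - seq;         /* wraps to 8 */
	uint32_t avail_len = (end_seq - seq) - offset; /* 24 left */

	printf("offset=%u avail=%u fully_consumed=%d\n",
	       offset, avail_len, !before(copied_seq, end_seq));
	return 0;
}

Plain relational operators would get both results wrong here; the unsigned subtraction plus signed cast is what keeps offset and the consumed check correct across the wrap.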
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 6fa3cca87d..9d451d77d5 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -44,6 +44,8 @@ static DEFINE_MUTEX(zones_mutex);
struct zones_ht_key {
struct net *net;
u16 zone;
+ /* Note: pad[] must be the last field. */
+ u8 pad[];
};
struct tcf_ct_flow_table {
@@ -60,7 +62,7 @@ struct tcf_ct_flow_table {
static const struct rhashtable_params zones_params = {
.head_offset = offsetof(struct tcf_ct_flow_table, node),
.key_offset = offsetof(struct tcf_ct_flow_table, key),
- .key_len = sizeof_field(struct tcf_ct_flow_table, key),
+ .key_len = offsetof(struct zones_ht_key, pad),
.automatic_shrinking = true,
};
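Keying the rhashtable on offsetof(struct zones_ht_key, pad) instead of the field's size keeps compiler-inserted padding out of the hashed bytes. A standalone sketch of the idiom, using a hypothetical struct shaped like zones_ht_key:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A pointer followed by a u16 leaves 6 bytes of tail padding on LP64;
 * the zero-cost pad[] marker records where the real key ends. */
struct ht_key {
	void *net;
	uint16_t zone;
	uint8_t pad[]; /* must stay the last field */
};

int main(void)
{
	printf("sizeof(struct ht_key) = %zu\n", sizeof(struct ht_key));
	printf("offsetof(ht_key, pad) = %zu\n",
	       offsetof(struct ht_key, pad));
	/* Hashing sizeof() bytes would mix in uninitialized padding, so
	 * logically equal keys could hash into different buckets; hashing
	 * only up to pad avoids that. */
	return 0;
}

On LP64 this prints 16 and 10: the six-byte difference is exactly the padding the act_ct fix stops feeding to the hash function.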
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0222ede0fe..292b530a6d 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -3136,8 +3136,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
struct ieee80211_mgmt *mgmt, size_t len,
gfp_t gfp)
{
- size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
+ size_t min_hdr_len;
struct ieee80211_ext *ext = NULL;
enum cfg80211_bss_frame_type ftype;
u16 beacon_interval;
@@ -3160,10 +3159,16 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
ext = (void *) mgmt;
- min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
min_hdr_len = offsetof(struct ieee80211_ext,
u.s1g_short_beacon.variable);
+ else
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ } else {
+ /* beacons use the same fixed header as probe responses */
+ min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
}
if (WARN_ON(len < min_hdr_len))
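The fix defers picking min_hdr_len until the frame variant is known, using offsetof(..., variable) as "bytes before the variable-length IEs". A hedged sketch of that validation with a made-up frame layout; the flexible array members inside a union mirror the kernel's mgmt headers and rely on GNU C:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_frame {
	uint16_t frame_control;
	union {
		struct { uint32_t timestamp; uint8_t variable[]; } beacon;
		struct { uint16_t cap;       uint8_t variable[]; } short_beacon;
	} u;
};

static int parse(const uint8_t *buf, size_t len, int is_short)
{
	size_t min_hdr_len;

	/* choose the fixed-header length for the variant actually seen */
	if (is_short)
		min_hdr_len = offsetof(struct demo_frame,
				       u.short_beacon.variable);
	else
		min_hdr_len = offsetof(struct demo_frame, u.beacon.variable);

	if (len < min_hdr_len)
		return -1; /* cannot even hold the fixed header */
	(void)buf; /* IE parsing would start at buf + min_hdr_len */
	return 0;
}

int main(void)
{
	uint8_t raw[7] = { 0 };

	/* 7 bytes fit the short variant's header but not the full one */
	printf("short ok=%d, full ok=%d\n",
	       parse(raw, sizeof(raw), 1), parse(raw, sizeof(raw), 0));
	return 0;
}

Validating with one variant's header length while parsing another either over- or under-checks the frame; computing min_hdr_len per variant, as the fix does, avoids both failure modes.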
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a8ad55f111..1cfe673bc5 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -1045,6 +1045,7 @@ void cfg80211_connect_done(struct net_device *dev,
cfg80211_hold_bss(
bss_from_pub(params->links[link].bss));
ev->cr.links[link].bss = params->links[link].bss;
+ ev->cr.links[link].status = params->links[link].status;
if (params->links[link].addr) {
ev->cr.links[link].addr = next;
diff --git a/sound/core/seq/seq_ump_convert.c b/sound/core/seq/seq_ump_convert.c
index e90b27a135..d9dacfbe4a 100644
--- a/sound/core/seq/seq_ump_convert.c
+++ b/sound/core/seq/seq_ump_convert.c
@@ -1192,44 +1192,53 @@ static int cvt_sysex_to_ump(struct snd_seq_client *dest,
{
struct snd_seq_ump_event ev_cvt;
unsigned char status;
- u8 buf[6], *xbuf;
+ u8 buf[8], *xbuf;
int offset = 0;
int len, err;
+ bool finished = false;
if (!snd_seq_ev_is_variable(event))
return 0;
setup_ump_event(&ev_cvt, event);
- for (;;) {
+ while (!finished) {
len = snd_seq_expand_var_event_at(event, sizeof(buf), buf, offset);
if (len <= 0)
break;
- if (WARN_ON(len > 6))
+ if (WARN_ON(len > sizeof(buf)))
break;
- offset += len;
+
xbuf = buf;
+ status = UMP_SYSEX_STATUS_CONTINUE;
+ /* strip the sysex start-marker */
if (*xbuf == UMP_MIDI1_MSG_SYSEX_START) {
status = UMP_SYSEX_STATUS_START;
- xbuf++;
len--;
- if (len > 0 && xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ offset++;
+ xbuf++;
+ }
+
+ /* if the last byte of this packet or the first byte of the next
+ * packet is the end-marker, finish the transfer with this packet
+ */
+ if (len > 0 && len < 8 &&
+ xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ if (status == UMP_SYSEX_STATUS_START)
status = UMP_SYSEX_STATUS_SINGLE;
- len--;
- }
- } else {
- if (xbuf[len - 1] == UMP_MIDI1_MSG_SYSEX_END) {
+ else
status = UMP_SYSEX_STATUS_END;
- len--;
- } else {
- status = UMP_SYSEX_STATUS_CONTINUE;
- }
+ len--;
+ finished = true;
}
+
+ len = min(len, 6);
fill_sysex7_ump(dest_port, ev_cvt.ump, status, xbuf, len);
err = __snd_seq_deliver_single_event(dest, dest_port,
(struct snd_seq_event *)&ev_cvt,
atomic, hop);
if (err < 0)
return err;
+ offset += len;
}
return 0;
}
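The converter now peeks up to 8 bytes per pass so an end-marker sitting just past a 6-byte payload is seen early, then emits at most 6 data bytes per SysEx7 packet with a START/CONTINUE/END/SINGLE status. A standalone sketch of that chunking decision; the constants and emit() stand in for the ALSA UMP helpers and are not the real API:

#include <stdint.h>
#include <stdio.h>

enum { ST_SINGLE, ST_START, ST_CONT, ST_END }; /* illustrative statuses */
#define SYX_START 0xf0
#define SYX_END   0xf7

/* Emit one packet of at most 6 bytes; stands in for fill_sysex7_ump(). */
static void emit(int status, const uint8_t *data, int len)
{
	printf("status=%d len=%d data:", status, len);
	for (int i = 0; i < len; i++)
		printf(" %02x", data[i]);
	printf("\n");
}

static void convert(const uint8_t *msg, int total)
{
	int offset = 0, finished = 0;

	while (!finished && offset < total) {
		/* peek up to 8 bytes, like the 8-byte buf[] above */
		int len = total - offset > 8 ? 8 : total - offset;
		const uint8_t *xbuf = msg + offset;
		int status = ST_CONT;

		if (*xbuf == SYX_START) { /* strip the start-marker */
			status = ST_START;
			len--; offset++; xbuf++;
		}
		/* end-marker inside this window: last data packet */
		if (len > 0 && len < 8 && xbuf[len - 1] == SYX_END) {
			status = (status == ST_START) ? ST_SINGLE : ST_END;
			len--;
			finished = 1;
		}
		if (len > 6)
			len = 6; /* SysEx7 carries at most 6 data bytes */
		emit(status, xbuf, len);
		offset += len;
	}
}

int main(void)
{
	const uint8_t msg[] = { 0xf0, 1, 2, 3, 4, 5, 6, 7, 8, 0xf7 };
	convert(msg, sizeof(msg));
	return 0;
}

With a 6-byte window, as in the old code, the end-marker after byte 8 would only be seen one iteration late; the 8-byte lookahead lets the second packet be tagged ST_END immediately.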
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 1a163bbcab..c827d7d8d8 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -77,6 +77,8 @@
// overrun. An actual device may skip more; this module then stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES 5
+static void pcm_period_work(struct work_struct *work);
+
/**
* amdtp_stream_init - initialize an AMDTP stream structure
* @s: the AMDTP stream to initialize
@@ -105,6 +107,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
+ INIT_WORK(&s->period_work, pcm_period_work);
s->packet_index = 0;
init_waitqueue_head(&s->ready_wait);
@@ -347,6 +350,7 @@ EXPORT_SYMBOL(amdtp_stream_get_max_payload);
*/
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
+ cancel_work_sync(&s->period_work);
s->pcm_buffer_pointer = 0;
s->pcm_period_pointer = 0;
}
@@ -611,19 +615,21 @@ static void update_pcm_pointers(struct amdtp_stream *s,
// The program in the user process should periodically check the status of the
// intermediate buffer associated with the PCM substream and process the PCM
// frames there, instead of waiting for a period-elapsed notification via poll.
- if (!pcm->runtime->no_period_wakeup) {
- if (in_softirq()) {
- // In software IRQ context for 1394 OHCI.
- snd_pcm_period_elapsed(pcm);
- } else {
- // In process context of ALSA PCM application under acquired lock of
- // PCM substream.
- snd_pcm_period_elapsed_under_stream_lock(pcm);
- }
- }
+ if (!pcm->runtime->no_period_wakeup)
+ queue_work(system_highpri_wq, &s->period_work);
}
}
+static void pcm_period_work(struct work_struct *work)
+{
+ struct amdtp_stream *s = container_of(work, struct amdtp_stream,
+ period_work);
+ struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
+
+ if (pcm)
+ snd_pcm_period_elapsed(pcm);
+}
+
static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
bool sched_irq)
{
@@ -1849,11 +1855,14 @@ unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
{
struct amdtp_stream *irq_target = d->irq_target;
- // Process isochronous packets queued till recent isochronous cycle to handle PCM frames.
if (irq_target && amdtp_stream_running(irq_target)) {
- // In software IRQ context, the call causes dead-lock to disable the tasklet
- // synchronously.
- if (!in_softirq())
+ // Use the workqueue to avoid an AB/BA deadlock on the
+ // substream lock:
+ // fw_iso_context_flush_completions() takes the lock
+ // via ohci_flush_iso_completions(), while amdtp-stream's
+ // process_rx_packets() tries to take the same lock
+ // via snd_pcm_period_elapsed()
+ if (current_work() != &s->period_work)
fw_iso_context_flush_completions(irq_target->context);
}
@@ -1909,6 +1918,7 @@ static void amdtp_stream_stop(struct amdtp_stream *s)
return;
}
+ cancel_work_sync(&s->period_work);
fw_iso_context_stop(s->context);
fw_iso_context_destroy(s->context);
s->context = ERR_PTR(-1);
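The amdtp-stream changes move the period notification into a work item and use current_work() to avoid flushing completions from inside that same item. A minimal, hypothetical module sketching just the workqueue pattern, not the actual driver wiring:

// Hypothetical sketch: defer a notification to process context and
// guard against re-entering the flush path from the work item itself.
#include <linux/module.h>
#include <linux/workqueue.h>

static struct work_struct period_work;

static void period_work_fn(struct work_struct *work)
{
	/* amdtp-stream calls snd_pcm_period_elapsed(pcm) here */
	pr_info("period elapsed, process context\n");
}

static void pointer_query(void)
{
	/* flushing completions from inside the work item would retake
	 * the same lock; skip the flush in that case */
	if (current_work() != &period_work)
		pr_info("not in the work item: safe to flush completions\n");
}

static int __init wq_demo_init(void)
{
	INIT_WORK(&period_work, period_work_fn);
	queue_work(system_highpri_wq, &period_work);
	pointer_query();
	return 0;
}

static void __exit wq_demo_exit(void)
{
	cancel_work_sync(&period_work);
}

module_init(wq_demo_init);
module_exit(wq_demo_exit);
MODULE_DESCRIPTION("workqueue deferral sketch");
MODULE_LICENSE("GPL");

cancel_work_sync() on teardown mirrors the calls added in amdtp_stream_pcm_prepare() and amdtp_stream_stop(): the work must never run against a stream that is being reset or destroyed.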
diff --git a/sound/firewire/amdtp-stream.h b/sound/firewire/amdtp-stream.h
index a1ed2e80f9..775db3fc49 100644
--- a/sound/firewire/amdtp-stream.h
+++ b/sound/firewire/amdtp-stream.h
@@ -191,6 +191,7 @@ struct amdtp_stream {
/* For a PCM substream processing. */
struct snd_pcm_substream *pcm;
+ struct work_struct period_work;
snd_pcm_uframes_t pcm_buffer_pointer;
unsigned int pcm_period_pointer;
unsigned int pcm_frame_multiplier;
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index c2d0109866..68c883f202 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -28,7 +28,7 @@
#else
#define AZX_DCAPS_I915_COMPONENT 0 /* NOP */
#endif
-/* 14 unused */
+#define AZX_DCAPS_AMD_ALLOC_FIX (1 << 14) /* AMD allocation workaround */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3500108f6b..87203b819d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -40,6 +40,7 @@
#ifdef CONFIG_X86
/* for snoop control */
+#include <linux/dma-map-ops.h>
#include <asm/set_memory.h>
#include <asm/cpufeature.h>
#endif
@@ -306,7 +307,7 @@ enum {
/* quirks for ATI HDMI with snoop off */
#define AZX_DCAPS_PRESET_ATI_HDMI_NS \
- (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_AMD_ALLOC_FIX)
/* quirks for AMD SB */
#define AZX_DCAPS_PRESET_AMD_SB \
@@ -1702,6 +1703,13 @@ static void azx_check_snoop_available(struct azx *chip)
if (chip->driver_caps & AZX_DCAPS_SNOOP_OFF)
snoop = false;
+#ifdef CONFIG_X86
+ /* disable snooping when no DMA ops (i.e. no IOMMU) are in use */
+ if ((chip->driver_caps & AZX_DCAPS_AMD_ALLOC_FIX) &&
+ !get_dma_ops(chip->card->dev))
+ snoop = false;
+#endif
+
chip->snoop = snoop;
if (!snoop) {
dev_info(chip->card->dev, "Force to non-snoop mode\n");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 17389a3801..4472923ba6 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -21,12 +21,6 @@
#include "hda_jack.h"
#include "hda_generic.h"
-enum {
- CX_HEADSET_NOPRESENT = 0,
- CX_HEADSET_PARTPRESENT,
- CX_HEADSET_ALLPRESENT,
-};
-
struct conexant_spec {
struct hda_gen_spec gen;
@@ -48,7 +42,6 @@ struct conexant_spec {
unsigned int gpio_led;
unsigned int gpio_mute_led_mask;
unsigned int gpio_mic_led_mask;
- unsigned int headset_present_flag;
bool is_cx8070_sn6140;
};
@@ -250,48 +243,19 @@ static void cx_process_headset_plugin(struct hda_codec *codec)
}
}
-static void cx_update_headset_mic_vref(struct hda_codec *codec, unsigned int res)
+static void cx_update_headset_mic_vref(struct hda_codec *codec, struct hda_jack_callback *event)
{
- unsigned int phone_present, mic_persent, phone_tag, mic_tag;
- struct conexant_spec *spec = codec->spec;
+ unsigned int mic_present;
/* In cx8070 and sn6140, node 0x16 can only be configured as headphone or disabled,
* and node 0x19 can only be configured as microphone or disabled.
* Check the hp & mic pin sense to process headset plugin & plugout.
*/
- phone_tag = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
- mic_tag = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_UNSOLICITED_RESPONSE, 0x0);
- if ((phone_tag & (res >> AC_UNSOL_RES_TAG_SHIFT)) ||
- (mic_tag & (res >> AC_UNSOL_RES_TAG_SHIFT))) {
- phone_present = snd_hda_codec_read(codec, 0x16, 0, AC_VERB_GET_PIN_SENSE, 0x0);
- if (!(phone_present & AC_PINSENSE_PRESENCE)) {/* headphone plugout */
- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
- snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
- return;
- }
- if (spec->headset_present_flag == CX_HEADSET_NOPRESENT) {
- spec->headset_present_flag = CX_HEADSET_PARTPRESENT;
- } else if (spec->headset_present_flag == CX_HEADSET_PARTPRESENT) {
- mic_persent = snd_hda_codec_read(codec, 0x19, 0,
- AC_VERB_GET_PIN_SENSE, 0x0);
- /* headset is present */
- if ((phone_present & AC_PINSENSE_PRESENCE) &&
- (mic_persent & AC_PINSENSE_PRESENCE)) {
- cx_process_headset_plugin(codec);
- spec->headset_present_flag = CX_HEADSET_ALLPRESENT;
- }
- }
- }
-}
-
-static void cx_jack_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- struct conexant_spec *spec = codec->spec;
-
- if (spec->is_cx8070_sn6140)
- cx_update_headset_mic_vref(codec, res);
-
- snd_hda_jack_unsol_event(codec, res);
+ mic_present = snd_hda_codec_read(codec, 0x19, 0, AC_VERB_GET_PIN_SENSE, 0x0);
+ if (!(mic_present & AC_PINSENSE_PRESENCE)) /* mic plugout */
+ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20);
+ else
+ cx_process_headset_plugin(codec);
}
static int cx_auto_suspend(struct hda_codec *codec)
@@ -305,7 +269,7 @@ static const struct hda_codec_ops cx_auto_patch_ops = {
.build_pcms = snd_hda_gen_build_pcms,
.init = cx_auto_init,
.free = cx_auto_free,
- .unsol_event = cx_jack_unsol_event,
+ .unsol_event = snd_hda_jack_unsol_event,
.suspend = cx_auto_suspend,
.check_power_status = snd_hda_gen_check_power_status,
};
@@ -1163,7 +1127,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
case 0x14f11f86:
case 0x14f11f87:
spec->is_cx8070_sn6140 = true;
- spec->headset_present_flag = CX_HEADSET_NOPRESENT;
+ snd_hda_jack_detect_enable_callback(codec, 0x19, cx_update_headset_mic_vref);
break;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d749769438..a6c1e2199e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9866,6 +9866,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
+ SND_PCI_QUIRK(0x1025, 0x100c, "Acer Aspire E5-574G", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index d5409f3879..e14c725ace 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -244,8 +244,8 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
SNDRV_CHMAP_FR, /* right front */
SNDRV_CHMAP_FC, /* center front */
SNDRV_CHMAP_LFE, /* LFE */
- SNDRV_CHMAP_SL, /* left surround */
- SNDRV_CHMAP_SR, /* right surround */
+ SNDRV_CHMAP_RL, /* left surround */
+ SNDRV_CHMAP_RR, /* right surround */
SNDRV_CHMAP_FLC, /* left of center */
SNDRV_CHMAP_FRC, /* right of center */
SNDRV_CHMAP_RC, /* surround */
diff --git a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
index 9b4a032186..7149caec4f 100644
--- a/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/andes/ax45/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
index a9939823b1..0c9b9a2d29 100644
--- a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
@@ -74,7 +74,7 @@
{
"PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
"ConfigCode": "0x800000000000000c",
- "EventName": "FW_SFENCE_VMA_RECEIVED",
+ "EventName": "FW_SFENCE_VMA_ASID_SENT",
"BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
},
{
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
index 9b4a032186..7149caec4f 100644
--- a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
index 9b4a032186..7149caec4f 100644
--- a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
index 9b4a032186..7149caec4f 100644
--- a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
+++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json
@@ -36,7 +36,7 @@
"ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
},
{
- "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_SENT"
},
{
"ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 1730b852a9..6d075648d2 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1141,7 +1141,7 @@ int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *samp
int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
bool hide_unresolved)
{
- struct machine *machine = maps__machine(node->ms.maps);
+ struct machine *machine = node->ms.maps ? maps__machine(node->ms.maps) : NULL;
maps__put(al->maps);
al->maps = maps__get(node->ms.maps);
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index d2043ec3bf..4209b95690 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -1115,11 +1115,11 @@ again:
return 1;
}
- if (--cfg_repeat > 0) {
- if (cfg_input)
- close(fd);
+ if (cfg_input)
+ close(fd);
+
+ if (--cfg_repeat > 0)
goto again;
- }
return 0;
}
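The selftest now closes fd unconditionally before deciding whether to repeat, so the descriptor is also released on the last pass. A tiny reduction of that fix; the open()/goto shape is illustrative:

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical reduction of the loop above: release the descriptor on
 * every pass, then test whether to repeat, so the final iteration no
 * longer leaks it. */
int main(void)
{
	int repeat = 3;
	int fd;

again:
	fd = open("/dev/null", O_RDONLY);
	if (fd < 0)
		return 1;
	/* ... transfer data over fd ... */

	close(fd); /* now also runs on the final iteration */

	if (--repeat > 0)
		goto again;
	return 0;
}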
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index 108aeeb84e..7043984b7e 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -661,7 +661,7 @@ pm_nl_check_endpoint()
done
if [ -z "${id}" ]; then
- test_fail "bad test - missing endpoint id"
+ fail_test "bad test - missing endpoint id"
return
fi
@@ -1634,6 +1634,8 @@ chk_prio_nr()
{
local mp_prio_nr_tx=$1
local mp_prio_nr_rx=$2
+ local mpj_syn=$3
+ local mpj_syn_ack=$4
local count
print_check "ptx"
@@ -1655,6 +1657,26 @@ chk_prio_nr()
else
print_ok
fi
+
+ print_check "syn backup"
+ count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynBackupRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mpj_syn" ]; then
+ fail_test "got $count JOIN[s] syn with Backup expected $mpj_syn"
+ else
+ print_ok
+ fi
+
+ print_check "synack backup"
+ count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckBackupRx")
+ if [ -z "$count" ]; then
+ print_skip
+ elif [ "$count" != "$mpj_syn_ack" ]; then
+ fail_test "got $count JOIN[s] synack with Backup expected $mpj_syn_ack"
+ else
+ print_ok
+ fi
}
chk_subflow_nr()
@@ -2612,33 +2634,46 @@ backup_tests()
sflags=nobackup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
fi
# single address, backup
if reset "single address, backup" &&
continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 1
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup
+ pm_nl_set_limits $ns2 1 1
+ sflags=nobackup speed=slow \
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ chk_add_nr 1 1
+ chk_prio_nr 1 0 0 1
+ fi
+
+ # single address, switch to backup
+ if reset "single address, switch to backup" &&
+ continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
+ pm_nl_set_limits $ns1 0 1
pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
pm_nl_set_limits $ns2 1 1
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_add_nr 1 1
- chk_prio_nr 1 1
+ chk_prio_nr 1 1 0 0
fi
# single address with port, backup
if reset "single address with port, backup" &&
continue_if mptcp_lib_kallsyms_has "subflow_rebuild_header$"; then
pm_nl_set_limits $ns1 0 1
- pm_nl_add_endpoint $ns1 10.0.2.1 flags signal port 10100
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal,backup port 10100
pm_nl_set_limits $ns2 1 1
- sflags=backup speed=slow \
+ sflags=nobackup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 1
chk_add_nr 1 1
- chk_prio_nr 1 1
+ chk_prio_nr 1 0 0 1
fi
if reset "mpc backup" &&
@@ -2647,17 +2682,26 @@ backup_tests()
speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 0 0
fi
if reset "mpc backup both sides" &&
continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
- pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_set_limits $ns1 0 2
+ pm_nl_set_limits $ns2 1 2
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags signal,backup
pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+
+ # 10.0.2.2 (non-backup) -> 10.0.1.1 (backup)
+ pm_nl_add_endpoint $ns2 10.0.2.2 flags subflow
+ # 10.0.1.2 (backup) -> 10.0.2.1 (non-backup)
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+ ip -net "$ns2" route add 10.0.2.1 via 10.0.1.1 dev ns2eth1 # force this path
+
speed=slow \
run_tests $ns1 $ns2 10.0.1.1
- chk_join_nr 0 0 0
- chk_prio_nr 1 1
+ chk_join_nr 2 2 2
+ chk_prio_nr 1 1 1 1
fi
if reset "mpc switch to backup" &&
@@ -2666,7 +2710,7 @@ backup_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 0 0
fi
if reset "mpc switch to backup both sides" &&
@@ -2676,7 +2720,7 @@ backup_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 0 0 0
- chk_prio_nr 1 1
+ chk_prio_nr 1 1 0 0
fi
}
@@ -3053,7 +3097,7 @@ fullmesh_tests()
addr_nr_ns2=1 sflags=backup,fullmesh speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
chk_rm_nr 0 1
fi
@@ -3066,7 +3110,7 @@ fullmesh_tests()
sflags=nobackup,nofullmesh speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 2 2 2
- chk_prio_nr 0 1
+ chk_prio_nr 0 1 1 0
chk_rm_nr 0 1
fi
}
@@ -3318,7 +3362,7 @@ userspace_tests()
sflags=backup speed=slow \
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr 1 1 0
- chk_prio_nr 0 0
+ chk_prio_nr 0 0 0 0
fi
# userspace pm type prevents rm_addr