402 files changed, 3697 insertions, 2060 deletions
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
index 1fe9a553c3..f0025d1c3d 100644
--- a/Documentation/ABI/stable/sysfs-block
+++ b/Documentation/ABI/stable/sysfs-block
@@ -101,6 +101,16 @@ Description:
 		devices that support receiving integrity metadata.
 
 
+What:		/sys/block/<disk>/partscan
+Date:		May 2024
+Contact:	Christoph Hellwig <hch@lst.de>
+Description:
+		The /sys/block/<disk>/partscan files reports if partition
+		scanning is enabled for the disk. It returns "1" if partition
+		scanning is enabled, or "0" if not. The value type is a 32-bit
+		unsigned integer, but only "0" and "1" are valid values.
+
+
 What:		/sys/block/<disk>/<partition>/alignment_offset
 Date:		April 2009
 Contact:	Martin K. Petersen <martin.petersen@oracle.com>
diff --git a/Documentation/admin-guide/hw-vuln/core-scheduling.rst b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
index cf1eeefdfc..a92e10ec40 100644
--- a/Documentation/admin-guide/hw-vuln/core-scheduling.rst
+++ b/Documentation/admin-guide/hw-vuln/core-scheduling.rst
@@ -67,8 +67,8 @@ arg4:
     will be performed for all tasks in the task group of ``pid``.
 
 arg5:
-    userspace pointer to an unsigned long for storing the cookie returned by
-    ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
+    userspace pointer to an unsigned long long for storing the cookie returned
+    by ``PR_SCHED_CORE_GET`` command. Should be 0 for all other commands.
 
 In order for a process to push a cookie to, or pull a cookie from a process, it
 is required to have the ptrace access mode: `PTRACE_MODE_READ_REALCREDS` to the
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index 9d23144bf9..87fdc258d1 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -450,7 +450,7 @@ pages of all memory cgroups except ``/having_care_already``.::
     # # further filter out all cgroups except one at '/having_care_already'
     echo memcg > 1/type
     echo /having_care_already > 1/memcg_path
-    echo N > 1/matching
+    echo Y > 1/matching
 
 Note that ``anon`` and ``memcg`` filters are currently supported only when
 ``paddr`` :ref:`implementation <sysfs_context>` is being used.
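The core-scheduling documentation fix above matters in practice: the cookie returned through arg5 is 64 bits wide, so userspace must pass a pointer to an unsigned long long rather than an unsigned long (which is 32-bit on 32-bit targets). The following is a minimal userspace sketch, not part of the patch; the PR_SCHED_CORE constants are assumed to come from <sys/prctl.h> on current systems, with fallback definitions in case older headers lack them.

/* build: cc -o core_cookie core_cookie.c ; illustrative only */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SCHED_CORE
#define PR_SCHED_CORE		62
#define PR_SCHED_CORE_GET	0
#endif

int main(void)
{
	/* 64-bit cookie, per the corrected documentation above */
	unsigned long long cookie = 0;

	/* arg3 = 0 (current task), arg4 = 0 (PIDTYPE_PID, single task) */
	if (prctl(PR_SCHED_CORE, PR_SCHED_CORE_GET, 0, 0, &cookie))
		perror("PR_SCHED_CORE_GET");
	else
		printf("core scheduling cookie: %llu\n", cookie);
	return 0;
}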
diff --git a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml index c13c10c8d6..eed0df9d3a 100644 --- a/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml +++ b/Documentation/devicetree/bindings/iio/health/maxim,max30102.yaml @@ -42,7 +42,7 @@ allOf: properties: compatible: contains: - const: maxim,max30100 + const: maxim,max30102 then: properties: maxim,green-led-current-microamp: false diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml index e74502a0af..3202dc7967 100644 --- a/Documentation/devicetree/bindings/net/mediatek,net.yaml +++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml @@ -337,8 +337,8 @@ allOf: minItems: 4 clocks: - minItems: 34 - maxItems: 34 + minItems: 24 + maxItems: 24 clock-names: items: @@ -351,18 +351,6 @@ allOf: - const: ethwarp_wocpu1 - const: ethwarp_wocpu0 - const: esw - - const: netsys0 - - const: netsys1 - - const: sgmii_tx250m - - const: sgmii_rx250m - - const: sgmii2_tx250m - - const: sgmii2_rx250m - - const: top_usxgmii0_sel - - const: top_usxgmii1_sel - - const: top_sgm0_sel - - const: top_sgm1_sel - - const: top_xfi_phy0_xtal_sel - - const: top_xfi_phy1_xtal_sel - const: top_eth_gmii_sel - const: top_eth_refck_50m_sel - const: top_eth_sys_200m_sel @@ -375,16 +363,10 @@ allOf: - const: top_netsys_sync_250m_sel - const: top_netsys_ppefb_250m_sel - const: top_netsys_warp_sel - - const: wocpu1 - - const: wocpu0 - const: xgp1 - const: xgp2 - const: xgp3 - mediatek,sgmiisys: - minItems: 2 - maxItems: 2 - patternProperties: "^mac@[0-1]$": type: object diff --git a/Documentation/netlink/specs/rt_link.yaml b/Documentation/netlink/specs/rt_link.yaml index 8e4d19adee..4e702ac8bf 100644 --- a/Documentation/netlink/specs/rt_link.yaml +++ b/Documentation/netlink/specs/rt_link.yaml @@ -1144,6 +1144,12 @@ attribute-sets: - name: mcast-querier-state type: binary + - + name: fdb-n-learned + type: u32 + - + name: fdb-max-learned + type: u32 - name: linkinfo-brport-attrs name-prefix: ifla-brport- diff --git a/Documentation/sphinx/kernel_include.py b/Documentation/sphinx/kernel_include.py index abe7680883..6387624423 100755 --- a/Documentation/sphinx/kernel_include.py +++ b/Documentation/sphinx/kernel_include.py @@ -97,7 +97,6 @@ class KernelInclude(Include): # HINT: this is the only line I had to change / commented out: #path = utils.relative_path(None, path) - path = nodes.reprunicode(path) encoding = self.options.get( 'encoding', self.state.document.settings.input_encoding) e_handler=self.state.document.settings.input_encoding_error_handler @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 8 -SUBLEVEL = 9 +SUBLEVEL = 11 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index a86a1d4f34..93afd1005b 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S @@ -127,6 +127,10 @@ cpu_resume_after_mmu: instr_sync #endif bl cpu_init @ restore the und/abt/irq banked regs +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) + mov r0, sp + bl kasan_unpoison_task_stack_below +#endif mov r0, #0 @ return zero on success ldmfd sp!, {r4 - r11, pc} ENDPROC(cpu_resume_after_mmu) diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 1d672457d0..72b5cd697f 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -871,16 +871,11 @@ static inline void emit_a32_alu_r64(const bool 
is64, const s8 dst[], } /* dst = src (4 bytes)*/ -static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off, - struct jit_ctx *ctx) { +static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; s8 rt; rt = arm_bpf_get_reg32(src, tmp[0], ctx); - if (off && off != 32) { - emit(ARM_LSL_I(rt, rt, 32 - off), ctx); - emit(ARM_ASR_I(rt, rt, 32 - off), ctx); - } arm_bpf_put_reg32(dst, rt, ctx); } @@ -889,15 +884,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], const s8 src[], struct jit_ctx *ctx) { if (!is64) { - emit_a32_mov_r(dst_lo, src_lo, 0, ctx); + emit_a32_mov_r(dst_lo, src_lo, ctx); if (!ctx->prog->aux->verifier_zext) /* Zero out high 4 bytes */ emit_a32_mov_i(dst_hi, 0, ctx); } else if (__LINUX_ARM_ARCH__ < 6 && ctx->cpu_architecture < CPU_ARCH_ARMv5TE) { /* complete 8 byte move */ - emit_a32_mov_r(dst_lo, src_lo, 0, ctx); - emit_a32_mov_r(dst_hi, src_hi, 0, ctx); + emit_a32_mov_r(dst_lo, src_lo, ctx); + emit_a32_mov_r(dst_hi, src_hi, ctx); } else if (is_stacked(src_lo) && is_stacked(dst_lo)) { const u8 *tmp = bpf2a32[TMP_REG_1]; @@ -917,17 +912,52 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[], struct jit_ctx *ctx) { const s8 *tmp = bpf2a32[TMP_REG_1]; - const s8 *rt; + s8 rs; + s8 rd; - rt = arm_bpf_get_reg64(dst, tmp, ctx); + if (is_stacked(dst_lo)) + rd = tmp[1]; + else + rd = dst_lo; + rs = arm_bpf_get_reg32(src_lo, rd, ctx); + /* rs may be one of src[1], dst[1], or tmp[1] */ + + /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign + * extension only happens in the upper 64 bits. + */ + if (off != 32) { + /* Sign extend rs into rd */ + emit(ARM_LSL_I(rd, rs, 32 - off), ctx); + emit(ARM_ASR_I(rd, rd, 32 - off), ctx); + } else { + rd = rs; + } + + /* Write rd to dst_lo + * + * Optimization: + * Assume: + * 1. dst == src and stacked. + * 2. off == 32 + * + * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32. + * So, we don't need to write rd back to dst_lo as they have the same value. + * This saves us one str instruction. 
+ */ + if (dst_lo != src_lo || off != 32) + arm_bpf_put_reg32(dst_lo, rd, ctx); - emit_a32_mov_r(dst_lo, src_lo, off, ctx); if (!is64) { if (!ctx->prog->aux->verifier_zext) /* Zero out high 4 bytes */ emit_a32_mov_i(dst_hi, 0, ctx); } else { - emit(ARM_ASR_I(rt[0], rt[1], 31), ctx); + if (is_stacked(dst_hi)) { + emit(ARM_ASR_I(tmp[0], rd, 31), ctx); + arm_bpf_put_reg32(dst_hi, tmp[0], ctx); + } else { + emit(ARM_ASR_I(dst_hi, rd, 31), ctx); + } } } diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts index a2e74b8293..6a7ae61651 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-pico6.dts @@ -82,7 +82,8 @@ }; &mmc1 { - bt_reset: bt-reset { + bluetooth@2 { + reg = <2>; compatible = "mediatek,mt7921s-bluetooth"; pinctrl-names = "default"; pinctrl-0 = <&bt_pins_reset>; diff --git a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts index 5e4287f8c8..b2cf2c9883 100644 --- a/arch/arm64/boot/dts/qcom/sa8155p-adp.dts +++ b/arch/arm64/boot/dts/qcom/sa8155p-adp.dts @@ -367,6 +367,16 @@ }; }; +&pmm8155au_1_gpios { + pmm8155au_1_sdc2_cd: sdc2-cd-default-state { + pins = "gpio4"; + function = "normal"; + input-enable; + bias-pull-up; + power-source = <0>; + }; +}; + &qupv3_id_1 { status = "okay"; }; @@ -384,10 +394,10 @@ &sdhc_2 { status = "okay"; - cd-gpios = <&tlmm 4 GPIO_ACTIVE_LOW>; + cd-gpios = <&pmm8155au_1_gpios 4 GPIO_ACTIVE_LOW>; pinctrl-names = "default", "sleep"; - pinctrl-0 = <&sdc2_on>; - pinctrl-1 = <&sdc2_off>; + pinctrl-0 = <&sdc2_on &pmm8155au_1_sdc2_cd>; + pinctrl-1 = <&sdc2_off &pmm8155au_1_sdc2_cd>; vqmmc-supply = <&vreg_l13c_2p96>; /* IO line power */ vmmc-supply = <&vreg_l17a_2p96>; /* Card power line */ bus-width = <4>; @@ -505,13 +515,6 @@ bias-pull-up; /* pull up */ drive-strength = <16>; /* 16 MA */ }; - - sd-cd-pins { - pins = "gpio96"; - function = "gpio"; - bias-pull-up; /* pull up */ - drive-strength = <2>; /* 2 MA */ - }; }; sdc2_off: sdc2-off-state { @@ -532,13 +535,6 @@ bias-pull-up; /* pull up */ drive-strength = <2>; /* 2 MA */ }; - - sd-cd-pins { - pins = "gpio96"; - function = "gpio"; - bias-pull-up; /* pull up */ - drive-strength = <2>; /* 2 MA */ - }; }; usb2phy_ac_en1_default: usb2phy-ac-en1-default-state { diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index f48b8dab8b..1d26bb5b02 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -338,12 +338,12 @@ int kvm_register_vgic_device(unsigned long type) int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, struct vgic_reg_attr *reg_attr) { - int cpuid; + int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr); - cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr); - - reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid); reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; + reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid); + if (!reg_attr->vcpu) + return -EINVAL; return 0; } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 00217d8d03..ceed77c29f 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1738,15 +1738,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l, emit_call(enter_prog, ctx); + /* save return value to callee saved register x20 */ + emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx); + /* if 
(__bpf_prog_enter(prog) == 0) * goto skip_exec_of_prog; */ branch = ctx->image + ctx->idx; emit(A64_NOP, ctx); - /* save return value to callee saved register x20 */ - emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx); - emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx); if (!p->jited) emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx); diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index d14d0e37ad..4a2b40ce39 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -159,7 +159,7 @@ extern unsigned long exception_ip(struct pt_regs *regs); #define exception_ip(regs) exception_ip(regs) #define profile_pc(regs) instruction_pointer(regs) -extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall); +extern asmlinkage long syscall_trace_enter(struct pt_regs *regs); extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); extern void die(const char *, struct pt_regs *) __noreturn; diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index d1b11f66f7..cb1045ebab 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -101,6 +101,7 @@ void output_thread_info_defines(void) OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); OFFSET(TI_REGS, thread_info, regs); + OFFSET(TI_SYSCALL, thread_info, syscall); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index 59288c13b5..61503a3606 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -1317,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request, * Notification of system call entry/exit * - triggered by current->work.syscall_trace */ -asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) +asmlinkage long syscall_trace_enter(struct pt_regs *regs) { user_exit(); - current_thread_info()->syscall = syscall; - if (test_thread_flag(TIF_SYSCALL_TRACE)) { if (ptrace_report_syscall_entry(regs)) return -1; - syscall = current_thread_info()->syscall; } #ifdef CONFIG_SECCOMP @@ -1335,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) struct seccomp_data sd; unsigned long args[6]; - sd.nr = syscall; + sd.nr = current_thread_info()->syscall; sd.arch = syscall_get_arch(current); syscall_get_arguments(current, regs, args); for (i = 0; i < 6; i++) @@ -1345,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) ret = __secure_computing(&sd); if (ret == -1) return ret; - syscall = current_thread_info()->syscall; } #endif if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->regs[2]); - audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], + audit_syscall_entry(current_thread_info()->syscall, + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); /* * Negative syscall numbers are mistaken for rejected syscalls, but * won't have had the return value set appropriately, so we do so now. 
*/ - if (syscall < 0) + if (current_thread_info()->syscall < 0) syscall_set_return_value(current, regs, -ENOSYS, 0); - return syscall; + return current_thread_info()->syscall; } /* diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 18dc9b3450..2c604717e6 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -77,6 +77,18 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + */ + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + lw t0, TI_FLAGS($28) # syscall tracing enabled? li t1, _TIF_WORK_SYSCALL_ENTRY and t0, t1 @@ -114,16 +126,7 @@ syscall_trace_entry: SAVE_STATIC move a0, sp - /* - * syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - */ - move a1, v0 - subu t2, v0, __NR_O32_Linux - bnez t2, 1f /* __NR_syscall at offset 0 */ - lw a1, PT_R4(sp) - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 97456b2ca7..9778885923 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -72,7 +74,6 @@ syscall_common: n32_syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index e6264aa62e..be11ea5cc6 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp) sd a3, PT_R26(sp) # save a3 for syscall restarting + LONG_S v0, TI_SYSCALL($28) # Store syscall number + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? and t0, t1, t0 @@ -82,7 +84,6 @@ n64_syscall_exit: syscall_trace_entry: SAVE_STATIC move a0, sp - move a1, v0 jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index d3c2616cba..7a5abb73e5 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -79,6 +79,22 @@ loads_done: PTR_WD load_a7, bad_stack_a7 .previous + /* + * absolute syscall number is in v0 unless we called syscall(__NR_###) + * where the real syscall number is in a0 + * note: NR_syscall is the first O32 syscall but the macro is + * only defined when compiling with -mabi=32 (CONFIG_32BIT) + * therefore __NR_O32_Linux is used (4000) + */ + + subu t2, v0, __NR_O32_Linux + bnez t2, 1f /* __NR_syscall at offset 0 */ + LONG_S a0, TI_SYSCALL($28) # Save a0 as syscall number + b 2f +1: + LONG_S v0, TI_SYSCALL($28) # Save v0 as syscall number +2: + li t1, _TIF_WORK_SYSCALL_ENTRY LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? 
and t0, t1, t0 @@ -113,22 +129,7 @@ trace_a_syscall: sd a7, PT_R11(sp) # For indirect syscalls move a0, sp - /* - * absolute syscall number is in v0 unless we called syscall(__NR_###) - * where the real syscall number is in a0 - * note: NR_syscall is the first O32 syscall but the macro is - * only defined when compiling with -mabi=32 (CONFIG_32BIT) - * therefore __NR_O32_Linux is used (4000) - */ - .set push - .set reorder - subu t1, v0, __NR_O32_Linux - move a1, v0 - bnez t1, 1f /* __NR_syscall at offset 0 */ - ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ - .set pop - -1: jal syscall_trace_enter + jal syscall_trace_enter bltz v0, 1f # seccomp failed? Skip syscall diff --git a/arch/powerpc/crypto/chacha-p10-glue.c b/arch/powerpc/crypto/chacha-p10-glue.c index 74fb86b0d2..7c72875585 100644 --- a/arch/powerpc/crypto/chacha-p10-glue.c +++ b/arch/powerpc/crypto/chacha-p10-glue.c @@ -197,6 +197,9 @@ static struct skcipher_alg algs[] = { static int __init chacha_p10_init(void) { + if (!cpu_has_feature(CPU_FTR_ARCH_31)) + return 0; + static_branch_enable(&have_p10); return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); @@ -204,10 +207,13 @@ static int __init chacha_p10_init(void) static void __exit chacha_p10_exit(void) { + if (!static_branch_likely(&have_p10)) + return; + crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); } -module_cpu_feature_match(PPC_MODULE_FEATURE_P10, chacha_p10_init); +module_init(chacha_p10_init); module_exit(chacha_p10_exit); MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (P10 accelerated)"); diff --git a/arch/powerpc/include/asm/plpks.h b/arch/powerpc/include/asm/plpks.h index 23b77027c9..7a84069759 100644 --- a/arch/powerpc/include/asm/plpks.h +++ b/arch/powerpc/include/asm/plpks.h @@ -44,9 +44,8 @@ #define PLPKS_MAX_DATA_SIZE 4000 // Timeouts for PLPKS operations -#define PLPKS_MAX_TIMEOUT 5000 // msec -#define PLPKS_FLUSH_SLEEP 10 // msec -#define PLPKS_FLUSH_SLEEP_RANGE 400 +#define PLPKS_MAX_TIMEOUT (5 * USEC_PER_SEC) +#define PLPKS_FLUSH_SLEEP 10000 // usec struct plpks_var { char *component; diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index e8c4129697..b1e6d275cd 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c @@ -786,8 +786,16 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) * parent bus. During reboot, there will be ibm,dma-window property to * define DMA window. For kdump, there will at least be default window or DDW * or both. + * There is an exception to the above. In case the PE goes into frozen + * state, firmware may not provide ibm,dma-window property at the time + * of LPAR boot up. 
*/ + if (!pdn) { + pr_debug(" no ibm,dma-window property !\n"); + return; + } + ppci = PCI_DN(pdn); pr_debug(" parent is %pOF, iommu_table: 0x%p\n", diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c index febe18f251..4a595493d2 100644 --- a/arch/powerpc/platforms/pseries/plpks.c +++ b/arch/powerpc/platforms/pseries/plpks.c @@ -415,8 +415,7 @@ static int plpks_confirm_object_flushed(struct label *label, break; } - usleep_range(PLPKS_FLUSH_SLEEP, - PLPKS_FLUSH_SLEEP + PLPKS_FLUSH_SLEEP_RANGE); + fsleep(PLPKS_FLUSH_SLEEP); timeout = timeout + PLPKS_FLUSH_SLEEP; } while (timeout < PLPKS_MAX_TIMEOUT); @@ -464,9 +463,10 @@ int plpks_signed_update_var(struct plpks_var *var, u64 flags) continuetoken = retbuf[0]; if (pseries_status_to_err(rc) == -EBUSY) { - int delay_ms = get_longbusy_msecs(rc); - mdelay(delay_ms); - timeout += delay_ms; + int delay_us = get_longbusy_msecs(rc) * 1000; + + fsleep(delay_us); + timeout += delay_us; } rc = pseries_status_to_err(rc); } while (rc == -EBUSY && timeout < PLPKS_MAX_TIMEOUT); diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 719a97e7ed..0bd747d1d0 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -740,6 +740,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of if (ret) return ret; + /* store prog start time */ + emit_mv(RV_REG_S1, RV_REG_A0, ctx); + /* if (__bpf_prog_enter(prog) == 0) * goto skip_exec_of_prog; */ @@ -747,9 +750,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of /* nop reserved for conditional jump */ emit(rv_nop(), ctx); - /* store prog start time */ - emit_mv(RV_REG_S1, RV_REG_A0, ctx); - /* arg1: &args_off */ emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx); if (!p->jited) diff --git a/arch/s390/include/asm/dwarf.h b/arch/s390/include/asm/dwarf.h index 4f21ae561e..390906b8e3 100644 --- a/arch/s390/include/asm/dwarf.h +++ b/arch/s390/include/asm/dwarf.h @@ -9,6 +9,7 @@ #define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset #define CFI_ADJUST_CFA_OFFSET .cfi_adjust_cfa_offset #define CFI_RESTORE .cfi_restore +#define CFI_REL_OFFSET .cfi_rel_offset #ifdef CONFIG_AS_CFI_VAL_OFFSET #define CFI_VAL_OFFSET .cfi_val_offset diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S index 57f62596e5..85247ef5a4 100644 --- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S +++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S @@ -24,8 +24,10 @@ __kernel_\func: CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE) CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD stg %r14,STACK_FRAME_OVERHEAD(%r15) + CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD brasl %r14,__s390_vdso_\func lg %r14,STACK_FRAME_OVERHEAD(%r15) + CFI_RESTORE 14 aghi %r15,WRAPPER_FRAME_SIZE CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD CFI_RESTORE 15 diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 08a7eca03d..41a4a60c5e 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2659,7 +2659,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr, return 0; start = pmd_val(*pmd) & HPAGE_MASK; - end = start + HPAGE_SIZE - 1; + end = start + HPAGE_SIZE; __storage_key_init_range(start, end); set_bit(PG_arch_1, &page->flags); cond_resched(); diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 5f64f3d0fa..763469e518 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -139,7 +139,7 @@ static void 
clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste) } if (!test_and_set_bit(PG_arch_1, &page->flags)) - __storage_key_init_range(paddr, paddr + size - 1); + __storage_key_init_range(paddr, paddr + size); } void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 75bd5ac7ac..a04a5163e5 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1808,7 +1808,7 @@ void x2apic_setup(void) __x2apic_enable(); } -static __init void apic_set_fixmap(void); +static __init void apic_set_fixmap(bool read_apic); static __init void x2apic_disable(void) { @@ -1830,7 +1830,12 @@ static __init void x2apic_disable(void) } __x2apic_disable(); - apic_set_fixmap(); + /* + * Don't reread the APIC ID as it was already done from + * check_x2apic() and the APIC driver still is a x2APIC variant, + * which fails to do the read after x2APIC was disabled. + */ + apic_set_fixmap(false); } static __init void x2apic_enable(void) @@ -2095,13 +2100,14 @@ void __init init_apic_mappings(void) } } -static __init void apic_set_fixmap(void) +static __init void apic_set_fixmap(bool read_apic) { set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr); apic_mmio_base = APIC_BASE; apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", apic_mmio_base, mp_lapic_addr); - apic_read_boot_cpu_id(false); + if (read_apic) + apic_read_boot_cpu_id(false); } void __init register_lapic_address(unsigned long address) @@ -2111,7 +2117,7 @@ void __init register_lapic_address(unsigned long address) mp_lapic_addr = address; if (!x2apic_mode) - apic_set_fixmap(); + apic_set_fixmap(true); } /* diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index d008a153a2..7ed1a2085b 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h @@ -115,9 +115,9 @@ #define MAKE_RA_FOR_CALL(ra,ws) (((ra) & 0x3fffffff) | (ws) << 30) /* Convert return address to a valid pc - * Note: We assume that the stack pointer is in the same 1GB ranges as the ra + * Note: 'text' is the address within the same 1GB range as the ra */ -#define MAKE_PC_FROM_RA(ra,sp) (((ra) & 0x3fffffff) | ((sp) & 0xc0000000)) +#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000)) #elif defined(__XTENSA_CALL0_ABI__) @@ -127,9 +127,9 @@ #define MAKE_RA_FOR_CALL(ra, ws) (ra) /* Convert return address to a valid pc - * Note: We assume that the stack pointer is in the same 1GB ranges as the ra + * Note: 'text' is not used as 'ra' is always the full address */ -#define MAKE_PC_FROM_RA(ra, sp) (ra) +#define MAKE_PC_FROM_RA(ra, text) (ra) #else #error Unsupported Xtensa ABI diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h index a270467556..86c7011737 100644 --- a/arch/xtensa/include/asm/ptrace.h +++ b/arch/xtensa/include/asm/ptrace.h @@ -87,7 +87,7 @@ struct pt_regs { # define user_mode(regs) (((regs)->ps & 0x00000020)!=0) # define instruction_pointer(regs) ((regs)->pc) # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \ - (regs)->areg[1])) + (regs)->pc)) # ifndef CONFIG_SMP # define profile_pc(regs) instruction_pointer(regs) diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index a815577d25..7bd66677f7 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c @@ -47,6 +47,7 @@ #include <asm/asm-offsets.h> #include <asm/regs.h> #include <asm/hw_breakpoint.h> +#include <asm/sections.h> #include 
<asm/traps.h> extern void ret_from_fork(void); @@ -380,7 +381,7 @@ unsigned long __get_wchan(struct task_struct *p) int count = 0; sp = p->thread.sp; - pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp); + pc = MAKE_PC_FROM_RA(p->thread.ra, _text); do { if (sp < stack_page + sizeof(struct task_struct) || @@ -392,7 +393,7 @@ unsigned long __get_wchan(struct task_struct *p) /* Stack layout: sp-4: ra, sp-3: sp' */ - pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp); + pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text); sp = SPILL_SLOT(sp, 1); } while (count++ < 16); return 0; diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c index 831ffb648b..ed324fdf2a 100644 --- a/arch/xtensa/kernel/stacktrace.c +++ b/arch/xtensa/kernel/stacktrace.c @@ -13,6 +13,7 @@ #include <linux/stacktrace.h> #include <asm/ftrace.h> +#include <asm/sections.h> #include <asm/stacktrace.h> #include <asm/traps.h> #include <linux/uaccess.h> @@ -189,7 +190,7 @@ void walk_stackframe(unsigned long *sp, if (a1 <= (unsigned long)sp) break; - frame.pc = MAKE_PC_FROM_RA(a0, a1); + frame.pc = MAKE_PC_FROM_RA(a0, _text); frame.sp = a1; if (fn(&frame, data)) diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 04d44f0bcb..f3b68b7994 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) { struct ioc *ioc = iocg->ioc; struct blkcg_gq *blkg = iocg_to_blkg(iocg); - u64 tdelta, delay, new_delay; + u64 tdelta, delay, new_delay, shift; s64 vover, vover_pct; u32 hwa; @@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) /* calculate the current delay in effect - 1/2 every second */ tdelta = now->now - iocg->delay_at; - if (iocg->delay) - delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC); + shift = div64_u64(tdelta, USEC_PER_SEC); + if (iocg->delay && shift < BITS_PER_LONG) + delay = iocg->delay >> shift; else delay = 0; @@ -1438,8 +1439,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, lockdep_assert_held(&iocg->ioc->lock); lockdep_assert_held(&iocg->waitq.lock); - /* make sure that nobody messed with @iocg */ - WARN_ON_ONCE(list_empty(&iocg->active_list)); + /* + * make sure that nobody messed with @iocg. Check iocg->pd.online + * to avoid warn when removing blkcg or disk. 
+ */ + WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online); WARN_ON_ONCE(iocg->inuse > 1); iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); diff --git a/block/genhd.c b/block/genhd.c index d74fb5b4ae..d0471f469f 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -345,9 +345,7 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode) struct bdev_handle *handle; int ret = 0; - if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) - return -EINVAL; - if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (!disk_has_partscan(disk)) return -EINVAL; if (disk->open_partitions) return -EBUSY; @@ -503,8 +501,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, goto out_unregister_bdi; /* Make sure the first partition scan will be proceed */ - if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) && - !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (get_capacity(disk) && disk_has_partscan(disk)) set_bit(GD_NEED_PART_SCAN, &disk->state); bdev_add(disk->part0, ddev->devt); @@ -1047,6 +1044,12 @@ static ssize_t diskseq_show(struct device *dev, return sprintf(buf, "%llu\n", disk->diskseq); } +static ssize_t partscan_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%u\n", disk_has_partscan(dev_to_disk(dev))); +} + static DEVICE_ATTR(range, 0444, disk_range_show, NULL); static DEVICE_ATTR(ext_range, 0444, disk_ext_range_show, NULL); static DEVICE_ATTR(removable, 0444, disk_removable_show, NULL); @@ -1060,6 +1063,7 @@ static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); +static DEVICE_ATTR(partscan, 0444, partscan_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST ssize_t part_fail_show(struct device *dev, @@ -1106,6 +1110,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_events_async.attr, &dev_attr_events_poll_msecs.attr, &dev_attr_diskseq.attr, + &dev_attr_partscan.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/block/ioctl.c b/block/ioctl.c index 5f8c988239..7dbed0c115 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -89,7 +89,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, unsigned long arg) { uint64_t range[2]; - uint64_t start, len; + uint64_t start, len, end; struct inode *inode = bdev->bd_inode; int err; @@ -110,7 +110,8 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode, if (len & 511) return -EINVAL; - if (start + len > bdev_nr_bytes(bdev)) + if (check_add_overflow(start, len, &end) || + end > bdev_nr_bytes(bdev)) return -EINVAL; filemap_invalidate_lock(inode->i_mapping); diff --git a/block/partitions/core.c b/block/partitions/core.c index 5f5ed5c75f..eac887755f 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -584,10 +584,7 @@ static int blk_add_partitions(struct gendisk *disk) struct parsed_partitions *state; int ret = -EAGAIN, p; - if (disk->flags & GENHD_FL_NO_PART) - return 0; - - if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (!disk_has_partscan(disk)) return 0; state = check_partition(disk); diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index b35c7aedca..bad1ccc81a 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2023 Intel 
Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #include <linux/firmware.h> @@ -371,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev) { int ret; - ivpu_prepare_for_reset(vdev); + /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */ + pci_save_state(to_pci_dev(vdev->drm.dev)); ret = ivpu_hw_power_down(vdev); if (ret) ivpu_warn(vdev, "Failed to power down HW: %d\n", ret); + pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); + return ret; } @@ -543,11 +546,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev) /* Power up early so the rest of init code can access VPU registers */ ret = ivpu_hw_power_up(vdev); if (ret) - goto err_power_down; + goto err_shutdown; ret = ivpu_mmu_global_context_init(vdev); if (ret) - goto err_power_down; + goto err_shutdown; ret = ivpu_mmu_init(vdev); if (ret) @@ -584,10 +587,8 @@ err_mmu_rctx_fini: ivpu_mmu_reserved_context_fini(vdev); err_mmu_gctx_fini: ivpu_mmu_global_context_fini(vdev); -err_power_down: - ivpu_hw_power_down(vdev); - if (IVPU_WA(d3hot_after_power_off)) - pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); +err_shutdown: + ivpu_shutdown(vdev); err_xa_destroy: xa_destroy(&vdev->submitted_jobs_xa); xa_destroy(&vdev->context_xa); @@ -610,9 +611,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev) static void ivpu_dev_fini(struct ivpu_device *vdev) { ivpu_pm_disable(vdev); + ivpu_prepare_for_reset(vdev); ivpu_shutdown(vdev); - if (IVPU_WA(d3hot_after_power_off)) - pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); ivpu_jobs_abort_all(vdev); ivpu_job_done_consumer_fini(vdev); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 069ace4adb..e7a9e84994 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #ifndef __IVPU_DRV_H__ @@ -87,7 +87,6 @@ struct ivpu_wa_table { bool punit_disabled; bool clear_runtime_mem; - bool d3hot_after_power_off; bool interrupt_clear_with_0; bool disable_clock_relinquish; bool disable_d0i3_msg; diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c index 32bb772e03..5e392b6823 100644 --- a/drivers/accel/ivpu/ivpu_hw_37xx.c +++ b/drivers/accel/ivpu/ivpu_hw_37xx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #include "ivpu_drv.h" @@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev) { vdev->wa.punit_disabled = false; vdev->wa.clear_runtime_mem = false; - vdev->wa.d3hot_after_power_off = true; REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK); if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) { @@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev) IVPU_PRINT_WA(punit_disabled); IVPU_PRINT_WA(clear_runtime_mem); - IVPU_PRINT_WA(d3hot_after_power_off); IVPU_PRINT_WA(interrupt_clear_with_0); } diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 91bd640655..2e46b322c4 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd) case IVPU_MMU_EVT_F_VMS_FETCH: return "Fetch of VMS caused external abort"; default: - return "Unknown CMDQ command"; + return "Unknown event"; } } @@ -286,15 +286,15 @@ 
static const char *ivpu_mmu_cmdq_err_to_str(u32 err) { switch (err) { case IVPU_MMU_CERROR_NONE: - return "No CMDQ Error"; + return "No error"; case IVPU_MMU_CERROR_ILL: return "Illegal command"; case IVPU_MMU_CERROR_ABT: - return "External abort on CMDQ read"; + return "External abort on command queue read"; case IVPU_MMU_CERROR_ATC_INV_SYNC: return "Sync failed to complete ATS invalidation"; default: - return "Unknown CMDQ Error"; + return "Unknown error"; } } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index a15d30d094..64618fc2ce 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #include <linux/highmem.h> @@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev) { int ret; - /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */ - pci_save_state(to_pci_dev(vdev->drm.dev)); + ivpu_prepare_for_reset(vdev); ret = ivpu_shutdown(vdev); if (ret) - ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret); - - pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); + ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret); return ret; } diff --git a/drivers/android/binder.c b/drivers/android/binder.c index d6f14c8e20..e029687e57 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -5367,7 +5367,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) goto err; break; case BINDER_SET_MAX_THREADS: { - int max_threads; + u32 max_threads; if (copy_from_user(&max_threads, ubuf, sizeof(max_threads))) { diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7270d4d222..5b7c80b99a 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h @@ -421,7 +421,7 @@ struct binder_proc { struct list_head todo; struct binder_stats stats; struct list_head delivered_death; - int max_threads; + u32 max_threads; int requested_threads; int requested_threads_started; int tmp_ref; diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c index 400b22ee99..4c270999ba 100644 --- a/drivers/ata/sata_gemini.c +++ b/drivers/ata/sata_gemini.c @@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge) pclk = sg->sata0_pclk; else pclk = sg->sata1_pclk; - clk_enable(pclk); + ret = clk_enable(pclk); + if (ret) + return ret; + msleep(10); /* Do not keep clocking a bridge that is not online */ diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 6db77d8e45..4ae399c30f 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2837,6 +2837,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) EXPORT_SYMBOL_GPL(regmap_read); /** + * regmap_read_bypassed() - Read a value from a single register direct + * from the device, bypassing the cache + * + * @map: Register map to read from + * @reg: Register to be read from + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. 
+ */ +int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val) +{ + int ret; + bool bypass, cache_only; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + cache_only = map->cache_only; + map->cache_bypass = true; + map->cache_only = false; + + ret = _regmap_read(map, reg, val); + + map->cache_bypass = bypass; + map->cache_only = cache_only; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_read_bypassed); + +/** * regmap_raw_read() - Read raw data from the device * * @map: Register map to read from diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 19cfc342fc..638074992c 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -99,7 +99,8 @@ static int qca_read_fw_build_info(struct hci_dev *hdev) { struct sk_buff *skb; struct edl_event_hdr *edl; - char cmd, build_label[QCA_FW_BUILD_VER_LEN]; + char *build_label; + char cmd; int build_lbl_len, err = 0; bt_dev_dbg(hdev, "QCA read fw build info"); @@ -114,6 +115,11 @@ static int qca_read_fw_build_info(struct hci_dev *hdev) return err; } + if (skb->len < sizeof(*edl)) { + err = -EILSEQ; + goto out; + } + edl = (struct edl_event_hdr *)(skb->data); if (!edl) { bt_dev_err(hdev, "QCA read fw build info with no header"); @@ -129,14 +135,25 @@ static int qca_read_fw_build_info(struct hci_dev *hdev) goto out; } + if (skb->len < sizeof(*edl) + 1) { + err = -EILSEQ; + goto out; + } + build_lbl_len = edl->data[0]; - if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) { - memcpy(build_label, edl->data + 1, build_lbl_len); - *(build_label + build_lbl_len) = '\0'; + + if (skb->len < sizeof(*edl) + 1 + build_lbl_len) { + err = -EILSEQ; + goto out; } + build_label = kstrndup(&edl->data[1], build_lbl_len, GFP_KERNEL); + if (!build_label) + goto out; + hci_set_fw_info(hdev, "%s", build_label); + kfree(build_label); out: kfree_skb(skb); return err; @@ -235,6 +252,11 @@ static int qca_read_fw_board_id(struct hci_dev *hdev, u16 *bid) goto out; } + if (skb->len < 3) { + err = -EILSEQ; + goto out; + } + *bid = (edl->data[1] << 8) + edl->data[2]; bt_dev_dbg(hdev, "%s: bid = %x", __func__, *bid); @@ -265,9 +287,10 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); -static void qca_tlv_check_data(struct hci_dev *hdev, +static int qca_tlv_check_data(struct hci_dev *hdev, struct qca_fw_config *config, - u8 *fw_data, enum qca_btsoc_type soc_type) + u8 *fw_data, size_t fw_size, + enum qca_btsoc_type soc_type) { const u8 *data; u32 type_len; @@ -277,12 +300,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev, struct tlv_type_patch *tlv_patch; struct tlv_type_nvm *tlv_nvm; uint8_t nvm_baud_rate = config->user_baud_rate; + u8 type; config->dnld_mode = QCA_SKIP_EVT_NONE; config->dnld_type = QCA_SKIP_EVT_NONE; switch (config->type) { case ELF_TYPE_PATCH: + if (fw_size < 7) + return -EINVAL; + config->dnld_mode = QCA_SKIP_EVT_VSE_CC; config->dnld_type = QCA_SKIP_EVT_VSE_CC; @@ -291,6 +318,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev, bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]); break; case TLV_TYPE_PATCH: + if (fw_size < sizeof(struct tlv_type_hdr) + sizeof(struct tlv_type_patch)) + return -EINVAL; + tlv = (struct tlv_type_hdr *)fw_data; type_len = le32_to_cpu(tlv->type_len); tlv_patch = (struct tlv_type_patch *)tlv->data; @@ -330,25 +360,64 @@ static void qca_tlv_check_data(struct hci_dev *hdev, break; case TLV_TYPE_NVM: + if (fw_size < 
sizeof(struct tlv_type_hdr)) + return -EINVAL; + tlv = (struct tlv_type_hdr *)fw_data; type_len = le32_to_cpu(tlv->type_len); - length = (type_len >> 8) & 0x00ffffff; + length = type_len >> 8; + type = type_len & 0xff; - BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); + /* Some NVM files have more than one set of tags, only parse + * the first set when it has type 2 for now. When there is + * more than one set there is an enclosing header of type 4. + */ + if (type == 4) { + if (fw_size < 2 * sizeof(struct tlv_type_hdr)) + return -EINVAL; + + tlv++; + + type_len = le32_to_cpu(tlv->type_len); + length = type_len >> 8; + type = type_len & 0xff; + } + + BT_DBG("TLV Type\t\t : 0x%x", type); BT_DBG("Length\t\t : %d bytes", length); + if (type != 2) + break; + + if (fw_size < length + (tlv->data - fw_data)) + return -EINVAL; + idx = 0; data = tlv->data; - while (idx < length) { + while (idx < length - sizeof(struct tlv_type_nvm)) { tlv_nvm = (struct tlv_type_nvm *)(data + idx); tag_id = le16_to_cpu(tlv_nvm->tag_id); tag_len = le16_to_cpu(tlv_nvm->tag_len); + if (length < idx + sizeof(struct tlv_type_nvm) + tag_len) + return -EINVAL; + /* Update NVM tags as needed */ switch (tag_id) { + case EDL_TAG_ID_BD_ADDR: + if (tag_len != sizeof(bdaddr_t)) + return -EINVAL; + + memcpy(&config->bdaddr, tlv_nvm->data, sizeof(bdaddr_t)); + + break; + case EDL_TAG_ID_HCI: + if (tag_len < 3) + return -EINVAL; + /* HCI transport layer parameters * enabling software inband sleep * onto controller side. @@ -364,6 +433,9 @@ static void qca_tlv_check_data(struct hci_dev *hdev, break; case EDL_TAG_ID_DEEP_SLEEP: + if (tag_len < 1) + return -EINVAL; + /* Sleep enable mask * enabling deep sleep feature on controller. */ @@ -372,14 +444,16 @@ static void qca_tlv_check_data(struct hci_dev *hdev, break; } - idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len); + idx += sizeof(struct tlv_type_nvm) + tag_len; } break; default: BT_ERR("Unknown TLV type %d", config->type); - break; + return -EINVAL; } + + return 0; } static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, @@ -529,7 +603,9 @@ static int qca_download_firmware(struct hci_dev *hdev, memcpy(data, fw->data, size); release_firmware(fw); - qca_tlv_check_data(hdev, config, data, soc_type); + ret = qca_tlv_check_data(hdev, config, data, size, soc_type); + if (ret) + goto out; segment = data; remain = size; @@ -612,6 +688,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) } EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); +static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *config) +{ + struct hci_rp_read_bd_addr *bda; + struct sk_buff *skb; + int err; + + if (bacmp(&hdev->public_addr, BDADDR_ANY)) + return 0; + + skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to read device address (%d)", err); + return err; + } + + if (skb->len != sizeof(*bda)) { + bt_dev_err(hdev, "Device address length mismatch"); + kfree_skb(skb); + return -EIO; + } + + bda = (struct hci_rp_read_bd_addr *)skb->data; + if (!bacmp(&bda->bdaddr, &config->bdaddr)) + set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); + + kfree_skb(skb); + + return 0; +} + static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size, struct qca_btsoc_version ver, u8 rom_ver, u16 bid) { @@ -633,7 +741,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, const char *firmware_name) { - 
struct qca_fw_config config; + struct qca_fw_config config = {}; int err; u8 rom_ver = 0; u32 soc_ver; @@ -818,6 +926,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, break; } + err = qca_check_bdaddr(hdev, &config); + if (err) + return err; + bt_dev_info(hdev, "QCA setup on UART is completed"); return 0; diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index dc31984f71..215433fd76 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -29,6 +29,7 @@ #define EDL_PATCH_CONFIG_RES_EVT (0x00) #define QCA_DISABLE_LOGGING_SUB_OP (0x14) +#define EDL_TAG_ID_BD_ADDR 2 #define EDL_TAG_ID_HCI (17) #define EDL_TAG_ID_DEEP_SLEEP (27) @@ -47,7 +48,6 @@ #define get_soc_ver(soc_id, rom_ver) \ ((le32_to_cpu(soc_id) << 16) | (le16_to_cpu(rom_ver))) -#define QCA_FW_BUILD_VER_LEN 255 #define QCA_HSP_GF_SOC_ID 0x1200 #define QCA_HSP_GF_SOC_MASK 0x0000ff00 @@ -94,6 +94,7 @@ struct qca_fw_config { uint8_t user_baud_rate; enum qca_tlv_dnld_mode dnld_mode; enum qca_tlv_dnld_mode dnld_type; + bdaddr_t bdaddr; }; struct edl_event_hdr { diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index ce5c2aa743..0c9c9ee565 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1908,8 +1908,6 @@ retry: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: - set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); - qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->bdaddr_property_broken) set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index cf1fc0edfd..260e901d0b 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -4552,7 +4552,8 @@ void clk_unregister(struct clk *clk) if (ops == &clk_nodrv_ops) { pr_err("%s: unregistered clock: %s\n", __func__, clk->core->name); - goto unlock; + clk_prepare_unlock(); + return; } /* * Assign empty clock ops for consumers that might still hold @@ -4586,11 +4587,10 @@ void clk_unregister(struct clk *clk) if (clk->core->protect_count) pr_warn("%s: unregistering protected clock: %s\n", __func__, clk->core->name); + clk_prepare_unlock(); kref_put(&clk->core->ref, __clk_release); free_clk(clk); -unlock: - clk_prepare_unlock(); } EXPORT_SYMBOL_GPL(clk_unregister); @@ -4749,13 +4749,11 @@ void __clk_put(struct clk *clk) if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX) clk_set_rate_range_nolock(clk, 0, ULONG_MAX); - owner = clk->core->owner; - kref_put(&clk->core->ref, __clk_release); - clk_prepare_unlock(); + owner = clk->core->owner; + kref_put(&clk->core->ref, __clk_release); module_put(owner); - free_clk(clk); } diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 8602c02047..45c5255bcd 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -768,6 +768,7 @@ static struct clk_smd_rpm *msm8976_clks[] = { static const struct rpm_smd_clk_desc rpm_clk_msm8976 = { .clks = msm8976_clks, + .num_clks = ARRAY_SIZE(msm8976_clks), .icc_clks = bimc_pcnoc_snoc_smmnoc_icc_clks, .num_icc_clks = ARRAY_SIZE(bimc_pcnoc_snoc_smmnoc_icc_clks), }; diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c index 3484e6cc80..503c6f5b20 100644 --- a/drivers/clk/samsung/clk-exynos-clkout.c +++ b/drivers/clk/samsung/clk-exynos-clkout.c @@ -13,9 +13,9 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm.h> -#include <linux/property.h> #define 
EXYNOS_CLKOUT_NR_CLKS 1 #define EXYNOS_CLKOUT_PARENTS 32 @@ -84,17 +84,24 @@ MODULE_DEVICE_TABLE(of, exynos_clkout_ids); static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask) { const struct exynos_clkout_variant *variant; + const struct of_device_id *match; if (!dev->parent) { dev_err(dev, "not instantiated from MFD\n"); return -EINVAL; } - variant = device_get_match_data(dev->parent); - if (!variant) { + /* + * 'exynos_clkout_ids' arrays is not the ids array matched by + * the dev->parent driver, so of_device_get_match_data() or + * device_get_match_data() cannot be used here. + */ + match = of_match_device(exynos_clkout_ids, dev->parent); + if (!match) { dev_err(dev, "cannot match parent device\n"); return -EINVAL; } + variant = match->data; *mux_mask = variant->mux_mask; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index 8951ffc14f..6a4b2b9ef3 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c @@ -182,6 +182,8 @@ static struct ccu_nkm pll_mipi_clk = { &ccu_nkm_ops, CLK_SET_RATE_UNGATE | CLK_SET_RATE_PARENT), .features = CCU_FEATURE_CLOSEST_RATE, + .min_rate = 500000000, + .max_rate = 1400000000, }, }; diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c index 42568c6161..892df80727 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c @@ -1181,11 +1181,18 @@ static const u32 usb2_clk_regs[] = { SUN50I_H6_USB3_CLK_REG, }; +static struct ccu_mux_nb sun50i_h6_cpu_nb = { + .common = &cpux_clk.common, + .cm = &cpux_clk.mux, + .delay_us = 1, + .bypass_index = 0, /* index of 24 MHz oscillator */ +}; + static int sun50i_h6_ccu_probe(struct platform_device *pdev) { void __iomem *reg; + int i, ret; u32 val; - int i; reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) @@ -1252,7 +1259,15 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev) val |= BIT(24); writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG); - return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc); + ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc); + if (ret) + return ret; + + /* Reparent CPU during PLL CPUX rate changes */ + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk, + &sun50i_h6_cpu_nb); + + return 0; } static const struct of_device_id sun50i_h6_ccu_ids[] = { diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c index 8babce5530..ac0091b4ce 100644 --- a/drivers/clk/sunxi-ng/ccu_common.c +++ b/drivers/clk/sunxi-ng/ccu_common.c @@ -44,6 +44,16 @@ bool ccu_is_better_rate(struct ccu_common *common, unsigned long current_rate, unsigned long best_rate) { + unsigned long min_rate, max_rate; + + clk_hw_get_rate_range(&common->hw, &min_rate, &max_rate); + + if (current_rate > max_rate) + return false; + + if (current_rate < min_rate) + return false; + if (common->features & CCU_FEATURE_CLOSEST_RATE) return abs(current_rate - target_rate) < abs(best_rate - target_rate); @@ -122,6 +132,7 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev, for (i = 0; i < desc->hw_clks->num ; i++) { struct clk_hw *hw = desc->hw_clks->hws[i]; + struct ccu_common *common = hw_to_ccu_common(hw); const char *name; if (!hw) @@ -136,6 +147,14 @@ static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev, pr_err("Couldn't register clock %d - %s\n", i, name); goto err_clk_unreg; } + + if (common->max_rate) + clk_hw_set_rate_range(hw, common->min_rate, + 
common->max_rate); + else + WARN(common->min_rate, + "No max_rate, ignoring min_rate of clock %d - %s\n", + i, name); } ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h index 942a72c094..329734f8cf 100644 --- a/drivers/clk/sunxi-ng/ccu_common.h +++ b/drivers/clk/sunxi-ng/ccu_common.h @@ -31,6 +31,9 @@ struct ccu_common { u16 lock_reg; u32 prediv; + unsigned long min_rate; + unsigned long max_rate; + unsigned long features; spinlock_t *lock; struct clk_hw hw; diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 59456f2177..dcefb838b8 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -400,6 +400,18 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) int rc; dev_dbg(&pdev->dev, "%s called\n", __func__); + + /* + * Due to an erratum in some of the devices supported by the driver, + * direct user submission to the device can be unsafe. + * (See the INTEL-SA-01084 security advisory) + * + * For the devices that exhibit this behavior, require that the user + * has CAP_SYS_RAWIO capabilities. + */ + if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) + return -EPERM; + rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; @@ -414,6 +426,70 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_page_prot); } +static int idxd_submit_user_descriptor(struct idxd_user_context *ctx, + struct dsa_hw_desc __user *udesc) +{ + struct idxd_wq *wq = ctx->wq; + struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev; + const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40; + void __iomem *portal = idxd_wq_portal_addr(wq); + struct dsa_hw_desc descriptor __aligned(64); + int rc; + + rc = copy_from_user(&descriptor, udesc, sizeof(descriptor)); + if (rc) + return -EFAULT; + + /* + * DSA devices are capable of indirect ("batch") command submission. + * On devices where direct user submissions are not safe, we cannot + * allow this since there is no good way for us to verify these + * indirect commands. + */ + if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH && + !wq->idxd->user_submission_safe) + return -EINVAL; + /* + * As per the programming specification, the completion address must be + * aligned to 32 or 64 bytes. If this is violated the hardware + * engine can get very confused (security issue). + */ + if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align)) + return -EINVAL; + + if (wq_dedicated(wq)) + iosubmit_cmds512(portal, &descriptor, 1); + else { + descriptor.priv = 0; + descriptor.pasid = ctx->pasid; + rc = idxd_enqcmds(wq, portal, &descriptor); + if (rc < 0) + return rc; + } + + return 0; +} + +static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len, + loff_t *unused) +{ + struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf; + struct idxd_user_context *ctx = filp->private_data; + ssize_t written = 0; + int i; + + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { + int rc = idxd_submit_user_descriptor(ctx, udesc + i); + + if (rc) + return written ? 
written : rc; + + written += sizeof(struct dsa_hw_desc); + } + + return written; +} + static __poll_t idxd_cdev_poll(struct file *filp, struct poll_table_struct *wait) { @@ -436,6 +512,7 @@ static const struct file_operations idxd_cdev_fops = { .open = idxd_cdev_open, .release = idxd_cdev_release, .mmap = idxd_cdev_mmap, + .write = idxd_cdev_write, .poll = idxd_cdev_poll, }; diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index df91472f0f..eb73cabb4a 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -288,6 +288,7 @@ struct idxd_driver_data { int evl_cr_off; int cr_status_off; int cr_result_off; + bool user_submission_safe; load_device_defaults_fn_t load_device_defaults; }; @@ -374,6 +375,8 @@ struct idxd_device { struct dentry *dbgfs_dir; struct dentry *dbgfs_evl_file; + + bool user_submission_safe; }; static inline unsigned int evl_ent_size(struct idxd_device *idxd) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 264c4e47d7..a7295943fa 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -47,6 +47,7 @@ static struct idxd_driver_data idxd_driver_data[] = { .align = 32, .dev_type = &dsa_device_type, .evl_cr_off = offsetof(struct dsa_evl_entry, cr), + .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ .cr_status_off = offsetof(struct dsa_completion_record, status), .cr_result_off = offsetof(struct dsa_completion_record, result), }, @@ -57,6 +58,7 @@ static struct idxd_driver_data idxd_driver_data[] = { .align = 64, .dev_type = &iax_device_type, .evl_cr_off = offsetof(struct iax_evl_entry, cr), + .user_submission_safe = false, /* See INTEL-SA-01084 security advisory */ .cr_status_off = offsetof(struct iax_completion_record, status), .cr_result_off = offsetof(struct iax_completion_record, error_code), .load_device_defaults = idxd_load_iaa_device_defaults, @@ -774,6 +776,8 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", idxd->hw.version); + idxd->user_submission_safe = data->user_submission_safe; + return 0; err_dev_register: diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h index 315c004f58..e16dbf9ab3 100644 --- a/drivers/dma/idxd/registers.h +++ b/drivers/dma/idxd/registers.h @@ -6,9 +6,6 @@ #include <uapi/linux/idxd.h> /* PCI Config */ -#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 -#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe - #define DEVICE_VERSION_1 0x100 #define DEVICE_VERSION_2 0x200 diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index 523ae0dff7..3a5ce477a8 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1197,12 +1197,35 @@ static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attrib static struct device_attribute dev_attr_wq_enqcmds_retries = __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store); +static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *opcap_bmap) +{ + ssize_t pos; + int i; + + pos = 0; + for (i = IDXD_MAX_OPCAP_BITS/64 - 1; i >= 0; i--) { + unsigned long val = opcap_bmap[i]; + + /* On systems where direct user submissions are not safe, we need to clear out + * the BATCH capability from the capability mask in sysfs since we cannot support + * that command on such systems. 
+ */ + if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe) + clear_bit(DSA_OPCODE_BATCH % 64, &val); + + pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val); + pos += sysfs_emit_at(buf, pos, "%c", i == 0 ? '\n' : ','); + } + + return pos; +} + static ssize_t wq_op_config_show(struct device *dev, struct device_attribute *attr, char *buf) { struct idxd_wq *wq = confdev_to_wq(dev); - return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, wq->opcap_bmap); + return op_cap_show_common(dev, buf, wq->opcap_bmap); } static int idxd_verify_supported_opcap(struct idxd_device *idxd, unsigned long *opmask) @@ -1455,7 +1478,7 @@ static ssize_t op_cap_show(struct device *dev, { struct idxd_device *idxd = confdev_to_idxd(dev); - return sysfs_emit(buf, "%*pb\n", IDXD_MAX_OPCAP_BITS, idxd->opcap_bmap); + return op_cap_show_common(dev, buf, idxd->opcap_bmap); } static DEVICE_ATTR_RO(op_cap); diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c index 62caf454b5..a840c6922e 100644 --- a/drivers/edac/versal_edac.c +++ b/drivers/edac/versal_edac.c @@ -423,7 +423,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat) convert_to_physical(priv, pinf), pinf.burstpos); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, - priv->ce_cnt, 0, 0, 0, 0, 0, -1, + 1, 0, 0, 0, 0, 0, -1, priv->message, ""); } @@ -436,7 +436,7 @@ static void handle_error(struct mem_ctl_info *mci, struct ecc_status *stat) convert_to_physical(priv, pinf), pinf.burstpos); edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, - priv->ue_cnt, 0, 0, 0, 0, 0, -1, + 1, 0, 0, 0, 0, 0, -1, priv->message, ""); } diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index b0d671db17..ea31ac7ac1 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length) if (atomic_read(&buffer->size) == 0) return -ENODEV; - /* FIXME: Check length <= user_length. */ + length = buffer->head->length; + + if (length > user_length) + return 0; end = buffer->data + buffer->capacity; - length = buffer->head->length; if (&buffer->head->data[length] < end) { if (copy_to_user(data, buffer->head->data, length)) diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 7bc71f4be6..b9ae0340b8 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -1556,6 +1556,8 @@ static int handle_at_packet(struct context *context, #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff) #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff) +static u32 get_cycle_time(struct fw_ohci *ohci); + static void handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr) { @@ -1580,6 +1582,8 @@ static void handle_local_rom(struct fw_ohci *ohci, (void *) ohci->config_rom + i, length); } + // Timestamping on behalf of the hardware. + response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); fw_core_handle_response(&ohci->card, &response); } @@ -1628,6 +1632,8 @@ static void handle_local_lock(struct fw_ohci *ohci, fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); out: + // Timestamping on behalf of the hardware. 
+ response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); fw_core_handle_response(&ohci->card, &response); } @@ -1670,8 +1676,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet) } } -static u32 get_cycle_time(struct fw_ohci *ohci); - static void at_context_transmit(struct context *ctx, struct fw_packet *packet) { unsigned long flags; @@ -2060,6 +2064,8 @@ static void bus_reset_work(struct work_struct *work) ohci->generation = generation; reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); + if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) + reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); if (ohci->quirks & QUIRK_RESET_PACKET) ohci->request_generation = generation; @@ -2125,12 +2131,14 @@ static irqreturn_t irq_handler(int irq, void *data) return IRQ_NONE; /* - * busReset and postedWriteErr must not be cleared yet + * busReset and postedWriteErr events must not be cleared yet * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) */ reg_write(ohci, OHCI1394_IntEventClear, event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); log_irqs(ohci, event); + if (event & OHCI1394_busReset) + reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset); if (event & OHCI1394_selfIDComplete) queue_work(selfid_workqueue, &ohci->bus_reset_work); diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c index 5b439d0407..50f6503fe4 100644 --- a/drivers/firmware/efi/unaccepted_memory.c +++ b/drivers/firmware/efi/unaccepted_memory.c @@ -4,6 +4,7 @@ #include <linux/memblock.h> #include <linux/spinlock.h> #include <linux/crash_dump.h> +#include <linux/nmi.h> #include <asm/unaccepted_memory.h> /* Protects unaccepted memory bitmap and accepting_list */ @@ -149,6 +150,9 @@ retry: } list_del(&range.list); + + touch_softlockup_watchdog(); + spin_unlock_irqrestore(&unaccepted_memory_lock, flags); } diff --git a/drivers/firmware/microchip/mpfs-auto-update.c b/drivers/firmware/microchip/mpfs-auto-update.c index fbeeaee4ac..23134ffc4d 100644 --- a/drivers/firmware/microchip/mpfs-auto-update.c +++ b/drivers/firmware/microchip/mpfs-auto-update.c @@ -206,10 +206,12 @@ static int mpfs_auto_update_verify_image(struct fw_upload *fw_uploader) if (ret | response->resp_status) { dev_warn(priv->dev, "Verification of Upgrade Image failed!\n"); ret = ret ? 
ret : -EBADMSG; + goto free_message; } dev_info(priv->dev, "Verification of Upgrade Image passed!\n"); +free_message: devm_kfree(priv->dev, message); free_response: devm_kfree(priv->dev, response); diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 1ee62cd585..25db014494 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) case 0x5e: return GPIOPANELCTL; default: - return -EOPNOTSUPP; + return -ENOTSUPP; } } diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index 5ef8af8249..c097e310c9 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c @@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = { { .compatible = "nxp,lpc3220-gpio", }, { }, }; +MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match); static struct platform_driver lpc32xx_gpio_driver = { .driver = { diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c index c18b6b4738..94ca9d03c0 100644 --- a/drivers/gpio/gpio-wcove.c +++ b/drivers/gpio/gpio-wcove.c @@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type) unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE; if (gpio >= WCOVE_GPIO_NUM) - return -EOPNOTSUPP; + return -ENOTSUPP; return reg + gpio; } diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 1438fdca0b..5cca9e8803 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -1199,6 +1199,8 @@ static int edge_detector_update(struct line *line, struct gpio_v2_line_config *lc, unsigned int line_idx, u64 edflags) { + u64 eflags; + int ret; u64 active_edflags = READ_ONCE(line->edflags); unsigned int debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx); @@ -1210,6 +1212,18 @@ static int edge_detector_update(struct line *line, /* sw debounced and still will be...*/ if (debounce_period_us && READ_ONCE(line->sw_debounced)) { line_set_debounce_period(line, debounce_period_us); + /* + * ensure event fifo is initialised if edge detection + * is now enabled. + */ + eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS; + if (eflags && !kfifo_initialized(&line->req->events)) { + ret = kfifo_alloc(&line->req->events, + line->req->event_buffer_size, + GFP_KERNEL); + if (ret) + return ret; + } return 0; } @@ -2800,11 +2814,11 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file) struct gpio_chardev_data *cdev = file->private_data; struct gpio_device *gdev = cdev->gdev; - bitmap_free(cdev->watched_lines); blocking_notifier_chain_unregister(&gdev->device_notifier, &cdev->device_unregistered_nb); blocking_notifier_chain_unregister(&gdev->line_state_notifier, &cdev->lineinfo_changed_nb); + bitmap_free(cdev->watched_lines); gpio_device_put(gdev); kfree(cdev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d0afb9ba37..14d8786755 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4120,18 +4120,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ip_blocks[i].status.hw = true; } } + } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) { + r = psp_gpu_reset(adev); } else { - tmp = amdgpu_reset_method; - /* It should do a default reset when loading or reloading the driver, - * regardless of the module parameter reset_method. 
- */ - amdgpu_reset_method = AMD_RESET_METHOD_NONE; - r = amdgpu_asic_reset(adev); - amdgpu_reset_method = tmp; - if (r) { - dev_err(adev->dev, "asic reset on init failed\n"); - goto failed; - } + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. + */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; + r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; + } + + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 4f9900779e..ff28265838 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1867,6 +1867,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 71a5cf37b4..0b8c6581b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -300,12 +300,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) dma_fence_set_error(finished, -ECANCELED); if (finished->error < 0) { - DRM_INFO("Skip scheduling IBs!\n"); + dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)", + ring->name); } else { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, &fence); if (r) - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, + "Error scheduling IBs (%d) in ring(%s)", r, + ring->name); } job->job_run_counter++; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 866bfde1ca..9e5526046a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1244,14 +1244,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, * amdgpu_bo_move_notify - notification about a memory move * @bo: pointer to a buffer object * @evict: if this move is evicting the buffer from the graphics address space + * @new_mem: new resource for backing the BO * * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs * bookkeeping. * TTM driver callback which is called when ttm moves a buffer. */ -void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) +void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, + struct ttm_resource *new_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); + struct ttm_resource *old_mem = bo->resource; struct amdgpu_bo *abo; if (!amdgpu_bo_is_amdgpu_bo(bo)) @@ -1263,12 +1267,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) amdgpu_bo_kunmap(abo); if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && - bo->resource->mem_type != TTM_PL_SYSTEM) + old_mem && old_mem->mem_type != TTM_PL_SYSTEM) dma_buf_move_notify(abo->tbo.base.dma_buf); - /* remember the eviction */ - if (evict) - atomic64_inc(&adev->num_evictions); + /* move_notify is called before move happens */ + trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1, + old_mem ? 
old_mem->mem_type : -1); } void amdgpu_bo_get_memory(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index fa03d9e487..bc42ccbde6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -328,7 +328,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer, size_t buffer_size, uint32_t *metadata_size, uint64_t *flags); -void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict); +void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, + bool evict, + struct ttm_resource *new_mem); void amdgpu_bo_release_notify(struct ttm_buffer_object *bo); vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo); void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 851509c6e9..43ab165df4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -424,7 +424,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, return false; if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || - res->mem_type == AMDGPU_PL_PREEMPT) + res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL) return true; if (res->mem_type != TTM_PL_VRAM) @@ -432,7 +432,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { - if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size) return false; amdgpu_res_next(&cursor, cursor.size); } @@ -486,14 +486,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL)) { + amdgpu_bo_move_notify(bo, evict, new_mem); ttm_bo_move_null(bo, new_mem); - goto out; + return 0; } if (old_mem->mem_type == TTM_PL_SYSTEM && (new_mem->mem_type == TTM_PL_TT || new_mem->mem_type == AMDGPU_PL_PREEMPT)) { + amdgpu_bo_move_notify(bo, evict, new_mem); ttm_bo_move_null(bo, new_mem); - goto out; + return 0; } if ((old_mem->mem_type == TTM_PL_TT || old_mem->mem_type == AMDGPU_PL_PREEMPT) && @@ -503,9 +505,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); + amdgpu_bo_move_notify(bo, evict, new_mem); ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, new_mem); - goto out; + return 0; } if (old_mem->mem_type == AMDGPU_PL_GDS || @@ -517,8 +520,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_OA || new_mem->mem_type == AMDGPU_PL_DOORBELL) { /* Nothing to save here */ + amdgpu_bo_move_notify(bo, evict, new_mem); ttm_bo_move_null(bo, new_mem); - goto out; + return 0; } if (bo->type == ttm_bo_type_device && @@ -530,22 +534,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; } - if (adev->mman.buffer_funcs_enabled) { - if (((old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) || - (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM))) { - hop->fpfn = 0; - hop->lpfn = 0; - hop->mem_type = TTM_PL_TT; - hop->flags = TTM_PL_FLAG_TEMPORARY; - return -EMULTIHOP; - } + if (adev->mman.buffer_funcs_enabled && + 
((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM))) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = TTM_PL_FLAG_TEMPORARY; + return -EMULTIHOP; + } + amdgpu_bo_move_notify(bo, evict, new_mem); + if (adev->mman.buffer_funcs_enabled) r = amdgpu_move_blit(bo, evict, new_mem, old_mem); - } else { + else r = -ENODEV; - } if (r) { /* Check that all memory is CPU accessible */ @@ -560,11 +565,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type); -out: - /* update statistics */ + /* update statistics after the move */ + if (evict) + atomic64_inc(&adev->num_evictions); atomic64_add(bo->base.size, &adev->num_bytes_moved); - amdgpu_bo_move_notify(bo, evict); return 0; } @@ -1566,7 +1570,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, static void amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) { - amdgpu_bo_move_notify(bo, false); + amdgpu_bo_move_notify(bo, false, NULL); } static struct ttm_device_funcs amdgpu_bo_driver = { diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index d6f808acfb..fbb43ae762 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } +static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) +{ + return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); +} + static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, uint32_t inst_idx, struct amdgpu_ring *ring) { @@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG: ip_blk = AMDGPU_XCP_VCN; - if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + if (aqua_vanjaram_xcp_vcn_shared(adev)) inst_mask = 1 << (inst_idx * 2); break; default: @@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update( aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - /* VCN is shared by two partitions under CPX MODE */ + /* VCN may be shared by two partitions under CPX MODE in certain + * configs. 
+ */ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + aqua_vanjaram_xcp_vcn_shared(adev)) aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 1caed0163b..328682cbb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1601,19 +1601,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL); - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, - DRAM_ECC_INT_ENABLE, 0); - WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); - break; - /* sdma ecc interrupt is enabled by default - * driver doesn't need to do anything to - * enable the interrupt */ - case AMDGPU_IRQ_STATE_ENABLE: - default: - break; - } + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 08da993df1..88ad54f88d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1138,7 +1138,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, goto err_unlock; } offset = dev->adev->rmmio_remap.bus_addr; - if (!offset) { + if (!offset || (PAGE_SIZE > 4096)) { err = -ENOMEM; goto err_unlock; } @@ -1522,7 +1522,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, /* Find a KFD GPU device that supports the get_dmabuf_info query */ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++) - if (dev) + if (dev && !kfd_devcgroup_check_permission(dev)) break; if (!dev) return -EINVAL; @@ -1544,7 +1544,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, if (xcp_id >= 0) args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; else - args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id; + args->gpu_id = dev->id; args->flags = flags; /* Copy metadata buffer to user mode */ @@ -2306,7 +2306,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd, return -EINVAL; } offset = pdd->dev->adev->rmmio_remap.bus_addr; - if (!offset) { + if (!offset || (PAGE_SIZE > 4096)) { pr_err("amdgpu_amdkfd_get_mmio_remap_phys_addr failed\n"); return -ENOMEM; } @@ -3347,6 +3347,9 @@ static int kfd_mmio_mmap(struct kfd_node *dev, struct kfd_process *process, if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; + if (PAGE_SIZE > 4096) + return -EINVAL; + address = dev->adev->rmmio_remap.bus_addr; vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0a9cf9dfc2..fcf6558d01 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -944,7 +944,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { struct kfd_node *node; int i; - int count; if (!kfd->init_complete) return; @@ -952,12 +951,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) /* for runtime suspend, skip locking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = ++kfd_locked; - mutex_unlock(&kfd_processes_mutex); - /* For first KFD device suspend all the 
KFD processes */ - if (count == 1) + if (++kfd_locked == 1) kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); } for (i = 0; i < kfd->num_nodes; i++) { @@ -968,7 +965,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count, i; + int ret, i; if (!kfd->init_complete) return 0; @@ -982,12 +979,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) /* for runtime resume, skip unlocking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = --kfd_locked; - mutex_unlock(&kfd_processes_mutex); - - WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); - if (count == 0) + if (--kfd_locked == 0) ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); } return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index a7697ec818..f85ca6cb90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -336,7 +336,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) { kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_DEBUG_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 2a65792fd1..3ca9c160da 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -325,7 +325,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, /* CP */ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); - else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 27cdaea405..8a6729939a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -385,7 +385,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, sq_int_data, 24); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) { kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_DEBUG_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 718e533ab4..9044214dfd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -147,6 +147,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); +#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); + 
/* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -3025,6 +3028,10 @@ static int dm_resume(void *handle) /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -4776,6 +4783,9 @@ static int dm_init_microcode(struct amdgpu_device *adev) case IP_VERSION(3, 5, 0): fw_name_dmub = FIRMWARE_DCN_35_DMUB; break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; @@ -5873,6 +5883,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : &aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -8627,10 +8640,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; +notify: if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; -notify: aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 85fc618130..5d499b99ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = 
&aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } @@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx->stream && - pipe_ctx->stream->link == aconnector->dc_link) + pipe_ctx->stream->link == aconnector->dc_link && + pipe_ctx->stream->sink && + pipe_ctx->stream->sink == aconnector->dc_sink) break; } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 05f392501c..ab31643b10 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2948,6 +2948,7 @@ static enum bp_result construct_integrated_info( result = get_integrated_info_v2_1(bp, 
info); break; case 2: + case 3: result = get_integrated_info_v2_2(bp, info); break; default: diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 5b7ad38f85..65e45a0b4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size( x), 25)); + // If y rounds up to integer, carry it over to x. + if (y >> 25) { + x += 1; + y = 0; + } + switch (stream_encoder_inst) { case 0: REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0df6c55eb3..6b10124e21 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -1050,7 +1050,12 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - dsc_cfg->num_slices_v = pic_height/slice_height; + if (slice_height > 0) { + dsc_cfg->num_slices_v = pic_height / slice_height; + } else { + is_dsc_possible = false; + goto done; + } if (target_bandwidth_kbps > 0) { is_dsc_possible = decide_dsc_target_bpp_x16( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 1e67374b89..46cbb5a6c8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -638,22 +638,43 @@ void dcn35_power_down_on_boot(struct dc *dc) bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable) { - struct dc_link *edp_links[MAX_NUM_EDP]; - int i, edp_num; if (dc->debug.dmcub_emulation) return true; if (enable) { - dc_get_edp_links(dc, edp_links, &edp_num); - if (edp_num == 0 || edp_num > 1) - return false; + uint32_t num_active_edp = 0; + int i; for (i = 0; i < dc->current_state->stream_count; ++i) { struct dc_stream_state *stream = dc->current_state->streams[i]; + struct dc_link *link = stream->link; + bool is_psr = link && !link->panel_config.psr.disable_psr && + (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1); + bool is_replay = link && link->replay_settings.replay_feature_enabled; + + /* Ignore streams that disabled. */ + if (stream->dpms_off) + continue; + + /* Active external displays block idle optimizations. */ + if (!dc_is_embedded_signal(stream->signal)) + return false; + + /* If not PWRSEQ0 can't enter idle optimizations */ + if (link && link->link_index != 0) + return false; - if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal)) + /* Check for panel power features required for idle optimizations. */ + if (!is_psr && !is_replay) return false; + + num_active_edp += 1; } + + /* If more than one active eDP then disallow. 
*/ + if (num_active_edp > 1) + return false; } // TODO: review other cases when idle optimization is allowed diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 0ad947df77..ba1597b01a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -734,7 +734,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_EXIT; + smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1954,10 +1954,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return 0; } +static int smu_reset_mp1_state(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if ((!adev->in_runpm) && (!adev->in_suspend) && + (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) + ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); + + return ret; +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1975,7 +1990,15 @@ static int smu_hw_fini(void *handle) adev->pm.dpm_enabled = false; - return smu_smc_hw_cleanup(smu); + ret = smu_smc_hw_cleanup(smu); + if (ret) + return ret; + + ret = smu_reset_mp1_state(smu); + if (ret) + return ret; + + return 0; } static void smu_late_fini(void *handle) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 66e84defd0..2aa4fea873 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -424,6 +424,7 @@ enum smu_reset_mode { enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, + SMU_BACO_STATE_NONE, }; struct smu_baco_context { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9c03296f92..67117ced7c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_set_mp1_state(smu, mp1_state); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_PrepareMp1ForUnload, + 0x55, NULL); + + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) + ret = smu_v13_0_disable_pmfw_state(smu); + break; default: /* Ignore others */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index 949131bd1e..4abfcd3274 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,7 +226,7 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) { + if (!en && adev->in_s4) { /* Adds a GFX reset as workaround just before sending the * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering * an invalid state. 
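Side note on the dcn35_apply_idle_power_optimizations() hunk above: the single-eDP check is replaced by a per-stream walk whose combined conditions are easy to lose in the diff. The following is a minimal standalone sketch of that gating logic, not the driver's code; struct stream_info, its fields, and allow_idle_optimizations() are illustrative stand-ins for the dc_stream_state/dc_link fields the patch consults (dpms_off, signal type, link_index, PSR/Replay settings).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Simplified stand-in for the stream/link state the driver checks;
 * these names are hypothetical, not the driver's real types.
 */
struct stream_info {
	bool dpms_off;        /* stream is currently disabled */
	bool embedded_signal; /* eDP/embedded panel vs. external display */
	int  link_index;      /* 0 == PWRSEQ0 */
	bool psr_enabled;     /* PSR1 or PSR-SU configured and not disabled */
	bool replay_enabled;  /* Panel Replay enabled */
};

/* Returns true when idle power optimizations may be enabled. */
static bool allow_idle_optimizations(const struct stream_info *streams, size_t count)
{
	size_t num_active_edp = 0;
	size_t i;

	for (i = 0; i < count; i++) {
		const struct stream_info *s = &streams[i];

		if (s->dpms_off)
			continue;          /* ignore disabled streams */
		if (!s->embedded_signal)
			return false;      /* an active external display blocks idle opts */
		if (s->link_index != 0)
			return false;      /* only the PWRSEQ0 link may enter idle opts */
		if (!s->psr_enabled && !s->replay_enabled)
			return false;      /* a panel power feature (PSR or Replay) is required */

		num_active_edp++;
	}

	return num_active_edp <= 1;        /* more than one active eDP disallows it */
}

int main(void)
{
	struct stream_info edp = { false, true, 0, true, false };

	printf("%d\n", allow_idle_optimizations(&edp, 1)); /* prints 1 */
	return 0;
}
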
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index b0516505f7..4d2df7f64d 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -2940,7 +2940,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, dev->mode_config.max_width, dev->mode_config.max_height); else - drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe", + drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe\n", connector->base.id, connector->name); } diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 07e0c73204..ed81e1466c 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -76,19 +76,6 @@ struct intel_audio_funcs { struct intel_crtc_state *crtc_state); }; -/* DP N/M table */ -#define LC_810M 810000 -#define LC_540M 540000 -#define LC_270M 270000 -#define LC_162M 162000 - -struct dp_aud_n_m { - int sample_rate; - int clock; - u16 m; - u16 n; -}; - struct hdmi_aud_ncts { int sample_rate; int clock; @@ -96,60 +83,6 @@ struct hdmi_aud_ncts { int cts; }; -/* Values according to DP 1.4 Table 2-104 */ -static const struct dp_aud_n_m dp_aud_n_m[] = { - { 32000, LC_162M, 1024, 10125 }, - { 44100, LC_162M, 784, 5625 }, - { 48000, LC_162M, 512, 3375 }, - { 64000, LC_162M, 2048, 10125 }, - { 88200, LC_162M, 1568, 5625 }, - { 96000, LC_162M, 1024, 3375 }, - { 128000, LC_162M, 4096, 10125 }, - { 176400, LC_162M, 3136, 5625 }, - { 192000, LC_162M, 2048, 3375 }, - { 32000, LC_270M, 1024, 16875 }, - { 44100, LC_270M, 784, 9375 }, - { 48000, LC_270M, 512, 5625 }, - { 64000, LC_270M, 2048, 16875 }, - { 88200, LC_270M, 1568, 9375 }, - { 96000, LC_270M, 1024, 5625 }, - { 128000, LC_270M, 4096, 16875 }, - { 176400, LC_270M, 3136, 9375 }, - { 192000, LC_270M, 2048, 5625 }, - { 32000, LC_540M, 1024, 33750 }, - { 44100, LC_540M, 784, 18750 }, - { 48000, LC_540M, 512, 11250 }, - { 64000, LC_540M, 2048, 33750 }, - { 88200, LC_540M, 1568, 18750 }, - { 96000, LC_540M, 1024, 11250 }, - { 128000, LC_540M, 4096, 33750 }, - { 176400, LC_540M, 3136, 18750 }, - { 192000, LC_540M, 2048, 11250 }, - { 32000, LC_810M, 1024, 50625 }, - { 44100, LC_810M, 784, 28125 }, - { 48000, LC_810M, 512, 16875 }, - { 64000, LC_810M, 2048, 50625 }, - { 88200, LC_810M, 1568, 28125 }, - { 96000, LC_810M, 1024, 16875 }, - { 128000, LC_810M, 4096, 50625 }, - { 176400, LC_810M, 3136, 28125 }, - { 192000, LC_810M, 2048, 16875 }, -}; - -static const struct dp_aud_n_m * -audio_config_dp_get_n_m(const struct intel_crtc_state *crtc_state, int rate) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(dp_aud_n_m); i++) { - if (rate == dp_aud_n_m[i].sample_rate && - crtc_state->port_clock == dp_aud_n_m[i].clock) - return &dp_aud_n_m[i]; - } - - return NULL; -} - static const struct { int clock; u32 config; @@ -387,47 +320,17 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = i915->display.audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - enum port port = encoder->port; - const struct dp_aud_n_m *nm; - int rate; - u32 tmp; - - rate = acomp ? 
acomp->aud_sample_rate[port] : 0; - nm = audio_config_dp_get_n_m(crtc_state, rate); - if (nm) - drm_dbg_kms(&i915->drm, "using Maud %u, Naud %u\n", nm->m, - nm->n); - else - drm_dbg_kms(&i915->drm, "using automatic Maud, Naud\n"); - - tmp = intel_de_read(i915, HSW_AUD_CFG(cpu_transcoder)); - tmp &= ~AUD_CONFIG_N_VALUE_INDEX; - tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; - tmp &= ~AUD_CONFIG_N_PROG_ENABLE; - tmp |= AUD_CONFIG_N_VALUE_INDEX; - if (nm) { - tmp &= ~AUD_CONFIG_N_MASK; - tmp |= AUD_CONFIG_N(nm->n); - tmp |= AUD_CONFIG_N_PROG_ENABLE; - } - - intel_de_write(i915, HSW_AUD_CFG(cpu_transcoder), tmp); - - tmp = intel_de_read(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder)); - tmp &= ~AUD_CONFIG_M_MASK; - tmp &= ~AUD_M_CTS_M_VALUE_INDEX; - tmp &= ~AUD_M_CTS_M_PROG_ENABLE; - - if (nm) { - tmp |= nm->m; - tmp |= AUD_M_CTS_M_VALUE_INDEX; - tmp |= AUD_M_CTS_M_PROG_ENABLE; - } + /* Enable time stamps. Let HW calculate Maud/Naud values */ + intel_de_rmw(i915, HSW_AUD_CFG(cpu_transcoder), + AUD_CONFIG_N_VALUE_INDEX | + AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK | + AUD_CONFIG_UPPER_N_MASK | + AUD_CONFIG_LOWER_N_MASK | + AUD_CONFIG_N_PROG_ENABLE, + AUD_CONFIG_N_VALUE_INDEX); - intel_de_write(i915, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp); } static void diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index eb9835d48d..c36633d7dd 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1042,22 +1042,11 @@ parse_lfp_backlight(struct drm_i915_private *i915, panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI; panel->vbt.backlight.controller = 0; if (i915->display.vbt.version >= 191) { - size_t exp_size; + const struct lfp_backlight_control_method *method; - if (i915->display.vbt.version >= 236) - exp_size = sizeof(struct bdb_lfp_backlight_data); - else if (i915->display.vbt.version >= 234) - exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_234; - else - exp_size = EXP_BDB_LFP_BL_DATA_SIZE_REV_191; - - if (get_blocksize(backlight_data) >= exp_size) { - const struct lfp_backlight_control_method *method; - - method = &backlight_data->backlight_control[panel_type]; - panel->vbt.backlight.type = method->type; - panel->vbt.backlight.controller = method->controller; - } + method = &backlight_data->backlight_control[panel_type]; + panel->vbt.backlight.type = method->type; + panel->vbt.backlight.controller = method->controller; } panel->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index a9f44abfc9..b50cd0dcab 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -897,11 +897,6 @@ struct lfp_brightness_level { u16 reserved; } __packed; -#define EXP_BDB_LFP_BL_DATA_SIZE_REV_191 \ - offsetof(struct bdb_lfp_backlight_data, brightness_level) -#define EXP_BDB_LFP_BL_DATA_SIZE_REV_234 \ - offsetof(struct bdb_lfp_backlight_data, brightness_precision_bits) - struct bdb_lfp_backlight_data { u8 entry_size; struct lfp_backlight_data_entry data[16]; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c index 044219c596..99b71bb7da 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c @@ -8,14 +8,14 @@ #include "intel_gt_ccs_mode.h" #include "intel_gt_regs.h" -void intel_gt_apply_ccs_mode(struct intel_gt *gt) +unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt) { int 
cslice; u32 mode = 0; int first_ccs = __ffs(CCS_MASK(gt)); if (!IS_DG2(gt->i915)) - return; + return 0; /* Build the value for the fixed CCS load balancing */ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) { @@ -35,5 +35,5 @@ void intel_gt_apply_ccs_mode(struct intel_gt *gt) XEHP_CCS_MODE_CSLICE_MASK); } - intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode); + return mode; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h index 9e5549caeb..55547f2ff4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h @@ -8,6 +8,6 @@ struct intel_gt; -void intel_gt_apply_ccs_mode(struct intel_gt *gt); +unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt); #endif /* __INTEL_GT_CCS_MODE_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 59816dd6fb..bc1b598d9a 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -2855,6 +2855,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct intel_gt *gt = engine->gt; + u32 mode; if (!IS_DG2(gt->i915)) return; @@ -2871,7 +2872,8 @@ static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_li * After having disabled automatic load balancing we need to * assign all slices to a single CCS. We will call it CCS mode 1 */ - intel_gt_apply_ccs_mode(gt); + mode = intel_gt_apply_ccs_mode(gt); + wa_masked_en(wal, XEHP_CCS_MODE, mode); } /* diff --git a/drivers/gpu/drm/imagination/pvr_fw_mips.h b/drivers/gpu/drm/imagination/pvr_fw_mips.h index 408dbe63a9..a0c5c41c8a 100644 --- a/drivers/gpu/drm/imagination/pvr_fw_mips.h +++ b/drivers/gpu/drm/imagination/pvr_fw_mips.h @@ -7,13 +7,14 @@ #include "pvr_rogue_mips.h" #include <asm/page.h> +#include <linux/math.h> #include <linux/types.h> /* Forward declaration from pvr_gem.h. 
*/ struct pvr_gem_object; -#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \ - >> PAGE_SHIFT) +#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE) + /** * struct pvr_fw_mips_data - MIPS-specific data */ diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 5a9538bc0e..5565f77775 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -106,6 +106,8 @@ #define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 */ #define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 */ #define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */ +#define PHY_CNTL1_INIT 0x03900000 +#define PHY_INVERT BIT(17) #define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */ #define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */ #define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */ @@ -130,6 +132,8 @@ struct meson_dw_hdmi_data { unsigned int addr); void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi, unsigned int addr, unsigned int data); + u32 cntl0_init; + u32 cntl1_init; }; struct meson_dw_hdmi { @@ -384,26 +388,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, dw_hdmi_bus_fmt_is_420(hdmi)) mode_is_420 = true; - /* Enable clocks */ - regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); - - /* Bring HDMITX MEM output of power down */ - regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); - - /* Bring out of reset */ - dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0); - - /* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */ - dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, - 0x3, 0x3); - - /* Enable cec_clk and hdcp22_tmdsclk_en */ - dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL, - 0x3 << 4, 0x3 << 4); - - /* Enable normal output to PHY */ - dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); - /* TMDS pattern setup */ if (mode->clock > 340000 && !mode_is_420) { dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, @@ -425,20 +409,6 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data, /* Setup PHY parameters */ meson_hdmi_phy_setup_mode(dw_hdmi, mode, mode_is_420); - /* Setup PHY */ - regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, - 0xffff << 16, 0x0390 << 16); - - /* BIT_INVERT */ - if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || - dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || - dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi")) - regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, - BIT(17), 0); - else - regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, - BIT(17), BIT(17)); - /* Disable clock, fifo, fifo_wr */ regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1, 0xf, 0); @@ -492,7 +462,9 @@ static void dw_hdmi_phy_disable(struct dw_hdmi *hdmi, DRM_DEBUG_DRIVER("\n"); - regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0); + /* Fallback to init mode */ + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, dw_hdmi->data->cntl1_init); + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, dw_hdmi->data->cntl0_init); } static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi, @@ -610,11 +582,22 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = { .fast_io = true, }; -static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = { +static const struct meson_dw_hdmi_data meson_dw_hdmi_gxbb_data = { .top_read = dw_hdmi_top_read, .top_write = dw_hdmi_top_write, .dwc_read = dw_hdmi_dwc_read, .dwc_write = dw_hdmi_dwc_write, + .cntl0_init = 0x0, + 
.cntl1_init = PHY_CNTL1_INIT | PHY_INVERT, +}; + +static const struct meson_dw_hdmi_data meson_dw_hdmi_gxl_data = { + .top_read = dw_hdmi_top_read, + .top_write = dw_hdmi_top_write, + .dwc_read = dw_hdmi_dwc_read, + .dwc_write = dw_hdmi_dwc_write, + .cntl0_init = 0x0, + .cntl1_init = PHY_CNTL1_INIT, }; static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { @@ -622,6 +605,8 @@ static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = { .top_write = dw_hdmi_g12a_top_write, .dwc_read = dw_hdmi_g12a_dwc_read, .dwc_write = dw_hdmi_g12a_dwc_write, + .cntl0_init = 0x000b4242, /* Bandgap */ + .cntl1_init = PHY_CNTL1_INIT, }; static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) @@ -656,6 +641,13 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff); + /* Enable normal output to PHY */ + meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12)); + + /* Setup PHY */ + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, meson_dw_hdmi->data->cntl1_init); + regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, meson_dw_hdmi->data->cntl0_init); + /* Enable HDMI-TX Interrupt */ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, HDMITX_TOP_INTR_CORE); @@ -865,11 +857,11 @@ static const struct dev_pm_ops meson_dw_hdmi_pm_ops = { static const struct of_device_id meson_dw_hdmi_of_table[] = { { .compatible = "amlogic,meson-gxbb-dw-hdmi", - .data = &meson_dw_hdmi_gx_data }, + .data = &meson_dw_hdmi_gxbb_data }, { .compatible = "amlogic,meson-gxl-dw-hdmi", - .data = &meson_dw_hdmi_gx_data }, + .data = &meson_dw_hdmi_gxl_data }, { .compatible = "amlogic,meson-gxm-dw-hdmi", - .data = &meson_dw_hdmi_gx_data }, + .data = &meson_dw_hdmi_gxl_data }, { .compatible = "amlogic,meson-g12a-dw-hdmi", .data = &meson_dw_hdmi_g12a_data }, { } diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 6f5d376d8f..a11d16a16c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -15,7 +15,9 @@ struct nvkm_gsp_mem { }; struct nvkm_gsp_radix3 { - struct nvkm_gsp_mem mem[3]; + struct nvkm_gsp_mem lvl0; + struct nvkm_gsp_mem lvl1; + struct sg_table lvl2; }; int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *); diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index 7de7707ec6..3f72bc38bd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -225,12 +225,15 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector, u8 *dpcd = nv_encoder->dp.dpcd; int ret = NOUVEAU_DP_NONE, hpd; - /* If we've already read the DPCD on an eDP device, we don't need to - * reread it as it won't change + /* eDP ports don't support hotplugging - so there's no point in probing eDP ports unless we + * haven't probed them once before. 
*/ - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && - dpcd[DP_DPCD_REV] != 0) - return NOUVEAU_DP_SST; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (connector->status == connector_status_connected) + return NOUVEAU_DP_SST; + else if (connector->status == connector_status_disconnected) + return NOUVEAU_DP_NONE; + } mutex_lock(&nv_encoder->dp.hpd_irq_lock); if (mstm) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index a73a5b5897..49171d312f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) rpc->numEntries = NV_GSP_REG_NUM_ENTRIES; str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]); - strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES]; + strings = (char *)rpc + str_offset; for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) { int name_len = strlen(r535_registry_entries[i].name) + 1; @@ -1620,7 +1620,7 @@ r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp) meta->magic = GSP_FW_WPR_META_MAGIC; meta->revision = GSP_FW_WPR_META_REVISION; - meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr; + meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr; meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size; meta->sysmemAddrOfBootloader = gsp->boot.fw.addr; @@ -1914,8 +1914,9 @@ nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt) static void nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3) { - for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) - nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]); + nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2); + nvkm_gsp_mem_dtor(gsp, &rx3->lvl1); + nvkm_gsp_mem_dtor(gsp, &rx3->lvl0); } /** @@ -1951,36 +1952,60 @@ static int nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size, struct nvkm_gsp_radix3 *rx3) { - u64 addr; + struct sg_dma_page_iter sg_dma_iter; + struct scatterlist *sg; + size_t bufsize; + u64 *pte; + int ret, i, page_idx = 0; - for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) { - u64 *ptes; - size_t bufsize; - int ret, idx; + ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0); + if (ret) + return ret; - bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); - ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]); - if (ret) - return ret; + ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1); + if (ret) + goto lvl1_fail; - ptes = rx3->mem[i].data; - if (i == 2) { - struct scatterlist *sgl; + // Allocate level 2 + bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE); + ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2); + if (ret) + goto lvl2_fail; - for_each_sgtable_dma_sg(sgt, sgl, idx) { - for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++) - *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j); - } - } else { - for (int j = 0; j < size / GSP_PAGE_SIZE; j++) - *ptes++ = addr + GSP_PAGE_SIZE * j; + // Write the bus address of level 1 to level 0 + pte = rx3->lvl0.data; + *pte = rx3->lvl1.addr; + + // Write the bus address of each page in level 2 to level 1 + pte = rx3->lvl1.data; + for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0) + *pte++ = sg_page_iter_dma_address(&sg_dma_iter); + + // Finally, write the bus address of each page in sgt to level 2 + for_each_sgtable_sg(&rx3->lvl2, sg, i) { + void *sgl_end; + + pte = sg_virt(sg); + sgl_end = (void *)pte + sg->length; + + for_each_sgtable_dma_page(sgt, &sg_dma_iter, 
page_idx) { + *pte++ = sg_page_iter_dma_address(&sg_dma_iter); + page_idx++; + + // Go to the next scatterlist for level 2 if we've reached the end + if ((void *)pte >= sgl_end) + break; } + } - size = rx3->mem[i].size; - addr = rx3->mem[i].addr; + if (ret) { +lvl2_fail: + nvkm_gsp_mem_dtor(gsp, &rx3->lvl1); +lvl1_fail: + nvkm_gsp_mem_dtor(gsp, &rx3->lvl0); } - return 0; + return ret; } int @@ -2012,7 +2037,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) sr = gsp->sr.meta.data; sr->magic = GSP_FW_SR_META_MAGIC; sr->revision = GSP_FW_SR_META_REVISION; - sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr; + sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr; sr->sizeOfSuspendResumeData = len; mbox0 = lower_32_bits(gsp->sr.meta.addr); diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 8f37837422..888297d0d3 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -184,7 +184,7 @@ config DRM_PANEL_ILITEK_IL9322 config DRM_PANEL_ILITEK_ILI9341 tristate "Ilitek ILI9341 240x320 QVGA panels" - depends on OF && SPI + depends on SPI select DRM_KMS_HELPER select DRM_GEM_DMA_HELPER depends on BACKLIGHT_CLASS_DEVICE diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 3574681891..b933380b7e 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -22,8 +22,9 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/of.h> +#include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> @@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel) ili9341_dpi_init(ili); - return ret; + return 0; } static int ili9341_dpi_enable(struct drm_panel *panel) @@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc, * Every new incarnation of this display must have a unique * data entry for the system in this driver. 
*/ - ili->conf = of_device_get_match_data(dev); + ili->conf = device_get_match_data(dev); if (!ili->conf) { dev_err(dev, "missing device configuration\n"); return -ENODEV; @@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi) reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset)) - dev_err(dev, "Failed to get gpio 'reset'\n"); + return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n"); dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW); if (IS_ERR(dc)) - dev_err(dev, "Failed to get gpio 'dc'\n"); + return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n"); if (!strcmp(id->name, "sf-tc240t-9370-t")) return ili9341_dpi_probe(spi, dc, reset); else if (!strcmp(id->name, "yx240qv29")) return ili9341_dbi_probe(spi, dc, reset); - return -1; + return -ENODEV; } static void ili9341_remove(struct spi_device *spi) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 9febc8b73f..368d26da0d 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr, signed long timeout) { struct qxl_device *qdev; - struct qxl_release *release; - int count = 0, sc = 0; - bool have_drawable_releases; unsigned long cur, end = jiffies + timeout; qdev = container_of(fence->lock, struct qxl_device, release_lock); - release = container_of(fence, struct qxl_release, base); - have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE; - -retry: - sc++; - - if (dma_fence_is_signaled(fence)) - goto signaled; - - qxl_io_notify_oom(qdev); - - for (count = 0; count < 11; count++) { - if (!qxl_queue_garbage_collect(qdev, true)) - break; - - if (dma_fence_is_signaled(fence)) - goto signaled; - } - - if (dma_fence_is_signaled(fence)) - goto signaled; - if (have_drawable_releases || sc < 4) { - if (sc > 2) - /* back off */ - usleep_range(500, 1000); - - if (time_after(jiffies, end)) - return 0; - - if (have_drawable_releases && sc > 300) { - DMA_FENCE_WARN(fence, - "failed to wait on release %llu after spincount %d\n", - fence->context & ~0xf0000000, sc); - goto signaled; - } - goto retry; - } - /* - * yeah, original sync_obj_wait gave up after 3 spins when - * have_drawable_releases is not set. 
- */ + if (!wait_event_timeout(qdev->release_event, + (dma_fence_is_signaled(fence) || + (qxl_io_notify_oom(qdev), 0)), + timeout)) + return 0; -signaled: cur = jiffies; if (time_after(cur, end)) return 0; diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h index 9494722988..b7f22597ee 100644 --- a/drivers/gpu/drm/radeon/pptable.h +++ b/drivers/gpu/drm/radeon/pptable.h @@ -424,7 +424,7 @@ typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{ typedef struct _ATOM_PPLIB_STATE_V2 { //number of valid dpm levels in this state; Driver uses it to calculate the whole - //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR) + //size of the state: struct_size(ATOM_PPLIB_STATE_V2, clockInfoIndex, ucNumDPMLevels) UCHAR ucNumDPMLevels; //a index to the array of nonClockInfos @@ -432,14 +432,14 @@ typedef struct _ATOM_PPLIB_STATE_V2 /** * Driver will read the first ucNumDPMLevels in this array */ - UCHAR clockInfoIndex[1]; + UCHAR clockInfoIndex[] __counted_by(ucNumDPMLevels); } ATOM_PPLIB_STATE_V2; typedef struct _StateArray{ //how many states we have UCHAR ucNumEntries; - ATOM_PPLIB_STATE_V2 states[1]; + ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries); }StateArray; @@ -450,7 +450,7 @@ typedef struct _ClockInfoArray{ //sizeof(ATOM_PPLIB_CLOCK_INFO) UCHAR ucEntrySize; - UCHAR clockInfo[1]; + UCHAR clockInfo[] __counted_by(ucNumEntries); }ClockInfoArray; typedef struct _NonClockInfoArray{ @@ -460,7 +460,7 @@ typedef struct _NonClockInfoArray{ //sizeof(ATOM_PPLIB_NONCLOCK_INFO) UCHAR ucEntrySize; - ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1]; + ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[] __counted_by(ucNumEntries); }NonClockInfoArray; typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 43eaffa7fa..bf9601351f 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -92,7 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) */ if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { page_flags |= TTM_TT_FLAG_DECRYPTED; - drm_info(ddev, "TT memory decryption enabled."); + drm_info_once(ddev, "TT memory decryption enabled."); } bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 4aac88cc5f..ae796e0c64 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, VMW_BO_DOMAIN_VRAM, VMW_BO_DOMAIN_VRAM); buf->places[0].lpfn = PFN_UP(bo->resource->size); + buf->busy_places[0].lpfn = PFN_UP(bo->resource->size); ret = ttm_bo_validate(bo, &buf->placement, &ctx); /* For some reason we didn't end up at the start of vram */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 2a0cda3247..5efc6a766f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, } event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; - event->event.base.length = sizeof(*event); + event->event.base.length = sizeof(event->event); event->event.user_data = user_data; ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index 
5d2a77b52d..d9a81e6a4b 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -84,7 +84,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev) #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE) #define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1) #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S) -#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) +#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \ + IS_PLATFORM(dev_priv, XE_ALDERLAKE_N)) #define IS_XEHPSDV(dev_priv) (dev_priv && 0) #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2) #define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 5592774fc6..81b8362e93 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -120,7 +120,7 @@ #define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234) #define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4) -#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244) +#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0) diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index b38319d280..72f04a656e 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -95,7 +95,6 @@ static void set_offsets(u32 *regs, #define REG16(x) \ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ (((x) >> 2) & 0x7f) -#define END 0 { const u32 base = hwe->mmio_base; @@ -166,7 +165,7 @@ static const u8 gen12_xcs_offsets[] = { REG16(0x274), REG16(0x270), - END + 0 }; static const u8 dg2_xcs_offsets[] = { @@ -200,7 +199,7 @@ static const u8 dg2_xcs_offsets[] = { REG16(0x274), REG16(0x270), - END + 0 }; static const u8 gen12_rcs_offsets[] = { @@ -296,7 +295,7 @@ static const u8 gen12_rcs_offsets[] = { REG(0x084), NOP(1), - END + 0 }; static const u8 xehp_rcs_offsets[] = { @@ -337,7 +336,7 @@ static const u8 xehp_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END + 0 }; static const u8 dg2_rcs_offsets[] = { @@ -380,7 +379,7 @@ static const u8 dg2_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END + 0 }; static const u8 mtl_rcs_offsets[] = { @@ -423,7 +422,7 @@ static const u8 mtl_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END + 0 }; #define XE2_CTX_COMMON \ @@ -469,7 +468,7 @@ static const u8 xe2_rcs_offsets[] = { LRI(1, 0), /* [0x47] */ REG(0x0c8), /* [0x48] R_PWR_CLK_STATE */ - END + 0 }; static const u8 xe2_bcs_offsets[] = { @@ -480,16 +479,15 @@ static const u8 xe2_bcs_offsets[] = { REG16(0x200), /* [0x42] BCS_SWCTRL */ REG16(0x204), /* [0x44] BLIT_CCTL */ - END + 0 }; static const u8 xe2_xcs_offsets[] = { XE2_CTX_COMMON, - END + 0 }; -#undef END #undef REG16 #undef REG #undef LRI @@ -525,9 +523,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class) static void set_context_control(u32 *regs, struct xe_hw_engine *hwe) { - regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) | - _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | - CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; + regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | + CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); /* TODO: Timestamp */ } diff --git a/drivers/gpu/drm/xe/xe_migrate.c 
b/drivers/gpu/drm/xe/xe_migrate.c index 70480c3056..7a6ad3469d 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -216,7 +216,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (vm->flags & XE_VM_FLAG_64K && level == 1) flags = XE_PDE_64K; - entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) * + entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) * XE_PAGE_SIZE, pat_index); xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64, entry | flags); @@ -224,7 +224,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, /* Write PDE's that point to our BO. */ for (i = 0; i < num_entries - num_level; i++) { - entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE, + entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE, pat_index); xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE + @@ -280,7 +280,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, #define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE) #define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64)) drm_suballoc_manager_init(&m->vm_update_sa, - (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) * + (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) * NUM_VMUSA_UNIT_PER_PAGE, 0); m->pt_bo = bo; @@ -479,7 +479,7 @@ static void emit_pte(struct xe_migrate *m, struct xe_vm *vm = m->q->vm; u16 pat_index; u32 ptes; - u64 ofs = at_pt * XE_PAGE_SIZE; + u64 ofs = (u64)at_pt * XE_PAGE_SIZE; u64 cur_ofs; /* Indirect access needs compression enabled uncached PAT index */ diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 84d042796d..3937889fa9 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev, return 0; } -static int host1x_dma_configure(struct device *dev) -{ - return of_dma_configure(dev, dev->of_node, true); -} - static const struct dev_pm_ops host1x_device_pm_ops = { .suspend = pm_generic_suspend, .resume = pm_generic_resume, @@ -369,7 +364,6 @@ struct bus_type host1x_bus_type = { .name = "host1x", .match = host1x_device_match, .uevent = host1x_device_uevent, - .dma_configure = host1x_dma_configure, .pm = &host1x_device_pm_ops, }; @@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x, device->dev.bus = &host1x_bus_type; device->dev.parent = host1x->dev; - of_dma_configure(&device->dev, host1x->dev->of_node, true); - device->dev.dma_parms = &device->dma_parms; dma_set_max_seg_size(&device->dev, UINT_MAX); diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index adbf674355..fb8cd84693 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel) hv_ringbuffer_cleanup(&channel->inbound); if (channel->ringbuffer_page) { - __free_pages(channel->ringbuffer_page, + /* In a CoCo VM leak the memory if it didn't get re-encrypted */ + if (!channel->ringbuffer_gpadlhandle.decrypted) + __free_pages(channel->ringbuffer_page, get_order(channel->ringbuffer_pagecount << PAGE_SHIFT)); channel->ringbuffer_page = NULL; @@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel, (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1); ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo); - if (ret) + if (ret) { + gpadl->decrypted = false; return ret; + } + /* + * Set the "decrypted" flag to true for the set_memory_decrypted() + * 
success case. In the failure case, the encryption state of the + * memory is unknown. Leave "decrypted" as true to ensure the + * memory will be leaked instead of going back on the free list. + */ + gpadl->decrypted = true; ret = set_memory_decrypted((unsigned long)kbuffer, PFN_UP(size)); if (ret) { @@ -527,9 +538,15 @@ cleanup: kfree(msginfo); - if (ret) - set_memory_encrypted((unsigned long)kbuffer, - PFN_UP(size)); + if (ret) { + /* + * If set_memory_encrypted() fails, the decrypted flag is + * left as true so the memory is leaked instead of being + * put back on the free list. + */ + if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size))) + gpadl->decrypted = false; + } return ret; } @@ -850,6 +867,8 @@ post_msg_err: if (ret) pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret); + gpadl->decrypted = ret; + return ret; } EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl); diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c index 3cabeeabb1..f001ae880e 100644 --- a/drivers/hv/connection.c +++ b/drivers/hv/connection.c @@ -237,8 +237,17 @@ int vmbus_connect(void) vmbus_connection.monitor_pages[0], 1); ret |= set_memory_decrypted((unsigned long) vmbus_connection.monitor_pages[1], 1); - if (ret) + if (ret) { + /* + * If set_memory_decrypted() fails, the encryption state + * of the memory is unknown. So leak the memory instead + * of risking returning decrypted memory to the free list. + * For simplicity, always handle both pages the same. + */ + vmbus_connection.monitor_pages[0] = NULL; + vmbus_connection.monitor_pages[1] = NULL; goto cleanup; + } /* * Set_memory_decrypted() will change the memory contents if @@ -337,13 +346,19 @@ void vmbus_disconnect(void) vmbus_connection.int_page = NULL; } - set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1); - set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1); + if (vmbus_connection.monitor_pages[0]) { + if (!set_memory_encrypted( + (unsigned long)vmbus_connection.monitor_pages[0], 1)) + hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); + vmbus_connection.monitor_pages[0] = NULL; + } - hv_free_hyperv_page(vmbus_connection.monitor_pages[0]); - hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); - vmbus_connection.monitor_pages[0] = NULL; - vmbus_connection.monitor_pages[1] = NULL; + if (vmbus_connection.monitor_pages[1]) { + if (!set_memory_encrypted( + (unsigned long)vmbus_connection.monitor_pages[1], 1)) + hv_free_hyperv_page(vmbus_connection.monitor_pages[1]); + vmbus_connection.monitor_pages[1] = NULL; + } } /* diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c index a284a02839..3e63666a61 100644 --- a/drivers/hwmon/corsair-cpro.c +++ b/drivers/hwmon/corsair-cpro.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/types.h> #define USB_VENDOR_ID_CORSAIR 0x1b1c @@ -77,8 +78,11 @@ struct ccp_device { struct hid_device *hdev; struct device *hwmon_dev; + /* For reinitializing the completion below */ + spinlock_t wait_input_report_lock; struct completion wait_input_report; struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */ + u8 *cmd_buffer; u8 *buffer; int target[6]; DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS); @@ -111,15 +115,23 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2, unsigned long t; int ret; - memset(ccp->buffer, 0x00, OUT_BUFFER_SIZE); - ccp->buffer[0] = command; - ccp->buffer[1] = byte1; - 
ccp->buffer[2] = byte2; - ccp->buffer[3] = byte3; - + memset(ccp->cmd_buffer, 0x00, OUT_BUFFER_SIZE); + ccp->cmd_buffer[0] = command; + ccp->cmd_buffer[1] = byte1; + ccp->cmd_buffer[2] = byte2; + ccp->cmd_buffer[3] = byte3; + + /* + * Disable raw event parsing for a moment to safely reinitialize the + * completion. Reinit is done because hidraw could have triggered + * the raw event parsing and marked the ccp->wait_input_report + * completion as done. + */ + spin_lock_bh(&ccp->wait_input_report_lock); reinit_completion(&ccp->wait_input_report); + spin_unlock_bh(&ccp->wait_input_report_lock); - ret = hid_hw_output_report(ccp->hdev, ccp->buffer, OUT_BUFFER_SIZE); + ret = hid_hw_output_report(ccp->hdev, ccp->cmd_buffer, OUT_BUFFER_SIZE); if (ret < 0) return ret; @@ -135,11 +147,12 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8 struct ccp_device *ccp = hid_get_drvdata(hdev); /* only copy buffer when requested */ - if (completion_done(&ccp->wait_input_report)) - return 0; - - memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size)); - complete(&ccp->wait_input_report); + spin_lock(&ccp->wait_input_report_lock); + if (!completion_done(&ccp->wait_input_report)) { + memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size)); + complete_all(&ccp->wait_input_report); + } + spin_unlock(&ccp->wait_input_report_lock); return 0; } @@ -492,7 +505,11 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (!ccp) return -ENOMEM; - ccp->buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL); + ccp->cmd_buffer = devm_kmalloc(&hdev->dev, OUT_BUFFER_SIZE, GFP_KERNEL); + if (!ccp->cmd_buffer) + return -ENOMEM; + + ccp->buffer = devm_kmalloc(&hdev->dev, IN_BUFFER_SIZE, GFP_KERNEL); if (!ccp->buffer) return -ENOMEM; @@ -510,7 +527,9 @@ static int ccp_probe(struct hid_device *hdev, const struct hid_device_id *id) ccp->hdev = hdev; hid_set_drvdata(hdev, ccp); + mutex_init(&ccp->mutex); + spin_lock_init(&ccp->wait_input_report_lock); init_completion(&ccp->wait_input_report); hid_device_io_start(hdev); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index 8d9d422450..d817c719b9 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -80,11 +80,11 @@ struct ucd9000_debugfs_entry { * It has been observed that the UCD90320 randomly fails register access when * doing another access right on the back of a register write. To mitigate this * make sure that there is a minimum delay between a write access and the - * following access. The 250us is based on experimental data. At a delay of - * 200us the issue seems to go away. Add a bit of extra margin to allow for + * following access. The 500 is based on experimental data. At a delay of + * 350us the issue seems to go away. Add a bit of extra margin to allow for * system to system differences. */ -#define UCD90320_WAIT_DELAY_US 250 +#define UCD90320_WAIT_DELAY_US 500 static inline void ucd90320_wait(const struct ucd9000_data *data) { diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c index 82e8d0b390..49e30b8773 100644 --- a/drivers/iio/accel/mxc4005.c +++ b/drivers/iio/accel/mxc4005.c @@ -5,6 +5,7 @@ * Copyright (c) 2014, Intel Corporation. 
*/ +#include <linux/delay.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/iio/iio.h> @@ -27,11 +28,16 @@ #define MXC4005_REG_ZOUT_UPPER 0x07 #define MXC4005_REG_ZOUT_LOWER 0x08 +#define MXC4005_REG_INT_MASK0 0x0A + #define MXC4005_REG_INT_MASK1 0x0B #define MXC4005_REG_INT_MASK1_BIT_DRDYE 0x01 +#define MXC4005_REG_INT_CLR0 0x00 + #define MXC4005_REG_INT_CLR1 0x01 #define MXC4005_REG_INT_CLR1_BIT_DRDYC 0x01 +#define MXC4005_REG_INT_CLR1_SW_RST 0x10 #define MXC4005_REG_CONTROL 0x0D #define MXC4005_REG_CONTROL_MASK_FSR GENMASK(6, 5) @@ -39,6 +45,9 @@ #define MXC4005_REG_DEVICE_ID 0x0E +/* Datasheet does not specify a reset time, this is a conservative guess */ +#define MXC4005_RESET_TIME_US 2000 + enum mxc4005_axis { AXIS_X, AXIS_Y, @@ -62,6 +71,8 @@ struct mxc4005_data { s64 timestamp __aligned(8); } scan; bool trigger_enabled; + unsigned int control; + unsigned int int_mask1; }; /* @@ -113,7 +124,9 @@ static bool mxc4005_is_readable_reg(struct device *dev, unsigned int reg) static bool mxc4005_is_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { + case MXC4005_REG_INT_CLR0: case MXC4005_REG_INT_CLR1: + case MXC4005_REG_INT_MASK0: case MXC4005_REG_INT_MASK1: case MXC4005_REG_CONTROL: return true; @@ -330,23 +343,20 @@ static int mxc4005_set_trigger_state(struct iio_trigger *trig, { struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); struct mxc4005_data *data = iio_priv(indio_dev); + unsigned int val; int ret; mutex_lock(&data->mutex); - if (state) { - ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, - MXC4005_REG_INT_MASK1_BIT_DRDYE); - } else { - ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, - ~MXC4005_REG_INT_MASK1_BIT_DRDYE); - } + val = state ? MXC4005_REG_INT_MASK1_BIT_DRDYE : 0; + ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, val); if (ret < 0) { mutex_unlock(&data->mutex); dev_err(data->dev, "failed to update reg_int_mask1"); return ret; } + data->int_mask1 = val; data->trigger_enabled = state; mutex_unlock(&data->mutex); @@ -382,6 +392,21 @@ static int mxc4005_chip_init(struct mxc4005_data *data) dev_dbg(data->dev, "MXC4005 chip id %02x\n", reg); + ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1, + MXC4005_REG_INT_CLR1_SW_RST); + if (ret < 0) + return dev_err_probe(data->dev, ret, "resetting chip\n"); + + fsleep(MXC4005_RESET_TIME_US); + + ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0); + if (ret < 0) + return dev_err_probe(data->dev, ret, "writing INT_MASK0\n"); + + ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, 0); + if (ret < 0) + return dev_err_probe(data->dev, ret, "writing INT_MASK1\n"); + return 0; } @@ -469,6 +494,58 @@ static int mxc4005_probe(struct i2c_client *client) return devm_iio_device_register(&client->dev, indio_dev); } +static int mxc4005_suspend(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct mxc4005_data *data = iio_priv(indio_dev); + int ret; + + /* Save control to restore it on resume */ + ret = regmap_read(data->regmap, MXC4005_REG_CONTROL, &data->control); + if (ret < 0) + dev_err(data->dev, "failed to read reg_control\n"); + + return ret; +} + +static int mxc4005_resume(struct device *dev) +{ + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct mxc4005_data *data = iio_priv(indio_dev); + int ret; + + ret = regmap_write(data->regmap, MXC4005_REG_INT_CLR1, + MXC4005_REG_INT_CLR1_SW_RST); + if (ret) { + dev_err(data->dev, "failed to reset chip: %d\n", ret); + return ret; + } + + fsleep(MXC4005_RESET_TIME_US); + + ret = 
regmap_write(data->regmap, MXC4005_REG_CONTROL, data->control); + if (ret) { + dev_err(data->dev, "failed to restore control register\n"); + return ret; + } + + ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK0, 0); + if (ret) { + dev_err(data->dev, "failed to restore interrupt 0 mask\n"); + return ret; + } + + ret = regmap_write(data->regmap, MXC4005_REG_INT_MASK1, data->int_mask1); + if (ret) { + dev_err(data->dev, "failed to restore interrupt 1 mask\n"); + return ret; + } + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(mxc4005_pm_ops, mxc4005_suspend, mxc4005_resume); + static const struct acpi_device_id mxc4005_acpi_match[] = { {"MXC4005", 0}, {"MXC6655", 0}, @@ -495,6 +572,7 @@ static struct i2c_driver mxc4005_driver = { .name = MXC4005_DRV_NAME, .acpi_match_table = ACPI_PTR(mxc4005_acpi_match), .of_match_table = mxc4005_of_match, + .pm = pm_sleep_ptr(&mxc4005_pm_ops), }, .probe = mxc4005_probe, .id_table = mxc4005_id, diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c index 64be656f0b..289b6c2bb2 100644 --- a/drivers/iio/imu/adis16475.c +++ b/drivers/iio/imu/adis16475.c @@ -1289,6 +1289,7 @@ static int adis16475_config_sync_mode(struct adis16475 *st) struct device *dev = &st->adis.spi->dev; const struct adis16475_sync *sync; u32 sync_mode; + u16 val; /* default to internal clk */ st->clk_freq = st->info->int_clk * 1000; @@ -1350,8 +1351,9 @@ static int adis16475_config_sync_mode(struct adis16475 *st) * I'm keeping this for simplicity and avoiding extra variables * in chip_info. */ + val = ADIS16475_SYNC_MODE(sync->sync_mode); ret = __adis_update_bits(&st->adis, ADIS16475_REG_MSG_CTRL, - ADIS16475_SYNC_MODE_MASK, sync->sync_mode); + ADIS16475_SYNC_MODE_MASK, val); if (ret) return ret; diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index fe8734468e..62e9e93d91 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -1233,6 +1233,7 @@ const struct bmp280_chip_info bmp380_chip_info = { .chip_id = bmp380_chip_ids, .num_chip_id = ARRAY_SIZE(bmp380_chip_ids), .regmap_config = &bmp380_regmap_config, + .spi_read_extra_byte = true, .start_up_time = 2000, .channels = bmp380_channels, .num_channels = 2, diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index a444d4b297..4e19ea0b4d 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -96,15 +96,10 @@ static int bmp280_spi_probe(struct spi_device *spi) chip_info = spi_get_device_match_data(spi); - switch (chip_info->chip_id[0]) { - case BMP380_CHIP_ID: - case BMP390_CHIP_ID: + if (chip_info->spi_read_extra_byte) bmp_regmap_bus = &bmp380_regmap_bus; - break; - default: + else bmp_regmap_bus = &bmp280_regmap_bus; - break; - } regmap = devm_regmap_init(&spi->dev, bmp_regmap_bus, @@ -127,7 +122,7 @@ static const struct of_device_id bmp280_of_spi_match[] = { { .compatible = "bosch,bmp180", .data = &bmp180_chip_info }, { .compatible = "bosch,bmp181", .data = &bmp180_chip_info }, { .compatible = "bosch,bmp280", .data = &bmp280_chip_info }, - { .compatible = "bosch,bme280", .data = &bmp280_chip_info }, + { .compatible = "bosch,bme280", .data = &bme280_chip_info }, { .compatible = "bosch,bmp380", .data = &bmp380_chip_info }, { .compatible = "bosch,bmp580", .data = &bmp580_chip_info }, { }, @@ -139,7 +134,7 @@ static const struct spi_device_id bmp280_spi_id[] = { { "bmp180", (kernel_ulong_t)&bmp180_chip_info }, { "bmp181", (kernel_ulong_t)&bmp180_chip_info }, { "bmp280", 
(kernel_ulong_t)&bmp280_chip_info }, - { "bme280", (kernel_ulong_t)&bmp280_chip_info }, + { "bme280", (kernel_ulong_t)&bme280_chip_info }, { "bmp380", (kernel_ulong_t)&bmp380_chip_info }, { "bmp580", (kernel_ulong_t)&bmp580_chip_info }, { } diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h index 4012387d79..5812a344ed 100644 --- a/drivers/iio/pressure/bmp280.h +++ b/drivers/iio/pressure/bmp280.h @@ -423,6 +423,7 @@ struct bmp280_chip_info { int num_chip_id; const struct regmap_config *regmap_config; + bool spi_read_extra_byte; const struct iio_chan_spec *channels; int num_channels; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 455e966eef..b27791029f 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -439,6 +439,7 @@ static int remove_device_files(struct super_block *sb, return PTR_ERR(dir); } simple_recursive_removal(dir, NULL); + dput(dir); return 0; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 4283dd8191..f945bf3253 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2593,6 +2593,10 @@ static int amd_iommu_def_domain_type(struct device *dev) if (!dev_data) return 0; + /* Always use DMA domain for untrusted device */ + if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) + return IOMMU_DOMAIN_DMA; + /* * Do not identity map IOMMUv2 capable devices when: * - memory encryption is active, because some of those devices diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c index 87bf522b9d..957d988b6d 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c @@ -221,11 +221,9 @@ static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev) unsigned int inst; irqreturn_t ret = IRQ_NONE; struct arm_smmu_device *smmu; - struct iommu_domain *domain = dev; - struct arm_smmu_domain *smmu_domain; + struct arm_smmu_domain *smmu_domain = dev; struct nvidia_smmu *nvidia; - smmu_domain = container_of(domain, struct arm_smmu_domain, domain); smmu = smmu_domain->smmu; nvidia = to_nvidia_smmu(smmu); diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 7abe9e85a5..51d0eba8cb 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1789,6 +1789,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data}, {} }; +MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids); static struct platform_driver mtk_iommu_driver = { .probe = mtk_iommu_probe, diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 25b41222ab..32cc8341d3 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -599,6 +599,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = { { .compatible = "mediatek,mt2701-m4u", }, {} }; +MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids); static const struct component_master_ops mtk_iommu_v1_com_ops = { .bind = mtk_iommu_v1_bind, diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index aac36750d2..c3a6657dcd 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -115,6 +115,8 @@ #define MEI_DEV_ID_ARL_S 0x7F68 /* Arrow Lake Point S */ #define MEI_DEV_ID_ARL_H 0x7770 /* Arrow Lake Point H */ +#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 
bd4e3df448..3c2c28c8ba 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -122,6 +122,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_S, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ARL_H, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c index 9ad20e8278..b21598a18f 100644 --- a/drivers/misc/pvpanic/pvpanic-pci.c +++ b/drivers/misc/pvpanic/pvpanic-pci.c @@ -44,8 +44,6 @@ static struct pci_driver pvpanic_pci_driver = { .name = "pvpanic-pci", .id_table = pvpanic_pci_id_tbl, .probe = pvpanic_pci_probe, - .driver = { - .dev_groups = pvpanic_dev_groups, - }, + .dev_groups = pvpanic_dev_groups, }; module_pci_driver(pvpanic_pci_driver); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 32416d8802..967d913631 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -697,6 +697,18 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, } } +static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, + struct phylink_config *config) +{ + unsigned long *supported = config->supported_interfaces; + + /* Translate the default cmode */ + mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported); + + config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | + MAC_1000FD; +} + static void mv88e6341_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, struct phylink_config *config) { @@ -5090,7 +5102,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e632x_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6321_ops = { @@ -5136,7 +5148,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, - .phylink_get_caps = mv88e6185_phylink_get_caps, + .phylink_get_caps = mv88e632x_phylink_get_caps, }; static const struct mv88e6xxx_ops mv88e6341_ops = { @@ -5702,7 +5714,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141, .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6141", - .num_databases = 4096, + .num_databases = 256, .num_macs = 2048, .num_ports = 6, .num_internal_phys = 5, @@ -6161,7 +6173,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341, .family = MV88E6XXX_FAMILY_6341, .name = "Marvell 88E6341", - .num_databases = 4096, + .num_databases = 256, .num_macs = 2048, .num_internal_phys = 5, .num_ports = 6, diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2d7ae71287..cefb9f34d5 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) controller driver * - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #define pr_fmt(fmt) "bcmgenet: " fmt @@ -2469,14 +2469,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) { u32 reg; + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (reg & CMD_SW_RESET) + if (reg & CMD_SW_RESET) { + 
spin_unlock_bh(&priv->reg_lock); return; + } if (enable) reg |= mask; else reg &= ~mask; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); /* UniMAC stops on a packet boundary, wait for a full-size packet * to be processed @@ -2492,8 +2496,10 @@ static void reset_umac(struct bcmgenet_priv *priv) udelay(10); /* issue soft reset and disable MAC while updating its registers */ + spin_lock_bh(&priv->reg_lock); bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); udelay(2); + spin_unlock_bh(&priv->reg_lock); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -3282,7 +3288,7 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, } /* Returns a reusable dma control register value */ -static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) +static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx) { unsigned int i; u32 reg; @@ -3307,6 +3313,14 @@ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) udelay(10); bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH); + if (flush_rx) { + reg = bcmgenet_rbuf_ctrl_get(priv); + bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0)); + udelay(10); + bcmgenet_rbuf_ctrl_set(priv, reg); + udelay(10); + } + return dma_ctrl; } @@ -3328,7 +3342,9 @@ static void bcmgenet_netif_start(struct net_device *dev) struct bcmgenet_priv *priv = netdev_priv(dev); /* Start the network engine */ + netif_addr_lock_bh(dev); bcmgenet_set_rx_mode(dev); + netif_addr_unlock_bh(dev); bcmgenet_enable_rx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); @@ -3370,8 +3386,8 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - /* Disable RX/TX DMA and flush TX queues */ - dma_ctrl = bcmgenet_dma_disable(priv); + /* Disable RX/TX DMA and flush TX and RX queues */ + dma_ctrl = bcmgenet_dma_disable(priv, true); /* Reinitialize TDMA and RDMA and SW housekeeping */ ret = bcmgenet_init_dma(priv); @@ -3589,16 +3605,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev) * 3. The number of filters needed exceeds the number filters * supported by the hardware. 
*/ + spin_lock(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) || (nfilter > MAX_MDF_FILTER)) { reg |= CMD_PROMISC; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock(&priv->reg_lock); bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL); return; } else { reg &= ~CMD_PROMISC; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock(&priv->reg_lock); } /* update MDF filter */ @@ -3997,6 +4016,7 @@ static int bcmgenet_probe(struct platform_device *pdev) goto err; } + spin_lock_init(&priv->reg_lock); spin_lock_init(&priv->lock); /* Set default pause parameters */ @@ -4237,7 +4257,7 @@ static int bcmgenet_resume(struct device *d) bcmgenet_hfb_create_rxnfc_filter(priv, rule); /* Disable RX/TX DMA and flush TX queues */ - dma_ctrl = bcmgenet_dma_disable(priv); + dma_ctrl = bcmgenet_dma_disable(priv, false); /* Reinitialize TDMA and RDMA and SW housekeeping */ ret = bcmgenet_init_dma(priv); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 1985c0ec4d..28e2c94ef8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #ifndef __BCMGENET_H__ @@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule { /* device context */ struct bcmgenet_priv { void __iomem *base; + /* reg_lock: lock to serialize access to shared registers */ + spinlock_t reg_lock; enum bcmgenet_version version; struct net_device *dev; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 7a41cad578..1248792d7f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -2,7 +2,7 @@ /* * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support * - * Copyright (c) 2014-2020 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #define pr_fmt(fmt) "bcmgenet_wol: " fmt @@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, } /* Can't suspend with WoL if MAC is still in reset */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); if (reg & CMD_SW_RESET) reg &= ~CMD_SW_RESET; @@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, /* disable RX */ reg &= ~CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); mdelay(10); if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) { @@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, } /* Enable CRC forward */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); priv->crc_fwd_en = 1; reg |= CMD_CRC_FWD; @@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, /* Receiver must be enabled for WOL MP detection */ reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); reg = UMAC_IRQ_MPD_R; if (hfb_enable) @@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, } /* Disable CRC Forward */ + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~CMD_CRC_FWD; bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 97ea76d443..e7c659cd39 100644 --- 
a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -2,7 +2,7 @@ /* * Broadcom GENET MDIO routines * - * Copyright (c) 2014-2017 Broadcom + * Copyright (c) 2014-2024 Broadcom */ #include <linux/acpi.h> @@ -75,6 +75,7 @@ static void bcmgenet_mac_config(struct net_device *dev) reg |= RGMII_LINK; bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + spin_lock_bh(&priv->reg_lock); reg = bcmgenet_umac_readl(priv, UMAC_CMD); reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) | CMD_HD_EN | @@ -87,6 +88,7 @@ static void bcmgenet_mac_config(struct net_device *dev) reg |= CMD_TX_EN | CMD_RX_EN; } bcmgenet_umac_writel(priv, reg, UMAC_CMD); + spin_unlock_bh(&priv->reg_lock); priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0; bcmgenet_eee_enable_set(dev, @@ -274,6 +276,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * block for the interface to work, unconditionally clear the * Out-of-band disable since we do not need it. */ + mutex_lock(&phydev->lock); reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL); reg &= ~OOB_DISABLE; if (priv->ext_phy) { @@ -285,6 +288,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) reg |= RGMII_MODE_EN; } bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); + mutex_unlock(&phydev->lock); if (init) dev_info(kdev, "configuring instance for %s\n", phy_name); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 7246e13dd5..97291bfbee 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); @@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, void *kern_buf; /* Copy the user space buf */ - kern_buf = memdup_user(buf, nbytes); + kern_buf = memdup_user_nul(buf, nbytes); if (IS_ERR(kern_buf)) return PTR_ERR(kern_buf); diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b5ff2e1a99..2285f2e87e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2684,12 +2684,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) lb->loopback = 1; q = &adap->sge.ethtxq[pi->first_qset]; - __netif_tx_lock(q->txq, smp_processor_id()); + __netif_tx_lock_bh(q->txq); reclaim_completed_tx(adap, &q->q, -1, true); credits = txq_avail(&q->q) - ndesc; if (unlikely(credits < 0)) { - __netif_tx_unlock(q->txq); + __netif_tx_unlock_bh(q->txq); return -ENOMEM; } @@ -2724,7 +2724,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev) init_completion(&lb->completion); txq_advance(&q->q, ndesc); cxgb4_ring_tx_db(adap, &q->q, ndesc); - __netif_tx_unlock(q->txq); + __netif_tx_unlock_bh(q->txq); /* wait for the pkt to return */ ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d7e175a9cb..2f7195ba49 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -895,7 +895,7 @@ struct hnae3_handle { struct hnae3_roce_private_info rinfo; }; - u32 numa_node_mask; /* for multi-chip support */ + nodemask_t numa_node_mask; /* for multi-chip support */ enum 
hnae3_port_base_vlan_state port_base_vlan_state; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index a3b7723a97..c55b5430b7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1524,6 +1524,9 @@ static int hclge_configure(struct hclge_dev *hdev) cfg.default_speed, ret); return ret; } + hdev->hw.mac.req_speed = hdev->hw.mac.speed; + hdev->hw.mac.req_autoneg = AUTONEG_ENABLE; + hdev->hw.mac.req_duplex = DUPLEX_FULL; hclge_parse_link_mode(hdev, cfg.speed_ability); @@ -1753,7 +1756,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; - nic->numa_node_mask = hdev->numa_node_mask; + bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, + MAX_NUMNODES); nic->kinfo.io_base = hdev->hw.hw.io_base; ret = hclge_knic_setup(vport, num_tqps, @@ -2445,7 +2449,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; - roce->numa_node_mask = nic->numa_node_mask; + bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, + MAX_NUMNODES); return 0; } @@ -3329,9 +3334,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle, return ret; } - hdev->hw.mac.autoneg = cmd->base.autoneg; - hdev->hw.mac.speed = cmd->base.speed; - hdev->hw.mac.duplex = cmd->base.duplex; + hdev->hw.mac.req_autoneg = cmd->base.autoneg; + hdev->hw.mac.req_speed = cmd->base.speed; + hdev->hw.mac.req_duplex = cmd->base.duplex; linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); return 0; @@ -3364,9 +3369,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) if (!hnae3_dev_phy_imp_supported(hdev)) return 0; - cmd.base.autoneg = hdev->hw.mac.autoneg; - cmd.base.speed = hdev->hw.mac.speed; - cmd.base.duplex = hdev->hw.mac.duplex; + cmd.base.autoneg = hdev->hw.mac.req_autoneg; + cmd.base.speed = hdev->hw.mac.req_speed; + cmd.base.duplex = hdev->hw.mac.req_duplex; linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); @@ -7939,8 +7944,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) /* Set the DOWN flag here to disable link updating */ set_bit(HCLGE_STATE_DOWN, &hdev->state); - /* flush memory to make sure DOWN is seen by service task */ - smp_mb__before_atomic(); + smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ hclge_flush_link_update(hdev); } } @@ -9893,6 +9897,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) static int hclge_init_vlan_filter(struct hclge_dev *hdev) { struct hclge_vport *vport; + bool enable = true; int ret; int i; @@ -9912,8 +9917,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev) vport->cur_vlan_fltr_en = true; } + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && + !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) + enable = false; + return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, - HCLGE_FILTER_FE_INGRESS, true, 0); + HCLGE_FILTER_FE_INGRESS, enable, 0); } static int hclge_init_vlan_type(struct hclge_dev *hdev) @@ -11609,16 +11618,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto out; - ret = hclge_devlink_init(hdev); - if (ret) - goto err_pci_uninit; - - devl_lock(hdev->devlink); - /* Firmware command queue initialize */ 
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) - goto err_devlink_uninit; + goto err_pci_uninit; /* Firmware command initialize */ ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, @@ -11746,7 +11749,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_update_port_info(hdev); if (ret) - goto err_mdiobus_unreg; + goto err_ptp_uninit; INIT_KFIFO(hdev->mac_tnl_log); @@ -11786,6 +11789,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) dev_warn(&pdev->dev, "failed to wake on lan init, ret = %d\n", ret); + ret = hclge_devlink_init(hdev); + if (ret) + goto err_ptp_uninit; + hclge_state_init(hdev); hdev->last_reset_time = jiffies; @@ -11793,10 +11800,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) HCLGE_DRIVER_NAME); hclge_task_schedule(hdev, round_jiffies_relative(HZ)); - - devl_unlock(hdev->devlink); return 0; +err_ptp_uninit: + hclge_ptp_uninit(hdev); err_mdiobus_unreg: if (hdev->hw.mac.phydev) mdiobus_unregister(hdev->hw.mac.mdio_bus); @@ -11806,9 +11813,6 @@ err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); -err_devlink_uninit: - devl_unlock(hdev->devlink); - hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.hw.io_base); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 51979cf712..d4a146c53c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -270,11 +270,14 @@ struct hclge_mac { u8 media_type; /* port media type, e.g. fibre/copper/backplane */ u8 mac_addr[ETH_ALEN]; u8 autoneg; + u8 req_autoneg; u8 duplex; + u8 req_duplex; u8 support_autoneg; u8 speed_type; /* 0: sfp speed, 1: active speed */ u8 lane_num; u32 speed; + u32 req_speed; u32 max_speed; u32 speed_ability; /* speed ability supported by current media */ u32 module_type; /* sub media type, e.g. 
kr/cr/sr/lr */ @@ -882,7 +885,7 @@ struct hclge_dev { u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */ u16 num_alloc_vport; /* Num vports this driver supports */ - u32 numa_node_mask; + nodemask_t numa_node_mask; u16 rx_buf_len; u16 num_tx_desc; /* desc num of per tx queue */ u16 num_rx_desc; /* desc num of per rx queue */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 4b0d07ca25..a44659906c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param) hdev = param->vport->back; cmd_func = hclge_mbx_ops_list[param->req->msg.code]; - if (cmd_func) - ret = cmd_func(param); - else + if (!cmd_func) { dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %u\n", param->req->msg.code); + return; + } + ret = cmd_func(param); /* PF driver should not reply IMP */ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) && diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 0aa9beefd1..43ee20eb03 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -412,7 +412,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->ae_algo = &ae_algovf; nic->pdev = hdev->pdev; - nic->numa_node_mask = hdev->numa_node_mask; + bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, + MAX_NUMNODES); nic->flags |= HNAE3_SUPPORT_VF; nic->kinfo.io_base = hdev->hw.hw.io_base; @@ -2082,8 +2083,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) roce->pdev = nic->pdev; roce->ae_algo = nic->ae_algo; - roce->numa_node_mask = nic->numa_node_mask; - + bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, + MAX_NUMNODES); return 0; } @@ -2180,8 +2181,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) } else { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - /* flush memory to make sure DOWN is seen by service task */ - smp_mb__before_atomic(); + smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ hclgevf_flush_link_update(hdev); } } @@ -2845,10 +2845,6 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = hclgevf_devlink_init(hdev); - if (ret) - goto err_devlink_init; - ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); if (ret) goto err_cmd_queue_init; @@ -2941,6 +2937,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) hclgevf_init_rxd_adv_layout(hdev); + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_config; + set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); hdev->last_reset_time = jiffies; @@ -2960,8 +2960,6 @@ err_misc_irq_init: err_cmd_init: hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); err_cmd_queue_init: - hclgevf_devlink_uninit(hdev); -err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index a73f2bf3a5..cccef32284 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -236,7 +236,7 @@ struct hclgevf_dev { u16 rss_size_max; /* HW 
defined max RSS task queue */ u16 num_alloc_vport; /* num vports this driver supports */ - u32 numa_node_mask; + nodemask_t numa_node_mask; u16 rx_buf_len; u16 num_tx_desc; /* desc num of per tx queue */ u16 num_rx_desc; /* desc num of per rx queue */ diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 93544f1cc2..f7ae0e0aa4 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usleep_range(50, 60); + udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usleep_range(100, 150); + udelay(100); if (success) { *data = (u16)mdic; @@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * the lower time out */ for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { - usleep_range(50, 60); + udelay(50); mdic = er32(MDIC); if (mdic & E1000_MDIC_READY) break; @@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) * reading duplicate data in the next MDIC transaction. */ if (hw->mac.type == e1000_pch2lan) - usleep_range(100, 150); + udelay(100); if (success) return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c index c2bfba6b9e..66aa9759c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_debugfs.c +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -174,7 +174,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 8) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -260,7 +260,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 4) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -335,7 +335,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 2) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); @@ -431,7 +431,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf, if (*ppos != 0 || count > 5) return -EINVAL; - cmd_buf = memdup_user(buf, count); + cmd_buf = memdup_user_nul(buf, count); if (IS_ERR(cmd_buf)) return PTR_ERR(cmd_buf); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 7b550d7d96..1ff9818b4c 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -550,17 +550,15 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) /** * ice_vc_isvalid_q_id - * @vf: pointer to the VF info - * @vsi_id: VSI ID + * @vsi: VSI to check queue ID against * @qid: VSI relative queue ID * * check for the valid queue ID */ -static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) +static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI 
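The e1000e hunks above swap usleep_range() for udelay() in the MDIC ready-bit polling loops. As a general pattern, udelay() busy-waits rather than sleeping, so it stays valid if the poll ever runs in atomic context and it avoids the scheduling overhead usleep_range() adds to waits of a few tens of microseconds. A sketch of such a poll loop; the register layout, ready bit and retry budget are hypothetical:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_REG_READY		BIT(0)
#define DEMO_POLL_TRIES		1920	/* hypothetical retry budget */

static int demo_poll_ready(void __iomem *reg)
{
	int i;

	for (i = 0; i < DEMO_POLL_TRIES; i++) {
		udelay(50);		/* busy-wait: no sleeping, no timer slack */
		if (readl(reg) & DEMO_REG_READY)
			return 0;
	}

	return -ETIMEDOUT;
}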
*/ - return (vsi && (qid < vsi->alloc_txq)); + return qid < vsi->alloc_txq; } /** @@ -1318,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) */ q_map = vqs->rx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1340,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1445,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1471,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); } else if (q_map) { for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1527,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { vsi_q_id = vsi_q_id_idx; - if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + if (!ice_vc_isvalid_q_id(vsi, vsi_q_id)) return VIRTCHNL_STATUS_ERR_PARAM; q_vector->num_ring_rx++; @@ -1541,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { vsi_q_id = vsi_q_id_idx; - if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + if (!ice_vc_isvalid_q_id(vsi, vsi_q_id)) return VIRTCHNL_STATUS_ERR_PARAM; q_vector->num_ring_tx++; @@ -1698,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qpi->txq.headwb_enabled || !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || - !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { + !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) { goto error_param; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index f001553e1a..8e4ff3af86 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) return -EINVAL; - if (vsi_id != vf->lan_vsi_num) - return -EINVAL; - if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index e6d7914ce6..e7eca8141e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp, u16 pcifunc; int ret, lf; - cmd_buf = memdup_user(buffer, count + 1); + cmd_buf = memdup_user_nul(buffer, count); if (IS_ERR(cmd_buf)) return -ENOMEM; - cmd_buf[count] = '\0'; - cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; diff 
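The ice_debugfs and rvu_debugfs hunks above replace memdup_user() with memdup_user_nul(), which appends a terminating '\0' to the copied buffer so string parsers cannot read past the allocation (and the rvu pattern of copying count + 1 bytes just to overwrite the last one goes away). A hedged sketch of a debugfs write handler using that helper; the handler name and the 8-byte limit are made up:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kstrtox.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_debugfs_write(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	unsigned int val;
	char *cmd;
	int ret;

	if (*ppos != 0 || count > 8)
		return -EINVAL;

	/* Copy from user space and NUL-terminate in one step. */
	cmd = memdup_user_nul(buf, count);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	ret = kstrtouint(cmd, 0, &val);
	kfree(cmd);
	if (ret)
		return ret;

	pr_debug("demo: parsed %u\n", val);
	return count;
}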
--git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index d4cdf3d4f5..6453c92f0f 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -234,12 +234,13 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) /** * ks8851_rx_pkts - receive packets from the host * @ks: The device information. + * @rxq: Queue of packets received in this function. * * This is called from the IRQ work queue when the system detects that there * are packets in the receive queue. Find out how many packets there are and * read them from the FIFO. */ -static void ks8851_rx_pkts(struct ks8851_net *ks) +static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq) { struct sk_buff *skb; unsigned rxfc; @@ -299,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) ks8851_dbg_dumpkkt(ks, rxpkt); skb->protocol = eth_type_trans(skb, ks->netdev); - __netif_rx(skb); + __skb_queue_tail(rxq, skb); ks->netdev->stats.rx_packets++; ks->netdev->stats.rx_bytes += rxlen; @@ -326,33 +327,25 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) static irqreturn_t ks8851_irq(int irq, void *_ks) { struct ks8851_net *ks = _ks; - unsigned handled = 0; + struct sk_buff_head rxq; unsigned long flags; unsigned int status; - - local_bh_disable(); + struct sk_buff *skb; ks8851_lock(ks, &flags); status = ks8851_rdreg16(ks, KS_ISR); + ks8851_wrreg16(ks, KS_ISR, status); netif_dbg(ks, intr, ks->netdev, "%s: status 0x%04x\n", __func__, status); - if (status & IRQ_LCI) - handled |= IRQ_LCI; - if (status & IRQ_LDI) { u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); pmecr &= ~PMECR_WKEVT_MASK; ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK); - - handled |= IRQ_LDI; } - if (status & IRQ_RXPSI) - handled |= IRQ_RXPSI; - if (status & IRQ_TXI) { unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR); @@ -364,27 +357,20 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (netif_queue_stopped(ks->netdev)) netif_wake_queue(ks->netdev); spin_unlock(&ks->statelock); - - handled |= IRQ_TXI; } - if (status & IRQ_RXI) - handled |= IRQ_RXI; - if (status & IRQ_SPIBEI) { netdev_err(ks->netdev, "%s: spi bus error\n", __func__); - handled |= IRQ_SPIBEI; } - ks8851_wrreg16(ks, KS_ISR, handled); - if (status & IRQ_RXI) { /* the datasheet says to disable the rx interrupt during * packet read-out, however we're masking the interrupt * from the device so do not bother masking just the RX * from the device. 
*/ - ks8851_rx_pkts(ks); + __skb_queue_head_init(&rxq); + ks8851_rx_pkts(ks, &rxq); } /* if something stopped the rx process, probably due to wanting @@ -408,7 +394,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) if (status & IRQ_LCI) mii_check_link(&ks->mii); - local_bh_enable(); + if (status & IRQ_RXI) + while ((skb = __skb_dequeue(&rxq))) + netif_rx(skb); return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a5ac21a0ee..cb6b33a228 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f) { struct qede_arfs_fltr_node *n; - int min_hlen, rc = -EINVAL; struct qede_arfs_tuple t; + int min_hlen, rc; __qede_lock(edev); @@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, } /* parse flower attribute and prepare filter */ - if (qede_parse_flow_attr(edev, proto, f->rule, &t)) + rc = qede_parse_flow_attr(edev, proto, f->rule, &t); + if (rc) goto unlock; /* Validate profile mode and number of filters */ @@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, DP_NOTICE(edev, "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n", t.mode, edev->arfs->mode, edev->arfs->filter_count); + rc = -EINVAL; goto unlock; } /* parse tc actions and get the vf_id */ - if (qede_parse_actions(edev, &f->rule->action, f->common.extack)) + rc = qede_parse_actions(edev, &f->rule->action, f->common.extack); + if (rc) goto unlock; if (qede_flow_find_fltr(edev, &t)) { @@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev, if (IS_ERR(flow)) return PTR_ERR(flow); - if (qede_parse_flow_attr(edev, proto, flow->rule, t)) { - err = -EINVAL; + err = qede_parse_flow_attr(edev, proto, flow->rule, t); + if (err) goto err_out; - } /* Make sure location is valid and filter isn't already set */ err = qede_flow_spec_validate(edev, &flow->rule->action, t, diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index a6fcbda64e..2b6ec979a6 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head) int i; kfree(nvdev->extension); - vfree(nvdev->recv_buf); - vfree(nvdev->send_buf); + + if (!nvdev->recv_buf_gpadl_handle.decrypted) + vfree(nvdev->recv_buf); + if (!nvdev->send_buf_gpadl_handle.decrypted) + vfree(nvdev->send_buf); bitmap_free(nvdev->send_section_map); for (i = 0; i < VRSS_CHANNEL_MAX; i++) { diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 21b6c4d94a..6d31061818 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -174,6 +174,7 @@ struct ax88179_data { u32 wol_supported; u32 wolopts; u8 disconnecting; + u8 initialized; }; struct ax88179_int_data { @@ -1673,6 +1674,18 @@ static int ax88179_reset(struct usbnet *dev) return 0; } +static int ax88179_net_reset(struct usbnet *dev) +{ + struct ax88179_data *ax179_data = dev->driver_priv; + + if (ax179_data->initialized) + ax88179_reset(dev); + else + ax179_data->initialized = 1; + + return 0; +} + static int ax88179_stop(struct usbnet *dev) { u16 tmp16; @@ -1692,6 +1705,7 @@ static const struct driver_info ax88179_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, + .reset = 
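The ks8851 interrupt-handler rework above queues received skbs on a local sk_buff_head while the driver lock is held and passes them to netif_rx() only after the lock is released, which removes the need for the earlier local_bh_disable()/__netif_rx() arrangement. A simplified analogue of that collect-then-deliver pattern (all names are hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void demo_rx_flush(spinlock_t *devlock, struct sk_buff_head *pending)
{
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	unsigned long flags;

	__skb_queue_head_init(&rxq);	/* private queue, no locking needed */

	spin_lock_irqsave(devlock, flags);
	while ((skb = __skb_dequeue(pending)))
		__skb_queue_tail(&rxq, skb);
	spin_unlock_irqrestore(devlock, flags);

	/* Deliver outside the lock; netif_rx() may be called from any context. */
	while ((skb = __skb_dequeue(&rxq)))
		netif_rx(skb);
}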
ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1704,6 +1718,7 @@ static const struct driver_info ax88178a_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1716,7 +1731,7 @@ static const struct driver_info cypress_GX3_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1729,7 +1744,7 @@ static const struct driver_info dlink_dub1312_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1742,7 +1757,7 @@ static const struct driver_info sitecom_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1755,7 +1770,7 @@ static const struct driver_info samsung_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1768,7 +1783,7 @@ static const struct driver_info lenovo_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1781,7 +1796,7 @@ static const struct driver_info belkin_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1794,7 +1809,7 @@ static const struct driver_info toshiba_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1807,7 +1822,7 @@ static const struct driver_info mct_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1820,7 +1835,7 @@ static const struct driver_info at_umc2000_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1833,7 +1848,7 @@ static const struct driver_info at_umc200_info = { .unbind = ax88179_unbind, .status = ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, @@ -1846,7 +1861,7 @@ static const struct driver_info at_umc2000sp_info = { .unbind = ax88179_unbind, .status = 
ax88179_status, .link_reset = ax88179_link_reset, - .reset = ax88179_reset, + .reset = ax88179_net_reset, .stop = ax88179_stop, .flags = FLAG_ETHER | FLAG_FRAMING_AX, .rx_fixup = ax88179_rx_fixup, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e2e181378f..edc34402e7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1431,6 +1431,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ {QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */ {QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */ + {QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */ /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 9ec46048d3..0a0b4a9717 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -1674,6 +1674,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) bool raw_proto = false; void *oiph; __be32 vni = 0; + int nh; /* Need UDP and VXLAN header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1762,12 +1763,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; } - oiph = skb_network_header(skb); + /* Save offset of outer header relative to skb->head, + * because we are going to reset the network header to the inner header + * and might change skb->head. + */ + nh = skb_network_header(skb) - skb->head; + skb_reset_network_header(skb); + if (!pskb_inet_may_pull(skb)) { + DEV_STATS_INC(vxlan->dev, rx_length_errors); + DEV_STATS_INC(vxlan->dev, rx_errors); + vxlan_vnifilter_count(vxlan, vni, vninode, + VXLAN_VNI_STATS_RX_ERRORS, 0); + goto drop; + } + + /* Get the outer header. 
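The vxlan_rcv() change above stops caching a raw pointer to the outer IP header across the header pull: pulling headers can reallocate and move skb->head, so the position is saved as an offset and the pointer rebuilt afterwards. A sketch of that save-offset-then-rebuild pattern using the generic pskb_may_pull() helper; names are hypothetical:

#include <linux/skbuff.h>

static void *demo_outer_header_after_pull(struct sk_buff *skb,
					  unsigned int need)
{
	/* Offsets survive reallocation; pointers into skb data may not. */
	int nh = skb_network_header(skb) - skb->head;

	if (!pskb_may_pull(skb, need))
		return NULL;

	return skb->head + nh;
}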
*/ + oiph = skb->head + nh; + if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; + DEV_STATS_INC(vxlan->dev, rx_frame_errors); + DEV_STATS_INC(vxlan->dev, rx_errors); vxlan_vnifilter_count(vxlan, vni, vninode, VXLAN_VNI_STATS_RX_ERRORS, 0); goto drop; @@ -1837,7 +1854,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (!pskb_may_pull(skb, arp_hdr_len(dev))) { - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); + vxlan_vnifilter_count(vxlan, vni, NULL, + VXLAN_VNI_STATS_TX_DROPS, 0); goto out; } parp = arp_hdr(skb); @@ -1893,7 +1912,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) reply->pkt_type = PACKET_HOST; if (netif_rx(reply) == NET_RX_DROP) { - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2052,7 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) goto out; if (netif_rx(reply) == NET_RX_DROP) { - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2263,7 +2282,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, len); } else { drop: - dev->stats.rx_dropped++; + dev_core_stats_rx_dropped_inc(dev); vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX_DROPS, 0); } @@ -2295,7 +2314,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, addr_family, dst_port, vxlan->cfg.flags); if (!dst_vxlan) { - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); @@ -2559,7 +2578,7 @@ out_unlock: return; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); return; @@ -2567,11 +2586,11 @@ drop: tx_error: rcu_read_unlock(); if (err == -ELOOP) - dev->stats.collisions++; + DEV_STATS_INC(dev, collisions); else if (err == -ENETUNREACH) - dev->stats.tx_carrier_errors++; + DEV_STATS_INC(dev, tx_carrier_errors); dst_release(ndst); - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0); kfree_skb(skb); } @@ -2604,7 +2623,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev, return; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2642,7 +2661,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev, return NETDEV_TX_OK; drop: - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(netdev_priv(dev), vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); dev_kfree_skb(skb); @@ -2739,7 +2758,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) !is_multicast_ether_addr(eth->h_dest)) vxlan_fdb_miss(vxlan, eth->h_dest); - dev->stats.tx_dropped++; + dev_core_stats_tx_dropped_inc(dev); vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0); kfree_skb(skb); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 1628bf5545..23e64a757c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ 
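The remaining vxlan hunks above convert bare dev->stats.foo++ updates to DEV_STATS_INC() and the dev_core_stats_*_inc() helpers, which use atomic or per-CPU updates and therefore cannot lose counts when several CPUs hit the same error path at once. A small sketch of a drop path written that way; the helper itself is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_drop_tx(struct net_device *dev, struct sk_buff *skb,
			 bool carrier_gone)
{
	if (carrier_gone)
		DEV_STATS_INC(dev, tx_carrier_errors);	/* atomic_long_inc() on dev->stats */

	dev_core_stats_tx_dropped_inc(dev);		/* per-CPU core drop counter */
	kfree_skb(skb);
}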
b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -855,10 +855,15 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id) { - int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + int ret; lockdep_assert_held(&mvm->mutex); + if (WARN_ON(sta_id == IWL_MVM_INVALID_STA)) + return 0; + + ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index ca74b1b63c..0efa304904 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -1588,9 +1588,9 @@ void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, return; tfd_num = iwl_txq_get_cmd_index(txq, ssn); - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); spin_lock_bh(&txq->lock); + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (!test_bit(txq_id, trans->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index fe3627c5bd..26153cb764 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3630,7 +3630,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) "Found shared namespace %d, but multipathing not supported.\n", info->nsid); dev_warn_once(ctrl->device, - "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n."); + "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n"); } } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 7b87763e2f..738719085e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -162,6 +162,11 @@ enum nvme_quirks { * Disables simple suspend/resume path. */ NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND = (1 << 20), + + /* + * MSI (but not MSI-X) interrupts are broken and never fire. + */ + NVME_QUIRK_BROKEN_MSI = (1 << 21), }; /* diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 8e0bb96926..02565dc99a 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2218,6 +2218,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) .priv = dev, }; unsigned int irq_queues, poll_queues; + unsigned int flags = PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY; /* * Poll queues don't need interrupts, but we need at least one I/O queue @@ -2241,8 +2242,10 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) irq_queues = 1; if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) irq_queues += (nr_io_queues - poll_queues); - return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, - PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); + if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) + flags &= ~PCI_IRQ_MSI; + return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, flags, + &affd); } static unsigned int nvme_max_io_queues(struct nvme_dev *dev) @@ -2471,6 +2474,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) { int result = -ENOMEM; struct pci_dev *pdev = to_pci_dev(dev->dev); + unsigned int flags = PCI_IRQ_ALL_TYPES; if (pci_enable_device_mem(pdev)) return result; @@ -2487,7 +2491,9 @@ static int nvme_pci_enable(struct nvme_dev *dev) * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll * adjust this later. 
*/ - result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (dev->ctrl.quirks & NVME_QUIRK_BROKEN_MSI) + flags &= ~PCI_IRQ_MSI; + result = pci_alloc_irq_vectors(pdev, 1, 1, flags); if (result < 0) goto disable; @@ -3384,6 +3390,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ + .driver_data = NVME_QUIRK_BROKEN_MSI }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index d376fa7114..029efe16f8 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -43,7 +43,7 @@ #define SCU614 0x614 /* Disable GPIO Internal Pull-Down #1 */ #define SCU618 0x618 /* Disable GPIO Internal Pull-Down #2 */ #define SCU61C 0x61c /* Disable GPIO Internal Pull-Down #3 */ -#define SCU620 0x620 /* Disable GPIO Internal Pull-Down #4 */ +#define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */ #define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */ #define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */ #define SCU690 0x690 /* Multi-function Pin Control #24 */ @@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = { ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0), /* GPIOS7 */ - ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23), + ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23), /* GPIOS6 */ - ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22), + ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22), /* GPIOS5 */ - ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21), + ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21), /* GPIOS4 */ - ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20), + ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20), /* GPIOS3*/ - ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19), + ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19), /* GPIOS2 */ - ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18), + ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18), /* GPIOS1 */ - ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17), + ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17), /* GPIOS0 */ - ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16), + ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16), /* GPIOR7 */ - ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15), + ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15), /* GPIOR6 */ - ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14), + ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14), /* GPIOR5 */ - ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13), + ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13), /* GPIOR4 */ - ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12), + ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12), /* GPIOR3*/ - ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11), + ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11), /* GPIOR2 */ - ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10), + ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10), /* GPIOR1 */ - ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9), + ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9), /* GPIOR0 */ - ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8), + ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8), /* GPIOX7 */ ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31), diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index bbcdece83b..ae975e1365 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -2120,13 +2120,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev) error = pinctrl_claim_hogs(pctldev); if (error) { - dev_err(pctldev->dev, "could not claim hogs: %i\n", - error); - 
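The NVMe hunks above introduce NVME_QUIRK_BROKEN_MSI and clear PCI_IRQ_MSI from the allocation flags for affected devices, so vector allocation falls back to MSI-X or legacy INTx instead of an MSI line that never fires. A sketch of the flag-masking pattern; the quirk bit and helper are hypothetical:

#include <linux/bits.h>
#include <linux/pci.h>

#define DEMO_QUIRK_BROKEN_MSI	BIT(0)	/* hypothetical per-device quirk flag */

static int demo_alloc_vectors(struct pci_dev *pdev, unsigned long quirks,
			      unsigned int max_vecs)
{
	unsigned int flags = PCI_IRQ_ALL_TYPES;

	if (quirks & DEMO_QUIRK_BROKEN_MSI)
		flags &= ~PCI_IRQ_MSI;	/* keep MSI-X and INTx available */

	return pci_alloc_irq_vectors(pdev, 1, max_vecs, flags);
}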
pinctrl_free_pindescs(pctldev, pctldev->desc->pins, - pctldev->desc->npins); - mutex_destroy(&pctldev->mutex); - kfree(pctldev); - + dev_err(pctldev->dev, "could not claim hogs: %i\n", error); return error; } diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index df1efc2e52..6a94ecd6a8 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) for (state = 0; ; state++) { /* Retrieve the pinctrl-* property */ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); - if (!propname) - return -ENOMEM; + if (!propname) { + ret = -ENOMEM; + goto err; + } prop = of_find_property(np, propname, &size); kfree(propname); if (!prop) { if (state == 0) { - of_node_put(np); - return -ENODEV; + ret = -ENODEV; + goto err; } break; } diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index ac97724c59..1edb6041fb 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -278,33 +278,33 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 }; static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 }; static const struct intel_pingroup byt_score_groups[] = { - PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1), - PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1), - PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1), - PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1), - PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1), - PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1), - PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1), - PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1), - PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1), - PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1), - PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1), - PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1), - PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1), - PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1), - PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1), - PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values), - PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1), - PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1), - PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1), - PIN_GROUP("sata_grp", byt_score_sata_pins, 1), - PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1), - PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1), - PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1), - PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1), - PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1), - PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1), - PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1), + PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1), + PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1), + PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1), + PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1), + PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1), + PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1), + PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1), + PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1), + PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1), + PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1), + PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1), + PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1), + PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1), + PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1), + PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1), + 
PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values), + PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1), + PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1), + PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1), + PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1), + PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1), + PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1), + PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1), + PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1), + PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1), + PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1), + PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1), }; static const char * const byt_score_uart_groups[] = { @@ -332,12 +332,14 @@ static const char * const byt_score_plt_clk_groups[] = { }; static const char * const byt_score_smbus_groups[] = { "smbus_grp" }; static const char * const byt_score_gpio_groups[] = { - "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp", - "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp", - "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp", - "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp", - "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp", - "plt_clk4_grp", "plt_clk5_grp", "smbus_grp", + "uart1_grp_gpio", "uart2_grp_gpio", "pwm0_grp_gpio", + "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio", + "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio", + "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio", + "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio", + "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio", + "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio", + "plt_clk5_grp_gpio", "smbus_grp_gpio", }; static const struct intel_function byt_score_functions[] = { @@ -456,8 +458,8 @@ static const struct intel_pingroup byt_sus_groups[] = { PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values), PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values), PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values), - PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1), - PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1), + PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1), + PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1), }; static const char * const byt_sus_usb_groups[] = { @@ -469,7 +471,7 @@ static const char * const byt_sus_pmu_clk_groups[] = { }; static const char * const byt_sus_gpio_groups[] = { "usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio", - "pmu_clk1_grp", "pmu_clk2_grp", + "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio", }; static const struct intel_function byt_sus_functions[] = { diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index fde65e18cd..6981e2fab9 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -179,6 +179,10 @@ struct intel_community { .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \ } +#define PIN_GROUP_GPIO(n, p, m) \ + PIN_GROUP(n, p, m), \ + PIN_GROUP(n "_gpio", p, 0) + #define FUNCTION(n, g) \ { \ .func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \ diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index b6bc31abd2..b19bc39170 100644 --- 
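The Baytrail pinctrl hunks above rely on the new PIN_GROUP_GPIO() helper from pinctrl-intel.h, which emits two group entries per invocation: the native-function group and a "<name>_gpio" twin whose mode value 0 selects GPIO muxing. A scaled-down analogue showing how one macro call expands into two array initializers; the structure and pin numbers are made up:

#include <linux/kernel.h>

struct demo_pingroup {
	const char *name;
	const unsigned int *pins;
	unsigned int npins;
	unsigned int mode;
};

#define DEMO_PIN_GROUP(n, p, m) \
	{ .name = (n), .pins = (p), .npins = ARRAY_SIZE(p), .mode = (m) }

#define DEMO_PIN_GROUP_GPIO(n, p, m) \
	DEMO_PIN_GROUP(n, p, m), \
	DEMO_PIN_GROUP(n "_gpio", p, 0)

static const unsigned int demo_uart1_pins[] = { 72, 73, 74, 75 };

static const struct demo_pingroup demo_groups[] = {
	/* Expands to "uart1_grp" (mode 1) and "uart1_grp_gpio" (mode 0). */
	DEMO_PIN_GROUP_GPIO("uart1_grp", demo_uart1_pins, 1),
};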
a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret); break; case PIN_CONFIG_INPUT_ENABLE: - case PIN_CONFIG_OUTPUT_ENABLE: + err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret); + if (!ret) + err = -EINVAL; + break; + case PIN_CONFIG_OUTPUT: err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret); if (err) break; - /* CONFIG Current direction return value - * ------------- ----------------- ---------------------- - * OUTPUT_ENABLE output 1 (= HW value) - * input 0 (= HW value) - * INPUT_ENABLE output 0 (= reverse HW value) - * input 1 (= reverse HW value) - */ - if (param == PIN_CONFIG_INPUT_ENABLE) - ret = !ret; + if (!ret) { + err = -EINVAL; + break; + } + + err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret); break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret); @@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev, } err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret); + if (!ret) + err = -EINVAL; break; case PIN_CONFIG_DRIVE_STRENGTH: if (!hw->soc->drive_get) @@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break; err = hw->soc->bias_set_combo(hw, desc, 0, arg); break; - case PIN_CONFIG_OUTPUT_ENABLE: - err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT, - MTK_DISABLE); - /* Keep set direction to consider the case that a GPIO pin - * does not have SMT control - */ - if (err != -ENOTSUPP) - break; - - err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, - MTK_OUTPUT); - break; case PIN_CONFIG_INPUT_ENABLE: /* regard all non-zero value as enable */ err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg); - if (err) - break; - - err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR, - MTK_INPUT); break; case PIN_CONFIG_SLEW_RATE: /* regard all non-zero value as enable */ diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c index 79f5d753d7..50a87d9618 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-a1.c +++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c @@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[] = { GPIOX_10 }; static const unsigned int pdm_din2_a_pins[] = { GPIOA_6 }; static const unsigned int pdm_din1_a_pins[] = { GPIOA_7 }; static const unsigned int pdm_din0_a_pins[] = { GPIOA_8 }; -static const unsigned int pdm_dclk_pins[] = { GPIOA_9 }; +static const unsigned int pdm_dclk_a_pins[] = { GPIOA_9 }; /* gen_clk */ static const unsigned int gen_clk_x_pins[] = { GPIOX_7 }; @@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = { GROUP(pdm_din2_a, 3), GROUP(pdm_din1_a, 3), GROUP(pdm_din0_a, 3), - GROUP(pdm_dclk, 3), + GROUP(pdm_dclk_a, 3), GROUP(pwm_c_a, 3), GROUP(pwm_b_a, 3), @@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = { static const char * const pdm_groups[] = { "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a", - "pdm_din1_a", "pdm_din0_a", "pdm_dclk", + "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a", }; static const char * const gen_clk_groups[] = { diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index ee2e164f86..38c932df64 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -598,6 +598,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = { .driver_data = 
&quirk_acer_predator_v4, }, { + .callback = dmi_matched, + .ident = "Acer Predator PH18-71", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Predator PH18-71"), + }, + .driver_data = &quirk_acer_predator_v4, + }, + { .callback = set_force_caps, .ident = "Acer Aspire Switch 10E SW3-016", .matches = { diff --git a/drivers/platform/x86/amd/pmf/acpi.c b/drivers/platform/x86/amd/pmf/acpi.c index f2eb07ef85..1c0d2bbc1f 100644 --- a/drivers/platform/x86/amd/pmf/acpi.c +++ b/drivers/platform/x86/amd/pmf/acpi.c @@ -309,7 +309,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev) status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev); if (ACPI_FAILURE(status)) { - dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status); + dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status); return -EINVAL; } diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 08df949460..30951f7131 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -719,6 +719,7 @@ static struct miscdevice isst_if_char_driver = { }; static const struct x86_cpu_id hpm_cpu_ids[] = { + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, NULL), X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, NULL), X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, NULL), {} diff --git a/drivers/power/supply/mt6360_charger.c b/drivers/power/supply/mt6360_charger.c index 1305cba61e..aca123783e 100644 --- a/drivers/power/supply/mt6360_charger.c +++ b/drivers/power/supply/mt6360_charger.c @@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = { }; static const struct regulator_desc mt6360_otg_rdesc = { - .of_match = "usb-otg-vbus", + .of_match = "usb-otg-vbus-regulator", .name = "usb-otg-vbus", .ops = &mt6360_chg_otg_ops, .owner = THIS_MODULE, diff --git a/drivers/power/supply/rt9455_charger.c b/drivers/power/supply/rt9455_charger.c index c345a77f9f..e4dbacd50a 100644 --- a/drivers/power/supply/rt9455_charger.c +++ b/drivers/power/supply/rt9455_charger.c @@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = { 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000 }; +#if IS_ENABLED(CONFIG_USB_PHY) /* * When the charger is in boost mode, REG02[7:2] represent boost output * voltage. 
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = { 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, }; +#endif /* REG07[3:0] (VMREG) in uV */ static const int rt9455_vmreg_values[] = { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a968dabb48..1b897d0009 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1911,19 +1911,24 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, } } - if (err != -EEXIST) + if (err != -EEXIST) { regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs); - if (IS_ERR(regulator->debugfs)) - rdev_dbg(rdev, "Failed to create debugfs directory\n"); + if (IS_ERR(regulator->debugfs)) { + rdev_dbg(rdev, "Failed to create debugfs directory\n"); + regulator->debugfs = NULL; + } + } - debugfs_create_u32("uA_load", 0444, regulator->debugfs, - ®ulator->uA_load); - debugfs_create_u32("min_uV", 0444, regulator->debugfs, - ®ulator->voltage[PM_SUSPEND_ON].min_uV); - debugfs_create_u32("max_uV", 0444, regulator->debugfs, - ®ulator->voltage[PM_SUSPEND_ON].max_uV); - debugfs_create_file("constraint_flags", 0444, regulator->debugfs, - regulator, &constraint_flags_fops); + if (regulator->debugfs) { + debugfs_create_u32("uA_load", 0444, regulator->debugfs, + ®ulator->uA_load); + debugfs_create_u32("min_uV", 0444, regulator->debugfs, + ®ulator->voltage[PM_SUSPEND_ON].min_uV); + debugfs_create_u32("max_uV", 0444, regulator->debugfs, + ®ulator->voltage[PM_SUSPEND_ON].max_uV); + debugfs_create_file("constraint_flags", 0444, regulator->debugfs, + regulator, &constraint_flags_fops); + } /* * Check now if the regulator is an always on regulator - if diff --git a/drivers/regulator/mt6360-regulator.c b/drivers/regulator/mt6360-regulator.c index ad6587a378..24cc9fc94e 100644 --- a/drivers/regulator/mt6360-regulator.c +++ b/drivers/regulator/mt6360-regulator.c @@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode) } } -#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg, vmask, \ - mreg, mmask, streg, stmask, vranges, \ - vcnts, offon_delay, irq_tbls) \ +#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \ + vmask, mreg, mmask, streg, stmask, \ + vranges, vcnts, offon_delay, irq_tbls) \ { \ .desc = { \ .name = #_name, \ .supply_name = #_sname, \ .id = MT6360_REGULATOR_##_name, \ - .of_match = of_match_ptr(#_name), \ + .of_match = of_match_ptr(match), \ .regulators_node = of_match_ptr("regulator"), \ .of_map_mode = mt6360_regulator_of_map_mode, \ .owner = THIS_MODULE, \ @@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode) } static const struct mt6360_regulator_desc mt6360_regulator_descs[] = { - MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04, + MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN, + 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04, buck_vout_ranges, 256, 0, buck1_irq_tbls), - MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04, + MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN, + 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04, buck_vout_ranges, 256, 0, buck2_irq_tbls), - MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04, + MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3, + 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04, ldo_vout_ranges1, 256, 0, ldo6_irq_tbls), - 
MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04, + MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3, + 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04, ldo_vout_ranges1, 256, 0, ldo7_irq_tbls), - MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04, + MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1, + 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04, ldo_vout_ranges2, 256, 0, ldo1_irq_tbls), - MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04, + MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1, + 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04, ldo_vout_ranges2, 256, 0, ldo2_irq_tbls), - MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04, + MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1, + 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04, ldo_vout_ranges2, 256, 100, ldo3_irq_tbls), - MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04, + MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2, + 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04, ldo_vout_ranges3, 128, 100, ldo5_irq_tbls), }; diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c index a06f5f2d79..9c2f0dd426 100644 --- a/drivers/regulator/tps65132-regulator.c +++ b/drivers/regulator/tps65132-regulator.c @@ -267,10 +267,17 @@ static const struct i2c_device_id tps65132_id[] = { }; MODULE_DEVICE_TABLE(i2c, tps65132_id); +static const struct of_device_id __maybe_unused tps65132_of_match[] = { + { .compatible = "ti,tps65132" }, + {}, +}; +MODULE_DEVICE_TABLE(of, tps65132_of_match); + static struct i2c_driver tps65132_i2c_driver = { .driver = { .name = "tps65132", .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .of_match_table = of_match_ptr(tps65132_of_match), }, .probe = tps65132_probe, .id_table = tps65132_id, diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c index a35409eda0..67518291a8 100644 --- a/drivers/remoteproc/mtk_scp.c +++ b/drivers/remoteproc/mtk_scp.c @@ -132,7 +132,7 @@ static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp, static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw) { int ret; - size_t offset; + size_t buf_sz, offset; /* read the ipi buf addr from FW itself first */ ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset); @@ -144,6 +144,14 @@ static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw) } dev_info(scp->dev, "IPI buf addr %#010zx\n", offset); + /* Make sure IPI buffer fits in the L2TCM range assigned to this core */ + buf_sz = sizeof(*scp->recv_buf) + sizeof(*scp->send_buf); + + if (scp->sram_size < buf_sz + offset) { + dev_err(scp->dev, "IPI buffer does not fit in SRAM.\n"); + return -EOVERFLOW; + } + scp->recv_buf = (struct mtk_share_obj __iomem *) (scp->sram_base + offset); scp->send_buf = (struct mtk_share_obj __iomem *) diff --git a/drivers/s390/cio/cio_inject.c b/drivers/s390/cio/cio_inject.c index 8613fa9372..a2e771ebae 100644 --- a/drivers/s390/cio/cio_inject.c +++ b/drivers/s390/cio/cio_inject.c @@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf, return -EINVAL; } - buffer = vmemdup_user(buf, lbuf); + buffer = memdup_user_nul(buf, lbuf); if (IS_ERR(buffer)) return -ENOMEM; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 601d00e09d..dc5ae75d46 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -364,30 
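The mtk_scp hunk above adds a check that the IPI buffers fit inside the SRAM window before deriving pointers from a firmware-supplied offset. Written with the subtraction first, the same check also cannot wrap around; this sketch is a hedged generalisation, not the driver's exact code:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_check_sram_window(size_t sram_size, size_t offset,
				  size_t buf_sz)
{
	/* Reject offsets past the window, then compare against what is left. */
	if (offset > sram_size || buf_sz > sram_size - offset)
		return -EOVERFLOW;

	return 0;
}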
+364,33 @@ out: return rc; } +static void qeth_free_cq(struct qeth_card *card) +{ + if (card->qdio.c_q) { + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; + } +} + static int qeth_alloc_cq(struct qeth_card *card) { if (card->options.cq == QETH_CQ_ENABLED) { QETH_CARD_TEXT(card, 2, "cqon"); - card->qdio.c_q = qeth_alloc_qdio_queue(); if (!card->qdio.c_q) { - dev_err(&card->gdev->dev, "Failed to create completion queue\n"); - return -ENOMEM; + card->qdio.c_q = qeth_alloc_qdio_queue(); + if (!card->qdio.c_q) { + dev_err(&card->gdev->dev, + "Failed to create completion queue\n"); + return -ENOMEM; + } } } else { QETH_CARD_TEXT(card, 2, "nocq"); - card->qdio.c_q = NULL; + qeth_free_cq(card); } return 0; } -static void qeth_free_cq(struct qeth_card *card) -{ - if (card->qdio.c_q) { - qeth_free_qdio_queue(card->qdio.c_q); - card->qdio.c_q = NULL; - } -} - static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, int delayed) { @@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) QETH_CARD_TEXT(card, 2, "allcqdbf"); + /* completion */ + if (qeth_alloc_cq(card)) + goto out_err; + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) return 0; @@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card) queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; } - /* completion */ - if (qeth_alloc_cq(card)) - goto out_freeoutq; - return 0; out_freeoutq: @@ -2677,6 +2680,8 @@ out_freeoutq: qeth_free_buffer_pool(card); out_buffer_pool: atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + qeth_free_cq(card); +out_err: return -ENOMEM; } @@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card) { int i, j; + qeth_free_cq(card); + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == QETH_QDIO_UNINITIALIZED) return; - qeth_free_cq(card); for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { if (card->qdio.in_q->bufs[j].rx_skb) { consume_skb(card->qdio.in_q->bufs[j].rx_skb); @@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) { - int rc; - - if (card->options.cq == QETH_CQ_NOTAVAILABLE) { - rc = -1; - goto out; - } else { - if (card->options.cq == cq) { - rc = 0; - goto out; - } - - qeth_free_qdio_queues(card); - card->options.cq = cq; - rc = 0; - } -out: - return rc; + if (card->options.cq == QETH_CQ_NOTAVAILABLE) + return -1; + card->options.cq = cq; + return 0; } EXPORT_SYMBOL_GPL(qeth_configure_cq); diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 2c246e80c1..d91659811e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -833,7 +833,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); - spin_lock_bh(&tgt->cq_lock); ctx_base_ptr = tgt->ctx_base; tgt->ctx_base = NULL; @@ -889,7 +888,6 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, tgt->sq, tgt->sq_dma); tgt->sq = NULL; } - spin_unlock_bh(&tgt->cq_lock); if (ctx_base_ptr) iounmap(ctx_base_ptr); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index b56fbc61a1..86112f2347 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2244,7 +2244,15 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task, case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: if 
((dw0 & CMPLT_HDR_RSPNS_XFRD_MSK) && (sipc_rx_err_type & RX_FIS_STATUS_ERR_MSK)) { - ts->stat = SAS_PROTO_RESPONSE; + if (task->ata_task.use_ncq) { + struct domain_device *device = task->dev; + struct hisi_sas_device *sas_dev = device->lldd_dev; + + sas_dev->dev_status = HISI_SAS_DEV_NCQ_ERR; + slot->abort = 1; + } else { + ts->stat = SAS_PROTO_RESPONSE; + } } else if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) { ts->residual = trans_tx_fail_type; ts->stat = SAS_DATA_UNDERRUN; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 5c261005b7..f6e6db8b8a 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -135,7 +135,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size, static inline void *alloc_smp_req(int size) { - u8 *p = kzalloc(size, GFP_KERNEL); + u8 *p = kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), GFP_KERNEL); if (p) p[0] = SMP_REQUEST; return p; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 04d608ea91..9670cb2bf1 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1325,7 +1325,6 @@ struct lpfc_hba { struct timer_list fabric_block_timer; unsigned long bit_flags; atomic_t num_rsrc_err; - atomic_t num_cmd_success; unsigned long last_rsrc_error_time; unsigned long last_ramp_down_time; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS @@ -1430,6 +1429,7 @@ struct lpfc_hba { struct timer_list inactive_vmid_poll; /* RAS Support */ + spinlock_t ras_fwlog_lock; /* do not take while holding another lock */ struct lpfc_ras_fwlog ras_fwlog; uint32_t iocb_cnt; diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index d3a5d6ecdf..6f97a04171 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -5864,9 +5864,9 @@ lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) return -EINVAL; - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); state = phba->ras_fwlog.state; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); if (state == REG_INPROGRESS) { lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 2919579fa0..c305d16cfa 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -5070,12 +5070,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job) bsg_reply->reply_data.vendor_reply.vendor_rsp; /* Current logging state */ - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (ras_fwlog->state == ACTIVE) ras_reply->state = LPFC_RASLOG_STATE_RUNNING; else ras_reply->state = LPFC_RASLOG_STATE_STOPPED; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); ras_reply->log_level = phba->ras_fwlog.fw_loglevel; ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; @@ -5132,13 +5132,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) if (action == LPFC_RASACTION_STOP_LOGGING) { /* Check if already disabled */ - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (ras_fwlog->state != ACTIVE) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); rc = -ESRCH; goto ras_job_error; } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); /* Disable logging */ lpfc_ras_stop_fwlog(phba); @@ -5149,10 +5149,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) * FW-logging with new log-level. 
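The libsas change in this series rounds the SMP request allocation up to ARCH_DMA_MINALIGN. Since kmalloc() may return small buffers with less than full cacheline alignment, padding the size keeps a buffer handed to non-coherent DMA from sharing a cacheline with unrelated data. A one-line sketch of the allocation pattern; the wrapper name is hypothetical:

#include <linux/align.h>
#include <linux/cache.h>
#include <linux/slab.h>

static void *demo_alloc_dma_safe(size_t size, gfp_t gfp)
{
	/* Round up so the kmalloc bucket is ARCH_DMA_MINALIGN aligned and padded. */
	return kzalloc(ALIGN(size, ARCH_DMA_MINALIGN), gfp);
}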
Return status * "Logging already Running" to caller. **/ - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (ras_fwlog->state != INACTIVE) action_status = -EINPROGRESS; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); /* Enable logging */ rc = lpfc_sli4_ras_fwlog_init(phba, log_level, @@ -5268,13 +5268,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) goto ras_job_error; /* Logging to be stopped before reading */ - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (ras_fwlog->state == ACTIVE) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); rc = -EINPROGRESS; goto ras_job_error; } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); if (job->request_len < sizeof(struct fc_bsg_request) + diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index ea9b42225e..20662b4f33 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -2196,12 +2196,12 @@ static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, memset(buffer, 0, size); - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (phba->ras_fwlog.state != ACTIVE) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); return -EINVAL; } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); list_for_each_entry_safe(dmabuf, next, &phba->ras_fwlog.fwlog_buff_list, list) { @@ -2252,13 +2252,13 @@ lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) int size; int rc = -ENOMEM; - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); if (phba->ras_fwlog.state != ACTIVE) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); rc = -EINVAL; goto out; } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE, phba->cfg_ras_fwlog_buffsize, &size)) diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 4d72320069..26736122bd 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -4462,23 +4462,23 @@ lpfc_els_retry_delay(struct timer_list *t) unsigned long flags; struct lpfc_work_evt *evtp = &ndlp->els_retry_evt; + /* Hold a node reference for outstanding queued work */ + if (!lpfc_nlp_get(ndlp)) + return; + spin_lock_irqsave(&phba->hbalock, flags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, flags); + lpfc_nlp_put(ndlp); return; } - /* We need to hold the node by incrementing the reference - * count until the queued work is done - */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); - if (evtp->evt_arg1) { - evtp->evt = LPFC_EVT_ELS_RETRY; - list_add_tail(&evtp->evt_listp, &phba->work_list); - lpfc_worker_wake_up(phba); - } + evtp->evt_arg1 = ndlp; + evtp->evt = LPFC_EVT_ELS_RETRY; + list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, flags); - return; + + lpfc_worker_wake_up(phba); } /** diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index f80bbc315f..da3aee0f63 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -257,7 +257,9 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_worker_wake_up(phba); + return; } 
spin_unlock_irqrestore(&phba->hbalock, iflags); } else { @@ -275,10 +277,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } - } - - return; } /** diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 70bcee64bc..858ca395c0 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -7698,6 +7698,9 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) "NVME" : " "), (phba->nvmet_support ? "NVMET" : " ")); + /* ras_fwlog state */ + spin_lock_init(&phba->ras_fwlog_lock); + /* Initialize the IO buffer list used by driver for SLI3 SCSI */ spin_lock_init(&phba->scsi_buf_list_get_lock); INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); @@ -13047,7 +13050,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) rc = request_threaded_irq(eqhdl->irq, &lpfc_sli4_hba_intr_handler, &lpfc_sli4_hba_intr_handler_th, - IRQF_ONESHOT, name, eqhdl); + 0, name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 128fc1bab5..47218cf4d1 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2616,9 +2616,9 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* No concern about the role change on the nvme remoteport. * The transport will update it. */ - spin_lock_irq(&vport->phba->hbalock); + spin_lock_irq(&ndlp->lock); ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT; - spin_unlock_irq(&vport->phba->hbalock); + spin_unlock_irq(&ndlp->lock); /* Don't let the host nvme transport keep sending keep-alives * on this remoteport. Vport is unloading, no recovery. The diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index bf879d8184..cf506556f3 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -167,11 +167,10 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) struct Scsi_Host *shost; struct scsi_device *sdev; unsigned long new_queue_depth; - unsigned long num_rsrc_err, num_cmd_success; + unsigned long num_rsrc_err; int i; num_rsrc_err = atomic_read(&phba->num_rsrc_err); - num_cmd_success = atomic_read(&phba->num_cmd_success); /* * The error and success command counters are global per @@ -186,20 +185,16 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { - new_queue_depth = - sdev->queue_depth * num_rsrc_err / - (num_rsrc_err + num_cmd_success); - if (!new_queue_depth) - new_queue_depth = sdev->queue_depth - 1; + if (num_rsrc_err >= sdev->queue_depth) + new_queue_depth = 1; else new_queue_depth = sdev->queue_depth - - new_queue_depth; + num_rsrc_err; scsi_change_queue_depth(sdev, new_queue_depth); } } lpfc_destroy_vport_work_array(phba, vports); atomic_set(&phba->num_rsrc_err, 0); - atomic_set(&phba->num_cmd_success, 0); } /** diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 706985358c..e182107255 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1217,9 +1217,9 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, empty = list_empty(&phba->active_rrq_list); list_add_tail(&rrq->list, &phba->active_rrq_list); phba->hba_flag |= HBA_RRQ_ACTIVE; + spin_unlock_irqrestore(&phba->hbalock, iflags); if (empty) lpfc_worker_wake_up(phba); - 
spin_unlock_irqrestore(&phba->hbalock, iflags); return 0; out: spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -6849,9 +6849,9 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba) { struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); ras_fwlog->state = INACTIVE; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); /* Disable FW logging to host memory */ writel(LPFC_CTL_PDEV_CTL_DDL_RAS, @@ -6894,9 +6894,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) ras_fwlog->lwpd.virt = NULL; } - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); ras_fwlog->state = INACTIVE; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); } /** @@ -6998,9 +6998,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto disable_ras; } - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); ras_fwlog->state = ACTIVE; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); mempool_free(pmb, phba->mbox_mem_pool); return; @@ -7032,9 +7032,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; int rc = 0; - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); ras_fwlog->state = INACTIVE; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize); @@ -7095,9 +7095,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); - spin_lock_irq(&phba->hbalock); + spin_lock_irq(&phba->ras_fwlog_lock); ras_fwlog->state = REG_INPROGRESS; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irq(&phba->ras_fwlog_lock); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; @@ -11373,18 +11373,18 @@ lpfc_sli_post_recovery_event(struct lpfc_hba *phba, unsigned long iflags; struct lpfc_work_evt *evtp = &ndlp->recovery_evt; + /* Hold a node reference for outstanding queued work */ + if (!lpfc_nlp_get(ndlp)) + return; + spin_lock_irqsave(&phba->hbalock, iflags); if (!list_empty(&evtp->evt_listp)) { spin_unlock_irqrestore(&phba->hbalock, iflags); + lpfc_nlp_put(ndlp); return; } - /* Incrementing the reference count until the queued work is done. */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); - if (!evtp->evt_arg1) { - spin_unlock_irqrestore(&phba->hbalock, iflags); - return; - } + evtp->evt_arg1 = ndlp; evtp->evt = LPFC_EVT_RECOVER_PORT; list_add_tail(&evtp->evt_listp, &phba->work_list); spin_unlock_irqrestore(&phba->hbalock, iflags); diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 6c7559cf1a..9e0e9e02d2 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -683,10 +683,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_free_sysfs_attr(vport); lpfc_debugfs_terminate(vport); - /* Remove FC host to break driver binding. */ - fc_remove_host(shost); - scsi_remove_host(shost); - /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */ ndlp = lpfc_findnode_did(vport, Fabric_DID); if (!ndlp) @@ -730,6 +726,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport) skip_logo: + /* Remove FC host to break driver binding. */ + fc_remove_host(shost); + scsi_remove_host(shost); + lpfc_cleanup(vport); /* Remove scsi host now. The nodes are cleaned up. 
*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 0380996b5a..55d590b919 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -1644,7 +1644,7 @@ static long mpi3mr_bsg_process_mpt_cmds(struct bsg_job *job) if ((mpirep_offset != 0xFF) && drv_bufs[mpirep_offset].bsg_buf_len) { drv_buf_iter = &drv_bufs[mpirep_offset]; - drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) - 1 + + drv_buf_iter->kern_buf_len = (sizeof(*bsg_reply_buf) + mrioc->reply_sz); bsg_reply_buf = kzalloc(drv_buf_iter->kern_buf_len, GFP_KERNEL); diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 77aa6d2647..0da5d9d1af 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c @@ -1451,7 +1451,11 @@ static void qcom_slim_ngd_up_worker(struct work_struct *work) ctrl = container_of(work, struct qcom_slim_ngd_ctrl, ngd_up_work); /* Make sure qmi service is up before continuing */ - wait_for_completion_interruptible(&ctrl->qmi_up); + if (!wait_for_completion_interruptible_timeout(&ctrl->qmi_up, + msecs_to_jiffies(MSEC_PER_SEC))) { + dev_err(ctrl->dev, "QMI wait timeout\n"); + return; + } mutex_lock(&ctrl->ssr_lock); qcom_slim_ngd_enable(ctrl, true); diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 9ace259d2d..2b07b6dbf8 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -6,6 +6,7 @@ */ #include <linux/clk.h> +#include <linux/fpga/adi-axi-common.h> #include <linux/idr.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -15,12 +16,6 @@ #include <linux/spi/spi.h> #include <linux/timer.h> -#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff) -#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff) -#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff) - -#define SPI_ENGINE_REG_VERSION 0x00 - #define SPI_ENGINE_REG_RESET 0x40 #define SPI_ENGINE_REG_INT_ENABLE 0x80 @@ -661,12 +656,12 @@ static int spi_engine_probe(struct platform_device *pdev) if (IS_ERR(spi_engine->base)) return PTR_ERR(spi_engine->base); - version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION); - if (SPI_ENGINE_VERSION_MAJOR(version) != 1) { - dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n", - SPI_ENGINE_VERSION_MAJOR(version), - SPI_ENGINE_VERSION_MINOR(version), - SPI_ENGINE_VERSION_PATCH(version)); + version = readl(spi_engine->base + ADI_AXI_REG_VERSION); + if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) { + dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n", + ADI_AXI_PCORE_VER_MAJOR(version), + ADI_AXI_PCORE_VER_MINOR(version), + ADI_AXI_PCORE_VER_PATCH(version)); return -ENODEV; } diff --git a/drivers/spi/spi-hisi-kunpeng.c b/drivers/spi/spi-hisi-kunpeng.c index 35ef5e8e2f..77e9738e42 100644 --- a/drivers/spi/spi-hisi-kunpeng.c +++ b/drivers/spi/spi-hisi-kunpeng.c @@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = { HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR), HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC), HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR), - HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN), - HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT), HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR), HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR), HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR), diff --git a/drivers/spi/spi-microchip-core-qspi.c b/drivers/spi/spi-microchip-core-qspi.c index 03d125a71f..09f16471c5 100644 --- a/drivers/spi/spi-microchip-core-qspi.c +++ b/drivers/spi/spi-microchip-core-qspi.c @@ -283,6 +283,7 @@ static int 
mchp_coreqspi_setup_clock(struct mchp_coreqspi *qspi, struct spi_devi } control = readl_relaxed(qspi->regs + REG_CONTROL); + control &= ~CONTROL_CLKRATE_MASK; control |= baud_rate_val << CONTROL_CLKRATE_SHIFT; writel_relaxed(control, qspi->regs + REG_CONTROL); control = readl_relaxed(qspi->regs + REG_CONTROL); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 46f1535487..a7194f29c2 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -4431,6 +4431,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message) wait_for_completion(&done); status = message->status; } + message->complete = NULL; message->context = NULL; return status; diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index c1fbcdd161..c40217f44b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -3672,6 +3672,8 @@ static int __init target_core_init_configfs(void) { struct configfs_subsystem *subsys = &target_core_fabrics; struct t10_alua_lu_gp *lu_gp; + struct cred *kern_cred; + const struct cred *old_cred; int ret; pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage" @@ -3748,11 +3750,21 @@ static int __init target_core_init_configfs(void) if (ret < 0) goto out; + /* We use the kernel credentials to access the target directory */ + kern_cred = prepare_kernel_cred(&init_task); + if (!kern_cred) { + ret = -ENOMEM; + goto out; + } + old_cred = override_creds(kern_cred); target_init_dbroot(); + revert_creds(old_cred); + put_cred(kern_cred); return 0; out: + target_xcopy_release_pt(); configfs_unregister_subsystem(subsys); core_dev_release_virtual_lun0(); rd_module_exit(); diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index d78d54ae26..5693cc8b23 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -139,11 +139,13 @@ struct tz_episode { * we keep track of the current position in the history array. 
* * @tz_episodes: a list of thermal mitigation episodes + * @tz: thermal zone this object belongs to * @trips_crossed: an array of trip points crossed by id * @nr_trips: the number of trip points currently being crossed */ struct tz_debugfs { struct list_head tz_episodes; + struct thermal_zone_device *tz; int *trips_crossed; int nr_trips; }; @@ -503,15 +505,23 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev) */ void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev) { - struct thermal_debugfs *thermal_dbg = cdev->debugfs; + struct thermal_debugfs *thermal_dbg; - if (!thermal_dbg) + mutex_lock(&cdev->lock); + + thermal_dbg = cdev->debugfs; + if (!thermal_dbg) { + mutex_unlock(&cdev->lock); return; + } + + cdev->debugfs = NULL; + + mutex_unlock(&cdev->lock); mutex_lock(&thermal_dbg->lock); thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg); - cdev->debugfs = NULL; mutex_unlock(&thermal_dbg->lock); @@ -716,8 +726,7 @@ out: static void *tze_seq_start(struct seq_file *s, loff_t *pos) { - struct thermal_zone_device *tz = s->private; - struct thermal_debugfs *thermal_dbg = tz->debugfs; + struct thermal_debugfs *thermal_dbg = s->private; struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg; mutex_lock(&thermal_dbg->lock); @@ -727,8 +736,7 @@ static void *tze_seq_start(struct seq_file *s, loff_t *pos) static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos) { - struct thermal_zone_device *tz = s->private; - struct thermal_debugfs *thermal_dbg = tz->debugfs; + struct thermal_debugfs *thermal_dbg = s->private; struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg; return seq_list_next(v, &tz_dbg->tz_episodes, pos); @@ -736,15 +744,15 @@ static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos) static void tze_seq_stop(struct seq_file *s, void *v) { - struct thermal_zone_device *tz = s->private; - struct thermal_debugfs *thermal_dbg = tz->debugfs; + struct thermal_debugfs *thermal_dbg = s->private; mutex_unlock(&thermal_dbg->lock); } static int tze_seq_show(struct seq_file *s, void *v) { - struct thermal_zone_device *tz = s->private; + struct thermal_debugfs *thermal_dbg = s->private; + struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz; struct thermal_trip *trip; struct tz_episode *tze; const char *type; @@ -810,6 +818,8 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz) tz_dbg = &thermal_dbg->tz_dbg; + tz_dbg->tz = tz; + tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL); if (!tz_dbg->trips_crossed) { thermal_debugfs_remove_id(thermal_dbg); @@ -818,23 +828,44 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz) INIT_LIST_HEAD(&tz_dbg->tz_episodes); - debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops); + debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, + thermal_dbg, &tze_fops); tz->debugfs = thermal_dbg; } void thermal_debug_tz_remove(struct thermal_zone_device *tz) { - struct thermal_debugfs *thermal_dbg = tz->debugfs; + struct thermal_debugfs *thermal_dbg; + struct tz_episode *tze, *tmp; + struct tz_debugfs *tz_dbg; + int *trips_crossed; - if (!thermal_dbg) + mutex_lock(&tz->lock); + + thermal_dbg = tz->debugfs; + if (!thermal_dbg) { + mutex_unlock(&tz->lock); return; + } + + tz->debugfs = NULL; + + mutex_unlock(&tz->lock); + + tz_dbg = &thermal_dbg->tz_dbg; mutex_lock(&thermal_dbg->lock); - tz->debugfs = NULL; + trips_crossed = tz_dbg->trips_crossed; + + list_for_each_entry_safe(tze, tmp, &tz_dbg->tz_episodes, node) { + list_del(&tze->node); + kfree(tze); + } 
mutex_unlock(&thermal_dbg->lock); thermal_debugfs_remove_id(thermal_dbg); + kfree(trips_crossed); } diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c index 7ce7bb1640..58ea1e1391 100644 --- a/drivers/tty/serial/kgdboc.c +++ b/drivers/tty/serial/kgdboc.c @@ -19,6 +19,7 @@ #include <linux/console.h> #include <linux/vt_kern.h> #include <linux/input.h> +#include <linux/irq_work.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/serial_core.h> @@ -48,6 +49,25 @@ static struct kgdb_io kgdboc_earlycon_io_ops; static int (*earlycon_orig_exit)(struct console *con); #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */ +/* + * When we leave the debug trap handler we need to reset the keyboard status + * (since the original keyboard state gets partially clobbered by kdb use of + * the keyboard). + * + * The path to deliver the reset is somewhat circuitous. + * + * To deliver the reset we register an input handler, reset the keyboard and + * then deregister the input handler. However, to get this done right, we do + * have to carefully manage the calling context because we can only register + * input handlers from task context. + * + * In particular we need to trigger the action from the debug trap handler with + * all its NMI and/or NMI-like oddities. To solve this the kgdboc trap exit code + * (the "post_exception" callback) uses irq_work_queue(), which is NMI-safe, to + * schedule a callback from a hardirq context. From there we have to defer the + * work again, this time using schedule_work(), to get a callback using the + * system workqueue, which runs in task context. + */ #ifdef CONFIG_KDB_KEYBOARD static int kgdboc_reset_connect(struct input_handler *handler, struct input_dev *dev, @@ -99,10 +119,17 @@ static void kgdboc_restore_input_helper(struct work_struct *dummy) static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper); +static void kgdboc_queue_restore_input_helper(struct irq_work *unused) +{ + schedule_work(&kgdboc_restore_input_work); +} + +static DEFINE_IRQ_WORK(kgdboc_restore_input_irq_work, kgdboc_queue_restore_input_helper); + static void kgdboc_restore_input(void) { if (likely(system_state == SYSTEM_RUNNING)) - schedule_work(&kgdboc_restore_input_work); + irq_work_queue(&kgdboc_restore_input_irq_work); } static int kgdboc_register_kbd(char **cptr) @@ -133,6 +160,7 @@ static void kgdboc_unregister_kbd(void) i--; } } + irq_work_sync(&kgdboc_restore_input_irq_work); flush_work(&kgdboc_restore_input_work); } #else /* ! 
CONFIG_KDB_KEYBOARD */ diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 0787456c2b..c873fd8239 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -94,7 +94,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds) val = ufshcd_readl(hba, REG_UFS_MCQ_CFG); val &= ~MCQ_CFG_MAC_MASK; - val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds); + val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1); ufshcd_writel(hba, val, REG_UFS_MCQ_CFG); } EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 3b89c9d4aa..4a07a18cf8 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -3172,7 +3172,9 @@ retry: /* MCQ mode */ if (is_mcq_enabled(hba)) { - err = ufshcd_clear_cmd(hba, lrbp->task_tag); + /* successfully cleared the command, retry if needed */ + if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) + err = -EAGAIN; hba->dev_cmd.complete = NULL; return err; } @@ -9745,7 +9747,10 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) /* UFS device & link must be active before we enter in this function */ if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) { - ret = -EINVAL; + /* Wait err handler finish or trigger err recovery */ + if (!ufshcd_eh_in_progress(hba)) + ufshcd_force_error_recovery(hba); + ret = -EBUSY; goto enable_scaling; } diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c index 20d9762331..6be3462b10 100644 --- a/drivers/uio/uio_hv_generic.c +++ b/drivers/uio/uio_hv_generic.c @@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata) { if (pdata->send_gpadl.gpadl_handle) { vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl); - vfree(pdata->send_buf); + if (!pdata->send_gpadl.decrypted) + vfree(pdata->send_buf); } if (pdata->recv_gpadl.gpadl_handle) { vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl); - vfree(pdata->recv_buf); + if (!pdata->recv_gpadl.decrypted) + vfree(pdata->recv_buf); } } @@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_establish_gpadl(channel, pdata->recv_buf, RECV_BUFFER_SIZE, &pdata->recv_gpadl); if (ret) { - vfree(pdata->recv_buf); + if (!pdata->recv_gpadl.decrypted) + vfree(pdata->recv_buf); goto fail_close; } @@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev, ret = vmbus_establish_gpadl(channel, pdata->send_buf, SEND_BUFFER_SIZE, &pdata->send_gpadl); if (ret) { - vfree(pdata->send_buf); + if (!pdata->send_gpadl.decrypted) + vfree(pdata->send_buf); goto fail_close; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 64e54163f0..48e1e43780 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -5081,9 +5081,10 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, } if (usb_endpoint_maxp(&udev->ep0.desc) == i) { ; /* Initial ep0 maxpacket guess is right */ - } else if ((udev->speed == USB_SPEED_FULL || + } else if (((udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH) && - (i == 8 || i == 16 || i == 32 || i == 64)) { + (i == 8 || i == 16 || i == 32 || i == 64)) || + (udev->speed >= USB_SPEED_SUPER && i > 0)) { /* Initial guess is wrong; use the descriptor's value */ if (udev->speed == USB_SPEED_FULL) dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i); diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 97ff2073cf..d34d67fcb4 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -50,13 +50,15 @@ static ssize_t 
disable_show(struct device *dev, struct usb_port *port_dev = to_usb_port(dev); struct usb_device *hdev = to_usb_device(dev->parent->parent); struct usb_hub *hub = usb_hub_to_struct_hub(hdev); - struct usb_interface *intf = to_usb_interface(hub->intfdev); + struct usb_interface *intf = to_usb_interface(dev->parent); int port1 = port_dev->portnum; u16 portstatus, unused; bool disabled; int rc; struct kernfs_node *kn; + if (!hub) + return -ENODEV; hub_get(hub); rc = usb_autopm_get_interface(intf); if (rc < 0) @@ -100,12 +102,14 @@ static ssize_t disable_store(struct device *dev, struct device_attribute *attr, struct usb_port *port_dev = to_usb_port(dev); struct usb_device *hdev = to_usb_device(dev->parent->parent); struct usb_hub *hub = usb_hub_to_struct_hub(hdev); - struct usb_interface *intf = to_usb_interface(hub->intfdev); + struct usb_interface *intf = to_usb_interface(dev->parent); int port1 = port_dev->portnum; bool disabled; int rc; struct kernfs_node *kn; + if (!hub) + return -ENODEV; rc = kstrtobool(buf, &disabled); if (rc) return rc; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 31684cdaaa..100041320e 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -104,6 +104,27 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc) return 0; } +void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) +{ + u32 reg; + + reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); + if (enable && !dwc->dis_u3_susphy_quirk) + reg |= DWC3_GUSB3PIPECTL_SUSPHY; + else + reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; + + dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + if (enable && !dwc->dis_u2_susphy_quirk) + reg |= DWC3_GUSB2PHYCFG_SUSPHY; + else + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); +} + void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) { u32 reg; @@ -585,11 +606,8 @@ static int dwc3_core_ulpi_init(struct dwc3 *dwc) */ static int dwc3_phy_setup(struct dwc3 *dwc) { - unsigned int hw_mode; u32 reg; - hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); - reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); /* @@ -599,21 +617,16 @@ static int dwc3_phy_setup(struct dwc3 *dwc) reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; /* - * Above 1.94a, it is recommended to set DWC3_GUSB3PIPECTL_SUSPHY - * to '0' during coreConsultant configuration. So default value - * will be '0' when the core is reset. Application needs to set it - * to '1' after the core initialization is completed. - */ - if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) - reg |= DWC3_GUSB3PIPECTL_SUSPHY; - - /* - * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be cleared after - * power-on reset, and it can be set after core initialization, which is - * after device soft-reset during initialization. + * Above DWC_usb3.0 1.94a, it is recommended to set + * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration. + * So default value will be '0' when the core is reset. Application + * needs to set it to '1' after the core initialization is completed. + * + * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be + * cleared after power-on reset, and it can be set after core + * initialization. 
*/ - if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD) - reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; + reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; if (dwc->u2ss_inp3_quirk) reg |= DWC3_GUSB3PIPECTL_U2SSINP3OK; @@ -639,9 +652,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc) if (dwc->tx_de_emphasis_quirk) reg |= DWC3_GUSB3PIPECTL_TX_DEEPH(dwc->tx_de_emphasis); - if (dwc->dis_u3_susphy_quirk) - reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; - if (dwc->dis_del_phy_power_chg_quirk) reg &= ~DWC3_GUSB3PIPECTL_DEPOCHANGE; @@ -689,24 +699,15 @@ static int dwc3_phy_setup(struct dwc3 *dwc) } /* - * Above 1.94a, it is recommended to set DWC3_GUSB2PHYCFG_SUSPHY to - * '0' during coreConsultant configuration. So default value will - * be '0' when the core is reset. Application needs to set it to - * '1' after the core initialization is completed. - */ - if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) - reg |= DWC3_GUSB2PHYCFG_SUSPHY; - - /* - * For DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared after - * power-on reset, and it can be set after core initialization, which is - * after device soft-reset during initialization. + * Above DWC_usb3.0 1.94a, it is recommended to set + * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration. + * So default value will be '0' when the core is reset. Application + * needs to set it to '1' after the core initialization is completed. + * + * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared + * after power-on reset, and it can be set after core initialization. */ - if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD) - reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; - - if (dwc->dis_u2_susphy_quirk) - reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; if (dwc->dis_enblslpm_quirk) reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; @@ -1227,21 +1228,6 @@ static int dwc3_core_init(struct dwc3 *dwc) if (ret) goto err_exit_phy; - if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && - !DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) { - if (!dwc->dis_u3_susphy_quirk) { - reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); - reg |= DWC3_GUSB3PIPECTL_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); - } - - if (!dwc->dis_u2_susphy_quirk) { - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - reg |= DWC3_GUSB2PHYCFG_SUSPHY; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); - } - } - dwc3_core_setup_global_control(dwc); dwc3_core_num_eps(dwc); diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 893b1e694e..d96a28eecb 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1578,6 +1578,7 @@ int dwc3_event_buffers_setup(struct dwc3 *dwc); void dwc3_event_buffers_cleanup(struct dwc3 *dwc); int dwc3_core_soft_reset(struct dwc3 *dwc); +void dwc3_enable_susphy(struct dwc3 *dwc, bool enable); #if IS_ENABLED(CONFIG_USB_DWC3_HOST) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) int dwc3_host_init(struct dwc3 *dwc); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 07820b1a88..579d90efc2 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1718,7 +1718,6 @@ static int __dwc3_gadget_get_frame(struct dwc3 *dwc) */ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt) { - struct dwc3 *dwc = dep->dwc; struct dwc3_gadget_ep_cmd_params params; u32 cmd; int ret; @@ -1743,8 +1742,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int dep->resource_index = 0; if (!interrupt) { - if (!DWC3_IP_IS(DWC3) || DWC3_VER_IS_PRIOR(DWC3, 310A)) - mdelay(1); + mdelay(1); dep->flags &= ~DWC3_EP_TRANSFER_STARTED; } else if 
(!ret) { dep->flags |= DWC3_EP_END_TRANSFER_PENDING; @@ -2937,6 +2935,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) dwc3_ep0_out_start(dwc); dwc3_gadget_enable_irq(dwc); + dwc3_enable_susphy(dwc, true); return 0; @@ -4703,6 +4702,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc) if (!dwc->gadget) return; + dwc3_enable_susphy(dwc, false); usb_del_gadget(dwc->gadget); dwc3_gadget_free_endpoints(dwc); usb_put_gadget(dwc->gadget); diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index f6a020d77f..6c143f7d24 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -10,9 +10,30 @@ #include <linux/irq.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/usb.h> +#include <linux/usb/hcd.h> +#include "../host/xhci-plat.h" #include "core.h" +static void dwc3_xhci_plat_start(struct usb_hcd *hcd) +{ + struct platform_device *pdev; + struct dwc3 *dwc; + + if (!usb_hcd_is_primary_hcd(hcd)) + return; + + pdev = to_platform_device(hcd->self.controller); + dwc = dev_get_drvdata(pdev->dev.parent); + + dwc3_enable_susphy(dwc, true); +} + +static const struct xhci_plat_priv dwc3_xhci_plat_quirk = { + .plat_start = dwc3_xhci_plat_start, +}; + static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc, int irq, char *name) { @@ -117,6 +138,11 @@ int dwc3_host_init(struct dwc3 *dwc) } } + ret = platform_device_add_data(xhci, &dwc3_xhci_plat_quirk, + sizeof(struct xhci_plat_priv)); + if (ret) + goto err; + ret = platform_device_add(xhci); if (ret) { dev_err(dwc->dev, "failed to register xHCI device\n"); @@ -142,6 +168,7 @@ void dwc3_host_exit(struct dwc3 *dwc) if (dwc->sys_wakeup) device_init_wakeup(&dwc->xhci->dev, false); + dwc3_enable_susphy(dwc, false); platform_device_unregister(dwc->xhci); dwc->xhci = NULL; } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 0ace45b66a..0e151b54aa 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -2112,7 +2112,7 @@ unknown: buf[5] = 0x01; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: - if (w_index != 0x4 || (w_value >> 8)) + if (w_index != 0x4 || (w_value & 0xff)) break; buf[6] = w_index; /* Number of ext compat interfaces */ @@ -2128,9 +2128,9 @@ unknown: } break; case USB_RECIP_INTERFACE: - if (w_index != 0x5 || (w_value >> 8)) + if (w_index != 0x5 || (w_value & 0xff)) break; - interface = w_value & 0xFF; + interface = w_value >> 8; if (interface >= MAX_CONFIG_INTERFACES || !os_desc_cfg->interface[interface]) break; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 6bff6cb937..839426d92b 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -821,6 +821,7 @@ static void ffs_user_copy_worker(struct work_struct *work) work); int ret = io_data->status; bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD; + unsigned long flags; if (io_data->read && ret > 0) { kthread_use_mm(io_data->mm); @@ -833,6 +834,11 @@ static void ffs_user_copy_worker(struct work_struct *work) if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd) eventfd_signal(io_data->ffs->ffs_eventfd); + spin_lock_irqsave(&io_data->ffs->eps_lock, flags); + usb_ep_free_request(io_data->ep, io_data->req); + io_data->req = NULL; + spin_unlock_irqrestore(&io_data->ffs->eps_lock, flags); + if (io_data->read) kfree(io_data->to_free); ffs_free_buffer(io_data); @@ -846,7 +852,6 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep, struct ffs_data *ffs = io_data->ffs; io_data->status = 
req->status ? req->status : req->actual; - usb_ep_free_request(_ep, req); INIT_WORK(&io_data->work, ffs_user_copy_worker); queue_work(ffs->io_completion_wq, &io_data->work); @@ -3330,7 +3335,7 @@ static int ffs_func_setup(struct usb_function *f, __ffs_event_add(ffs, FUNCTIONFS_SETUP); spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); - return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; + return ffs->ev.setup.wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; } static bool ffs_func_req_match(struct usb_function *f, diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 7e704b2bcf..a4377df612 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -92,10 +92,10 @@ static int __uvcg_iter_item_entries(const char *page, size_t len, while (pg - page < len) { i = 0; - while (i < sizeof(buf) && (pg - page < len) && + while (i < bufsize && (pg - page < len) && *pg != '\0' && *pg != '\n') buf[i++] = *pg++; - if (i == sizeof(buf)) { + if (i == bufsize) { ret = -EINVAL; goto out_free_buf; } diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 4f9982ecfb..5cec7640e9 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -888,6 +888,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) /* Check for an all 1's result which is a typical consequence * of dead, unclocked, or unplugged (CardBus...) devices */ +again: if (ints == ~(u32)0) { ohci->rh_state = OHCI_RH_HALTED; ohci_dbg (ohci, "device removed!\n"); @@ -982,6 +983,13 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) } spin_unlock(&ohci->lock); + /* repeat until all enabled interrupts are handled */ + if (ohci->rh_state != OHCI_RH_HALTED) { + ints = ohci_readl(ohci, &regs->intrstatus); + if (ints && (ints & ohci_readl(ohci, &regs->intrenable))) + goto again; + } + return IRQ_HANDLED; } diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 2d15386f2c..6475130eac 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -8,7 +8,9 @@ #ifndef _XHCI_PLAT_H #define _XHCI_PLAT_H -#include "xhci.h" /* for hcd_to_xhci() */ +struct device; +struct platform_device; +struct usb_hcd; struct xhci_plat_priv { const char *firmware_name; diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c index ec65b24eaf..4f59867d71 100644 --- a/drivers/usb/host/xhci-rzv2m.c +++ b/drivers/usb/host/xhci-rzv2m.c @@ -6,6 +6,7 @@ */ #include <linux/usb/rzv2m_usb3drd.h> +#include "xhci.h" #include "xhci-plat.h" #include "xhci-rzv2m.h" diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index df9a5d6760..3f7200a227 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1501,7 +1501,8 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt) port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT]; port->partner_ident.product = product; - typec_partner_set_identity(port->partner); + if (port->partner) + typec_partner_set_identity(port->partner); tcpm_log(port, "Identity: %04x:%04x.%04x", PD_IDH_VID(vdo), @@ -1589,6 +1590,9 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port) struct typec_altmode *altmode; int i; + if (!port->partner) + return; + for (i = 0; i < modep->altmodes; i++) { altmode = typec_partner_register_altmode(port->partner, &modep->altmode_desc[i]); @@ -2435,7 +2439,7 @@ static int tcpm_register_sink_caps(struct tcpm_port *port) { struct usb_power_delivery_desc desc
= { port->negotiated_rev }; struct usb_power_delivery_capabilities_desc caps = { }; - struct usb_power_delivery_capabilities *cap; + struct usb_power_delivery_capabilities *cap = port->partner_source_caps; if (!port->partner_pd) port->partner_pd = usb_power_delivery_register(NULL, &desc); @@ -2445,6 +2449,9 @@ static int tcpm_register_sink_caps(struct tcpm_port *port) memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps); caps.role = TYPEC_SINK; + if (cap) + usb_power_delivery_unregister_capabilities(cap); + cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps); if (IS_ERR(cap)) return PTR_ERR(cap); @@ -3584,7 +3591,10 @@ static int tcpm_init_vconn(struct tcpm_port *port) static void tcpm_typec_connect(struct tcpm_port *port) { + struct typec_partner *partner; + if (!port->connected) { + port->connected = true; /* Make sure we don't report stale identity information */ memset(&port->partner_ident, 0, sizeof(port->partner_ident)); port->partner_desc.usb_pd = port->pd_capable; @@ -3594,9 +3604,13 @@ static void tcpm_typec_connect(struct tcpm_port *port) port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO; else port->partner_desc.accessory = TYPEC_ACCESSORY_NONE; - port->partner = typec_register_partner(port->typec_port, - &port->partner_desc); - port->connected = true; + partner = typec_register_partner(port->typec_port, &port->partner_desc); + if (IS_ERR(partner)) { + dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner)); + return; + } + + port->partner = partner; typec_partner_set_usb_power_delivery(port->partner, port->partner_pd); } } @@ -3666,9 +3680,11 @@ out_disable_mux: static void tcpm_typec_disconnect(struct tcpm_port *port) { if (port->connected) { - typec_partner_set_usb_power_delivery(port->partner, NULL); - typec_unregister_partner(port->partner); - port->partner = NULL; + if (port->partner) { + typec_partner_set_usb_power_delivery(port->partner, NULL); + typec_unregister_partner(port->partner); + port->partner = NULL; + } port->connected = false; } } @@ -3884,6 +3900,9 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode) static void tcpm_set_initial_svdm_version(struct tcpm_port *port) { + if (!port->partner) + return; + switch (port->negotiated_rev) { case PD_REV30: break; @@ -4873,6 +4892,7 @@ static void run_state_machine(struct tcpm_port *port) break; case PORT_RESET: tcpm_reset_port(port); + port->pd_events = 0; if (port->self_powered) tcpm_set_cc(port, TYPEC_CC_OPEN); else diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index 0717cfcd9f..191f86da28 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -28,6 +28,7 @@ #define TPS_REG_MODE 0x03 #define TPS_REG_CMD1 0x08 #define TPS_REG_DATA1 0x09 +#define TPS_REG_VERSION 0x0F #define TPS_REG_INT_EVENT1 0x14 #define TPS_REG_INT_EVENT2 0x15 #define TPS_REG_INT_MASK1 0x16 @@ -604,11 +605,11 @@ static irqreturn_t tps25750_interrupt(int irq, void *data) if (!tps6598x_read_status(tps, &status)) goto err_clear_ints; - if ((event[0] | event[1]) & TPS_REG_INT_POWER_STATUS_UPDATE) + if (event[0] & TPS_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) goto err_clear_ints; - if ((event[0] | event[1]) & TPS_REG_INT_DATA_STATUS_UPDATE) + if (event[0] & TPS_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) goto err_clear_ints; @@ -617,7 +618,7 @@ static irqreturn_t tps25750_interrupt(int irq, void *data) * a plug event. 
Therefore, we need to check * for pr/dr status change to set TypeC dr/pr accordingly. */ - if ((event[0] | event[1]) & TPS_REG_INT_PLUG_EVENT || + if (event[0] & TPS_REG_INT_PLUG_EVENT || tps6598x_has_role_changed(tps, status)) tps6598x_handle_plug_event(tps, status); @@ -636,49 +637,67 @@ err_unlock: static irqreturn_t tps6598x_interrupt(int irq, void *data) { + int intev_len = TPS_65981_2_6_INTEVENT_LEN; struct tps6598x *tps = data; - u64 event1 = 0; - u64 event2 = 0; + u64 event1[2] = { }; + u64 event2[2] = { }; + u32 version; u32 status; int ret; mutex_lock(&tps->lock); - ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event1); - ret |= tps6598x_read64(tps, TPS_REG_INT_EVENT2, &event2); + ret = tps6598x_read32(tps, TPS_REG_VERSION, &version); + if (ret) + dev_warn(tps->dev, "%s: failed to read version (%d)\n", + __func__, ret); + + if (TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DH || + TPS_VERSION_HW_VERSION(version) == TPS_VERSION_HW_65987_8_DK) + intev_len = TPS_65987_8_INTEVENT_LEN; + + ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len); + + ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT1, event1, intev_len); if (ret) { - dev_err(tps->dev, "%s: failed to read events\n", __func__); + dev_err(tps->dev, "%s: failed to read event1\n", __func__); goto err_unlock; } - trace_tps6598x_irq(event1, event2); + ret = tps6598x_block_read(tps, TPS_REG_INT_EVENT2, event2, intev_len); + if (ret) { + dev_err(tps->dev, "%s: failed to read event2\n", __func__); + goto err_unlock; + } + trace_tps6598x_irq(event1[0], event2[0]); - if (!(event1 | event2)) + if (!(event1[0] | event1[1] | event2[0] | event2[1])) goto err_unlock; if (!tps6598x_read_status(tps, &status)) goto err_clear_ints; - if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE) + if ((event1[0] | event2[0]) & TPS_REG_INT_POWER_STATUS_UPDATE) if (!tps6598x_read_power_status(tps)) goto err_clear_ints; - if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE) + if ((event1[0] | event2[0]) & TPS_REG_INT_DATA_STATUS_UPDATE) if (!tps6598x_read_data_status(tps)) goto err_clear_ints; /* Handle plug insert or removal */ - if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT) + if ((event1[0] | event2[0]) & TPS_REG_INT_PLUG_EVENT) tps6598x_handle_plug_event(tps, status); err_clear_ints: - tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1); - tps6598x_write64(tps, TPS_REG_INT_CLEAR2, event2); + tps6598x_block_write(tps, TPS_REG_INT_CLEAR1, event1, intev_len); + tps6598x_block_write(tps, TPS_REG_INT_CLEAR2, event2, intev_len); err_unlock: mutex_unlock(&tps->lock); - if (event1 | event2) + if (event1[0] | event1[1] | event2[0] | event2[1]) return IRQ_HANDLED; + return IRQ_NONE; } diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h index 89b2451946..9b23e90174 100644 --- a/drivers/usb/typec/tipd/tps6598x.h +++ b/drivers/usb/typec/tipd/tps6598x.h @@ -253,4 +253,15 @@ #define TPS_PTCC_DEV 2 #define TPS_PTCC_APP 3 +/* Version Register */ +#define TPS_VERSION_HW_VERSION_MASK GENMASK(31, 24) +#define TPS_VERSION_HW_VERSION(x) TPS_FIELD_GET(TPS_VERSION_HW_VERSION_MASK, (x)) +#define TPS_VERSION_HW_65981_2_6 0x00 +#define TPS_VERSION_HW_65987_8_DH 0xF7 +#define TPS_VERSION_HW_65987_8_DK 0xF9 + +/* Int Event Register length */ +#define TPS_65981_2_6_INTEVENT_LEN 8 +#define TPS_65987_8_INTEVENT_LEN 11 + #endif /* __TPS6598X_H__ */ diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index d9d3c91125..8be92fc1d1 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ 
b/drivers/usb/typec/ucsi/displayport.c @@ -275,8 +275,6 @@ static void ucsi_displayport_work(struct work_struct *work) struct ucsi_dp *dp = container_of(work, struct ucsi_dp, work); int ret; - mutex_lock(&dp->con->lock); - ret = typec_altmode_vdm(dp->alt, dp->header, dp->vdo_data, dp->vdo_size); if (ret) @@ -285,8 +283,6 @@ static void ucsi_displayport_work(struct work_struct *work) dp->vdo_data = NULL; dp->vdo_size = 0; dp->header = 0; - - mutex_unlock(&dp->con->lock); } void ucsi_displayport_remove_partner(struct typec_altmode *alt) diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index cd415565b6..b501760012 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -975,7 +975,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num) struct ucsi_connector *con = &ucsi->connector[num - 1]; if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) { - dev_dbg(ucsi->dev, "Bogus connector change event\n"); + dev_dbg(ucsi->dev, "Early connector change event\n"); return; } @@ -1406,6 +1406,7 @@ static int ucsi_init(struct ucsi *ucsi) { struct ucsi_connector *con, *connector; u64 command, ntfy; + u32 cci; int ret; int i; @@ -1458,6 +1459,15 @@ static int ucsi_init(struct ucsi *ucsi) ucsi->connector = connector; ucsi->ntfy = ntfy; + + mutex_lock(&ucsi->ppm_lock); + ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); + mutex_unlock(&ucsi->ppm_lock); + if (ret) + return ret; + if (UCSI_CCI_CONNECTOR(cci)) + ucsi_connector_change(ucsi, UCSI_CCI_CONNECTOR(cci)); + return 0; err_unregister: diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index cb5b7f865d..e727941f58 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -71,6 +71,8 @@ static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev) case PCI_DEVICE_ID_INTEL_QAT_C62X_VF: case PCI_DEVICE_ID_INTEL_QAT_DH895XCC: case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF: + case PCI_DEVICE_ID_INTEL_DSA_SPR0: + case PCI_DEVICE_ID_INTEL_IAX_SPR0: return true; default: return false; diff --git a/fs/9p/fid.h b/fs/9p/fid.h index 29281b7c38..0d6138bee2 100644 --- a/fs/9p/fid.h +++ b/fs/9p/fid.h @@ -49,9 +49,6 @@ static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry) static inline void v9fs_fid_add_modes(struct p9_fid *fid, unsigned int s_flags, unsigned int s_cache, unsigned int f_flags) { - if (fid->qid.type != P9_QTFILE) - return; - if ((!s_cache) || ((fid->qid.version == 0) && !(s_flags & V9FS_IGNORE_QV)) || (s_flags & V9FS_DIRECT_IO) || (f_flags & O_DIRECT)) { diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index bae330c2f0..a504240c81 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -520,6 +520,7 @@ const struct file_operations v9fs_file_operations = { .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync, + .setlease = simple_nosetlease, }; const struct file_operations v9fs_file_operations_dotl = { @@ -534,4 +535,5 @@ const struct file_operations v9fs_file_operations_dotl = { .splice_read = v9fs_file_splice_read, .splice_write = iter_file_splice_write, .fsync = v9fs_file_fsync_dotl, + .setlease = simple_nosetlease, }; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 32572982f7..0fde0ab77e 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -83,7 +83,7 @@ static int p9mode2perm(struct v9fs_session_info *v9ses, int res; int mode = stat->mode; - res = mode & S_IALLUGO; + res = mode & 0777; /* S_IRWXUGO */ if (v9fs_proto_dotu(v9ses)) { if ((mode & P9_DMSETUID) == P9_DMSETUID) res |= 
S_ISUID; @@ -178,6 +178,9 @@ int v9fs_uflags2omode(int uflags, int extended) break; } + if (uflags & O_TRUNC) + ret |= P9_OTRUNC; + if (extended) { if (uflags & O_EXCL) ret |= P9_OEXCL; @@ -371,17 +374,21 @@ void v9fs_evict_inode(struct inode *inode) struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode); __le32 __maybe_unused version; - truncate_inode_pages_final(&inode->i_data); + if (!is_bad_inode(inode)) { + truncate_inode_pages_final(&inode->i_data); - version = cpu_to_le32(v9inode->qid.version); - netfs_clear_inode_writeback(inode, &version); + version = cpu_to_le32(v9inode->qid.version); + netfs_clear_inode_writeback(inode, &version); - clear_inode(inode); - filemap_fdatawrite(&inode->i_data); + clear_inode(inode); + filemap_fdatawrite(&inode->i_data); #ifdef CONFIG_9P_FSCACHE - fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false); + if (v9fs_inode_cookie(v9inode)) + fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false); #endif + } else + clear_inode(inode); } static int v9fs_test_inode(struct inode *inode, void *data) @@ -1145,8 +1152,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, struct v9fs_session_info *v9ses = sb->s_fs_info; struct v9fs_inode *v9inode = V9FS_I(inode); - set_nlink(inode, 1); - inode_set_atime(inode, stat->atime, 0); inode_set_mtime(inode, stat->mtime, 0); inode_set_ctime(inode, stat->mtime, 0); diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index 941f7d0e0b..23cc67f29a 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -310,6 +310,7 @@ static const struct super_operations v9fs_super_ops = { .alloc_inode = v9fs_alloc_inode, .free_inode = v9fs_free_inode, .statfs = simple_statfs, + .drop_inode = v9fs_drop_inode, .evict_inode = v9fs_evict_inode, .show_options = v9fs_show_options, .umount_begin = v9fs_umount_begin, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b28bb1c93d..d3c534c1bf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2522,7 +2522,7 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode, */ if (bits & EXTENT_CLEAR_META_RESV && root != fs_info->tree_root) - btrfs_delalloc_release_metadata(inode, len, false); + btrfs_delalloc_release_metadata(inode, len, true); /* For sanity tests. 
*/ if (btrfs_is_testing(fs_info)) diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 59850dc17b..81f67ebf74 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -1189,6 +1189,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent( ordered->disk_bytenr += len; ordered->num_bytes -= len; ordered->disk_num_bytes -= len; + ordered->ram_bytes -= len; if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) { ASSERT(ordered->bytes_left == 0); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 132802bd80..82d4559eb4 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -3052,6 +3052,8 @@ int btrfs_qgroup_check_inherit(struct btrfs_fs_info *fs_info, struct btrfs_qgroup_inherit *inherit, size_t size) { + if (!btrfs_qgroup_enabled(fs_info)) + return 0; if (inherit->flags & ~BTRFS_QGROUP_INHERIT_FLAGS_SUPP) return -EOPNOTSUPP; if (size < sizeof(*inherit) || size > PAGE_SIZE) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f1705ae59e..b93cdf5f17 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1496,6 +1496,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)root->root_key.objectid, BTRFS_ROOT_TRANS_TAG); + btrfs_qgroup_free_meta_all_pertrans(root); spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log(trans, root); @@ -1520,7 +1521,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans) if (ret2) return ret2; spin_lock(&fs_info->fs_roots_radix_lock); - btrfs_qgroup_free_meta_all_pertrans(root); } } spin_unlock(&fs_info->fs_roots_radix_lock); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 6eccf84964..75c78a5556 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1793,6 +1793,11 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf) return BTRFS_TREE_BLOCK_INVALID_LEVEL; } + if (unlikely(!btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_WRITTEN))) { + generic_err(leaf, 0, "invalid flag for leaf, WRITTEN not set"); + return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET; + } + /* * Extent buffers from a relocation tree have a owner field that * corresponds to the subvolume tree they are based on. So just from an @@ -1854,6 +1859,7 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf) for (slot = 0; slot < nritems; slot++) { u32 item_end_expected; u64 item_data_end; + enum btrfs_tree_block_status ret; btrfs_item_key_to_cpu(leaf, &key, slot); @@ -1909,21 +1915,10 @@ enum btrfs_tree_block_status __btrfs_check_leaf(struct extent_buffer *leaf) return BTRFS_TREE_BLOCK_INVALID_OFFSETS; } - /* - * We only want to do this if WRITTEN is set, otherwise the leaf - * may be in some intermediate state and won't appear valid. - */ - if (btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_WRITTEN)) { - enum btrfs_tree_block_status ret; - - /* - * Check if the item size and content meet other - * criteria - */ - ret = check_leaf_item(leaf, &key, slot, &prev_key); - if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN)) - return ret; - } + /* Check if the item size and content meet other criteria. 
*/ + ret = check_leaf_item(leaf, &key, slot, &prev_key); + if (unlikely(ret != BTRFS_TREE_BLOCK_CLEAN)) + return ret; prev_key.objectid = key.objectid; prev_key.type = key.type; @@ -1953,6 +1948,11 @@ enum btrfs_tree_block_status __btrfs_check_node(struct extent_buffer *node) int level = btrfs_header_level(node); u64 bytenr; + if (unlikely(!btrfs_header_flag(node, BTRFS_HEADER_FLAG_WRITTEN))) { + generic_err(node, 0, "invalid flag for node, WRITTEN not set"); + return BTRFS_TREE_BLOCK_WRITTEN_NOT_SET; + } + if (unlikely(level <= 0 || level >= BTRFS_MAX_LEVEL)) { generic_err(node, 0, "invalid level for node, have %d expect [1, %d]", diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h index 14b9fbe82d..580ec4fde0 100644 --- a/fs/btrfs/tree-checker.h +++ b/fs/btrfs/tree-checker.h @@ -51,6 +51,7 @@ enum btrfs_tree_block_status { BTRFS_TREE_BLOCK_INVALID_BLOCKPTR, BTRFS_TREE_BLOCK_INVALID_ITEM, BTRFS_TREE_BLOCK_INVALID_OWNER, + BTRFS_TREE_BLOCK_WRITTEN_NOT_SET, }; /* diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f3890f7c78..3ebadc5ec8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1182,23 +1182,30 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device; struct btrfs_device *latest_dev = NULL; struct btrfs_device *tmp_device; + int ret = 0; list_for_each_entry_safe(device, tmp_device, &fs_devices->devices, dev_list) { - int ret; + int ret2; - ret = btrfs_open_one_device(fs_devices, device, flags, holder); - if (ret == 0 && + ret2 = btrfs_open_one_device(fs_devices, device, flags, holder); + if (ret2 == 0 && (!latest_dev || device->generation > latest_dev->generation)) { latest_dev = device; - } else if (ret == -ENODATA) { + } else if (ret2 == -ENODATA) { fs_devices->num_devices--; list_del(&device->dev_list); btrfs_free_device(device); } + if (ret == 0 && ret2 != 0) + ret = ret2; } - if (fs_devices->open_devices == 0) + + if (fs_devices->open_devices == 0) { + if (ret) + return ret; return -EINVAL; + } fs_devices->opened = 1; fs_devices->latest_dev = latest_dev; @@ -3449,6 +3456,7 @@ again: * alignment and size). 
*/ ret = -EUCLEAN; + mutex_unlock(&fs_info->reclaim_bgs_lock); goto error; } diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 410f5af623..c69174675c 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -84,13 +84,6 @@ struct erofs_dev_context { bool flatdev; }; -struct erofs_fs_context { - struct erofs_mount_opts opt; - struct erofs_dev_context *devs; - char *fsid; - char *domain_id; -}; - /* all filesystem-wide lz4 configurations */ struct erofs_sb_lz4_info { /* # of pages needed for EROFS lz4 rolling decompression */ diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 24788c230b..a2fa745585 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -370,18 +370,18 @@ out: return ret; } -static void erofs_default_options(struct erofs_fs_context *ctx) +static void erofs_default_options(struct erofs_sb_info *sbi) { #ifdef CONFIG_EROFS_FS_ZIP - ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; - ctx->opt.max_sync_decompress_pages = 3; - ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; + sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; + sbi->opt.max_sync_decompress_pages = 3; + sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO; #endif #ifdef CONFIG_EROFS_FS_XATTR - set_opt(&ctx->opt, XATTR_USER); + set_opt(&sbi->opt, XATTR_USER); #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL - set_opt(&ctx->opt, POSIX_ACL); + set_opt(&sbi->opt, POSIX_ACL); #endif } @@ -426,17 +426,17 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = { static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode) { #ifdef CONFIG_FS_DAX - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *sbi = fc->s_fs_info; switch (mode) { case EROFS_MOUNT_DAX_ALWAYS: warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); - set_opt(&ctx->opt, DAX_ALWAYS); - clear_opt(&ctx->opt, DAX_NEVER); + set_opt(&sbi->opt, DAX_ALWAYS); + clear_opt(&sbi->opt, DAX_NEVER); return true; case EROFS_MOUNT_DAX_NEVER: - set_opt(&ctx->opt, DAX_NEVER); - clear_opt(&ctx->opt, DAX_ALWAYS); + set_opt(&sbi->opt, DAX_NEVER); + clear_opt(&sbi->opt, DAX_ALWAYS); return true; default: DBG_BUGON(1); @@ -451,7 +451,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode) static int erofs_fc_parse_param(struct fs_context *fc, struct fs_parameter *param) { - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *sbi = fc->s_fs_info; struct fs_parse_result result; struct erofs_device_info *dif; int opt, ret; @@ -464,9 +464,9 @@ static int erofs_fc_parse_param(struct fs_context *fc, case Opt_user_xattr: #ifdef CONFIG_EROFS_FS_XATTR if (result.boolean) - set_opt(&ctx->opt, XATTR_USER); + set_opt(&sbi->opt, XATTR_USER); else - clear_opt(&ctx->opt, XATTR_USER); + clear_opt(&sbi->opt, XATTR_USER); #else errorfc(fc, "{,no}user_xattr options not supported"); #endif @@ -474,16 +474,16 @@ static int erofs_fc_parse_param(struct fs_context *fc, case Opt_acl: #ifdef CONFIG_EROFS_FS_POSIX_ACL if (result.boolean) - set_opt(&ctx->opt, POSIX_ACL); + set_opt(&sbi->opt, POSIX_ACL); else - clear_opt(&ctx->opt, POSIX_ACL); + clear_opt(&sbi->opt, POSIX_ACL); #else errorfc(fc, "{,no}acl options not supported"); #endif break; case Opt_cache_strategy: #ifdef CONFIG_EROFS_FS_ZIP - ctx->opt.cache_strategy = result.uint_32; + sbi->opt.cache_strategy = result.uint_32; #else errorfc(fc, "compression not supported, cache_strategy ignored"); #endif @@ -505,27 +505,27 @@ static int erofs_fc_parse_param(struct fs_context *fc, kfree(dif); return -ENOMEM; } - 
down_write(&ctx->devs->rwsem); - ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL); - up_write(&ctx->devs->rwsem); + down_write(&sbi->devs->rwsem); + ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL); + up_write(&sbi->devs->rwsem); if (ret < 0) { kfree(dif->path); kfree(dif); return ret; } - ++ctx->devs->extra_devices; + ++sbi->devs->extra_devices; break; #ifdef CONFIG_EROFS_FS_ONDEMAND case Opt_fsid: - kfree(ctx->fsid); - ctx->fsid = kstrdup(param->string, GFP_KERNEL); - if (!ctx->fsid) + kfree(sbi->fsid); + sbi->fsid = kstrdup(param->string, GFP_KERNEL); + if (!sbi->fsid) return -ENOMEM; break; case Opt_domain_id: - kfree(ctx->domain_id); - ctx->domain_id = kstrdup(param->string, GFP_KERNEL); - if (!ctx->domain_id) + kfree(sbi->domain_id); + sbi->domain_id = kstrdup(param->string, GFP_KERNEL); + if (!sbi->domain_id) return -ENOMEM; break; #else @@ -582,8 +582,7 @@ static const struct export_operations erofs_export_ops = { static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) { struct inode *inode; - struct erofs_sb_info *sbi; - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *sbi = EROFS_SB(sb); int err; sb->s_magic = EROFS_SUPER_MAGIC; @@ -591,19 +590,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_op = &erofs_sops; - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (!sbi) - return -ENOMEM; - - sb->s_fs_info = sbi; - sbi->opt = ctx->opt; - sbi->devs = ctx->devs; - ctx->devs = NULL; - sbi->fsid = ctx->fsid; - ctx->fsid = NULL; - sbi->domain_id = ctx->domain_id; - ctx->domain_id = NULL; - sbi->blkszbits = PAGE_SHIFT; if (erofs_is_fscache_mode(sb)) { sb->s_blocksize = PAGE_SIZE; @@ -707,9 +693,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) static int erofs_fc_get_tree(struct fs_context *fc) { - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *sbi = fc->s_fs_info; - if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid) + if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) return get_tree_nodev(fc, erofs_fc_fill_super); return get_tree_bdev(fc, erofs_fc_fill_super); @@ -719,19 +705,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *new_sbi = fc->s_fs_info; DBG_BUGON(!sb_rdonly(sb)); - if (ctx->fsid || ctx->domain_id) + if (new_sbi->fsid || new_sbi->domain_id) erofs_info(sb, "ignoring reconfiguration for fsid|domain_id."); - if (test_opt(&ctx->opt, POSIX_ACL)) + if (test_opt(&new_sbi->opt, POSIX_ACL)) fc->sb_flags |= SB_POSIXACL; else fc->sb_flags &= ~SB_POSIXACL; - sbi->opt = ctx->opt; + sbi->opt = new_sbi->opt; fc->sb_flags |= SB_RDONLY; return 0; @@ -762,12 +748,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs) static void erofs_fc_free(struct fs_context *fc) { - struct erofs_fs_context *ctx = fc->fs_private; + struct erofs_sb_info *sbi = fc->s_fs_info; - erofs_free_dev_context(ctx->devs); - kfree(ctx->fsid); - kfree(ctx->domain_id); - kfree(ctx); + if (!sbi) + return; + + erofs_free_dev_context(sbi->devs); + kfree(sbi->fsid); + kfree(sbi->domain_id); + kfree(sbi); } static const struct fs_context_operations erofs_context_ops = { @@ -779,38 +768,35 @@ static const struct fs_context_operations erofs_context_ops = { static int erofs_init_fs_context(struct fs_context *fc) { - struct erofs_fs_context *ctx; + struct 
erofs_sb_info *sbi; - ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); - if (!ctx) + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (!sbi) return -ENOMEM; - ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL); - if (!ctx->devs) { - kfree(ctx); + + sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL); + if (!sbi->devs) { + kfree(sbi); return -ENOMEM; } - fc->fs_private = ctx; + fc->s_fs_info = sbi; - idr_init(&ctx->devs->tree); - init_rwsem(&ctx->devs->rwsem); - erofs_default_options(ctx); + idr_init(&sbi->devs->tree); + init_rwsem(&sbi->devs->rwsem); + erofs_default_options(sbi); fc->ops = &erofs_context_ops; return 0; } static void erofs_kill_sb(struct super_block *sb) { - struct erofs_sb_info *sbi; + struct erofs_sb_info *sbi = EROFS_SB(sb); - if (erofs_is_fscache_mode(sb)) + if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) kill_anon_super(sb); else kill_block_super(sb); - sbi = EROFS_SB(sb); - if (!sbi) - return; - erofs_free_dev_context(sbi->devs); fs_put_dax(sbi->dax_dev, NULL); erofs_fscache_unregister_fs(sb); diff --git a/fs/exfat/file.c b/fs/exfat/file.c index cc00f1a7a1..9adfc38ca7 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -51,7 +51,7 @@ static int exfat_cont_expand(struct inode *inode, loff_t size) clu.flags = ei->flags; ret = exfat_alloc_cluster(inode, new_num_clusters - num_clusters, - &clu, IS_DIRSYNC(inode)); + &clu, inode_needs_sync(inode)); if (ret) return ret; @@ -77,12 +77,11 @@ out: ei->i_size_aligned = round_up(size, sb->s_blocksize); ei->i_size_ondisk = ei->i_size_aligned; inode->i_blocks = round_up(size, sbi->cluster_size) >> 9; + mark_inode_dirty(inode); - if (IS_DIRSYNC(inode)) + if (IS_SYNC(inode)) return write_inode_now(inode, 1); - mark_inode_dirty(inode); - return 0; free_clu: diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index d9ccfd27e4..643175498d 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -1718,7 +1718,8 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) struct buffer_head *dibh, *bh; struct gfs2_holder rd_gh; unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; - u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift; + unsigned int bsize = 1 << bsize_shift; + u64 lblock = (offset + bsize - 1) >> bsize_shift; __u16 start_list[GFS2_MAX_META_HEIGHT]; __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL; unsigned int start_aligned, end_aligned; @@ -1729,7 +1730,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) u64 prev_bnr = 0; __be64 *start, *end; - if (offset >= maxsize) { + if (offset + bsize - 1 >= maxsize) { /* * The starting point lies beyond the allocated metadata; * there are no blocks to deallocate. 
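The gfs2 punch_hole change just above hinges on block-rounding arithmetic: the first logical block to deallocate is the hole offset rounded up to a block boundary, so an offset that falls inside the last partially used block must already take the "nothing to deallocate" path. The stand-alone sketch below is not gfs2 code; the block size, offset and maxsize values are invented for illustration, and only the arithmetic mirrors the change from "offset >= maxsize" to "offset + bsize - 1 >= maxsize".

/* Illustration of the punch_hole() rounding fix; values are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bsize_shift = 12;               /* 4 KiB blocks */
	uint64_t bsize = 1ULL << bsize_shift;
	uint64_t maxsize = 5 * bsize;                /* allocation covers blocks 0..4 */
	uint64_t offset = maxsize - 100;             /* hole starts inside the last block */

	/* First block to punch: offset rounded up to the next block boundary. */
	uint64_t lblock = (offset + bsize - 1) >> bsize_shift;

	/*
	 * Old check: "offset >= maxsize" is false here, so deallocation would
	 * proceed even though lblock (5) already lies past the last block (4).
	 */
	printf("old check skips dealloc: %d\n", offset >= maxsize);

	/*
	 * New check: any offset within the final partial block is treated as
	 * "nothing to deallocate", consistent with the rounded-up lblock.
	 */
	printf("new check skips dealloc: %d\n", offset + bsize - 1 >= maxsize);

	printf("lblock = %llu (allocated blocks run 0..%llu)\n",
	       (unsigned long long)lblock,
	       (unsigned long long)((maxsize >> bsize_shift) - 1));
	return 0;
}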
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index fbdc9ca80f..a8fad331df 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -73,7 +73,6 @@ const struct rpc_program nfs_program = { .number = NFS_PROGRAM, .nrvers = ARRAY_SIZE(nfs_version), .version = nfs_version, - .stats = &nfs_rpcstat, .pipe_dir_name = NFS_PIPE_DIRNAME, }; @@ -502,6 +501,7 @@ int nfs_create_rpc_client(struct nfs_client *clp, const struct nfs_client_initdata *cl_init, rpc_authflavor_t flavor) { + struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); struct rpc_clnt *clnt = NULL; struct rpc_create_args args = { .net = clp->cl_net, @@ -513,6 +513,7 @@ int nfs_create_rpc_client(struct nfs_client *clp, .servername = clp->cl_hostname, .nodename = cl_init->nodename, .program = &nfs_program, + .stats = &nn->rpcstats, .version = clp->rpc_ops->version, .authflavor = flavor, .cred = cl_init->cred, @@ -1182,6 +1183,8 @@ void nfs_clients_init(struct net *net) #endif spin_lock_init(&nn->nfs_client_lock); nn->boot_time = ktime_get_real(); + memset(&nn->rpcstats, 0, sizeof(nn->rpcstats)); + nn->rpcstats.program = &nfs_program; nfs_netns_sysfs_setup(nn, net); } diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index ebb8d60e11..6fe4b47c39 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -2426,12 +2426,21 @@ EXPORT_SYMBOL_GPL(nfs_net_id); static int nfs_net_init(struct net *net) { + struct nfs_net *nn = net_generic(net, nfs_net_id); + nfs_clients_init(net); + + if (!rpc_proc_register(net, &nn->rpcstats)) { + nfs_clients_exit(net); + return -ENOMEM; + } + return nfs_fs_proc_net_init(net); } static void nfs_net_exit(struct net *net) { + rpc_proc_unregister(net, "nfs"); nfs_fs_proc_net_exit(net); nfs_clients_exit(net); } @@ -2486,15 +2495,12 @@ static int __init init_nfs_fs(void) if (err) goto out1; - rpc_proc_register(&init_net, &nfs_rpcstat); - err = register_nfs_fs(); if (err) goto out0; return 0; out0: - rpc_proc_unregister(&init_net, "nfs"); nfs_destroy_directcache(); out1: nfs_destroy_writepagecache(); @@ -2524,7 +2530,6 @@ static void __exit exit_nfs_fs(void) nfs_destroy_inodecache(); nfs_destroy_nfspagecache(); unregister_pernet_subsys(&nfs_net_ops); - rpc_proc_unregister(&init_net, "nfs"); unregister_nfs_fs(); nfs_fs_proc_exit(); nfsiod_stop(); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e3722ce672..06253695fe 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -449,8 +449,6 @@ int nfs_try_get_tree(struct fs_context *); int nfs_get_tree_common(struct fs_context *); void nfs_kill_super(struct super_block *); -extern struct rpc_stat nfs_rpcstat; - extern int __init register_nfs_fs(void); extern void __exit unregister_nfs_fs(void); extern bool nfs_sb_active(struct super_block *sb); diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h index c8374f74dc..a68b21603e 100644 --- a/fs/nfs/netns.h +++ b/fs/nfs/netns.h @@ -9,6 +9,7 @@ #include <linux/nfs4.h> #include <net/net_namespace.h> #include <net/netns/generic.h> +#include <linux/sunrpc/stats.h> struct bl_dev_msg { int32_t status; @@ -34,6 +35,7 @@ struct nfs_net { struct nfs_netns_client *nfs_client; spinlock_t nfs_client_lock; ktime_t boot_time; + struct rpc_stat rpcstats; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_nfsfs; #endif diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 4cbe0434cb..66a05fefae 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -80,8 +80,6 @@ enum { int nfsd_drc_slab_create(void); void nfsd_drc_slab_free(void); -int nfsd_net_reply_cache_init(struct nfsd_net *nn); -void nfsd_net_reply_cache_destroy(struct nfsd_net *nn); int 
nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 74b4360779..0cef4bb407 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -11,6 +11,7 @@ #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/filelock.h> +#include <linux/nfs4.h> #include <linux/percpu_counter.h> #include <linux/siphash.h> @@ -26,10 +27,22 @@ struct nfsd4_client_tracking_ops; enum { /* cache misses due only to checksum comparison failures */ - NFSD_NET_PAYLOAD_MISSES, + NFSD_STATS_PAYLOAD_MISSES, /* amount of memory (in bytes) currently consumed by the DRC */ - NFSD_NET_DRC_MEM_USAGE, - NFSD_NET_COUNTERS_NUM + NFSD_STATS_DRC_MEM_USAGE, + NFSD_STATS_RC_HITS, /* repcache hits */ + NFSD_STATS_RC_MISSES, /* repcache misses */ + NFSD_STATS_RC_NOCACHE, /* uncached reqs */ + NFSD_STATS_FH_STALE, /* FH stale error */ + NFSD_STATS_IO_READ, /* bytes returned to read requests */ + NFSD_STATS_IO_WRITE, /* bytes passed in write requests */ +#ifdef CONFIG_NFSD_V4 + NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */ + NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP, +#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op)) + NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */ +#endif + NFSD_STATS_COUNTERS_NUM }; /* @@ -164,7 +177,7 @@ struct nfsd_net { atomic_t num_drc_entries; /* Per-netns stats counters */ - struct percpu_counter counter[NFSD_NET_COUNTERS_NUM]; + struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM]; /* longest hash chain seen */ unsigned int longest_chain; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 926c29879c..30aa241038 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -85,7 +85,21 @@ static void encode_uint32(struct xdr_stream *xdr, u32 n) static void encode_bitmap4(struct xdr_stream *xdr, const __u32 *bitmap, size_t len) { - WARN_ON_ONCE(xdr_stream_encode_uint32_array(xdr, bitmap, len) < 0); + xdr_stream_encode_uint32_array(xdr, bitmap, len); +} + +static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap, + struct nfs4_cb_fattr *fattr) +{ + fattr->ncf_cb_change = 0; + fattr->ncf_cb_fsize = 0; + if (bitmap[0] & FATTR4_WORD0_CHANGE) + if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0) + return -NFSERR_BAD_XDR; + if (bitmap[0] & FATTR4_WORD0_SIZE) + if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0) + return -NFSERR_BAD_XDR; + return 0; } static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op) @@ -334,6 +348,30 @@ encode_cb_recallany4args(struct xdr_stream *xdr, } /* + * CB_GETATTR4args + * struct CB_GETATTR4args { + * nfs_fh4 fh; + * bitmap4 attr_request; + * }; + * + * The size and change attributes are the only one + * guaranteed to be serviced by the client. + */ +static void +encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr, + struct nfs4_cb_fattr *fattr) +{ + struct nfs4_delegation *dp = + container_of(fattr, struct nfs4_delegation, dl_cb_fattr); + struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle; + + encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR); + encode_nfs_fh4(xdr, fh); + encode_bitmap4(xdr, fattr->ncf_cb_bmap, ARRAY_SIZE(fattr->ncf_cb_bmap)); + hdr->nops++; +} + +/* * CB_SEQUENCE4args * * struct CB_SEQUENCE4args { @@ -469,6 +507,26 @@ static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr, } /* + * 20.1. 
Operation 3: CB_GETATTR - Get Attributes + */ +static void nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfsd4_callback *cb = data; + struct nfs4_cb_fattr *ncf = + container_of(cb, struct nfs4_cb_fattr, ncf_getattr); + struct nfs4_cb_compound_hdr hdr = { + .ident = cb->cb_clp->cl_cb_ident, + .minorversion = cb->cb_clp->cl_minorversion, + }; + + encode_cb_compound4args(xdr, &hdr); + encode_cb_sequence4args(xdr, cb, &hdr); + encode_cb_getattr4args(xdr, &hdr, ncf); + encode_cb_nops(&hdr); +} + +/* * 20.2. Operation 4: CB_RECALL - Recall a Delegation */ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr, @@ -524,6 +582,42 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr, } /* + * 20.1. Operation 3: CB_GETATTR - Get Attributes + */ +static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + void *data) +{ + struct nfsd4_callback *cb = data; + struct nfs4_cb_compound_hdr hdr; + int status; + u32 bitmap[3] = {0}; + u32 attrlen; + struct nfs4_cb_fattr *ncf = + container_of(cb, struct nfs4_cb_fattr, ncf_getattr); + + status = decode_cb_compound4res(xdr, &hdr); + if (unlikely(status)) + return status; + + status = decode_cb_sequence4res(xdr, cb); + if (unlikely(status || cb->cb_seq_status)) + return status; + + status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status); + if (status) + return status; + if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0) + return -NFSERR_BAD_XDR; + if (xdr_stream_decode_u32(xdr, &attrlen) < 0) + return -NFSERR_BAD_XDR; + if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize))) + return -NFSERR_BAD_XDR; + status = decode_cb_fattr4(xdr, bitmap, ncf); + return status; +} + +/* * 20.2. 
Operation 4: CB_RECALL - Recall a Delegation */ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, @@ -831,6 +925,7 @@ static const struct rpc_procinfo nfs4_cb_procedures[] = { PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock), PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload), PROC(CB_RECALL_ANY, COMPOUND, cb_recall_any, cb_recall_any), + PROC(CB_GETATTR, COMPOUND, cb_getattr, cb_getattr), }; static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)]; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 14712fa08f..648ff42700 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -2490,10 +2490,10 @@ nfsd4_proc_null(struct svc_rqst *rqstp) return rpc_success; } -static inline void nfsd4_increment_op_stats(u32 opnum) +static inline void nfsd4_increment_op_stats(struct nfsd_net *nn, u32 opnum) { if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP) - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]); + percpu_counter_inc(&nn->counter[NFSD_STATS_NFS4_OP(opnum)]); } static const struct nfsd4_operation nfsd4_ops[]; @@ -2768,7 +2768,7 @@ encode_op: status, nfsd4_op_name(op->opnum)); nfsd4_cstate_clear_replay(cstate); - nfsd4_increment_op_stats(op->opnum); + nfsd4_increment_op_stats(nn, op->opnum); } fh_put(current_fh); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 692ede4882..71f9442c5c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -8447,6 +8447,7 @@ __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) { __be32 status; + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct file_lock_context *ctx; struct file_lock *fl; struct nfs4_delegation *dp; @@ -8476,7 +8477,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode) } break_lease: spin_unlock(&ctx->flc_lock); - nfsd_stats_wdeleg_getattr_inc(); + nfsd_stats_wdeleg_getattr_inc(nn); status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); if (status != nfserr_jukebox || !nfsd_wait_for_delegreturn(rqstp, inode)) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index c17bdf973c..24db9f9ea8 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3513,6 +3513,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, args.exp = exp; args.dentry = dentry; args.ignore_crossmnt = (ignore_crossmnt != 0); + args.acl = NULL; /* * Make a local copy of the attribute bitmap that can be modified. @@ -3567,7 +3568,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, } else args.fhp = fhp; - args.acl = NULL; if (attrmask[0] & FATTR4_WORD0_ACL) { err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl); if (err == -EOPNOTSUPP) diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 5c1a4a0aa6..cfcc6ac8f2 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c @@ -176,27 +176,6 @@ void nfsd_drc_slab_free(void) kmem_cache_destroy(drc_slab); } -/** - * nfsd_net_reply_cache_init - per net namespace reply cache set-up - * @nn: nfsd_net being initialized - * - * Returns zero on succes; otherwise a negative errno is returned. 
- */ -int nfsd_net_reply_cache_init(struct nfsd_net *nn) -{ - return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM); -} - -/** - * nfsd_net_reply_cache_destroy - per net namespace reply cache tear-down - * @nn: nfsd_net being freed - * - */ -void nfsd_net_reply_cache_destroy(struct nfsd_net *nn) -{ - nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM); -} - int nfsd_reply_cache_init(struct nfsd_net *nn) { unsigned int hashsize; @@ -501,7 +480,7 @@ out: int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, unsigned int len, struct nfsd_cacherep **cacherep) { - struct nfsd_net *nn; + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct nfsd_cacherep *rp, *found; __wsum csum; struct nfsd_drc_bucket *b; @@ -510,7 +489,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, int rtn = RC_DOIT; if (type == RC_NOCACHE) { - nfsd_stats_rc_nocache_inc(); + nfsd_stats_rc_nocache_inc(nn); goto out; } @@ -520,7 +499,6 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, * Since the common case is a cache miss followed by an insert, * preallocate an entry. */ - nn = net_generic(SVC_NET(rqstp), nfsd_net_id); rp = nfsd_cacherep_alloc(rqstp, csum, nn); if (!rp) goto out; @@ -537,7 +515,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, nfsd_cacherep_dispose(&dispose); - nfsd_stats_rc_misses_inc(); + nfsd_stats_rc_misses_inc(nn); atomic_inc(&nn->num_drc_entries); nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp)); goto out; @@ -545,7 +523,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp, unsigned int start, found_entry: /* We found a matching entry which is either in progress or done. */ nfsd_reply_cache_free_locked(NULL, rp, nn); - nfsd_stats_rc_hits_inc(); + nfsd_stats_rc_hits_inc(nn); rtn = RC_DROPIT; rp = found; @@ -687,15 +665,15 @@ int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) atomic_read(&nn->num_drc_entries)); seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits); seq_printf(m, "mem usage: %lld\n", - percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_DRC_MEM_USAGE])); seq_printf(m, "cache hits: %lld\n", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS])); seq_printf(m, "cache misses: %lld\n", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES])); seq_printf(m, "not cached: %lld\n", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE])); seq_printf(m, "payload misses: %lld\n", - percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_PAYLOAD_MISSES])); seq_printf(m, "longest chain len: %u\n", nn->longest_chain); seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize); return 0; diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index f206ca32e7..ea3c811424 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -1671,7 +1671,7 @@ static __net_init int nfsd_net_init(struct net *net) retval = nfsd_idmap_init(net); if (retval) goto out_idmap_error; - retval = nfsd_net_reply_cache_init(nn); + retval = nfsd_stat_counters_init(nn); if (retval) goto out_repcache_error; nn->nfsd_versions = NULL; @@ -1679,6 +1679,7 @@ static __net_init int nfsd_net_init(struct net *net) 
nfsd4_init_leases_net(nn); get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key)); seqlock_init(&nn->writeverf_lock); + nfsd_proc_stat_init(net); return 0; @@ -1699,7 +1700,8 @@ static __net_exit void nfsd_net_exit(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - nfsd_net_reply_cache_destroy(nn); + nfsd_proc_stat_shutdown(net); + nfsd_stat_counters_destroy(nn); nfsd_idmap_shutdown(net); nfsd_export_shutdown(net); nfsd_netns_free_versions(nn); @@ -1722,12 +1724,9 @@ static int __init init_nfsd(void) retval = nfsd4_init_pnfs(); if (retval) goto out_free_slabs; - retval = nfsd_stat_init(); /* Statistics */ - if (retval) - goto out_free_pnfs; retval = nfsd_drc_slab_create(); if (retval) - goto out_free_stat; + goto out_free_pnfs; nfsd_lockd_init(); /* lockd->nfsd callbacks */ retval = create_proc_exports_entry(); if (retval) @@ -1761,8 +1760,6 @@ out_free_exports: out_free_lockd: nfsd_lockd_shutdown(); nfsd_drc_slab_free(); -out_free_stat: - nfsd_stat_shutdown(); out_free_pnfs: nfsd4_exit_pnfs(); out_free_slabs: @@ -1780,7 +1777,6 @@ static void __exit exit_nfsd(void) nfsd_drc_slab_free(); remove_proc_entry("fs/nfs/exports", NULL); remove_proc_entry("fs/nfs", NULL); - nfsd_stat_shutdown(); nfsd_lockd_shutdown(); nfsd4_free_slabs(); nfsd4_exit_pnfs(); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index dbfa0ac135..40fecf7b22 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -327,6 +327,7 @@ out: __be32 fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access) { + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct svc_export *exp = NULL; struct dentry *dentry; __be32 error; @@ -395,7 +396,7 @@ skip_pseudoflavor_check: out: trace_nfsd_fh_verify_err(rqstp, fhp, type, access, error); if (error == nfserr_stale) - nfsd_stats_fh_stale_inc(exp); + nfsd_stats_fh_stale_inc(nn, exp); return error; } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 41bdc913fa..0bbbe57e02 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -117,6 +117,16 @@ struct nfs4_cpntf_state { time64_t cpntf_time; /* last time stateid used */ }; +struct nfs4_cb_fattr { + struct nfsd4_callback ncf_getattr; + u32 ncf_cb_status; + u32 ncf_cb_bmap[1]; + + /* from CB_GETATTR reply */ + u64 ncf_cb_change; + u64 ncf_cb_fsize; +}; + /* * Represents a delegation stateid. 
The nfs4_client holds references to these * and they are put when it is being destroyed or when the delegation is @@ -150,6 +160,9 @@ struct nfs4_delegation { int dl_retries; struct nfsd4_callback dl_recall; bool dl_recalled; + + /* for CB_GETATTR */ + struct nfs4_cb_fattr dl_cb_fattr; }; #define cb_to_delegation(cb) \ @@ -640,6 +653,7 @@ enum nfsd4_cb_op { NFSPROC4_CLNT_CB_SEQUENCE, NFSPROC4_CLNT_CB_NOTIFY_LOCK, NFSPROC4_CLNT_CB_RECALL_ANY, + NFSPROC4_CLNT_CB_GETATTR, }; /* Returns true iff a is later than b: */ diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c index 12d79f5d4e..44e275324b 100644 --- a/fs/nfsd/stats.c +++ b/fs/nfsd/stats.c @@ -34,15 +34,17 @@ struct svc_stat nfsd_svcstats = { static int nfsd_show(struct seq_file *seq, void *v) { + struct net *net = pde_data(file_inode(seq->file)); + struct nfsd_net *nn = net_generic(net, nfsd_net_id); int i; seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]), - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]), - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]), - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]), - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]), - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]), + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]), + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]), + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]), + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]), + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE])); /* thread usage: */ seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt)); @@ -63,10 +65,10 @@ static int nfsd_show(struct seq_file *seq, void *v) seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1); for (i = 0; i <= LAST_NFS4_OP; i++) { seq_printf(seq, " %lld", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)])); } seq_printf(seq, "\nwdeleg_getattr %lld", - percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR])); + percpu_counter_sum_positive(&nn->counter[NFSD_STATS_WDELEG_GETATTR])); seq_putc(seq, '\n'); #endif @@ -108,31 +110,22 @@ void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num) percpu_counter_destroy(&counters[i]); } -static int nfsd_stat_counters_init(void) +int nfsd_stat_counters_init(struct nfsd_net *nn) { - return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM); + return nfsd_percpu_counters_init(nn->counter, NFSD_STATS_COUNTERS_NUM); } -static void nfsd_stat_counters_destroy(void) +void nfsd_stat_counters_destroy(struct nfsd_net *nn) { - nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM); + nfsd_percpu_counters_destroy(nn->counter, NFSD_STATS_COUNTERS_NUM); } -int nfsd_stat_init(void) +void nfsd_proc_stat_init(struct net *net) { - int err; - - err = nfsd_stat_counters_init(); - if (err) - return err; - - svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops); - - return 0; + svc_proc_register(net, &nfsd_svcstats, &nfsd_proc_ops); } -void nfsd_stat_shutdown(void) +void nfsd_proc_stat_shutdown(struct net *net) { - nfsd_stat_counters_destroy(); - svc_proc_unregister(&init_net, "nfsd"); + svc_proc_unregister(net, "nfsd"); } diff --git a/fs/nfsd/stats.h 
b/fs/nfsd/stats.h index 14f50c660b..c24be4ddbe 100644 --- a/fs/nfsd/stats.h +++ b/fs/nfsd/stats.h @@ -10,26 +10,7 @@ #include <uapi/linux/nfsd/stats.h> #include <linux/percpu_counter.h> - -enum { - NFSD_STATS_RC_HITS, /* repcache hits */ - NFSD_STATS_RC_MISSES, /* repcache misses */ - NFSD_STATS_RC_NOCACHE, /* uncached reqs */ - NFSD_STATS_FH_STALE, /* FH stale error */ - NFSD_STATS_IO_READ, /* bytes returned to read requests */ - NFSD_STATS_IO_WRITE, /* bytes passed in write requests */ -#ifdef CONFIG_NFSD_V4 - NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */ - NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP, -#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op)) - NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */ -#endif - NFSD_STATS_COUNTERS_NUM -}; - struct nfsd_stats { - struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM]; - atomic_t th_cnt; /* number of available threads */ }; @@ -40,64 +21,69 @@ extern struct svc_stat nfsd_svcstats; int nfsd_percpu_counters_init(struct percpu_counter *counters, int num); void nfsd_percpu_counters_reset(struct percpu_counter *counters, int num); void nfsd_percpu_counters_destroy(struct percpu_counter *counters, int num); -int nfsd_stat_init(void); -void nfsd_stat_shutdown(void); +int nfsd_stat_counters_init(struct nfsd_net *nn); +void nfsd_stat_counters_destroy(struct nfsd_net *nn); +void nfsd_proc_stat_init(struct net *net); +void nfsd_proc_stat_shutdown(struct net *net); -static inline void nfsd_stats_rc_hits_inc(void) +static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn) { - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]); + percpu_counter_inc(&nn->counter[NFSD_STATS_RC_HITS]); } -static inline void nfsd_stats_rc_misses_inc(void) +static inline void nfsd_stats_rc_misses_inc(struct nfsd_net *nn) { - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]); + percpu_counter_inc(&nn->counter[NFSD_STATS_RC_MISSES]); } -static inline void nfsd_stats_rc_nocache_inc(void) +static inline void nfsd_stats_rc_nocache_inc(struct nfsd_net *nn) { - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]); + percpu_counter_inc(&nn->counter[NFSD_STATS_RC_NOCACHE]); } -static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp) +static inline void nfsd_stats_fh_stale_inc(struct nfsd_net *nn, + struct svc_export *exp) { - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]); + percpu_counter_inc(&nn->counter[NFSD_STATS_FH_STALE]); if (exp && exp->ex_stats) percpu_counter_inc(&exp->ex_stats->counter[EXP_STATS_FH_STALE]); } -static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount) +static inline void nfsd_stats_io_read_add(struct nfsd_net *nn, + struct svc_export *exp, s64 amount) { - percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount); + percpu_counter_add(&nn->counter[NFSD_STATS_IO_READ], amount); if (exp && exp->ex_stats) percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_READ], amount); } -static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount) +static inline void nfsd_stats_io_write_add(struct nfsd_net *nn, + struct svc_export *exp, s64 amount) { - percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount); + percpu_counter_add(&nn->counter[NFSD_STATS_IO_WRITE], amount); if (exp && exp->ex_stats) percpu_counter_add(&exp->ex_stats->counter[EXP_STATS_IO_WRITE], amount); } static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn) { - 
percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]); + percpu_counter_inc(&nn->counter[NFSD_STATS_PAYLOAD_MISSES]); } static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount) { - percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount); + percpu_counter_add(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount); } static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount) { - percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount); + percpu_counter_sub(&nn->counter[NFSD_STATS_DRC_MEM_USAGE], amount); } #ifdef CONFIG_NFSD_V4 -static inline void nfsd_stats_wdeleg_getattr_inc(void) +static inline void nfsd_stats_wdeleg_getattr_inc(struct nfsd_net *nn) { - percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]); + percpu_counter_inc(&nn->counter[NFSD_STATS_WDELEG_GETATTR]); } #endif #endif /* _NFSD_STATS_H */ diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 4ed1e83def..9b5e20b2d0 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1002,7 +1002,9 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp, unsigned long *count, u32 *eof, ssize_t host_err) { if (host_err >= 0) { - nfsd_stats_io_read_add(fhp->fh_export, host_err); + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); + + nfsd_stats_io_read_add(nn, fhp->fh_export, host_err); *eof = nfsd_eof_on_read(file, offset, host_err, *count); *count = host_err; fsnotify_access(file); @@ -1185,7 +1187,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, goto out_nfserr; } *cnt = host_err; - nfsd_stats_io_write_add(exp, *cnt); + nfsd_stats_io_write_add(nn, exp, *cnt); fsnotify_modify(file); host_err = filemap_check_wb_err(file->f_mapping, since); if (host_err < 0) diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h index 0d39af1b00..e8b00309c4 100644 --- a/fs/nfsd/xdr4cb.h +++ b/fs/nfsd/xdr4cb.h @@ -54,3 +54,21 @@ #define NFS4_dec_cb_recall_any_sz (cb_compound_dec_hdr_sz + \ cb_sequence_dec_sz + \ op_dec_sz) + +/* + * 1: CB_GETATTR opcode (32-bit) + * N: file_handle + * 1: number of entry in attribute array (32-bit) + * 1: entry 0 in attribute array (32-bit) + */ +#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \ + cb_sequence_enc_sz + \ + 1 + enc_nfs4_fh_sz + 1 + 1) +/* + * 4: fattr_bitmap_maxsz + * 1: attribute array len + * 2: change attr (64-bit) + * 2: size (64-bit) + */ +#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \ + cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 3f78ebbb79..8433f076e9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1818,15 +1818,13 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p, } static void make_uffd_wp_pte(struct vm_area_struct *vma, - unsigned long addr, pte_t *pte) + unsigned long addr, pte_t *pte, pte_t ptent) { - pte_t ptent = ptep_get(pte); - if (pte_present(ptent)) { pte_t old_pte; old_pte = ptep_modify_prot_start(vma, addr, pte); - ptent = pte_mkuffd_wp(ptent); + ptent = pte_mkuffd_wp(old_pte); ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); } else if (is_swap_pte(ptent)) { ptent = pte_swp_mkuffd_wp(ptent); @@ -2176,9 +2174,12 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) { /* Fast path for performing exclusive WP */ for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { - if (pte_uffd_wp(ptep_get(pte))) + pte_t ptent = ptep_get(pte); + + if ((pte_present(ptent) && 
pte_uffd_wp(ptent)) || + pte_swp_uffd_wp_any(ptent)) continue; - make_uffd_wp_pte(vma, addr, pte); + make_uffd_wp_pte(vma, addr, pte, ptent); if (!flush_end) start = addr; flush_end = addr + PAGE_SIZE; @@ -2191,8 +2192,10 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, p->arg.return_mask == PAGE_IS_WRITTEN) { for (addr = start; addr < end; pte++, addr += PAGE_SIZE) { unsigned long next = addr + PAGE_SIZE; + pte_t ptent = ptep_get(pte); - if (pte_uffd_wp(ptep_get(pte))) + if ((pte_present(ptent) && pte_uffd_wp(ptent)) || + pte_swp_uffd_wp_any(ptent)) continue; ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN, p, addr, &next); @@ -2200,7 +2203,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, break; if (~p->arg.flags & PM_SCAN_WP_MATCHING) continue; - make_uffd_wp_pte(vma, addr, pte); + make_uffd_wp_pte(vma, addr, pte, ptent); if (!flush_end) start = addr; flush_end = next; @@ -2209,8 +2212,9 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, } for (addr = start; addr != end; pte++, addr += PAGE_SIZE) { + pte_t ptent = ptep_get(pte); unsigned long categories = p->cur_vma_category | - pagemap_page_category(p, vma, addr, ptep_get(pte)); + pagemap_page_category(p, vma, addr, ptent); unsigned long next = addr + PAGE_SIZE; if (!pagemap_scan_is_interesting_page(categories, p)) @@ -2225,7 +2229,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, if (~categories & PAGE_IS_WRITTEN) continue; - make_uffd_wp_pte(vma, addr, pte); + make_uffd_wp_pte(vma, addr, pte, ptent); if (!flush_end) start = addr; flush_end = next; diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 0c3311de5d..a393d505e8 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1063,6 +1063,7 @@ struct cifs_ses { and after mount option parsing we fill it */ char *domainName; char *password; + char *password2; /* When key rotation used, new password may be set before it expires */ char workstation_name[CIFS_MAX_WORKSTATION_LEN]; struct session_key auth_key; struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */ diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 7516c7d455..e28f011f11 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -2186,6 +2186,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) } ++delim; + /* BB consider adding support for password2 (Key Rotation) for multiuser in future */ ctx->password = kstrndup(delim, len, GFP_KERNEL); if (!ctx->password) { cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n", @@ -2209,6 +2210,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses) kfree(ctx->username); ctx->username = NULL; kfree_sensitive(ctx->password); + /* no need to free ctx->password2 since not allocated in this path */ ctx->password = NULL; goto out_key_put; } @@ -2320,6 +2322,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) if (!ses->password) goto get_ses_fail; } + /* ctx->password freed at unmount */ + if (ctx->password2) { + ses->password2 = kstrdup(ctx->password2, GFP_KERNEL); + if (!ses->password2) + goto get_ses_fail; + } if (ctx->domainname) { ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL); if (!ses->domainName) diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 775b0ca605..f119035a82 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -162,6 +162,7 @@ const struct fs_parameter_spec 
smb3_fs_parameters[] = { fsparam_string("username", Opt_user), fsparam_string("pass", Opt_pass), fsparam_string("password", Opt_pass), + fsparam_string("password2", Opt_pass2), fsparam_string("ip", Opt_ip), fsparam_string("addr", Opt_ip), fsparam_string("domain", Opt_domain), @@ -315,6 +316,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx new_ctx->nodename = NULL; new_ctx->username = NULL; new_ctx->password = NULL; + new_ctx->password2 = NULL; new_ctx->server_hostname = NULL; new_ctx->domainname = NULL; new_ctx->UNC = NULL; @@ -327,6 +329,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx DUP_CTX_STR(prepath); DUP_CTX_STR(username); DUP_CTX_STR(password); + DUP_CTX_STR(password2); DUP_CTX_STR(server_hostname); DUP_CTX_STR(UNC); DUP_CTX_STR(source); @@ -885,6 +888,8 @@ static int smb3_reconfigure(struct fs_context *fc) else { kfree_sensitive(ses->password); ses->password = kstrdup(ctx->password, GFP_KERNEL); + kfree_sensitive(ses->password2); + ses->password2 = kstrdup(ctx->password2, GFP_KERNEL); } STEAL_STRING(cifs_sb, ctx, domainname); STEAL_STRING(cifs_sb, ctx, nodename); @@ -1287,6 +1292,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, goto cifs_parse_mount_err; } break; + case Opt_pass2: + kfree_sensitive(ctx->password2); + ctx->password2 = NULL; + if (strlen(param->string) == 0) + break; + + ctx->password2 = kstrdup(param->string, GFP_KERNEL); + if (ctx->password2 == NULL) { + cifs_errorf(fc, "OOM when copying password2 string\n"); + goto cifs_parse_mount_err; + } + break; case Opt_ip: if (strlen(param->string) == 0) { ctx->got_ip = false; @@ -1586,6 +1603,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, cifs_parse_mount_err: kfree_sensitive(ctx->password); ctx->password = NULL; + kfree_sensitive(ctx->password2); + ctx->password2 = NULL; return -EINVAL; } @@ -1690,6 +1709,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx) ctx->username = NULL; kfree_sensitive(ctx->password); ctx->password = NULL; + kfree_sensitive(ctx->password2); + ctx->password2 = NULL; kfree(ctx->server_hostname); ctx->server_hostname = NULL; kfree(ctx->UNC); diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h index 61776572b8..369a3fea1d 100644 --- a/fs/smb/client/fs_context.h +++ b/fs/smb/client/fs_context.h @@ -138,6 +138,7 @@ enum cifs_param { Opt_source, Opt_user, Opt_pass, + Opt_pass2, Opt_ip, Opt_domain, Opt_srcaddr, @@ -171,6 +172,7 @@ struct smb3_fs_context { char *username; char *password; + char *password2; char *domainname; char *source; char *server_hostname; diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index 0d13db80e6..d56959d02e 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -101,6 +101,7 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); kfree_sensitive(buf_to_free->password); + kfree_sensitive(buf_to_free->password2); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); kfree_sensitive(buf_to_free->auth_key.response); diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index b71e32d66e..60793143e2 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -367,6 +367,17 @@ again: } rc = cifs_setup_session(0, ses, server, nls_codepage); + if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) { + /* + * Try alternate password for next reconnect (key rotation + * could be enabled on the server e.g.) 
if an alternate + * password is available and the current password is expired, + * but do not swap on non pwd related errors like host down + */ + if (ses->password2) + swap(ses->password2, ses->password); + } + if ((rc == -EACCES) && !tcon->retry) { mutex_unlock(&ses->session_mutex); rc = -EHOSTDOWN; diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index 53dfaac425..dc729ab980 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -612,13 +612,23 @@ static int oplock_break_pending(struct oplock_info *opinfo, int req_op_level) if (opinfo->op_state == OPLOCK_CLOSING) return -ENOENT; - else if (!opinfo->is_lease && opinfo->level <= req_op_level) - return 1; + else if (opinfo->level <= req_op_level) { + if (opinfo->is_lease && + opinfo->o_lease->state != + (SMB2_LEASE_HANDLE_CACHING_LE | + SMB2_LEASE_READ_CACHING_LE)) + return 1; + } } - if (!opinfo->is_lease && opinfo->level <= req_op_level) { - wake_up_oplock_break(opinfo); - return 1; + if (opinfo->level <= req_op_level) { + if (opinfo->is_lease && + opinfo->o_lease->state != + (SMB2_LEASE_HANDLE_CACHING_LE | + SMB2_LEASE_READ_CACHING_LE)) { + wake_up_oplock_break(opinfo); + return 1; + } } return 0; } @@ -886,7 +896,6 @@ static int oplock_break(struct oplock_info *brk_opinfo, int req_op_level) struct lease *lease = brk_opinfo->o_lease; atomic_inc(&brk_opinfo->breaking_cnt); - err = oplock_break_pending(brk_opinfo, req_op_level); if (err) return err < 0 ? err : 0; @@ -1199,7 +1208,9 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid, /* Only v2 leases handle the directory */ if (S_ISDIR(file_inode(fp->filp)->i_mode)) { - if (!lctx || lctx->version != 2) + if (!lctx || lctx->version != 2 || + (lctx->flags != SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE && + !lctx->epoch)) return 0; } @@ -1461,8 +1472,9 @@ void create_lease_buf(u8 *rbuf, struct lease *lease) buf->lcontext.LeaseFlags = lease->flags; buf->lcontext.Epoch = cpu_to_le16(lease->epoch); buf->lcontext.LeaseState = lease->state; - memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key, - SMB2_LEASE_KEY_SIZE); + if (lease->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) + memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key, + SMB2_LEASE_KEY_SIZE); buf->ccontext.DataOffset = cpu_to_le16(offsetof (struct create_lease_v2, lcontext)); buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2)); @@ -1527,8 +1539,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir) lreq->flags = lc->lcontext.LeaseFlags; lreq->epoch = lc->lcontext.Epoch; lreq->duration = lc->lcontext.LeaseDuration; - memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey, - SMB2_LEASE_KEY_SIZE); + if (lreq->flags == SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE) + memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey, + SMB2_LEASE_KEY_SIZE); lreq->version = 2; } else { struct create_lease *lc = (struct create_lease *)cc; diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index 002a3f0dc7..6633fa78e9 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -448,6 +448,10 @@ static int create_socket(struct interface *iface) sin6.sin6_family = PF_INET6; sin6.sin6_addr = in6addr_any; sin6.sin6_port = htons(server_conf.tcp_port); + + lock_sock(ksmbd_socket->sk); + ksmbd_socket->sk->sk_ipv6only = false; + release_sock(ksmbd_socket->sk); } ksmbd_tcp_nodelay(ksmbd_socket); diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c index 110e8a2721..56d1741fe0 100644 --- 
a/fs/tracefs/event_inode.c +++ b/fs/tracefs/event_inode.c @@ -164,21 +164,7 @@ static int eventfs_set_attr(struct mnt_idmap *idmap, struct dentry *dentry, * determined by the parent directory. */ if (dentry->d_inode->i_mode & S_IFDIR) { - /* - * The events directory dentry is never freed, unless its - * part of an instance that is deleted. It's attr is the - * default for its child files and directories. - * Do not update it. It's not used for its own mode or ownership. - */ - if (ei->is_events) { - /* But it still needs to know if it was modified */ - if (iattr->ia_valid & ATTR_UID) - ei->attr.mode |= EVENTFS_SAVE_UID; - if (iattr->ia_valid & ATTR_GID) - ei->attr.mode |= EVENTFS_SAVE_GID; - } else { - update_attr(&ei->attr, iattr); - } + update_attr(&ei->attr, iattr); } else { name = dentry->d_name.name; @@ -265,6 +251,35 @@ static const struct file_operations eventfs_file_operations = { .llseek = generic_file_llseek, }; +/* + * On a remount of tracefs, if UID or GID options are set, then + * the mount point inode permissions should be used. + * Reset the saved permission flags appropriately. + */ +void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid) +{ + struct eventfs_inode *ei = ti->private; + + if (!ei) + return; + + if (update_uid) + ei->attr.mode &= ~EVENTFS_SAVE_UID; + + if (update_gid) + ei->attr.mode &= ~EVENTFS_SAVE_GID; + + if (!ei->entry_attrs) + return; + + for (int i = 0; i < ei->nr_entries; i++) { + if (update_uid) + ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_UID; + if (update_gid) + ei->entry_attrs[i].mode &= ~EVENTFS_SAVE_GID; + } +} + /* Return the evenfs_inode of the "events" directory */ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry) { diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index d65ffad4c3..e30b74228e 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -30,20 +30,47 @@ static struct vfsmount *tracefs_mount; static int tracefs_mount_count; static bool tracefs_registered; +/* + * Keep track of all tracefs_inodes in order to update their + * flags if necessary on a remount. + */ +static DEFINE_SPINLOCK(tracefs_inode_lock); +static LIST_HEAD(tracefs_inodes); + static struct inode *tracefs_alloc_inode(struct super_block *sb) { struct tracefs_inode *ti; + unsigned long flags; ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL); if (!ti) return NULL; + spin_lock_irqsave(&tracefs_inode_lock, flags); + list_add_rcu(&ti->list, &tracefs_inodes); + spin_unlock_irqrestore(&tracefs_inode_lock, flags); + return &ti->vfs_inode; } +static void tracefs_free_inode_rcu(struct rcu_head *rcu) +{ + struct tracefs_inode *ti; + + ti = container_of(rcu, struct tracefs_inode, rcu); + kmem_cache_free(tracefs_inode_cachep, ti); +} + static void tracefs_free_inode(struct inode *inode) { - kmem_cache_free(tracefs_inode_cachep, get_tracefs(inode)); + struct tracefs_inode *ti = get_tracefs(inode); + unsigned long flags; + + spin_lock_irqsave(&tracefs_inode_lock, flags); + list_del_rcu(&ti->list); + spin_unlock_irqrestore(&tracefs_inode_lock, flags); + + call_rcu(&ti->rcu, tracefs_free_inode_rcu); } static ssize_t default_read_file(struct file *file, char __user *buf, @@ -153,16 +180,39 @@ static void set_tracefs_inode_owner(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); struct inode *root_inode = ti->private; + kuid_t uid; + kgid_t gid; + + uid = root_inode->i_uid; + gid = root_inode->i_gid; + + /* + * If the root is not the mount point, then check the root's + * permissions. 
If it was never set, then default to the + * mount point. + */ + if (root_inode != d_inode(root_inode->i_sb->s_root)) { + struct tracefs_inode *rti; + + rti = get_tracefs(root_inode); + root_inode = d_inode(root_inode->i_sb->s_root); + + if (!(rti->flags & TRACEFS_UID_PERM_SET)) + uid = root_inode->i_uid; + + if (!(rti->flags & TRACEFS_GID_PERM_SET)) + gid = root_inode->i_gid; + } /* * If this inode has never been referenced, then update * the permissions to the superblock. */ if (!(ti->flags & TRACEFS_UID_PERM_SET)) - inode->i_uid = root_inode->i_uid; + inode->i_uid = uid; if (!(ti->flags & TRACEFS_GID_PERM_SET)) - inode->i_gid = root_inode->i_gid; + inode->i_gid = gid; } static int tracefs_permission(struct mnt_idmap *idmap, @@ -313,6 +363,8 @@ static int tracefs_apply_options(struct super_block *sb, bool remount) struct tracefs_fs_info *fsi = sb->s_fs_info; struct inode *inode = d_inode(sb->s_root); struct tracefs_mount_opts *opts = &fsi->mount_opts; + struct tracefs_inode *ti; + bool update_uid, update_gid; umode_t tmp_mode; /* @@ -332,6 +384,25 @@ static int tracefs_apply_options(struct super_block *sb, bool remount) if (!remount || opts->opts & BIT(Opt_gid)) inode->i_gid = opts->gid; + if (remount && (opts->opts & BIT(Opt_uid) || opts->opts & BIT(Opt_gid))) { + + update_uid = opts->opts & BIT(Opt_uid); + update_gid = opts->opts & BIT(Opt_gid); + + rcu_read_lock(); + list_for_each_entry_rcu(ti, &tracefs_inodes, list) { + if (update_uid) + ti->flags &= ~TRACEFS_UID_PERM_SET; + + if (update_gid) + ti->flags &= ~TRACEFS_GID_PERM_SET; + + if (ti->flags & TRACEFS_EVENT_INODE) + eventfs_remount(ti, update_uid, update_gid); + } + rcu_read_unlock(); + } + return 0; } @@ -398,7 +469,22 @@ static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags) return !(ei && ei->is_freed); } +static void tracefs_d_iput(struct dentry *dentry, struct inode *inode) +{ + struct tracefs_inode *ti = get_tracefs(inode); + + /* + * This inode is being freed and cannot be used for + * eventfs. Clear the flag so that it doesn't call into + * eventfs during the remount flag updates. The eventfs_inode + * gets freed after an RCU cycle, so the content will still + * be safe if the iteration is going on now. 
+ */ + ti->flags &= ~TRACEFS_EVENT_INODE; +} + static const struct dentry_operations tracefs_dentry_operations = { + .d_iput = tracefs_d_iput, .d_revalidate = tracefs_d_revalidate, .d_release = tracefs_d_release, }; diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h index beb3dcd0e4..824cbe8367 100644 --- a/fs/tracefs/internal.h +++ b/fs/tracefs/internal.h @@ -11,8 +11,12 @@ enum { }; struct tracefs_inode { - struct inode vfs_inode; + union { + struct inode vfs_inode; + struct rcu_head rcu; + }; /* The below gets initialized with memset_after(ti, 0, vfs_inode) */ + struct list_head list; unsigned long flags; void *private; }; @@ -75,6 +79,7 @@ struct dentry *tracefs_end_creating(struct dentry *dentry); struct dentry *tracefs_failed_creating(struct dentry *dentry); struct inode *tracefs_get_inode(struct super_block *sb); +void eventfs_remount(struct tracefs_inode *ti, bool update_uid, bool update_gid); void eventfs_d_release(struct dentry *dentry); #endif /* _TRACEFS_INTERNAL_H */ diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 959551ff9a..13ef4e1fc1 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -925,6 +925,10 @@ static int userfaultfd_release(struct inode *inode, struct file *file) prev = vma; continue; } + /* Reset ptes for the whole vma range if wr-protected */ + if (userfaultfd_wp(vma)) + uffd_wp_range(vma, vma->vm_start, + vma->vm_end - vma->vm_start, false); new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS; vma = vma_modify_flags_uffd(&vmi, prev, vma, vma->vm_start, vma->vm_end, new_flags, diff --git a/fs/vboxsf/file.c b/fs/vboxsf/file.c index 2307f8037e..118dedef8e 100644 --- a/fs/vboxsf/file.c +++ b/fs/vboxsf/file.c @@ -218,6 +218,7 @@ const struct file_operations vboxsf_reg_fops = { .release = vboxsf_file_release, .fsync = noop_fsync, .splice_read = filemap_splice_read, + .setlease = simple_nosetlease, }; const struct inode_operations vboxsf_reg_iops = { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b43ca3b9d2..6ee8e7d738 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -233,6 +233,19 @@ static inline unsigned int disk_openers(struct gendisk *disk) return atomic_read(&disk->part0->bd_openers); } +/** + * disk_has_partscan - return %true if partition scanning is enabled on a disk + * @disk: disk to check + * + * Returns %true if partitions scanning is enabled for @disk, or %false if + * partition scanning is disabled either permanently or temporarily. + */ +static inline bool disk_has_partscan(struct gendisk *disk) +{ + return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) && + !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state); +} + /* * The gendisk is refcounted by the part0 block_device, and the bd_device * therein is also used for device model presentation in sysfs. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 0caf354cb9..a6a2895283 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -278,6 +278,17 @@ struct ftrace_likely_data { # define __no_kcsan #endif +#ifdef __SANITIZE_MEMORY__ +/* + * Similarly to KASAN and KCSAN, KMSAN loses function attributes of inlined + * functions, therefore disabling KMSAN checks also requires disabling inlining. + * + * __no_sanitize_or_inline effectively prevents KMSAN from reporting errors + * within the function and marks all its outputs as initialized. 
+ */ +# define __no_sanitize_or_inline __no_kmsan_checks notrace __maybe_unused +#endif + #ifndef __no_sanitize_or_inline #define __no_sanitize_or_inline __always_inline #endif diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index c3f9bb6602..e06bad467f 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -682,11 +682,4 @@ static inline bool dma_fence_is_container(struct dma_fence *fence) return dma_fence_is_array(fence) || dma_fence_is_chain(fence); } -#define DMA_FENCE_WARN(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ - ##args); \ - } while (0) - #endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 1b6053da87..495ebf5f2c 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -2,6 +2,8 @@ #ifndef __LINUX_GFP_TYPES_H #define __LINUX_GFP_TYPES_H +#include <linux/bits.h> + /* The typedef is in types.h but we want the documentation here */ #if 0 /** diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6ef0557b4b..96ceb40954 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -832,6 +832,7 @@ struct vmbus_gpadl { u32 gpadl_handle; u32 size; void *buffer; + bool decrypted; }; struct vmbus_channel { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index a0c75e467d..5bfa57a5f7 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2686,8 +2686,10 @@ #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RM 0x0962 #define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c +#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 #define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c #define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 +#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe #define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c #define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04 #define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28 diff --git a/include/linux/regmap.h b/include/linux/regmap.h index c9182a4773..113261287a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1225,6 +1225,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map, int regmap_raw_write_async(struct regmap *map, unsigned int reg, const void *val, size_t val_len); int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); +int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val); int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); int regmap_noinc_read(struct regmap *map, unsigned int reg, @@ -1734,6 +1735,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg, return -EINVAL; } +static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len) { diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 4660582a33..ed180ca419 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -320,13 +320,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id) static inline int devm_regulator_get_enable(struct device *dev, const char *id) { - return -ENODEV; + return 0; } static inline int devm_regulator_get_enable_optional(struct device *dev, const char *id) { - return -ENODEV; + return 0; } static inline struct regulator *__must_check diff --git 
a/include/linux/skbuff.h b/include/linux/skbuff.h index 5bafcfe18b..f86f9396f7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2972,6 +2972,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb) } } +/* Move the full mac header up to current network_header. + * Leaves skb->data pointing at offset skb->mac_len into the mac_header. + * Must be provided the complete mac header length. + */ +static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len) +{ + if (skb_mac_header_was_set(skb)) { + const unsigned char *old_mac = skb_mac_header(skb); + + skb_set_mac_header(skb, -full_mac_len); + memmove(skb_mac_header(skb), old_mac, full_mac_len); + __skb_push(skb, full_mac_len - skb->mac_len); + } +} + static inline int skb_checksum_start_offset(const struct sk_buff *skb) { return skb->csum_start - skb_headroom(skb); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index e65ec3fd27..a509caf823 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -461,10 +461,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) { + read_lock_bh(&sk->sk_callback_lock); if (psock->saved_data_ready) psock->saved_data_ready(sk); else sk->sk_data_ready(sk); + read_unlock_bh(&sk->sk_callback_lock); } static inline void psock_set_prog(struct bpf_prog **pprog, diff --git a/include/linux/slab.h b/include/linux/slab.h index b5f5ee8308..34b32c59ce 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -228,7 +228,7 @@ void kfree(const void *objp); void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); -DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) +DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) /** * ksize - Report actual allocation size of associated object @@ -754,7 +754,7 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); extern void kvfree(const void *addr); -DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T)) +DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T)) extern void kvfree_sensitive(const void *addr, size_t len); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 307961b415..317200cd3a 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -50,11 +50,36 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, return 0; } +/* Deprecated. + * This is unsafe, unless caller checked user provided optlen. + * Prefer copy_safe_from_sockptr() instead. + */ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) { return copy_from_sockptr_offset(dst, src, 0, size); } +/** + * copy_safe_from_sockptr: copy a struct from sockptr + * @dst: Destination address, in kernel space. This buffer must be @ksize + * bytes long. + * @ksize: Size of @dst struct. + * @optval: Source address. (in user or kernel space) + * @optlen: Size of @optval data. + * + * Returns: + * * -EINVAL: @optlen < @ksize + * * -EFAULT: access to userspace failed. 
+ * * 0 : @ksize bytes were copied + */ +static inline int copy_safe_from_sockptr(void *dst, size_t ksize, + sockptr_t optval, unsigned int optlen) +{ + if (optlen < ksize) + return -EINVAL; + return copy_from_sockptr(dst, optval, ksize); +} + static inline int copy_struct_from_sockptr(void *dst, size_t ksize, sockptr_t src, size_t usize) { diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 5e9d1469c6..5321585c77 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -139,6 +139,7 @@ struct rpc_create_args { const char *servername; const char *nodename; const struct rpc_program *program; + struct rpc_stat *stats; u32 prognumber; /* overrides program->number */ u32 version; rpc_authflavor_t authflavor; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 2cfd8d8626..f9db2d1ca5 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1665,6 +1665,15 @@ struct hci_cp_le_set_event_mask { __u8 mask[8]; } __packed; +/* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E + * 7.8.2 LE Read Buffer Size command + * MAX_LE_MTU is 0xffff. + * 0 is also valid. It means that no dedicated LE Buffer exists. + * It should use the HCI_Read_Buffer_Size command and mtu is shared + * between BR/EDR and LE. + */ +#define HCI_MIN_LE_MTU 0x001b + #define HCI_OP_LE_READ_BUFFER_SIZE 0x2002 struct hci_rp_le_read_buffer_size { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index fe9e1524d3..8504e10f51 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -706,6 +706,7 @@ struct hci_conn { __u16 handle; __u16 sync_handle; __u16 state; + __u16 mtu; __u8 mode; __u8 type; __u8 role; diff --git a/include/net/gro.h b/include/net/gro.h index b435f0ddbf..62b006f3ed 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -86,6 +86,15 @@ struct napi_gro_cb { /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; + + /* L3 offsets */ + union { + struct { + u16 network_offset; + u16 inner_network_offset; + }; + u16 network_offsets[2]; + }; }; #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 1d107241b9..5d0f8f40b8 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1047,6 +1047,9 @@ struct xfrm_offload { #define CRYPTO_INVALID_PACKET_SYNTAX 64 #define CRYPTO_INVALID_PROTOCOL 128 + /* Used to keep whole l2 header for transport mode GRO */ + __u32 orig_mac_len; + __u8 proto; __u8 inner_ipproto; }; diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 1af9e68193..9cc10fab01 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h @@ -1684,8 +1684,7 @@ struct snd_emu1010 { unsigned int clock_fallback; unsigned int optical_in; /* 0:SPDIF, 1:ADAT */ unsigned int optical_out; /* 0:SPDIF, 1:ADAT */ - struct work_struct firmware_work; - struct work_struct clock_work; + struct work_struct work; }; struct snd_emu10k1 { diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 87b8de9b6c..ecf9da5462 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -83,7 +83,7 @@ EM(rxrpc_badmsg_bad_abort, "bad-abort") \ EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \ EM(rxrpc_badmsg_short_ack, "short-ack") \ - EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \ + EM(rxrpc_badmsg_short_ack_trailer, "short-ack-trailer") \ EM(rxrpc_badmsg_short_hdr, "short-hdr") \ EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \ 
EM(rxrpc_badmsg_zero_call, "zero-call") \ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index f0ed68974c..5fdaf0ab46 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -912,14 +912,25 @@ enum kfd_dbg_trap_exception_code { KFD_EC_MASK(EC_DEVICE_NEW)) #define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \ KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE)) +#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)) /* Checks for exception code types for KFD search */ +#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX) #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) +#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \ + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET)) /* Runtime enable states */ diff --git a/include/uapi/scsi/scsi_bsg_mpi3mr.h b/include/uapi/scsi/scsi_bsg_mpi3mr.h index c72ce38728..30a5c1a593 100644 --- a/include/uapi/scsi/scsi_bsg_mpi3mr.h +++ b/include/uapi/scsi/scsi_bsg_mpi3mr.h @@ -382,7 +382,7 @@ struct mpi3mr_bsg_in_reply_buf { __u8 mpi_reply_type; __u8 rsvd1; __u16 rsvd2; - __u8 reply_buf[1]; + __u8 reply_buf[]; }; /** diff --git a/kernel/bpf/bloom_filter.c b/kernel/bpf/bloom_filter.c index addf3dd57b..35e1ddca74 100644 --- a/kernel/bpf/bloom_filter.c +++ b/kernel/bpf/bloom_filter.c @@ -80,6 +80,18 @@ static int bloom_map_get_next_key(struct bpf_map *map, void *key, void *next_key return -EOPNOTSUPP; } +/* Called from syscall */ +static int bloom_map_alloc_check(union bpf_attr *attr) +{ + if (attr->value_size > KMALLOC_MAX_SIZE) + /* if value_size is bigger, the user space won't be able to + * access the elements. 
+ */ + return -E2BIG; + + return 0; +} + static struct bpf_map *bloom_map_alloc(union bpf_attr *attr) { u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits; @@ -191,6 +203,7 @@ static u64 bloom_map_mem_usage(const struct bpf_map *map) BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter) const struct bpf_map_ops bloom_filter_map_ops = { .map_meta_equal = bpf_map_meta_equal, + .map_alloc_check = bloom_map_alloc_check, .map_alloc = bloom_map_alloc, .map_free = bloom_map_free, .map_get_next_key = bloom_map_get_next_key, diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 19e575e6b7..11bc3af33f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -18040,8 +18040,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) f = fdget(fd); map = __bpf_map_get(f); if (IS_ERR(map)) { - verbose(env, "fd %d is not pointing to valid bpf_map\n", - insn[0].imm); + verbose(env, "fd %d is not pointing to valid bpf_map\n", fd); return PTR_ERR(map); } diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 877c4b8fad..1955b42f42 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -1717,6 +1717,7 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, mem->for_alloc = true; #ifdef CONFIG_SWIOTLB_DYNAMIC spin_lock_init(&mem->lock); + INIT_LIST_HEAD_RCU(&mem->pools); #endif add_mem_pool(mem, pool); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 7b482a26d7..223aa99bb7 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1141,8 +1141,12 @@ static bool kick_pool(struct worker_pool *pool) !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) { struct work_struct *work = list_first_entry(&pool->worklist, struct work_struct, entry); - p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask); - get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; + int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask, + cpu_online_mask); + if (wake_cpu < nr_cpu_ids) { + p->wake_cpu = wake_cpu; + get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++; + } } #endif wake_up_process(p); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ef36b829ae..6352d59c57 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT Incompatible with older versions of ccache. config DEBUG_INFO_BTF - bool "Generate BTF typeinfo" + bool "Generate BTF type information" depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST depends on BPF_SYSCALL @@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE using DEBUG_INFO_BTF_MODULES. config DEBUG_INFO_BTF_MODULES - def_bool y + bool "Generate BTF type information for kernel modules" + default y depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF help Generate compact split BTF type information for kernel modules. diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 6fba6423cc..a5a687e1c9 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -302,7 +302,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords) } else { for (end = buf; *end && !isspace(*end); end++) ; - BUG_ON(end == buf); + if (end == buf) { + pr_err("parse err after word:%d=%s\n", nwords, + nwords ? 
words[nwords - 1] : "<none>"); + return -EINVAL; + } } /* `buf' is start of word, `end' is one past its end */ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 6f241bb387..d70db05757 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5061,18 +5061,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (size == 0 || max - min < size - 1) return -EINVAL; - if (mas_is_start(mas)) { + if (mas_is_start(mas)) mas_start(mas); - mas->offset = mas_data_end(mas); - } else if (mas->offset >= 2) { - mas->offset -= 2; - } else if (!mas_rewind_node(mas)) { + else if ((mas->offset < 2) && (!mas_rewind_node(mas))) return -EBUSY; - } - /* Empty set. */ - if (mas_is_none(mas) || mas_is_ptr(mas)) + if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) return mas_sparse_area(mas, min, max, size, false); + else if (mas->offset >= 2) + mas->offset -= 2; + else + mas->offset = mas_data_end(mas); + /* The start of the window can only be within these values. */ mas->index = min; diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 68b45c82c3..7bc2220fea 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter, do { res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max, extraction_flags, &off); - if (res < 0) + if (res <= 0) goto failed; len = res; diff --git a/mm/readahead.c b/mm/readahead.c index 2648ec4f04..89139a8721 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -490,6 +490,7 @@ void page_cache_ra_order(struct readahead_control *ractl, pgoff_t index = readahead_index(ractl); pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; pgoff_t mark = index + ra->size - ra->async_size; + unsigned int nofs; int err = 0; gfp_t gfp = readahead_gfp_mask(mapping); @@ -506,6 +507,8 @@ void page_cache_ra_order(struct readahead_control *ractl, new_order--; } + /* See comment in page_cache_ra_unbounded() */ + nofs = memalloc_nofs_save(); filemap_invalidate_lock_shared(mapping); while (index <= limit) { unsigned int order = new_order; @@ -532,6 +535,7 @@ void page_cache_ra_order(struct readahead_control *ractl, read_pages(ractl); filemap_invalidate_unlock_shared(mapping); + memalloc_nofs_restore(nofs); /* * If there were already pages in the page cache, then we may have @@ -557,6 +557,26 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); } +/* + * See comment in calculate_sizes(). + */ +static inline bool freeptr_outside_object(struct kmem_cache *s) +{ + return s->offset >= s->inuse; +} + +/* + * Return offset of the end of info block which is inuse + free pointer if + * not overlapping with object. + */ +static inline unsigned int get_info_end(struct kmem_cache *s) +{ + if (freeptr_outside_object(s)) + return s->inuse + sizeof(void *); + else + return s->inuse; +} + /* Loop over all objects in a slab */ #define for_each_object(__p, __s, __addr, __objects) \ for (__p = fixup_red_left(__s, __addr); \ @@ -845,26 +865,6 @@ static void print_section(char *level, char *text, u8 *addr, metadata_access_disable(); } -/* - * See comment in calculate_sizes(). - */ -static inline bool freeptr_outside_object(struct kmem_cache *s) -{ - return s->offset >= s->inuse; -} - -/* - * Return offset of the end of info block which is inuse + free pointer if - * not overlapping with object. 
- */ -static inline unsigned int get_info_end(struct kmem_cache *s) -{ - if (freeptr_outside_object(s)) - return s->inuse + sizeof(void *); - else - return s->inuse; -} - static struct track *get_track(struct kmem_cache *s, void *object, enum track_item alloc) { @@ -2107,15 +2107,20 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init) * * The initialization memset's clear the object and the metadata, * but don't touch the SLAB redzone. + * + * The object's freepointer is also avoided if stored outside the + * object. */ if (unlikely(init)) { int rsize; + unsigned int inuse; + inuse = get_info_end(s); if (!kasan_has_integrated_init()) memset(kasan_reset_tag(x), 0, s->object_size); rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; - memset((char *)kasan_reset_tag(x) + s->inuse, 0, - s->size - s->inuse - rsize); + memset((char *)kasan_reset_tag(x) + inuse, 0, + s->size - inuse - rsize); } /* KASAN might put x into memory quarantine, delaying its reuse. */ return !kasan_slab_free(s, x, init); @@ -3737,7 +3742,8 @@ static void *__slab_alloc_node(struct kmem_cache *s, static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, void *obj) { - if (unlikely(slab_want_init_on_free(s)) && obj) + if (unlikely(slab_want_init_on_free(s)) && obj && + !freeptr_outside_object(s)) memset((void *)((char *)kasan_reset_tag(obj) + s->offset), 0, sizeof(void *)); } diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index f001582345..9404dd551d 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c @@ -478,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head, if (unlikely(!vhdr)) goto out; + NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen; + type = vhdr->h_vlan_encapsulated_proto; ptype = gro_find_receive_by_type(type); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 18f97b2288..9a369bc14f 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -909,11 +909,37 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, { struct hci_conn *conn; + switch (type) { + case ACL_LINK: + if (!hdev->acl_mtu) + return ERR_PTR(-ECONNREFUSED); + break; + case ISO_LINK: + if (hdev->iso_mtu) + /* Dedicated ISO Buffer exists */ + break; + fallthrough; + case LE_LINK: + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return ERR_PTR(-ECONNREFUSED); + if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU) + return ERR_PTR(-ECONNREFUSED); + break; + case SCO_LINK: + case ESCO_LINK: + if (!hdev->sco_pkts) + /* Controller does not support SCO or eSCO over HCI */ + return ERR_PTR(-ECONNREFUSED); + break; + default: + return ERR_PTR(-ECONNREFUSED); + } + bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle); conn = kzalloc(sizeof(*conn), GFP_KERNEL); if (!conn) - return NULL; + return ERR_PTR(-ENOMEM); bacpy(&conn->dst, dst); bacpy(&conn->src, &hdev->bdaddr); @@ -944,10 +970,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, switch (type) { case ACL_LINK: conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; + conn->mtu = hdev->acl_mtu; break; case LE_LINK: /* conn->src should reflect the local identity address */ hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + conn->mtu = hdev->le_mtu ? 
hdev->le_mtu : hdev->acl_mtu; break; case ISO_LINK: /* conn->src should reflect the local identity address */ @@ -959,6 +987,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, else if (conn->role == HCI_ROLE_MASTER) conn->cleanup = cis_cleanup; + conn->mtu = hdev->iso_mtu ? hdev->iso_mtu : + hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu; break; case SCO_LINK: if (lmp_esco_capable(hdev)) @@ -966,9 +996,12 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, (hdev->esco_type & EDR_ESCO_MASK); else conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; + + conn->mtu = hdev->sco_mtu; break; case ESCO_LINK: conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; + conn->mtu = hdev->sco_mtu; break; } @@ -1011,7 +1044,7 @@ struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type, handle = hci_conn_hash_alloc_unset(hdev); if (unlikely(handle < 0)) - return NULL; + return ERR_PTR(-ECONNREFUSED); return hci_conn_add(hdev, type, dst, role, handle); } @@ -1317,8 +1350,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, bacpy(&conn->dst, dst); } else { conn = hci_conn_add_unset(hdev, LE_LINK, dst, role); - if (!conn) - return ERR_PTR(-ENOMEM); + if (IS_ERR(conn)) + return conn; hci_conn_hold(conn); conn->pending_sec_level = sec_level; } @@ -1494,8 +1527,8 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, return ERR_PTR(-EADDRINUSE); conn = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); - if (!conn) - return ERR_PTR(-ENOMEM); + if (IS_ERR(conn)) + return conn; conn->state = BT_CONNECT; @@ -1538,8 +1571,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, BT_DBG("requesting refresh of dst_addr"); conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER); - if (!conn) - return ERR_PTR(-ENOMEM); + if (IS_ERR(conn)) + return conn; if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { hci_conn_del(conn); @@ -1586,8 +1619,8 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); if (!acl) { acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); - if (!acl) - return ERR_PTR(-ENOMEM); + if (IS_ERR(acl)) + return acl; } hci_conn_hold(acl); @@ -1655,9 +1688,9 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, sco = hci_conn_hash_lookup_ba(hdev, type, dst); if (!sco) { sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER); - if (!sco) { + if (IS_ERR(sco)) { hci_conn_drop(acl); - return ERR_PTR(-ENOMEM); + return sco; } } @@ -1847,8 +1880,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, qos->ucast.cis); if (!cis) { cis = hci_conn_add_unset(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); - if (!cis) - return ERR_PTR(-ENOMEM); + if (IS_ERR(cis)) + return cis; cis->cleanup = cis_cleanup; cis->dst_type = dst_type; cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET; @@ -1983,14 +2016,8 @@ static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn, struct bt_iso_io_qos *qos, __u8 phy) { /* Only set MTU if PHY is enabled */ - if (!qos->sdu && qos->phy) { - if (hdev->iso_mtu > 0) - qos->sdu = hdev->iso_mtu; - else if (hdev->le_mtu > 0) - qos->sdu = hdev->le_mtu; - else - qos->sdu = hdev->acl_mtu; - } + if (!qos->sdu && qos->phy) + qos->sdu = conn->mtu; /* Use the same PHY as ACL if set to any */ if (qos->phy == BT_ISO_PHY_ANY) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 0592369579..befe645d3f 100644 --- 
a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2736,8 +2736,6 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_unregister_suspend_notifier(hdev); - msft_unregister(hdev); - hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && @@ -2791,6 +2789,7 @@ void hci_release_dev(struct hci_dev *hdev) hci_discovery_filter_clear(hdev); hci_blocked_keys_clear(hdev); hci_codec_list_clear(&hdev->local_codecs); + msft_release(hdev); hci_dev_unlock(hdev); ida_destroy(&hdev->unset_handle_ida); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 9d1063c51e..c19d78e5d2 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -954,6 +954,9 @@ static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); + if (!hdev->acl_mtu || !hdev->acl_pkts) + return HCI_ERROR_INVALID_PARAMETERS; + return rp->status; } @@ -1263,6 +1266,9 @@ static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return HCI_ERROR_INVALID_PARAMETERS; + return rp->status; } @@ -2342,8 +2348,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) if (!conn) { conn = hci_conn_add_unset(hdev, ACL_LINK, &cp->bdaddr, HCI_ROLE_MASTER); - if (!conn) - bt_dev_err(hdev, "no memory for new connection"); + if (IS_ERR(conn)) + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); } } @@ -3154,8 +3160,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, BDADDR_BREDR)) { conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); - if (!conn) { - bt_dev_err(hdev, "no memory for new conn"); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } } else { @@ -3343,8 +3349,8 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data, if (!conn) { conn = hci_conn_add_unset(hdev, ev->link_type, &ev->bdaddr, HCI_ROLE_SLAVE); - if (!conn) { - bt_dev_err(hdev, "no memory for new connection"); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } } @@ -3821,6 +3827,9 @@ static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); + if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU) + return HCI_ERROR_INVALID_PARAMETERS; + return rp->status; } @@ -5912,8 +5921,8 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, goto unlock; conn = hci_conn_add_unset(hdev, LE_LINK, bdaddr, role); - if (!conn) { - bt_dev_err(hdev, "no memory for new connection"); + if (IS_ERR(conn)) { + bt_dev_err(hdev, "connection err: %ld", PTR_ERR(conn)); goto unlock; } @@ -7042,7 +7051,7 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, if (!cis) { cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE, cis_handle); - if (!cis) { + if (IS_ERR(cis)) { hci_le_reject_cis(hdev, ev->cis_handle); goto unlock; } @@ -7151,7 +7160,7 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, if (!bis) { bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY, HCI_ROLE_SLAVE, handle); - if (!bis) + if (IS_ERR(bis)) continue; } @@ -7181,6 +7190,8 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, u16 handle = le16_to_cpu(ev->bis[i]); bis 
= hci_conn_hash_lookup_handle(hdev, handle); + if (!bis) + continue; set_bit(HCI_CONN_BIG_SYNC_FAILED, &bis->flags); hci_connect_cfm(bis, ev->status); @@ -7221,7 +7232,7 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, pa_sync = hci_conn_add_unset(hdev, ISO_LINK, BDADDR_ANY, HCI_ROLE_SLAVE); - if (!pa_sync) + if (IS_ERR(pa_sync)) goto unlock; pa_sync->sync_handle = le16_to_cpu(ev->sync_handle); diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index fa6c2e95d5..6d217df75c 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -1264,7 +1264,7 @@ static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, return -ENOTCONN; } - mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu; + mtu = iso_pi(sk)->conn->hcon->mtu; release_sock(sk); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 84fc70862d..3f7a82f10f 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -415,6 +415,9 @@ static void l2cap_chan_timeout(struct work_struct *work) BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + if (!conn) + return; + mutex_lock(&conn->chan_lock); /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling * this work. No need to call l2cap_chan_hold(chan) here again. @@ -3902,13 +3905,12 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, return 0; } -static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, - struct l2cap_cmd_hdr *cmd, - u8 *data, u8 rsp_code, u8 amp_id) +static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, + u8 *data, u8 rsp_code, u8 amp_id) { struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; struct l2cap_conn_rsp rsp; - struct l2cap_chan *chan = NULL, *pchan; + struct l2cap_chan *chan = NULL, *pchan = NULL; int result, status = L2CAP_CS_NO_INFO; u16 dcid = 0, scid = __le16_to_cpu(req->scid); @@ -3921,7 +3923,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, &conn->hcon->dst, ACL_LINK); if (!pchan) { result = L2CAP_CR_BAD_PSM; - goto sendresp; + goto response; } mutex_lock(&conn->chan_lock); @@ -4008,17 +4010,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, } response: - l2cap_chan_unlock(pchan); - mutex_unlock(&conn->chan_lock); - l2cap_chan_put(pchan); - -sendresp: rsp.scid = cpu_to_le16(scid); rsp.dcid = cpu_to_le16(dcid); rsp.result = cpu_to_le16(result); rsp.status = cpu_to_le16(status); l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); + if (!pchan) + return; + if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); @@ -4041,7 +4041,9 @@ sendresp: chan->num_conf_req++; } - return chan; + l2cap_chan_unlock(pchan); + mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); } static int l2cap_connect_req(struct l2cap_conn *conn, @@ -6239,7 +6241,7 @@ static int l2cap_finish_move(struct l2cap_chan *chan) BT_DBG("chan %p", chan); chan->rx_state = L2CAP_RX_STATE_RECV; - chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + chan->conn->mtu = chan->conn->hcon->mtu; return l2cap_resegment(chan); } @@ -6306,7 +6308,7 @@ static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, */ chan->next_tx_seq = control->reqseq; chan->unacked_frames = 0; - chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + chan->conn->mtu = chan->conn->hcon->mtu; err = l2cap_resegment(chan); @@ -6846,18 +6848,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) BT_DBG("hcon %p conn %p hchan %p", hcon, conn, 
hchan); - switch (hcon->type) { - case LE_LINK: - if (hcon->hdev->le_mtu) { - conn->mtu = hcon->hdev->le_mtu; - break; - } - fallthrough; - default: - conn->mtu = hcon->hdev->acl_mtu; - break; - } - + conn->mtu = hcon->mtu; conn->feat_mask = 0; conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index 9612c5d1b1..d039683d3b 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -769,7 +769,7 @@ void msft_register(struct hci_dev *hdev) mutex_init(&msft->filter_lock); } -void msft_unregister(struct hci_dev *hdev) +void msft_release(struct hci_dev *hdev) { struct msft_data *msft = hdev->msft_data; diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 2a63205b37..fe538e9c91 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -14,7 +14,7 @@ bool msft_monitor_supported(struct hci_dev *hdev); void msft_register(struct hci_dev *hdev); -void msft_unregister(struct hci_dev *hdev); +void msft_release(struct hci_dev *hdev); void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb); @@ -35,7 +35,7 @@ static inline bool msft_monitor_supported(struct hci_dev *hdev) } static inline void msft_register(struct hci_dev *hdev) {} -static inline void msft_unregister(struct hci_dev *hdev) {} +static inline void msft_release(struct hci_dev *hdev) {} static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, void *data, diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 5d03c5440b..71d36582d4 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -83,6 +83,10 @@ static void sco_sock_timeout(struct work_struct *work) struct sock *sk; sco_conn_lock(conn); + if (!conn->hcon) { + sco_conn_unlock(conn); + return; + } sk = conn->sk; if (sk) sock_hold(sk); @@ -122,7 +126,6 @@ static void sco_sock_clear_timer(struct sock *sk) /* ---- SCO connections ---- */ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) { - struct hci_dev *hdev = hcon->hdev; struct sco_conn *conn = hcon->sco_data; if (conn) { @@ -140,9 +143,10 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) hcon->sco_data = conn; conn->hcon = hcon; + conn->mtu = hcon->mtu; - if (hdev->sco_mtu > 0) - conn->mtu = hdev->sco_mtu; + if (hcon->mtu > 0) + conn->mtu = hcon->mtu; else conn->mtu = 60; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 7431f89e89..d97064d460 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -258,6 +258,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; const unsigned char *src = eth_hdr(skb)->h_source; + struct sk_buff *nskb; if (!should_deliver(p, skb)) return; @@ -266,12 +267,16 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, if (skb->dev == p->dev && ether_addr_equal(src, addr)) return; - skb = skb_copy(skb, GFP_ATOMIC); - if (!skb) { + __skb_push(skb, ETH_HLEN); + nskb = pskb_copy(skb, GFP_ATOMIC); + __skb_pull(skb, ETH_HLEN); + if (!nskb) { DEV_STATS_INC(dev, tx_dropped); return; } + skb = nskb; + __skb_pull(skb, ETH_HLEN); if (!is_broadcast_ether_addr(addr)) memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index d415833fce..f17dbac7d8 100644 --- a/net/bridge/br_netlink.c +++ 
b/net/bridge/br_netlink.c @@ -455,7 +455,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, u32 filter_mask, const struct net_device *dev, bool getlink) { - u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; + u8 operstate = netif_running(dev) ? READ_ONCE(dev->operstate) : + IF_OPER_DOWN; struct nlattr *af = NULL; struct net_bridge *br; struct ifinfomsg *hdr; diff --git a/net/core/filter.c b/net/core/filter.c index ef3e78b6a3..75cdaa1604 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4360,10 +4360,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; + u32 flags = ri->flags; struct bpf_map *map; int err; ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ + ri->flags = 0; ri->map_type = BPF_MAP_TYPE_UNSPEC; if (unlikely(!xdpf)) { @@ -4375,11 +4377,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, case BPF_MAP_TYPE_DEVMAP: fallthrough; case BPF_MAP_TYPE_DEVMAP_HASH: - map = READ_ONCE(ri->map); - if (unlikely(map)) { + if (unlikely(flags & BPF_F_BROADCAST)) { + map = READ_ONCE(ri->map); + + /* The map pointer is cleared when the map is being torn + * down by bpf_clear_redirect_map() + */ + if (unlikely(!map)) { + err = -ENOENT; + break; + } + WRITE_ONCE(ri->map, NULL); err = dev_map_enqueue_multi(xdpf, dev, map, - ri->flags & BPF_F_EXCLUDE_INGRESS); + flags & BPF_F_EXCLUDE_INGRESS); } else { err = dev_map_enqueue(fwd, xdpf, dev); } @@ -4442,9 +4453,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame); static int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, - void *fwd, - enum bpf_map_type map_type, u32 map_id) + struct bpf_prog *xdp_prog, void *fwd, + enum bpf_map_type map_type, u32 map_id, + u32 flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map; @@ -4454,11 +4465,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, case BPF_MAP_TYPE_DEVMAP: fallthrough; case BPF_MAP_TYPE_DEVMAP_HASH: - map = READ_ONCE(ri->map); - if (unlikely(map)) { + if (unlikely(flags & BPF_F_BROADCAST)) { + map = READ_ONCE(ri->map); + + /* The map pointer is cleared when the map is being torn + * down by bpf_clear_redirect_map() + */ + if (unlikely(!map)) { + err = -ENOENT; + break; + } + WRITE_ONCE(ri->map, NULL); err = dev_map_redirect_multi(dev, skb, xdp_prog, map, - ri->flags & BPF_F_EXCLUDE_INGRESS); + flags & BPF_F_EXCLUDE_INGRESS); } else { err = dev_map_generic_redirect(fwd, skb, xdp_prog); } @@ -4495,9 +4515,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; + u32 flags = ri->flags; int err; ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */ + ri->flags = 0; ri->map_type = BPF_MAP_TYPE_UNSPEC; if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) { @@ -4517,7 +4539,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, return 0; } - return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id); + return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags); err: _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err); return err; diff --git a/net/core/gro.c b/net/core/gro.c index cefddf65f7..31e40f25fd 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -373,6 +373,7 @@ static 
inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff) const struct skb_shared_info *pinfo = skb_shinfo(skb); const skb_frag_t *frag0 = &pinfo->frags[0]; + NAPI_GRO_CB(skb)->network_offset = 0; NAPI_GRO_CB(skb)->data_offset = 0; NAPI_GRO_CB(skb)->frag0 = NULL; NAPI_GRO_CB(skb)->frag0_len = 0; diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 429571c258..1b93e054c9 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -67,7 +67,7 @@ static void rfc2863_policy(struct net_device *dev) { unsigned char operstate = default_operstate(dev); - if (operstate == dev->operstate) + if (operstate == READ_ONCE(dev->operstate)) return; write_lock(&dev_base_lock); @@ -87,7 +87,7 @@ static void rfc2863_policy(struct net_device *dev) break; } - dev->operstate = operstate; + WRITE_ONCE(dev->operstate, operstate); write_unlock(&dev_base_lock); } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index a09d507c5b..676048ae11 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -318,11 +318,9 @@ static ssize_t operstate_show(struct device *dev, const struct net_device *netdev = to_net_dev(dev); unsigned char operstate; - read_lock(&dev_base_lock); - operstate = netdev->operstate; + operstate = READ_ONCE(netdev->operstate); if (!netif_running(netdev)) operstate = IF_OPER_DOWN; - read_unlock(&dev_base_lock); if (operstate >= ARRAY_SIZE(operstates)) return -EINVAL; /* should not happen */ diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 7279953342..b36444e381 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -69,12 +69,15 @@ DEFINE_COOKIE(net_cookie); static struct net_generic *net_alloc_generic(void) { + unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs); + unsigned int generic_size; struct net_generic *ng; - unsigned int generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]); + + generic_size = offsetof(struct net_generic, ptr[gen_ptrs]); ng = kzalloc(generic_size, GFP_KERNEL); if (ng) - ng->s.len = max_gen_ptrs; + ng->s.len = gen_ptrs; return ng; } @@ -1278,7 +1281,11 @@ static int register_pernet_operations(struct list_head *list, if (error < 0) return error; *ops->id = error; - max_gen_ptrs = max(max_gen_ptrs, *ops->id + 1); + /* This does not require READ_ONCE as writers already hold + * pernet_ops_rwsem. But WRITE_ONCE is needed to protect + * net_alloc_generic. 
+ */ + WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1)); } error = __register_pernet_operations(list, ops); if (error) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index bd50e9fe32..95e62235b5 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -875,9 +875,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition) break; } - if (dev->operstate != operstate) { + if (READ_ONCE(dev->operstate) != operstate) { write_lock(&dev_base_lock); - dev->operstate = operstate; + WRITE_ONCE(dev->operstate, operstate); write_unlock(&dev_base_lock); netdev_state_change(dev); } @@ -2552,7 +2552,7 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) nla_for_each_nested(attr, tb[IFLA_VF_VLAN_LIST], rem) { if (nla_type(attr) != IFLA_VF_VLAN_INFO || - nla_len(attr) < NLA_HDRLEN) { + nla_len(attr) < sizeof(struct ifla_vf_vlan_info)) { return -EINVAL; } if (len >= MAX_VLAN_LIST_LEN) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 71dee435d5..570269c477 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2014,11 +2014,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb) struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { - int headerlen = skb_headroom(skb); - unsigned int size = skb_end_offset(skb) + skb->data_len; - struct sk_buff *n = __alloc_skb(size, gfp_mask, - skb_alloc_rx_flag(skb), NUMA_NO_NODE); + struct sk_buff *n; + unsigned int size; + int headerlen; + + if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) + return NULL; + headerlen = skb_headroom(skb); + size = skb_end_offset(skb) + skb->data_len; + n = __alloc_skb(size, gfp_mask, + skb_alloc_rx_flag(skb), NUMA_NO_NODE); if (!n) return NULL; @@ -2346,12 +2352,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, /* * Allocate the copy buffer */ - struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, - gfp_mask, skb_alloc_rx_flag(skb), - NUMA_NO_NODE); - int oldheadroom = skb_headroom(skb); int head_copy_len, head_copy_off; + struct sk_buff *n; + int oldheadroom; + + if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST)) + return NULL; + oldheadroom = skb_headroom(skb); + n = __alloc_skb(newheadroom + skb->len + newtailroom, + gfp_mask, skb_alloc_rx_flag(skb), + NUMA_NO_NODE); if (!n) return NULL; diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 4d75ef9d24..fd20aae30b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1226,11 +1226,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk) rcu_read_lock(); psock = sk_psock(sk); - if (psock) { - read_lock_bh(&sk->sk_callback_lock); + if (psock) sk_psock_data_ready(sk, psock); - read_unlock_bh(&sk->sk_callback_lock); - } rcu_read_unlock(); } } diff --git a/net/core/sock.c b/net/core/sock.c index 9cf404e803..599f186fe3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { + if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); trace_sock_rcvqueue_full(sk, skb); return -ENOMEM; @@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, skb->dev = NULL; - if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { + if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } diff --git a/net/hsr/hsr_device.c 
b/net/hsr/hsr_device.c index 9d71b66183..03a42de751 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -31,8 +31,8 @@ static bool is_slave_up(struct net_device *dev) static void __hsr_set_operstate(struct net_device *dev, int transition) { write_lock(&dev_base_lock); - if (dev->operstate != transition) { - dev->operstate = transition; + if (READ_ONCE(dev->operstate) != transition) { + WRITE_ONCE(dev->operstate, transition); write_unlock(&dev_base_lock); netdev_state_change(dev); } else { @@ -71,39 +71,36 @@ static bool hsr_check_carrier(struct hsr_port *master) return false; } -static void hsr_check_announce(struct net_device *hsr_dev, - unsigned char old_operstate) +static void hsr_check_announce(struct net_device *hsr_dev) { struct hsr_priv *hsr; hsr = netdev_priv(hsr_dev); - - if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) { - /* Went up */ - hsr->announce_count = 0; - mod_timer(&hsr->announce_timer, - jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL)); + if (netif_running(hsr_dev) && netif_oper_up(hsr_dev)) { + /* Enable announce timer and start sending supervisory frames */ + if (!timer_pending(&hsr->announce_timer)) { + hsr->announce_count = 0; + mod_timer(&hsr->announce_timer, jiffies + + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL)); + } + } else { + /* Deactivate the announce timer */ + timer_delete(&hsr->announce_timer); } - - if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP) - /* Went down */ - del_timer(&hsr->announce_timer); } void hsr_check_carrier_and_operstate(struct hsr_priv *hsr) { struct hsr_port *master; - unsigned char old_operstate; bool has_carrier; master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); /* netif_stacked_transfer_operstate() cannot be used here since * it doesn't set IF_OPER_LOWERLAYERDOWN (?) */ - old_operstate = master->dev->operstate; has_carrier = hsr_check_carrier(master); hsr_set_operstate(master, has_carrier); - hsr_check_announce(master->dev, old_operstate); + hsr_check_announce(master->dev); } int hsr_get_max_mtu(struct hsr_priv *hsr) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index a5a820ee20..ce5c26cf1e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1571,6 +1571,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) /* The above will be needed by the transport layer if there is one * immediately following this IP hdr. */ + NAPI_GRO_CB(skb)->inner_network_offset = off; /* Note : No need to call skb_gro_postpull_rcsum() here, * as we already checked checksum over ipv4 header was 0 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 67d8466223..a38e63669c 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1473,7 +1473,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, * by icmp_hdr(skb)->type. */ if (sk->sk_type == SOCK_RAW && - !inet_test_bit(HDRINCL, sk)) + !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH)) icmp_type = fl4->fl4_icmp_type; else icmp_type = icmp_hdr(skb)->type; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 288f1846b3..a1d8218fa1 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -605,6 +605,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) (hdrincl ? 
FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = 0; + fl4.fl4_icmp_code = 0; + if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5887eac87b..94b129301d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2709,7 +2709,7 @@ void tcp_shutdown(struct sock *sk, int how) /* If we've already sent a FIN, or it's a closed state, skip this. */ if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_SYN_SENT | - TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { + TCPF_CLOSE_WAIT)) { /* Clear out any half completed packets. FIN if needed. */ if (tcp_close_state(sk)) tcp_send_fin(sk); @@ -2818,7 +2818,7 @@ void __tcp_close(struct sock *sk, long timeout) * machine. State transitions: * * TCP_ESTABLISHED -> TCP_FIN_WAIT1 - * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) + * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult) * TCP_CLOSE_WAIT -> TCP_LAST_ACK * * are legal only when FIN has been sent (i.e. in window), diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index df7b13f0e5..ff10be8c76 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6752,6 +6752,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); + if (sk->sk_shutdown & SEND_SHUTDOWN) + tcp_shutdown(sk, SEND_SHUTDOWN); break; case TCP_FIN_WAIT1: { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0c50c5a32b..68a065c0e5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -154,6 +154,12 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) if (tcptw->tw_ts_recent_stamp && (!twp || (reuse && time_after32(ktime_get_seconds(), tcptw->tw_ts_recent_stamp)))) { + /* inet_twsk_hashdance() sets sk_refcnt after putting twsk + * and releasing the bucket lock. 
+ */ + if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt))) + return 0; + /* In case of repair and re-using TIME-WAIT sockets we still * want to be sure that it is safe as above but honor the * sequence numbers and time stamps set as part of the repair @@ -174,7 +180,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) tp->rx_opt.ts_recent = tcptw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; } - sock_hold(sktw); + return 1; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e3167ad965..02caeb7bcf 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3563,7 +3563,9 @@ void tcp_send_fin(struct sock *sk) return; } } else { - skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); + skb = alloc_skb_fclone(MAX_TCP_HEADER, + sk_gfp_mask(sk, GFP_ATOMIC | + __GFP_NOWARN)); if (unlikely(!skb)) return; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 40282a3418..9120694359 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -534,7 +534,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb, __be16 sport, __be16 dport) { - const struct iphdr *iph = ip_hdr(skb); + const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; + const struct iphdr *iph = (struct iphdr *)(skb->data + offset); struct net *net = dev_net(skb->dev); int iif, sdif; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index c3d67423ae..e5971890d6 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -471,6 +471,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, struct sk_buff *p; unsigned int ulen; int ret = 0; + int flush; /* requires non zero csum, for symmetry with GSO */ if (!uh->check) { @@ -504,13 +505,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head, return p; } + flush = NAPI_GRO_CB(p)->flush; + + if (NAPI_GRO_CB(p)->flush_id != 1 || + NAPI_GRO_CB(p)->count != 1 || + !NAPI_GRO_CB(p)->is_atomic) + flush |= NAPI_GRO_CB(p)->flush_id; + else + NAPI_GRO_CB(p)->is_atomic = false; + /* Terminate the flow on len mismatch or if it grow "too much". * Under small packet flood GRO count could elsewhere grow a lot * leading to excessive truesize values. * On len mismatch merge the first packet shorter than gso_size, * otherwise complete the GRO packet. */ - if (ulen > ntohs(uh2->len)) { + if (ulen > ntohs(uh2->len) || flush) { pp = p; } else { if (NAPI_GRO_CB(skb)->is_flist) { @@ -718,7 +728,8 @@ EXPORT_SYMBOL(udp_gro_complete); INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff) { - const struct iphdr *iph = ip_hdr(skb); + const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; + const struct iphdr *iph = (struct iphdr *)(skb->data + offset); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); /* do fraglist only if there is no outer UDP encap (or we already processed it) */ diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index c54676998e..801404f7d6 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -63,7 +63,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) ip_send_check(iph); if (xo && (xo->flags & XFRM_GRO)) { - skb_mac_header_rebuild(skb); + /* The full l2 header needs to be preserved so that re-injecting the packet at l2 + * works correctly in the presence of vlan tags. 
+ */ + skb_mac_header_rebuild_full(skb, xo->orig_mac_len); + skb_reset_network_header(skb); skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 37d48aa073..c4e2f9d35e 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4164,7 +4164,7 @@ static void addrconf_dad_work(struct work_struct *w) if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && ipv6_addr_equal(&ifp->addr, &addr)) { /* DAD failed for link-local based on MAC */ - idev->cnf.disable_ipv6 = 1; + WRITE_ONCE(idev->cnf.disable_ipv6, 1); pr_info("%s: IPv6 being disabled!\n", ifp->idev->dev->name); @@ -6015,7 +6015,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, (dev->ifindex != dev_get_iflink(dev) && nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) || nla_put_u8(skb, IFLA_OPERSTATE, - netif_running(dev) ? dev->operstate : IF_OPER_DOWN)) + netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN)) goto nla_put_failure; protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO); if (!protoinfo) @@ -6325,7 +6325,8 @@ static void addrconf_disable_change(struct net *net, __s32 newf) idev = __in6_dev_get(dev); if (idev) { int changed = (!idev->cnf.disable_ipv6) ^ (!newf); - idev->cnf.disable_ipv6 = newf; + + WRITE_ONCE(idev->cnf.disable_ipv6, newf); if (changed) dev_disable_change(idev); } @@ -6342,7 +6343,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) net = (struct net *)table->extra2; old = *p; - *p = newf; + WRITE_ONCE(*p, newf); if (p == &net->ipv6.devconf_dflt->disable_ipv6) { rtnl_unlock(); @@ -6350,7 +6351,7 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) } if (p == &net->ipv6.devconf_all->disable_ipv6) { - net->ipv6.devconf_dflt->disable_ipv6 = newf; + WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf); addrconf_disable_change(net, newf); } else if ((!newf) ^ (!old)) dev_disable_change((struct inet6_dev *)table->extra1); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 52c04f0ac4..9e254de746 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -233,8 +233,12 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp, rt = pol_lookup_func(lookup, net, table, flp6, arg->lookup_data, flags); if (rt != net->ipv6.ip6_null_entry) { + struct inet6_dev *idev = ip6_dst_idev(&rt->dst); + + if (!idev) + goto again; err = fib6_rule_saddr(net, rule, flags, flp6, - ip6_dst_idev(&rt->dst)->dev); + idev->dev); if (err == -EAGAIN) goto again; diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b837881453..1ba97933c7 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -168,9 +168,9 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev, SKB_DR_SET(reason, NOT_SPECIFIED); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || - !idev || unlikely(idev->cnf.disable_ipv6)) { + !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); - if (idev && unlikely(idev->cnf.disable_ipv6)) + if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6))) SKB_DR_SET(reason, IPV6DISABLED); goto drop; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index cca64c7809..19ae2d4cd2 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -237,6 +237,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, goto out; skb_set_network_header(skb, off); + NAPI_GRO_CB(skb)->inner_network_offset = off; flush += 
ntohs(iph->payload_len) != skb->len - hlen; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 31b86fe661..51b358f6c3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -234,7 +234,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb) skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; - if (unlikely(idev->cnf.disable_ipv6)) { + if (unlikely(!idev || READ_ONCE(idev->cnf.disable_ipv6))) { IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED); return 0; @@ -1933,7 +1933,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, u8 icmp6_type; if (sk->sk_socket->type == SOCK_RAW && - !inet_test_bit(HDRINCL, sk)) + !(fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH)) icmp6_type = fl6->fl6_icmp_type; else icmp6_type = icmp6_hdr(skb)->icmp6_type; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8c14c4cc82..785b2d076a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -275,7 +275,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, __be16 sport, __be16 dport) { - const struct ipv6hdr *iph = ipv6_hdr(skb); + const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; + const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset); struct net *net = dev_net(skb->dev); int iif, sdif; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 626d7b362d..639a4b506f 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -164,7 +164,8 @@ flush: INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) { - const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation]; + const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset); struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); /* do fraglist only if there is no outer UDP encap (or we already processed it) */ diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 6e36e5047f..4e6dcefd63 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -58,7 +58,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) skb_postpush_rcsum(skb, skb_network_header(skb), nhlen); if (xo && (xo->flags & XFRM_GRO)) { - skb_mac_header_rebuild(skb); + /* The full l2 header needs to be preserved so that re-injecting the packet at l2 + * works correctly in the presence of vlan tags. 
+ */ + skb_mac_header_rebuild_full(skb, xo->orig_mac_len); + skb_reset_network_header(skb); skb_reset_transport_header(skb); return 0; } diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 25ca89f804..9dacfa91ac 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -127,6 +127,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, /* checksums verified by L2TP */ skb->ip_summed = CHECKSUM_NONE; + /* drop outer flow-hash */ + skb_clear_hash(skb); + skb_dst_drop(skb); nf_reset_ct(skb); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index a18361afea..399fcb6665 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -131,7 +131,7 @@ struct ieee80211_bss { }; /** - * enum ieee80211_corrupt_data_flags - BSS data corruption flags + * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted * @@ -144,7 +144,7 @@ enum ieee80211_bss_corrupt_data_flags { }; /** - * enum ieee80211_valid_data_flags - BSS valid data flags + * enum ieee80211_bss_valid_data_flags - BSS valid data flags * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 94028b541b..ac0073c8f9 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -7294,7 +7294,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, sdata_info(sdata, "failed to insert STA entry for the AP (error %d)\n", err); - goto out_err; + goto out_release_chan; } } else WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid)); @@ -7305,8 +7305,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, return 0; +out_release_chan: + ieee80211_link_release_channel(link); out_err: - ieee80211_link_release_channel(&sdata->deflink); ieee80211_vif_set_links(sdata, 0, 0); return err; } diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c index 13fe0748dd..2963ba84e2 100644 --- a/net/mptcp/ctrl.c +++ b/net/mptcp/ctrl.c @@ -96,6 +96,43 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet) } #ifdef CONFIG_SYSCTL +static int mptcp_set_scheduler(const struct net *net, const char *name) +{ + struct mptcp_pernet *pernet = mptcp_get_pernet(net); + struct mptcp_sched_ops *sched; + int ret = 0; + + rcu_read_lock(); + sched = mptcp_sched_find(name); + if (sched) + strscpy(pernet->scheduler, name, MPTCP_SCHED_NAME_MAX); + else + ret = -ENOENT; + rcu_read_unlock(); + + return ret; +} + +static int proc_scheduler(struct ctl_table *ctl, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + const struct net *net = current->nsproxy->net_ns; + char val[MPTCP_SCHED_NAME_MAX]; + struct ctl_table tbl = { + .data = val, + .maxlen = MPTCP_SCHED_NAME_MAX, + }; + int ret; + + strscpy(val, mptcp_get_scheduler(net), MPTCP_SCHED_NAME_MAX); + + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) + ret = mptcp_set_scheduler(net, val); + + return ret; +} + static struct ctl_table mptcp_sysctl_table[] = { { .procname = "enabled", @@ -148,7 +185,7 @@ static struct ctl_table mptcp_sysctl_table[] = { .procname = "scheduler", .maxlen = MPTCP_SCHED_NAME_MAX, .mode = 0644, - .proc_handler = proc_dostring, + .proc_handler = 
proc_scheduler, }, { .procname = "close_timeout", diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 2b921af271..74c1faec27 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3703,6 +3703,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT); mptcp_subflow_early_fallback(msk, subflow); } + + WRITE_ONCE(msk->write_seq, subflow->idsn); + WRITE_ONCE(msk->snd_nxt, subflow->idsn); if (likely(!__mptcp_check_fallback(msk))) MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 819157bbb5..d5344563e5 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), + optval, optlen); + if (err) break; - } if (opt > LLCP_MAX_RW) { err = -EINVAL; @@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), + optval, optlen); + if (err) break; - } if (opt > LLCP_MAX_MIUX) { err = -EINVAL; diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 0d26c8ec99..b133dc5530 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1518,6 +1518,7 @@ static void nci_rx_work(struct work_struct *work) if (!nci_plen(skb->data)) { kfree_skb(skb); + kcov_remote_stop(); break; } diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index f4a38bd6a7..bfb7758063 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop); static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, netdev_features_t features) { + unsigned int outer_hlen, mac_len, nsh_len; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; - unsigned int nsh_len, mac_len; - __be16 proto; + __be16 outer_proto, proto; skb_reset_network_header(skb); + outer_proto = skb->protocol; + outer_hlen = skb_mac_header_len(skb); mac_len = skb->mac_len; if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) @@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, } for (skb = segs; skb; skb = skb->next) { - skb->protocol = htons(ETH_P_NSH); - __skb_push(skb, nsh_len); - skb->mac_header = mac_offset; - skb->network_header = skb->mac_header + mac_len; + skb->protocol = outer_proto; + __skb_push(skb, nsh_len + outer_hlen); + skb_reset_mac_header(skb); + skb_set_network_header(skb, outer_hlen); skb->mac_len = mac_len; } diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 59aebe2968..dd4c7e9a63 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -193,7 +193,7 @@ void rtm_phonet_notify(int event, struct net_device *dev, u8 dst) struct sk_buff *skb; int err = -ENOBUFS; - skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) + + skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(1) + nla_total_size(4), GFP_KERNEL); if (skb == NULL) goto errout; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 7818aae1be..4301dd20b4 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -692,7 +692,7 @@ struct rxrpc_call { * packets) rather than bytes. */ #define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN -#define RXRPC_MIN_CWND (RXRPC_TX_SMSS > 2190 ? 2 : RXRPC_TX_SMSS > 1095 ? 
3 : 4) +#define RXRPC_MIN_CWND 4 u8 cong_cwnd; /* Congestion window size */ u8 cong_extra; /* Extra to send for congestion management */ u8 cong_ssthresh; /* Slow-start threshold */ diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 9fc9a6c3f6..3847b14af7 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -175,12 +175,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp, call->rx_winsize = rxrpc_rx_window_size; call->tx_winsize = 16; - if (RXRPC_TX_SMSS > 2190) - call->cong_cwnd = 2; - else if (RXRPC_TX_SMSS > 1095) - call->cong_cwnd = 3; - else - call->cong_cwnd = 4; + call->cong_cwnd = RXRPC_MIN_CWND; call->cong_ssthresh = RXRPC_TX_MAX_WINDOW; call->rxnet = rxnet; diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 1f251d758c..598b4ee389 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -88,7 +88,7 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct rxrpc_ackpacket ack; }; } __attribute__((packed)) pkt; - struct rxrpc_ackinfo ack_info; + struct rxrpc_acktrailer trailer; size_t len; int ret, ioc; u32 serial, mtu, call_id, padding; @@ -122,8 +122,8 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, iov[0].iov_len = sizeof(pkt.whdr); iov[1].iov_base = &padding; iov[1].iov_len = 3; - iov[2].iov_base = &ack_info; - iov[2].iov_len = sizeof(ack_info); + iov[2].iov_base = &trailer; + iov[2].iov_len = sizeof(trailer); serial = rxrpc_get_next_serial(conn); @@ -158,14 +158,14 @@ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0); pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE; pkt.ack.nAcks = 0; - ack_info.rxMTU = htonl(rxrpc_rx_mtu); - ack_info.maxMTU = htonl(mtu); - ack_info.rwind = htonl(rxrpc_rx_window_size); - ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max); + trailer.maxMTU = htonl(rxrpc_rx_mtu); + trailer.ifMTU = htonl(mtu); + trailer.rwind = htonl(rxrpc_rx_window_size); + trailer.jumbo_max = htonl(rxrpc_rx_jumbo_max); pkt.whdr.flags |= RXRPC_SLOW_START_OK; padding = 0; iov[0].iov_len += sizeof(pkt.ack); - len += sizeof(pkt.ack) + 3 + sizeof(ack_info); + len += sizeof(pkt.ack) + 3 + sizeof(trailer); ioc = 3; trace_rxrpc_tx_ack(chan->call_debug_id, serial, diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index df8a271948..7aa58129ae 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -118,18 +118,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo switch (srx->transport.family) { case AF_INET: if (peer->srx.transport.sin.sin_port != - srx->transport.sin.sin_port || - peer->srx.transport.sin.sin_addr.s_addr != - srx->transport.sin.sin_addr.s_addr) + srx->transport.sin.sin_port) goto not_found; break; #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: if (peer->srx.transport.sin6.sin6_port != - srx->transport.sin6.sin6_port || - memcmp(&peer->srx.transport.sin6.sin6_addr, - &srx->transport.sin6.sin6_addr, - sizeof(struct in6_addr)) != 0) + srx->transport.sin6.sin6_port) goto not_found; break; #endif diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 9691de00ad..5dfda1ac51 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -9,6 +9,17 @@ #include "ar-internal.h" +/* Override priority when generating ACKs for received DATA */ +static const u8 rxrpc_ack_priority[RXRPC_ACK__INVALID] = { + [RXRPC_ACK_IDLE] = 1, + [RXRPC_ACK_DELAY] = 2, + [RXRPC_ACK_REQUESTED] = 3, + [RXRPC_ACK_DUPLICATE] = 4, + [RXRPC_ACK_EXCEEDS_WINDOW] = 5, + 
[RXRPC_ACK_NOSPACE] = 6, + [RXRPC_ACK_OUT_OF_SEQUENCE] = 7, +}; + static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq, enum rxrpc_abort_reason why) { @@ -366,7 +377,7 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb, * Process a DATA packet. */ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb, - bool *_notify) + bool *_notify, rxrpc_serial_t *_ack_serial, int *_ack_reason) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct sk_buff *oos; @@ -419,8 +430,6 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb, /* Send an immediate ACK if we fill in a hole */ else if (!skb_queue_empty(&call->rx_oos_queue)) ack_reason = RXRPC_ACK_DELAY; - else - call->ackr_nr_unacked++; window++; if (after(window, wtop)) { @@ -498,12 +507,16 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb, } send_ack: - if (ack_reason >= 0) - rxrpc_send_ACK(call, ack_reason, serial, - rxrpc_propose_ack_input_data); - else - rxrpc_propose_delay_ACK(call, serial, - rxrpc_propose_ack_input_data); + if (ack_reason >= 0) { + if (rxrpc_ack_priority[ack_reason] > rxrpc_ack_priority[*_ack_reason]) { + *_ack_serial = serial; + *_ack_reason = ack_reason; + } else if (rxrpc_ack_priority[ack_reason] == rxrpc_ack_priority[*_ack_reason] && + ack_reason == RXRPC_ACK_REQUESTED) { + *_ack_serial = serial; + *_ack_reason = ack_reason; + } + } } /* @@ -514,9 +527,11 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb struct rxrpc_jumbo_header jhdr; struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp; struct sk_buff *jskb; + rxrpc_serial_t ack_serial = 0; unsigned int offset = sizeof(struct rxrpc_wire_header); unsigned int len = skb->len - offset; bool notify = false; + int ack_reason = 0; while (sp->hdr.flags & RXRPC_JUMBO_PACKET) { if (len < RXRPC_JUMBO_SUBPKTLEN) @@ -536,7 +551,7 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb jsp = rxrpc_skb(jskb); jsp->offset = offset; jsp->len = RXRPC_JUMBO_DATALEN; - rxrpc_input_data_one(call, jskb, ¬ify); + rxrpc_input_data_one(call, jskb, ¬ify, &ack_serial, &ack_reason); rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket); sp->hdr.flags = jhdr.flags; @@ -549,7 +564,16 @@ static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb sp->offset = offset; sp->len = len; - rxrpc_input_data_one(call, skb, ¬ify); + rxrpc_input_data_one(call, skb, ¬ify, &ack_serial, &ack_reason); + + if (ack_reason > 0) { + rxrpc_send_ACK(call, ack_reason, ack_serial, + rxrpc_propose_ack_input_data); + } else { + call->ackr_nr_unacked++; + rxrpc_propose_delay_ACK(call, sp->hdr.serial, + rxrpc_propose_ack_input_data); + } if (notify) { trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial); rxrpc_notify_socket(call); @@ -670,14 +694,14 @@ static void rxrpc_complete_rtt_probe(struct rxrpc_call *call, /* * Process the extra information that may be appended to an ACK packet */ -static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, - struct rxrpc_ackinfo *ackinfo) +static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb, + struct rxrpc_acktrailer *trailer) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); struct rxrpc_peer *peer; unsigned int mtu; bool wake = false; - u32 rwind = ntohl(ackinfo->rwind); + u32 rwind = ntohl(trailer->rwind); if (rwind > RXRPC_TX_MAX_WINDOW) rwind = RXRPC_TX_MAX_WINDOW; @@ -688,10 +712,7 @@ static void rxrpc_input_ackinfo(struct 
rxrpc_call *call, struct sk_buff *skb, call->tx_winsize = rwind; } - if (call->cong_ssthresh > rwind) - call->cong_ssthresh = rwind; - - mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU)); + mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU)); peer = call->peer; if (mtu < peer->maxdata) { @@ -837,7 +858,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) struct rxrpc_ack_summary summary = { 0 }; struct rxrpc_ackpacket ack; struct rxrpc_skb_priv *sp = rxrpc_skb(skb); - struct rxrpc_ackinfo info; + struct rxrpc_acktrailer trailer; rxrpc_serial_t ack_serial, acked_serial; rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt, since; int nr_acks, offset, ioffset; @@ -917,11 +938,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) goto send_response; } - info.rxMTU = 0; + trailer.maxMTU = 0; ioffset = offset + nr_acks + 3; - if (skb->len >= ioffset + sizeof(info) && - skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0) - return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info); + if (skb->len >= ioffset + sizeof(trailer) && + skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0) + return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer); if (nr_acks > 0) skb_condense(skb); @@ -950,8 +971,8 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb) } /* Parse rwind and mtu sizes if provided. */ - if (info.rxMTU) - rxrpc_input_ackinfo(call, skb, &info); + if (trailer.maxMTU) + rxrpc_input_ack_trailer(call, skb, &trailer); if (first_soft_ack == 0) return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 4a292f860a..cad6a7d18e 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -83,7 +83,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, struct rxrpc_txbuf *txb, u16 *_rwind) { - struct rxrpc_ackinfo ackinfo; + struct rxrpc_acktrailer trailer; unsigned int qsize, sack, wrap, to; rxrpc_seq_t window, wtop; int rsize; @@ -126,16 +126,16 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, qsize = (window - 1) - call->rx_consumed; rsize = max_t(int, call->rx_winsize - qsize, 0); *_rwind = rsize; - ackinfo.rxMTU = htonl(rxrpc_rx_mtu); - ackinfo.maxMTU = htonl(mtu); - ackinfo.rwind = htonl(rsize); - ackinfo.jumbo_max = htonl(jmax); + trailer.maxMTU = htonl(rxrpc_rx_mtu); + trailer.ifMTU = htonl(mtu); + trailer.rwind = htonl(rsize); + trailer.jumbo_max = htonl(jmax); *ackp++ = 0; *ackp++ = 0; *ackp++ = 0; - memcpy(ackp, &ackinfo, sizeof(ackinfo)); - return txb->ack.nAcks + 3 + sizeof(ackinfo); + memcpy(ackp, &trailer, sizeof(trailer)); + return txb->ack.nAcks + 3 + sizeof(trailer); } /* diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h index e8ee4af43c..4fe6b4d20a 100644 --- a/net/rxrpc/protocol.h +++ b/net/rxrpc/protocol.h @@ -135,9 +135,9 @@ struct rxrpc_ackpacket { /* * ACK packets can have a further piece of information tagged on the end */ -struct rxrpc_ackinfo { - __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */ - __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */ +struct rxrpc_acktrailer { + __be32 maxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */ + __be32 ifMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */ __be32 rwind; /* Rx window size (packets) [AFS 3.4] */ __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */ }; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 97704a9e84..9297dc20bf 100644 --- a/net/smc/smc_ib.c +++ 
b/net/smc/smc_ib.c @@ -209,13 +209,18 @@ int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr, if (IS_ERR(rt)) goto out; if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET) - goto out; - neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr); - if (neigh) { - memcpy(nexthop_mac, neigh->ha, ETH_ALEN); - *uses_gateway = rt->rt_uses_gateway; - return 0; - } + goto out_rt; + neigh = dst_neigh_lookup(&rt->dst, &fl4.daddr); + if (!neigh) + goto out_rt; + memcpy(nexthop_mac, neigh->ha, ETH_ALEN); + *uses_gateway = rt->rt_uses_gateway; + neigh_release(neigh); + ip_rt_put(rt); + return 0; + +out_rt: + ip_rt_put(rt); out: return -ENOENT; } diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cda0935a68..28f3749f6d 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -405,7 +405,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, clnt->cl_maxproc = version->nrprocs; clnt->cl_prog = args->prognumber ? : program->number; clnt->cl_vers = version->number; - clnt->cl_stats = program->stats; + clnt->cl_stats = args->stats ? : program->stats; clnt->cl_metrics = rpc_alloc_iostats(clnt); rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects); err = -ENOMEM; @@ -691,6 +691,7 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) .version = clnt->cl_vers, .authflavor = clnt->cl_auth->au_flavor, .cred = clnt->cl_cred, + .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } @@ -713,6 +714,7 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) .version = clnt->cl_vers, .authflavor = flavor, .cred = clnt->cl_cred, + .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } @@ -1068,6 +1070,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, .version = vers, .authflavor = old->cl_auth->au_flavor, .cred = old->cl_cred, + .stats = old->cl_stats, }; struct rpc_clnt *clnt; int err; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 58f3dc8d0d..004d2bd8b4 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -2645,6 +2645,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work) .xprtsec = { .policy = RPC_XPRTSEC_NONE, }, + .stats = upper_clnt->cl_stats, }; unsigned int pflags = current->flags; struct rpc_clnt *lower_clnt; diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 5c9fd4791c..76284fc538 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; - *buf = NULL; if (skb_has_frag_list(frag) && __skb_linearize(frag)) goto err; + *buf = NULL; frag = skb_unshare(frag, GFP_ATOMIC); if (unlikely(!frag)) goto err; @@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (!head) goto err; + /* Either the input skb ownership is transferred to headskb + * or the input skb is freed, clear the reference to avoid + * bad access on error path. 
+ */ + *buf = NULL; if (skb_try_coalesce(head, frag, &headstolen, &delta)) { kfree_skb_partial(frag, headstolen); } else { @@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) *headbuf = NULL; return 1; } - *buf = NULL; return 0; err: kfree_skb(*buf); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index bd54a928ba..daac83aa89 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -14092,6 +14092,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info) error: for (i = 0; i < new_coalesce.n_rules; i++) { tmp_rule = &new_coalesce.rules[i]; + if (!tmp_rule) + continue; for (j = 0; j < tmp_rule->n_patterns; j++) kfree(tmp_rule->patterns[j].mask); kfree(tmp_rule->patterns); diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 1f374c8a17..cc3fd4177b 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -1013,7 +1013,7 @@ TRACE_EVENT(rdev_get_mpp, TRACE_EVENT(rdev_dump_mpp, TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx, u8 *dst, u8 *mpp), - TP_ARGS(wiphy, netdev, _idx, mpp, dst), + TP_ARGS(wiphy, netdev, _idx, dst, mpp), TP_STRUCT__entry( WIPHY_ENTRY NETDEV_ENTRY diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index bd4ce21d76..b2f7af63b7 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -388,11 +388,15 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) */ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { + struct xfrm_offload *xo = xfrm_offload(skb); int ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); + if (xo) + xo->orig_mac_len = + skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0; skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); @@ -403,11 +407,15 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IPV6) + struct xfrm_offload *xo = xfrm_offload(skb); int ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); + if (xo) + xo->orig_mac_len = + skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0; skb->network_header = skb->transport_header; } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - diff --git a/rust/macros/module.rs b/rust/macros/module.rs index d62d8710d7..acd0393b50 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -199,17 +199,6 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { /// Used by the printing macros, e.g. [`info!`]. const __LOG_PREFIX: &[u8] = b\"{name}\\0\"; - /// The \"Rust loadable module\" mark. - // - // This may be best done another way later on, e.g. as a new modinfo - // key or a new section. For the moment, keep it simple. - #[cfg(MODULE)] - #[doc(hidden)] - #[used] - static __IS_RUST_MODULE: () = (); - - static mut __MOD: Option<{type_}> = None; - // SAFETY: `__this_module` is constructed by the kernel at load time and will not be // freed until the module is unloaded. #[cfg(MODULE)] @@ -221,76 +210,132 @@ pub(crate) fn module(ts: TokenStream) -> TokenStream { kernel::ThisModule::from_ptr(core::ptr::null_mut()) }}; - // Loadable modules need to export the `{{init,cleanup}}_module` identifiers. 
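Illustrative sketch, not part of the applied series: the xfrm4/xfrm6 transport-input hunks above stash the original MAC header length before the IP header is collapsed onto the transport header, and the xfrm6_transport_finish hunk earlier in this changeset rebuilds exactly that many L2 bytes, so VLAN-tagged frames keep their tags when re-injected at L2. The caller-side pairing looks roughly like this (the function names here are invented; the field and helper names are the ones visible in the hunks):

/* Sketch only -- mirrors the hunks above, not a drop-in implementation. */
static void sketch_stash_orig_mac_len(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return;

	/* 0 means "no MAC header", e.g. locally generated packets */
	xo->orig_mac_len = skb_mac_header_was_set(skb) ?
			   skb_mac_header_len(skb) : 0;
}

static void sketch_gro_resume_l2(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	/* Rebuild the full (possibly VLAN-tagged) L2 header, then realign
	 * the inner offsets for the decapsulated packet.
	 */
	skb_mac_header_rebuild_full(skb, xo->orig_mac_len);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
}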
- #[cfg(MODULE)] - #[doc(hidden)] - #[no_mangle] - pub extern \"C\" fn init_module() -> core::ffi::c_int {{ - __init() - }} - - #[cfg(MODULE)] - #[doc(hidden)] - #[no_mangle] - pub extern \"C\" fn cleanup_module() {{ - __exit() - }} + // Double nested modules, since then nobody can access the public items inside. + mod __module_init {{ + mod __module_init {{ + use super::super::{type_}; + + /// The \"Rust loadable module\" mark. + // + // This may be best done another way later on, e.g. as a new modinfo + // key or a new section. For the moment, keep it simple. + #[cfg(MODULE)] + #[doc(hidden)] + #[used] + static __IS_RUST_MODULE: () = (); + + static mut __MOD: Option<{type_}> = None; + + // Loadable modules need to export the `{{init,cleanup}}_module` identifiers. + /// # Safety + /// + /// This function must not be called after module initialization, because it may be + /// freed after that completes. + #[cfg(MODULE)] + #[doc(hidden)] + #[no_mangle] + #[link_section = \".init.text\"] + pub unsafe extern \"C\" fn init_module() -> core::ffi::c_int {{ + // SAFETY: This function is inaccessible to the outside due to the double + // module wrapping it. It is called exactly once by the C side via its + // unique name. + unsafe {{ __init() }} + }} - // Built-in modules are initialized through an initcall pointer - // and the identifiers need to be unique. - #[cfg(not(MODULE))] - #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))] - #[doc(hidden)] - #[link_section = \"{initcall_section}\"] - #[used] - pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init; + #[cfg(MODULE)] + #[doc(hidden)] + #[no_mangle] + pub extern \"C\" fn cleanup_module() {{ + // SAFETY: + // - This function is inaccessible to the outside due to the double + // module wrapping it. It is called exactly once by the C side via its + // unique name, + // - furthermore it is only called after `init_module` has returned `0` + // (which delegates to `__init`). + unsafe {{ __exit() }} + }} - #[cfg(not(MODULE))] - #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)] - core::arch::global_asm!( - r#\".section \"{initcall_section}\", \"a\" - __{name}_initcall: - .long __{name}_init - . - .previous - \"# - ); + // Built-in modules are initialized through an initcall pointer + // and the identifiers need to be unique. + #[cfg(not(MODULE))] + #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))] + #[doc(hidden)] + #[link_section = \"{initcall_section}\"] + #[used] + pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init; + + #[cfg(not(MODULE))] + #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)] + core::arch::global_asm!( + r#\".section \"{initcall_section}\", \"a\" + __{name}_initcall: + .long __{name}_init - . + .previous + \"# + ); + + #[cfg(not(MODULE))] + #[doc(hidden)] + #[no_mangle] + pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{ + // SAFETY: This function is inaccessible to the outside due to the double + // module wrapping it. It is called exactly once by the C side via its + // placement above in the initcall section. + unsafe {{ __init() }} + }} - #[cfg(not(MODULE))] - #[doc(hidden)] - #[no_mangle] - pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{ - __init() - }} + #[cfg(not(MODULE))] + #[doc(hidden)] + #[no_mangle] + pub extern \"C\" fn __{name}_exit() {{ + // SAFETY: + // - This function is inaccessible to the outside due to the double + // module wrapping it. 
It is called exactly once by the C side via its + // unique name, + // - furthermore it is only called after `__{name}_init` has returned `0` + // (which delegates to `__init`). + unsafe {{ __exit() }} + }} - #[cfg(not(MODULE))] - #[doc(hidden)] - #[no_mangle] - pub extern \"C\" fn __{name}_exit() {{ - __exit() - }} + /// # Safety + /// + /// This function must only be called once. + unsafe fn __init() -> core::ffi::c_int {{ + match <{type_} as kernel::Module>::init(&super::super::THIS_MODULE) {{ + Ok(m) => {{ + // SAFETY: No data race, since `__MOD` can only be accessed by this + // module and there only `__init` and `__exit` access it. These + // functions are only called once and `__exit` cannot be called + // before or during `__init`. + unsafe {{ + __MOD = Some(m); + }} + return 0; + }} + Err(e) => {{ + return e.to_errno(); + }} + }} + }} - fn __init() -> core::ffi::c_int {{ - match <{type_} as kernel::Module>::init(&THIS_MODULE) {{ - Ok(m) => {{ + /// # Safety + /// + /// This function must + /// - only be called once, + /// - be called after `__init` has been called and returned `0`. + unsafe fn __exit() {{ + // SAFETY: No data race, since `__MOD` can only be accessed by this module + // and there only `__init` and `__exit` access it. These functions are only + // called once and `__init` was already called. unsafe {{ - __MOD = Some(m); + // Invokes `drop()` on `__MOD`, which should be used for cleanup. + __MOD = None; }} - return 0; - }} - Err(e) => {{ - return e.to_errno(); }} - }} - }} - fn __exit() {{ - unsafe {{ - // Invokes `drop()` on `__MOD`, which should be used for cleanup. - __MOD = None; + {modinfo} }} }} - - {modinfo} ", type_ = info.type_, name = info.name, diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index 8568d256d6..79fcf27316 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -23,7 +23,7 @@ modname = $(notdir $(@:.mod.o=)) part-of-module = y quiet_cmd_cc_o_c = CC [M] $@ - cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV), $(c_flags)) -c -o $@ $< + cmd_cc_o_c = $(CC) $(filter-out $(CC_FLAGS_CFI) $(CFLAGS_GCOV) $(CFLAGS_KCSAN), $(c_flags)) -c -o $@ $< %.mod.o: %.mod.c FORCE $(call if_changed_dep,cc_o_c) diff --git a/security/keys/key.c b/security/keys/key.c index 5b10641deb..74dfd961af 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -464,7 +464,8 @@ static int __key_instantiate_and_link(struct key *key, if (authkey) key_invalidate(authkey); - key_set_expiry(key, prep->expiry); + if (prep->expiry != TIME64_MAX) + key_set_expiry(key, prep->expiry); } } diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index bc700f85f8..ea277c55a3 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -38,6 +38,7 @@ static int tpm2_key_encode(struct trusted_key_payload *payload, u8 *end_work = scratch + SCRATCH_SIZE; u8 *priv, *pub; u16 priv_len, pub_len; + int ret; priv_len = get_unaligned_be16(src) + 2; priv = src; @@ -57,8 +58,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload, unsigned char bool[3], *w = bool; /* tag 0 is emptyAuth */ w = asn1_encode_boolean(w, w + sizeof(bool), true); - if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) - return PTR_ERR(w); + if (WARN(IS_ERR(w), "BUG: Boolean failed to encode")) { + ret = PTR_ERR(w); + goto err; + } work = asn1_encode_tag(work, end_work, 0, bool, w - bool); } @@ -69,8 +72,10 @@ static int tpm2_key_encode(struct trusted_key_payload *payload, * 
trigger, so if it does there's something nefarious going on */ if (WARN(work - scratch + pub_len + priv_len + 14 > SCRATCH_SIZE, - "BUG: scratch buffer is too small")) - return -EINVAL; + "BUG: scratch buffer is too small")) { + ret = -EINVAL; + goto err; + } work = asn1_encode_integer(work, end_work, options->keyhandle); work = asn1_encode_octet_string(work, end_work, pub, pub_len); @@ -79,10 +84,18 @@ static int tpm2_key_encode(struct trusted_key_payload *payload, work1 = payload->blob; work1 = asn1_encode_sequence(work1, work1 + sizeof(payload->blob), scratch, work - scratch); - if (WARN(IS_ERR(work1), "BUG: ASN.1 encoder failed")) - return PTR_ERR(work1); + if (IS_ERR(work1)) { + ret = PTR_ERR(work1); + pr_err("BUG: ASN.1 encoder failed with %d\n", ret); + goto err; + } + kfree(scratch); return work1 - payload->blob; + +err: + kfree(scratch); + return ret; } struct tpm2_key_context { diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c index b57d72ea45..4e376994bf 100644 --- a/sound/hda/intel-sdw-acpi.c +++ b/sound/hda/intel-sdw-acpi.c @@ -41,6 +41,8 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx) "intel-quirk-mask", &quirk_mask); + fwnode_handle_put(link); + if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE) return false; diff --git a/sound/oss/dmasound/dmasound_paula.c b/sound/oss/dmasound/dmasound_paula.c index 0ba8f0c4cd..3a593da092 100644 --- a/sound/oss/dmasound/dmasound_paula.c +++ b/sound/oss/dmasound/dmasound_paula.c @@ -725,7 +725,13 @@ static void __exit amiga_audio_remove(struct platform_device *pdev) dmasound_deinit(); } -static struct platform_driver amiga_audio_driver = { +/* + * amiga_audio_remove() lives in .exit.text. For drivers registered via + * module_platform_driver_probe() this is ok because they cannot get unbound at + * runtime. So mark the driver struct with __refdata to prevent modpost + * triggering a section mismatch warning. + */ +static struct platform_driver amiga_audio_driver __refdata = { .remove_new = __exit_p(amiga_audio_remove), .driver = { .name = "amiga-audio", diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index fe72e7d772..dadeda7758 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c @@ -189,8 +189,7 @@ static int snd_emu10k1_suspend(struct device *dev) emu->suspend = 1; - cancel_work_sync(&emu->emu1010.firmware_work); - cancel_work_sync(&emu->emu1010.clock_work); + cancel_work_sync(&emu->emu1010.work); snd_ac97_suspend(emu->ac97); diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index de5c41e578..ade90c7ecd 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -732,69 +732,67 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, int dock, return snd_emu1010_load_firmware_entry(emu, *fw); } -static void emu1010_firmware_work(struct work_struct *work) +static void snd_emu1010_load_dock_firmware(struct snd_emu10k1 *emu) { - struct snd_emu10k1 *emu; - u32 tmp, tmp2, reg; + u32 tmp, tmp2; int err; - emu = container_of(work, struct snd_emu10k1, - emu1010.firmware_work); - if (emu->card->shutdown) + // The docking events clearly arrive prematurely - while the + // Dock's FPGA seems to be successfully programmed, the Dock + // fails to initialize subsequently if we don't give it some + // time to "warm up" here. 
+ msleep(200); + + dev_info(emu->card->dev, "emu1010: Loading Audio Dock Firmware\n"); + /* Return to Audio Dock programming mode */ + snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, + EMU_HANA_FPGA_CONFIG_AUDIODOCK); + err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw); + if (err < 0) return; -#ifdef CONFIG_PM_SLEEP - if (emu->suspend) + snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0); + + snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp); + dev_dbg(emu->card->dev, "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp); + if ((tmp & 0x1f) != 0x15) { + /* FPGA failed to be programmed */ + dev_err(emu->card->dev, + "emu1010: Loading Audio Dock Firmware failed, reg = 0x%x\n", + tmp); return; -#endif + } + dev_info(emu->card->dev, "emu1010: Audio Dock Firmware loaded\n"); + + snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp); + snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2); + dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2); + + /* Allow DLL to settle, to sync clocking between 1010 and Dock */ + msleep(10); +} + +static void emu1010_dock_event(struct snd_emu10k1 *emu) +{ + u32 reg; + snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, ®); /* OPTIONS: Which cards are attached to the EMU */ if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) { /* Audio Dock attached */ - /* Return to Audio Dock programming mode */ - dev_info(emu->card->dev, - "emu1010: Loading Audio Dock Firmware\n"); - snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, - EMU_HANA_FPGA_CONFIG_AUDIODOCK); - err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw); - if (err < 0) - return; - snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0); - snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp); - dev_info(emu->card->dev, - "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp); - if ((tmp & 0x1f) != 0x15) { - /* FPGA failed to be programmed */ - dev_info(emu->card->dev, - "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n", - tmp); - return; - } - dev_info(emu->card->dev, - "emu1010: Audio Dock Firmware loaded\n"); - snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp); - snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2); - dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2); - /* Sync clocking between 1010 and Dock */ - /* Allow DLL to settle */ - msleep(10); + snd_emu1010_load_dock_firmware(emu); /* Unmute all. Default is muted after a firmware load */ snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE); + } else if (!(reg & EMU_HANA_OPTION_DOCK_ONLINE)) { + /* Audio Dock removed */ + dev_info(emu->card->dev, "emu1010: Audio Dock detached\n"); + /* The hardware auto-mutes all, so we unmute again */ + snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE); } } -static void emu1010_clock_work(struct work_struct *work) +static void emu1010_clock_event(struct snd_emu10k1 *emu) { - struct snd_emu10k1 *emu; struct snd_ctl_elem_id id; - emu = container_of(work, struct snd_emu10k1, - emu1010.clock_work); - if (emu->card->shutdown) - return; -#ifdef CONFIG_PM_SLEEP - if (emu->suspend) - return; -#endif - spin_lock_irq(&emu->reg_lock); // This is the only thing that can actually happen. 
emu->emu1010.clock_source = emu->emu1010.clock_fallback; @@ -805,21 +803,40 @@ static void emu1010_clock_work(struct work_struct *work) snd_ctl_notify(emu->card, SNDRV_CTL_EVENT_MASK_VALUE, &id); } -static void emu1010_interrupt(struct snd_emu10k1 *emu) +static void emu1010_work(struct work_struct *work) { + struct snd_emu10k1 *emu; u32 sts; + emu = container_of(work, struct snd_emu10k1, emu1010.work); + if (emu->card->shutdown) + return; +#ifdef CONFIG_PM_SLEEP + if (emu->suspend) + return; +#endif + snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &sts); - if (sts & EMU_HANA_IRQ_DOCK_LOST) { - /* Audio Dock removed */ - dev_info(emu->card->dev, "emu1010: Audio Dock detached\n"); - /* The hardware auto-mutes all, so we unmute again */ - snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE); - } else if (sts & EMU_HANA_IRQ_DOCK) { - schedule_work(&emu->emu1010.firmware_work); - } + + // The distinction of the IRQ status bits is unreliable, + // so we dispatch later based on option card status. + if (sts & (EMU_HANA_IRQ_DOCK | EMU_HANA_IRQ_DOCK_LOST)) + emu1010_dock_event(emu); + if (sts & EMU_HANA_IRQ_WCLK_CHANGED) - schedule_work(&emu->emu1010.clock_work); + emu1010_clock_event(emu); +} + +static void emu1010_interrupt(struct snd_emu10k1 *emu) +{ + // We get an interrupt on each GPIO input pin change, but we + // care only about the ones triggered by the dedicated pin. + u16 sts = inw(emu->port + A_GPIO); + u16 bit = emu->card_capabilities->ca0108_chip ? 0x2000 : 0x8000; + if (!(sts & bit)) + return; + + schedule_work(&emu->emu1010.work); } /* @@ -889,7 +906,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu) snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, ®); dev_info(emu->card->dev, "emu1010: Card options = 0x%x\n", reg); if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) - schedule_work(&emu->emu1010.firmware_work); + snd_emu1010_load_dock_firmware(emu); if (emu->card_capabilities->no_adat) { emu->emu1010.optical_in = 0; /* IN_SPDIF */ emu->emu1010.optical_out = 0; /* OUT_SPDIF */ @@ -960,8 +977,7 @@ static void snd_emu10k1_free(struct snd_card *card) /* Disable 48Volt power to Audio Dock */ snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0); } - cancel_work_sync(&emu->emu1010.firmware_work); - cancel_work_sync(&emu->emu1010.clock_work); + cancel_work_sync(&emu->emu1010.work); release_firmware(emu->firmware); release_firmware(emu->dock_fw); snd_util_memhdr_free(emu->memhdr); @@ -1540,8 +1556,7 @@ int snd_emu10k1_create(struct snd_card *card, emu->irq = -1; emu->synth = NULL; emu->get_synth_voice = NULL; - INIT_WORK(&emu->emu1010.firmware_work, emu1010_firmware_work); - INIT_WORK(&emu->emu1010.clock_work, emu1010_clock_work); + INIT_WORK(&emu->emu1010.work, emu1010_work); /* read revision & serial */ emu->revision = pci->revision; pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index bf0618ab4f..7c82f93ba9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -7502,6 +7502,7 @@ enum { ALC287_FIXUP_YOGA7_14ITL_SPEAKERS, ALC298_FIXUP_LENOVO_C940_DUET7, ALC287_FIXUP_LENOVO_14IRP8_DUETITL, + ALC287_FIXUP_LENOVO_LEGION_7, ALC287_FIXUP_13S_GEN2_SPEAKERS, ALC256_FIXUP_SET_COEF_DEFAULTS, ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE, @@ -7574,6 +7575,23 @@ static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec, __snd_hda_apply_fixup(codec, id, action, 0); } +/* Another hilarious PCI SSID conflict with Lenovo Legion Pro 7 16ARX8H (with + * TAS2781 codec) and 
Legion 7i 16IAX7 (with CS35L41 codec); + * we apply a corresponding fixup depending on the codec SSID instead + */ +static void alc287_fixup_lenovo_legion_7(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + int id; + + if (codec->core.subsystem_id == 0x17aa38a8) + id = ALC287_FIXUP_TAS2781_I2C; /* Legion Pro 7 16ARX8H */ + else + id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion 7i 16IAX7 */ + __snd_hda_apply_fixup(codec, id, action, 0); +} + static const struct hda_fixup alc269_fixups[] = { [ALC269_FIXUP_GPIO2] = { .type = HDA_FIXUP_FUNC, @@ -9468,6 +9486,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc287_fixup_lenovo_14irp8_duetitl, }, + [ALC287_FIXUP_LENOVO_LEGION_7] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc287_fixup_lenovo_legion_7, + }, [ALC287_FIXUP_13S_GEN2_SPEAKERS] = { .type = HDA_FIXUP_VERBS, .v.verbs = (const struct hda_verb[]) { @@ -9934,6 +9956,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), @@ -10371,7 +10394,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6), SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), - SND_PCI_QUIRK(0x17aa, 0x386f, "Legion 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7), SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C), SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2), diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c index cbcd02ec6b..a3dbeebaeb 100644 --- a/sound/soc/codecs/es8326.c +++ b/sound/soc/codecs/es8326.c @@ -326,9 +326,9 @@ static const struct _coeff_div coeff_div_v3[] = { {125, 48000, 6000000, 0x04, 0x04, 0x1F, 0x2D, 0x8A, 0x0A, 0x27, 0x27}, {128, 8000, 1024000, 0x60, 0x00, 0x05, 0x75, 0x8A, 0x1B, 0x1F, 0x7F}, - {128, 16000, 2048000, 0x20, 0x00, 0x31, 0x35, 0x8A, 0x1B, 0x1F, 0x3F}, - {128, 44100, 5644800, 0xE0, 0x00, 0x01, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, - {128, 48000, 6144000, 0xE0, 0x00, 0x01, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, + {128, 16000, 2048000, 0x20, 0x00, 0x31, 0x35, 0x08, 0x19, 0x1F, 0x3F}, + {128, 44100, 5644800, 0xE0, 0x00, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, + {128, 48000, 6144000, 0xE0, 0x00, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, {144, 8000, 1152000, 0x20, 0x00, 0x03, 0x35, 0x8A, 0x1B, 0x23, 0x47}, {144, 16000, 2304000, 0x20, 0x00, 0x11, 0x35, 0x8A, 0x1B, 0x23, 0x47}, {192, 8000, 1536000, 0x60, 0x02, 0x0D, 0x75, 0x8A, 0x1B, 0x1F, 0x7F}, @@ -337,10 +337,10 @@ static const struct _coeff_div coeff_div_v3[] = { {200, 48000, 9600000, 0x04, 0x04, 0x0F, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, {250, 48000, 
12000000, 0x04, 0x04, 0x0F, 0x2D, 0xCA, 0x0A, 0x27, 0x27}, - {256, 8000, 2048000, 0x60, 0x00, 0x31, 0x35, 0x8A, 0x1B, 0x1F, 0x7F}, - {256, 16000, 4096000, 0x20, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x3F}, - {256, 44100, 11289600, 0xE0, 0x00, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, - {256, 48000, 12288000, 0xE0, 0x00, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, + {256, 8000, 2048000, 0x60, 0x00, 0x31, 0x35, 0x08, 0x19, 0x1F, 0x7F}, + {256, 16000, 4096000, 0x20, 0x00, 0x01, 0x35, 0x08, 0x19, 0x1F, 0x3F}, + {256, 44100, 11289600, 0xE0, 0x01, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, + {256, 48000, 12288000, 0xE0, 0x01, 0x01, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, {288, 8000, 2304000, 0x20, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x23, 0x47}, {384, 8000, 3072000, 0x60, 0x02, 0x05, 0x75, 0x8A, 0x1B, 0x1F, 0x7F}, {384, 16000, 6144000, 0x20, 0x02, 0x03, 0x35, 0x8A, 0x1B, 0x1F, 0x3F}, @@ -349,10 +349,10 @@ static const struct _coeff_div coeff_div_v3[] = { {400, 48000, 19200000, 0xE4, 0x04, 0x35, 0x6d, 0xCA, 0x0A, 0x1F, 0x1F}, {500, 48000, 24000000, 0xF8, 0x04, 0x3F, 0x6D, 0xCA, 0x0A, 0x1F, 0x1F}, - {512, 8000, 4096000, 0x60, 0x00, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x7F}, - {512, 16000, 8192000, 0x20, 0x00, 0x30, 0x35, 0x8A, 0x1B, 0x1F, 0x3F}, - {512, 44100, 22579200, 0xE0, 0x00, 0x00, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, - {512, 48000, 24576000, 0xE0, 0x00, 0x00, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, + {512, 8000, 4096000, 0x60, 0x00, 0x01, 0x08, 0x19, 0x1B, 0x1F, 0x7F}, + {512, 16000, 8192000, 0x20, 0x00, 0x30, 0x35, 0x08, 0x19, 0x1F, 0x3F}, + {512, 44100, 22579200, 0xE0, 0x00, 0x00, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, + {512, 48000, 24576000, 0xE0, 0x00, 0x00, 0x2D, 0x48, 0x08, 0x1F, 0x1F}, {768, 8000, 6144000, 0x60, 0x02, 0x11, 0x35, 0x8A, 0x1B, 0x1F, 0x7F}, {768, 16000, 12288000, 0x20, 0x02, 0x01, 0x35, 0x8A, 0x1B, 0x1F, 0x3F}, {768, 32000, 24576000, 0xE0, 0x02, 0x30, 0x2D, 0xCA, 0x0A, 0x1F, 0x1F}, @@ -757,6 +757,7 @@ static void es8326_jack_detect_handler(struct work_struct *work) regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01); regmap_write(es8326->regmap, ES8326_SYS_BIAS, 0x0a); regmap_update_bits(es8326->regmap, ES8326_HP_DRIVER_REF, 0x0f, 0x03); + regmap_write(es8326->regmap, ES8326_INT_SOURCE, ES8326_INT_SRC_PIN9); /* * Inverted HPJACK_POL bit to trigger one IRQ to double check HP Removal event */ @@ -779,6 +780,8 @@ static void es8326_jack_detect_handler(struct work_struct *work) * set auto-check mode, then restart jack_detect_work after 400ms. * Don't report jack status. 
*/ + regmap_write(es8326->regmap, ES8326_INT_SOURCE, + (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON)); regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x01); es8326_enable_micbias(es8326->component); usleep_range(50000, 70000); @@ -901,7 +904,7 @@ static int es8326_resume(struct snd_soc_component *component) regmap_write(es8326->regmap, ES8326_VMIDSEL, 0x0E); regmap_write(es8326->regmap, ES8326_ANA_LP, 0xf0); usleep_range(10000, 15000); - regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xe9); + regmap_write(es8326->regmap, ES8326_HPJACK_TIMER, 0xd9); regmap_write(es8326->regmap, ES8326_ANA_MICBIAS, 0xcb); /* set headphone default type and detect pin */ regmap_write(es8326->regmap, ES8326_HPDET_TYPE, 0x83); @@ -952,8 +955,7 @@ static int es8326_resume(struct snd_soc_component *component) es8326_enable_micbias(es8326->component); usleep_range(50000, 70000); regmap_update_bits(es8326->regmap, ES8326_HPDET_TYPE, 0x03, 0x00); - regmap_write(es8326->regmap, ES8326_INT_SOURCE, - (ES8326_INT_SRC_PIN9 | ES8326_INT_SRC_BUTTON)); + regmap_write(es8326->regmap, ES8326_INT_SOURCE, ES8326_INT_SRC_PIN9); regmap_write(es8326->regmap, ES8326_INTOUT_IO, es8326->interrupt_clk); regmap_write(es8326->regmap, ES8326_SDINOUT1_IO, diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h index 4234bbb900..dfef808673 100644 --- a/sound/soc/codecs/es8326.h +++ b/sound/soc/codecs/es8326.h @@ -101,7 +101,7 @@ #define ES8326_MUTE (3 << 0) /* ES8326_CLK_CTL */ -#define ES8326_CLK_ON (0x7e << 0) +#define ES8326_CLK_ON (0x7f << 0) #define ES8326_CLK_OFF (0 << 0) /* ES8326_CLK_INV */ diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 3c025dabaf..1253695beb 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1155,6 +1155,7 @@ static int wsa881x_probe(struct sdw_slave *pdev, pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0); pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop; pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY; + pdev->prop.clk_stop_mode1 = true; gpiod_direction_output(wsa881x->sd_n, !wsa881x->sd_n_val); wsa881x->regmap = devm_regmap_init_sdw(pdev, &wsa881x_regmap_config); diff --git a/sound/soc/intel/avs/topology.c b/sound/soc/intel/avs/topology.c index 48b3c67c91..d8223e2b4c 100644 --- a/sound/soc/intel/avs/topology.c +++ b/sound/soc/intel/avs/topology.c @@ -1458,6 +1458,8 @@ static int avs_widget_load(struct snd_soc_component *comp, int index, if (!le32_to_cpu(dw->priv.size)) return 0; + w->no_wname_in_kcontrol_name = true; + if (w->ignore_suspend && !AVS_S0IX_SUPPORTED) { dev_info_once(comp->dev, "Device does not support S0IX, check BIOS settings\n"); w->ignore_suspend = false; diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig index b93ea33739..6458d5dc49 100644 --- a/sound/soc/meson/Kconfig +++ b/sound/soc/meson/Kconfig @@ -99,6 +99,7 @@ config SND_MESON_AXG_PDM config SND_MESON_CARD_UTILS tristate + select SND_DYNAMIC_MINORS config SND_MESON_CODEC_GLUE tristate diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c index 3180aa4d3a..8c5605c1e3 100644 --- a/sound/soc/meson/axg-card.c +++ b/sound/soc/meson/axg-card.c @@ -318,6 +318,7 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np, dai_link->cpus = cpu; dai_link->num_cpus = 1; + dai_link->nonatomic = true; ret = meson_card_parse_dai(card, np, dai_link->cpus); if (ret) diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c index 65541fdb00..ecb3eb7a97 100644 --- 
a/sound/soc/meson/axg-fifo.c +++ b/sound/soc/meson/axg-fifo.c @@ -3,6 +3,7 @@ // Copyright (c) 2018 BayLibre, SAS. // Author: Jerome Brunet <jbrunet@baylibre.com> +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/of_irq.h> #include <linux/of_platform.h> @@ -145,8 +146,8 @@ int axg_fifo_pcm_hw_params(struct snd_soc_component *component, /* Enable irq if necessary */ irq_en = runtime->no_period_wakeup ? 0 : FIFO_INT_COUNT_REPEAT; regmap_update_bits(fifo->map, FIFO_CTRL0, - CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), - CTRL0_INT_EN(irq_en)); + CTRL0_INT_EN, + FIELD_PREP(CTRL0_INT_EN, irq_en)); return 0; } @@ -176,9 +177,9 @@ int axg_fifo_pcm_hw_free(struct snd_soc_component *component, { struct axg_fifo *fifo = axg_fifo_data(ss); - /* Disable the block count irq */ + /* Disable irqs */ regmap_update_bits(fifo->map, FIFO_CTRL0, - CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0); + CTRL0_INT_EN, 0); return 0; } @@ -187,13 +188,13 @@ EXPORT_SYMBOL_GPL(axg_fifo_pcm_hw_free); static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask) { regmap_update_bits(fifo->map, FIFO_CTRL1, - CTRL1_INT_CLR(FIFO_INT_MASK), - CTRL1_INT_CLR(mask)); + CTRL1_INT_CLR, + FIELD_PREP(CTRL1_INT_CLR, mask)); /* Clear must also be cleared */ regmap_update_bits(fifo->map, FIFO_CTRL1, - CTRL1_INT_CLR(FIFO_INT_MASK), - 0); + CTRL1_INT_CLR, + FIELD_PREP(CTRL1_INT_CLR, 0)); } static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id) @@ -203,18 +204,26 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id) unsigned int status; regmap_read(fifo->map, FIFO_STATUS1, &status); + status = FIELD_GET(STATUS1_INT_STS, status); + axg_fifo_ack_irq(fifo, status); - status = STATUS1_INT_STS(status) & FIFO_INT_MASK; + /* Use the thread to call period elapsed on nonatomic links */ if (status & FIFO_INT_COUNT_REPEAT) - snd_pcm_period_elapsed(ss); - else - dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", - status); + return IRQ_WAKE_THREAD; - /* Ack irqs */ - axg_fifo_ack_irq(fifo, status); + dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n", + status); + + return IRQ_NONE; +} + +static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id) +{ + struct snd_pcm_substream *ss = dev_id; + + snd_pcm_period_elapsed(ss); - return IRQ_RETVAL(status); + return IRQ_HANDLED; } int axg_fifo_pcm_open(struct snd_soc_component *component, @@ -242,8 +251,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, if (ret) return ret; - ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0, - dev_name(dev), ss); + ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block, + axg_fifo_pcm_irq_block_thread, + IRQF_ONESHOT, dev_name(dev), ss); if (ret) return ret; @@ -254,15 +264,15 @@ int axg_fifo_pcm_open(struct snd_soc_component *component, /* Setup status2 so it reports the memory pointer */ regmap_update_bits(fifo->map, FIFO_CTRL1, - CTRL1_STATUS2_SEL_MASK, - CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ)); + CTRL1_STATUS2_SEL, + FIELD_PREP(CTRL1_STATUS2_SEL, STATUS2_SEL_DDR_READ)); /* Make sure the dma is initially disabled */ __dma_enable(fifo, false); /* Disable irqs until params are ready */ regmap_update_bits(fifo->map, FIFO_CTRL0, - CTRL0_INT_EN(FIFO_INT_MASK), 0); + CTRL0_INT_EN, 0); /* Clear any pending interrupt */ axg_fifo_ack_irq(fifo, FIFO_INT_MASK); diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h index df528e8cb7..84a2664139 100644 --- a/sound/soc/meson/axg-fifo.h +++ b/sound/soc/meson/axg-fifo.h @@ -42,21 +42,19 @@ struct snd_soc_pcm_runtime; #define FIFO_CTRL0 0x00 
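A brief aside on the conversion above (illustration only; the register and field names below are made up, not taken from axg-fifo.h): expressing a field as a GENMASK() and accessing it with FIELD_PREP()/FIELD_GET() keeps the position and width in a single definition instead of separate mask and shift macros:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

/* Hypothetical register layout, for illustration only */
#define DEMO_CTRL0		0x00
#define DEMO_CTRL0_INT_EN	GENMASK(23, 16)	/* 8-bit interrupt enable field */

static void demo_set_int_en(struct regmap *map, u8 mask)
{
	/* FIELD_PREP() shifts 'mask' into bits 23:16; no open-coded shift */
	regmap_update_bits(map, DEMO_CTRL0, DEMO_CTRL0_INT_EN,
			   FIELD_PREP(DEMO_CTRL0_INT_EN, mask));
}

static u8 demo_get_int_sts(struct regmap *map, unsigned int reg)
{
	unsigned int val = 0;

	regmap_read(map, reg, &val);
	/* FIELD_GET() masks and right-shifts the same field back out */
	return FIELD_GET(DEMO_CTRL0_INT_EN, val);
}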
#define CTRL0_DMA_EN BIT(31) -#define CTRL0_INT_EN(x) ((x) << 16) +#define CTRL0_INT_EN GENMASK(23, 16) #define CTRL0_SEL_MASK GENMASK(2, 0) #define CTRL0_SEL_SHIFT 0 #define FIFO_CTRL1 0x04 -#define CTRL1_INT_CLR(x) ((x) << 0) -#define CTRL1_STATUS2_SEL_MASK GENMASK(11, 8) -#define CTRL1_STATUS2_SEL(x) ((x) << 8) +#define CTRL1_INT_CLR GENMASK(7, 0) +#define CTRL1_STATUS2_SEL GENMASK(11, 8) #define STATUS2_SEL_DDR_READ 0 -#define CTRL1_FRDDR_DEPTH_MASK GENMASK(31, 24) -#define CTRL1_FRDDR_DEPTH(x) ((x) << 24) +#define CTRL1_FRDDR_DEPTH GENMASK(31, 24) #define FIFO_START_ADDR 0x08 #define FIFO_FINISH_ADDR 0x0c #define FIFO_INT_ADDR 0x10 #define FIFO_STATUS1 0x14 -#define STATUS1_INT_STS(x) ((x) << 0) +#define STATUS1_INT_STS GENMASK(7, 0) #define FIFO_STATUS2 0x18 #define FIFO_INIT_ADDR 0x24 #define FIFO_CTRL2 0x28 diff --git a/sound/soc/meson/axg-frddr.c b/sound/soc/meson/axg-frddr.c index 8c166a5f33..747a900c0b 100644 --- a/sound/soc/meson/axg-frddr.c +++ b/sound/soc/meson/axg-frddr.c @@ -7,6 +7,7 @@ * This driver implements the frontend playback DAI of AXG and G12A based SoCs */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/regmap.h> #include <linux/module.h> @@ -59,8 +60,8 @@ static int axg_frddr_dai_hw_params(struct snd_pcm_substream *substream, /* Trim the FIFO depth if the period is small to improve latency */ depth = min(period, fifo->depth); val = (depth / AXG_FIFO_BURST) - 1; - regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK, - CTRL1_FRDDR_DEPTH(val)); + regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH, + FIELD_PREP(CTRL1_FRDDR_DEPTH, val)); return 0; } diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c index 2cedbce738..a71790908e 100644 --- a/sound/soc/meson/axg-tdm-interface.c +++ b/sound/soc/meson/axg-tdm-interface.c @@ -349,26 +349,31 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream, return 0; } -static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream, +static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream, + int cmd, struct snd_soc_dai *dai) { - struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream); + struct axg_tdm_stream *ts = + snd_soc_dai_get_dma_data(dai, substream); - /* Stop all attached formatters */ - axg_tdm_stream_stop(ts); + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + axg_tdm_stream_start(ts); + break; + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + case SNDRV_PCM_TRIGGER_STOP: + axg_tdm_stream_stop(ts); + break; + default: + return -EINVAL; + } return 0; } -static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream, - struct snd_soc_dai *dai) -{ - struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream); - - /* Force all attached formatters to update */ - return axg_tdm_stream_reset(ts); -} - static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai) { int stream; @@ -412,8 +417,7 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = { .set_fmt = axg_tdm_iface_set_fmt, .startup = axg_tdm_iface_startup, .hw_params = axg_tdm_iface_hw_params, - .prepare = axg_tdm_iface_prepare, - .hw_free = axg_tdm_iface_hw_free, + .trigger = axg_tdm_iface_trigger, }; /* TDM Backend DAIs */ diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c index 1a0be177b8..972ad99f31 100644 --- a/sound/soc/meson/axg-toddr.c +++ b/sound/soc/meson/axg-toddr.c @@ -5,6 +5,7 @@ /* This driver 
implements the frontend capture DAI of AXG based SoCs */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/regmap.h> #include <linux/module.h> @@ -19,12 +20,9 @@ #define CTRL0_TODDR_EXT_SIGNED BIT(29) #define CTRL0_TODDR_PP_MODE BIT(28) #define CTRL0_TODDR_SYNC_CH BIT(27) -#define CTRL0_TODDR_TYPE_MASK GENMASK(15, 13) -#define CTRL0_TODDR_TYPE(x) ((x) << 13) -#define CTRL0_TODDR_MSB_POS_MASK GENMASK(12, 8) -#define CTRL0_TODDR_MSB_POS(x) ((x) << 8) -#define CTRL0_TODDR_LSB_POS_MASK GENMASK(7, 3) -#define CTRL0_TODDR_LSB_POS(x) ((x) << 3) +#define CTRL0_TODDR_TYPE GENMASK(15, 13) +#define CTRL0_TODDR_MSB_POS GENMASK(12, 8) +#define CTRL0_TODDR_LSB_POS GENMASK(7, 3) #define CTRL1_TODDR_FORCE_FINISH BIT(25) #define CTRL1_SEL_SHIFT 28 @@ -76,12 +74,12 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream, width = params_width(params); regmap_update_bits(fifo->map, FIFO_CTRL0, - CTRL0_TODDR_TYPE_MASK | - CTRL0_TODDR_MSB_POS_MASK | - CTRL0_TODDR_LSB_POS_MASK, - CTRL0_TODDR_TYPE(type) | - CTRL0_TODDR_MSB_POS(TODDR_MSB_POS) | - CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1))); + CTRL0_TODDR_TYPE | + CTRL0_TODDR_MSB_POS | + CTRL0_TODDR_LSB_POS, + FIELD_PREP(CTRL0_TODDR_TYPE, type) | + FIELD_PREP(CTRL0_TODDR_MSB_POS, TODDR_MSB_POS) | + FIELD_PREP(CTRL0_TODDR_LSB_POS, TODDR_MSB_POS - (width - 1))); return 0; } diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index 2445ae7f6b..1506982a56 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -681,17 +681,27 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend) struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata; const struct sof_intel_dsp_desc *chip = hda->desc; struct hdac_bus *bus = sof_to_bus(sdev); + bool imr_lost = false; int ret, j; /* - * The memory used for IMR boot loses its content in deeper than S3 state - * We must not try IMR boot on next power up (as it will fail). - * + * The memory used for IMR boot loses its content in deeper than S3 + * state on CAVS platforms. + * On ACE platforms due to the system architecture the IMR content is + * lost at S3 state already, they are tailored for s2idle use. + * We must not try IMR boot on next power up in these cases as it will + * fail. + */ + if (sdev->system_suspend_target > SOF_SUSPEND_S3 || + (chip->hw_ip_version >= SOF_INTEL_ACE_1_0 && + sdev->system_suspend_target == SOF_SUSPEND_S3)) + imr_lost = true; + + /* * In case of firmware crash or boot failure set the skip_imr_boot to true * as well in order to try to re-load the firmware to do a 'cold' boot. 
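The hda-dsp.c hunk above decides whether the IMR contents survive the target suspend state before skipping IMR boot. The helper below is a hedged restatement of that condition using locally defined enum stand-ins for the suspend targets and DSP IP generations; the real driver uses its own SOF_SUSPEND_* and SOF_INTEL_ACE_* constants and types.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins, not the driver's actual constants */
enum suspend_target { SUSPEND_NONE, SUSPEND_S3, SUSPEND_S4, SUSPEND_S5 };
enum dsp_ip { IP_CAVS = 1, IP_ACE_1_0 = 2 };

/*
 * IMR content is lost in states deeper than S3 on CAVS, and already at S3
 * on ACE platforms (tailored for s2idle), so IMR boot must be skipped on
 * the next power up in those cases.
 */
static bool imr_lost(enum dsp_ip ip, enum suspend_target target)
{
	return target > SUSPEND_S3 ||
	       (ip >= IP_ACE_1_0 && target == SUSPEND_S3);
}

int main(void)
{
	printf("CAVS, S3: %d\n", imr_lost(IP_CAVS, SUSPEND_S3));	/* 0 */
	printf("CAVS, S4: %d\n", imr_lost(IP_CAVS, SUSPEND_S4));	/* 1 */
	printf("ACE1, S3: %d\n", imr_lost(IP_ACE_1_0, SUSPEND_S3));	/* 1 */
	return 0;
}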
*/ - if (sdev->system_suspend_target > SOF_SUSPEND_S3 || - sdev->fw_state == SOF_FW_CRASHED || + if (imr_lost || sdev->fw_state == SOF_FW_CRASHED || sdev->fw_state == SOF_FW_BOOT_FAILED) hda->skip_imr_boot = true; diff --git a/sound/soc/sof/intel/pci-lnl.c b/sound/soc/sof/intel/pci-lnl.c index b26ffe767f..b14e508f1f 100644 --- a/sound/soc/sof/intel/pci-lnl.c +++ b/sound/soc/sof/intel/pci-lnl.c @@ -35,6 +35,9 @@ static const struct sof_dev_desc lnl_desc = { .default_fw_path = { [SOF_IPC_TYPE_4] = "intel/sof-ipc4/lnl", }, + .default_lib_path = { + [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/lnl", + }, .default_tplg_path = { [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg", }, diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c index aa37c4ab0a..21cd41fec7 100644 --- a/sound/soc/tegra/tegra186_dspk.c +++ b/sound/soc/tegra/tegra186_dspk.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // tegra186_dspk.c - Tegra186 DSPK driver -// -// Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. #include <linux/clk.h> #include <linux/device.h> @@ -241,14 +240,14 @@ static int tegra186_dspk_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - cif_conf.client_bits = TEGRA_ACIF_BITS_24; - switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: cif_conf.audio_bits = TEGRA_ACIF_BITS_16; + cif_conf.client_bits = TEGRA_ACIF_BITS_16; break; case SNDRV_PCM_FORMAT_S32_LE: cif_conf.audio_bits = TEGRA_ACIF_BITS_32; + cif_conf.client_bits = TEGRA_ACIF_BITS_24; break; default: dev_err(dev, "unsupported format!\n"); diff --git a/sound/soc/ti/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c index b892d66f78..1e760c3155 100644 --- a/sound/soc/ti/davinci-mcasp.c +++ b/sound/soc/ti/davinci-mcasp.c @@ -2417,12 +2417,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev) mcasp_reparent_fck(pdev); - ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component, - &davinci_mcasp_dai[mcasp->op_mode], 1); - - if (ret != 0) - goto err; - ret = davinci_mcasp_get_dma_type(mcasp); switch (ret) { case PCM_EDMA: @@ -2449,6 +2443,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev) goto err; } + ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component, + &davinci_mcasp_dai[mcasp->op_mode], 1); + + if (ret != 0) + goto err; + no_audio: ret = davinci_mcasp_init_gpiochip(mcasp); if (ret) { diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index b67617b68e..f4437015d4 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -202,7 +202,7 @@ int line6_send_raw_message_async(struct usb_line6 *line6, const char *buffer, struct urb *urb; /* create message: */ - msg = kmalloc(sizeof(struct message), GFP_ATOMIC); + msg = kzalloc(sizeof(struct message), GFP_ATOMIC); if (msg == NULL) return -ENOMEM; @@ -688,7 +688,7 @@ static int line6_init_cap_control(struct usb_line6 *line6) int ret; /* initialize USB buffers: */ - line6->buffer_listen = kmalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL); + line6->buffer_listen = kzalloc(LINE6_BUFSIZE_LISTEN, GFP_KERNEL); if (!line6->buffer_listen) return -ENOMEM; @@ -697,7 +697,7 @@ static int line6_init_cap_control(struct usb_line6 *line6) return -ENOMEM; if (line6->properties->capabilities & LINE6_CAP_CONTROL_MIDI) { - line6->buffer_message = kmalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL); + line6->buffer_message = kzalloc(LINE6_MIDI_MESSAGE_MAXLEN, GFP_KERNEL); if 
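The line6 hunks above switch several buffer allocations from kmalloc() to kzalloc() so the buffers start out zeroed. A userspace analogy, assuming nothing beyond the C library, is malloc() versus calloc(); the sketch below shows the extra step that zero-initialising allocation removes when a consumer may read bytes that were never explicitly written.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 32;

	/* malloc(): contents indeterminate, like kmalloc() in the kernel */
	unsigned char *dirty = malloc(len);
	/* calloc(): contents zeroed, like kzalloc() */
	unsigned char *clean = calloc(1, len);

	if (!dirty || !clean)
		return 1;

	/* 'dirty' holds stale data until every byte is written; 'clean' is
	 * safe by default. This memset() is the step kzalloc() makes implicit. */
	memset(dirty, 0, len);

	printf("both buffers now all zero: %d\n", memcmp(dirty, clean, len) == 0);

	free(dirty);
	free(clean);
	return 0;
}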
(!line6->buffer_message) return -ENOMEM; diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index 4b0673bf52..07cfad817d 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -8,6 +8,7 @@ #include <linux/build_bug.h> #include <linux/compiler.h> #include <linux/math.h> +#include <linux/panic.h> #include <endian.h> #include <byteswap.h> diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h index f3c82ab5b1..7d73da0980 100644 --- a/tools/include/linux/mm.h +++ b/tools/include/linux/mm.h @@ -37,4 +37,9 @@ static inline void totalram_pages_add(long count) { } +static inline int early_pfn_to_nid(unsigned long pfn) +{ + return 0; +} + #endif diff --git a/tools/include/linux/panic.h b/tools/include/linux/panic.h new file mode 100644 index 0000000000..9c8f17a41c --- /dev/null +++ b/tools/include/linux/panic.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOOLS_LINUX_PANIC_H +#define _TOOLS_LINUX_PANIC_H + +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> + +static inline void panic(const char *fmt, ...) +{ + va_list argp; + + va_start(argp, fmt); + vfprintf(stderr, fmt, argp); + va_end(argp); + exit(-1); +} + +#endif diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index 8f08c3fd49..1ba6340d3b 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -370,7 +370,7 @@ below the processor's base frequency. Busy% = MPERF_delta/TSC_delta -Bzy_MHz = TSC_delta/APERF_delta/MPERF_delta/measurement_interval +Bzy_MHz = TSC_delta*APERF_delta/MPERF_delta/measurement_interval Note that these calculations depend on TSC_delta, so they are not reliable during intervals when TSC_MHz is not running at the base frequency. diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 7a334377f9..53b764422e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -53,6 +53,8 @@ #define NAME_BYTES 20 #define PATH_BYTES 128 +#define MAX_NOFILE 0x8000 + enum counter_scope { SCOPE_CPU, SCOPE_CORE, SCOPE_PACKAGE }; enum counter_type { COUNTER_ITEMS, COUNTER_CYCLES, COUNTER_SECONDS, COUNTER_USEC }; enum counter_format { FORMAT_RAW, FORMAT_DELTA, FORMAT_PERCENT }; @@ -989,8 +991,8 @@ struct pkg_data { unsigned long long pc8; unsigned long long pc9; unsigned long long pc10; - unsigned long long cpu_lpi; - unsigned long long sys_lpi; + long long cpu_lpi; + long long sys_lpi; unsigned long long pkg_wtd_core_c0; unsigned long long pkg_any_core_c0; unsigned long long pkg_any_gfxe_c0; @@ -1976,12 +1978,22 @@ int format_counters(struct thread_data *t, struct core_data *c, struct pkg_data if (DO_BIC(BIC_Pkgpc10)) outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->pc10 / tsc); - if (DO_BIC(BIC_CPU_LPI)) - outp += - sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->cpu_lpi / 1000000.0 / interval_float); - if (DO_BIC(BIC_SYS_LPI)) - outp += - sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float); + if (DO_BIC(BIC_CPU_LPI)) { + if (p->cpu_lpi >= 0) + outp += + sprintf(outp, "%s%.2f", (printed++ ? delim : ""), + 100.0 * p->cpu_lpi / 1000000.0 / interval_float); + else + outp += sprintf(outp, "%s(neg)", (printed++ ? delim : "")); + } + if (DO_BIC(BIC_SYS_LPI)) { + if (p->sys_lpi >= 0) + outp += + sprintf(outp, "%s%.2f", (printed++ ? 
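The turbostat.8 correction above changes the Bzy_MHz formula from a division to a multiplication by APERF_delta. The sketch below evaluates the corrected formula on made-up counter deltas, scaling the result to MHz; the numbers are purely illustrative.

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-interval counter deltas over a 1 s interval */
	double tsc_delta   = 2.4e9;	/* 2400 MHz base clock */
	double aperf_delta = 1.5e9;
	double mperf_delta = 1.0e9;
	double interval    = 1.0;	/* seconds */

	/* Busy% = MPERF_delta / TSC_delta */
	double busy_pct = 100.0 * mperf_delta / tsc_delta;

	/* corrected: Bzy_MHz = TSC_delta * APERF_delta / MPERF_delta / interval */
	double bzy_mhz = tsc_delta * aperf_delta / mperf_delta / interval / 1e6;

	printf("Busy%% = %.2f, Bzy_MHz = %.0f\n", busy_pct, bzy_mhz);
	return 0;
}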
delim : ""), + 100.0 * p->sys_lpi / 1000000.0 / interval_float); + else + outp += sprintf(outp, "%s(neg)", (printed++ ? delim : "")); + } if (DO_BIC(BIC_PkgWatt)) outp += @@ -2444,9 +2456,10 @@ int sum_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) average.packages.rapl_dram_perf_status += p->rapl_dram_perf_status; for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) { - if (mp->format == FORMAT_RAW) - continue; - average.packages.counter[i] += p->counter[i]; + if ((mp->format == FORMAT_RAW) && (topo.num_packages == 0)) + average.packages.counter[i] = p->counter[i]; + else + average.packages.counter[i] += p->counter[i]; } return 0; } @@ -2599,7 +2612,7 @@ unsigned long long get_uncore_mhz(int package, int die) { char path[128]; - sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/current_freq_khz", package, + sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d/current_freq_khz", package, die); return (snapshot_sysfs_counter(path) / 1000); @@ -3829,7 +3842,8 @@ void re_initialize(void) { free_all_buffers(); setup_all_buffers(false); - fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, topo.allowed_cpus); + fprintf(outf, "turbostat: re-initialized with num_cpus %d, allowed_cpus %d\n", topo.num_cpus, + topo.allowed_cpus); } void set_max_cpu_num(void) @@ -4567,20 +4581,15 @@ static void dump_sysfs_file(char *path) static void probe_intel_uncore_frequency(void) { int i, j; - char path[128]; + char path[256]; if (!genuine_intel) return; - if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00", R_OK)) - return; - - /* Cluster level sysfs not supported yet. */ - if (!access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00", R_OK)) - return; + if (access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK)) + goto probe_cluster; - if (!access("/sys/devices/system/cpu/intel_uncore_frequency/package_00_die_00/current_freq_khz", R_OK)) - BIC_PRESENT(BIC_UNCORE_MHZ); + BIC_PRESENT(BIC_UNCORE_MHZ); if (quiet) return; @@ -4588,26 +4597,73 @@ static void probe_intel_uncore_frequency(void) for (i = 0; i < topo.num_packages; ++i) { for (j = 0; j < topo.num_die; ++j) { int k, l; + char path_base[128]; + + sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/package_%02d_die_%02d", i, + j); - sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/min_freq_khz", - i, j); + sprintf(path, "%s/min_freq_khz", path_base); k = read_sysfs_int(path); - sprintf(path, "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/max_freq_khz", - i, j); + sprintf(path, "%s/max_freq_khz", path_base); l = read_sysfs_int(path); - fprintf(outf, "Uncore Frequency pkg%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000); + fprintf(outf, "Uncore Frequency package%d die%d: %d - %d MHz ", i, j, k / 1000, l / 1000); - sprintf(path, - "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_min_freq_khz", - i, j); + sprintf(path, "%s/initial_min_freq_khz", path_base); k = read_sysfs_int(path); - sprintf(path, - "/sys/devices/system/cpu/intel_uncore_frequency/package_0%d_die_0%d/initial_max_freq_khz", - i, j); + sprintf(path, "%s/initial_max_freq_khz", path_base); l = read_sysfs_int(path); - fprintf(outf, "(%d - %d MHz)\n", k / 1000, l / 1000); + fprintf(outf, "(%d - %d MHz)", k / 1000, l / 1000); + + sprintf(path, "%s/current_freq_khz", path_base); + k = read_sysfs_int(path); 
+ fprintf(outf, " %d MHz\n", k / 1000); } } + return; + +probe_cluster: + if (access("/sys/devices/system/cpu/intel_uncore_frequency/uncore00/current_freq_khz", R_OK)) + return; + + if (quiet) + return; + + for (i = 0;; ++i) { + int k, l; + char path_base[128]; + int package_id, domain_id, cluster_id; + + sprintf(path_base, "/sys/devices/system/cpu/intel_uncore_frequency/uncore%02d", i); + + if (access(path_base, R_OK)) + break; + + sprintf(path, "%s/package_id", path_base); + package_id = read_sysfs_int(path); + + sprintf(path, "%s/domain_id", path_base); + domain_id = read_sysfs_int(path); + + sprintf(path, "%s/fabric_cluster_id", path_base); + cluster_id = read_sysfs_int(path); + + sprintf(path, "%s/min_freq_khz", path_base); + k = read_sysfs_int(path); + sprintf(path, "%s/max_freq_khz", path_base); + l = read_sysfs_int(path); + fprintf(outf, "Uncore Frequency package%d domain%d cluster%d: %d - %d MHz ", package_id, domain_id, + cluster_id, k / 1000, l / 1000); + + sprintf(path, "%s/initial_min_freq_khz", path_base); + k = read_sysfs_int(path); + sprintf(path, "%s/initial_max_freq_khz", path_base); + l = read_sysfs_int(path); + fprintf(outf, "(%d - %d MHz)", k / 1000, l / 1000); + + sprintf(path, "%s/current_freq_khz", path_base); + k = read_sysfs_int(path); + fprintf(outf, " %d MHz\n", k / 1000); + } } static void probe_graphics(void) @@ -5489,7 +5545,8 @@ void print_dev_latency(void) fd = open(path, O_RDONLY); if (fd < 0) { - warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname); + if (debug) + warnx("Read %s failed", path); return; } @@ -5623,6 +5680,7 @@ void process_cpuid() unsigned int eax, ebx, ecx, edx; unsigned int fms, family, model, stepping, ecx_flags, edx_flags; unsigned long long ucode_patch = 0; + bool ucode_patch_valid = false; eax = ebx = ecx = edx = 0; @@ -5652,6 +5710,8 @@ void process_cpuid() if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch)) warnx("get_msr(UCODE)"); + else + ucode_patch_valid = true; /* * check max extended function levels of CPUID. @@ -5662,9 +5722,12 @@ void process_cpuid() __cpuid(0x80000000, max_extended_level, ebx, ecx, edx); if (!quiet) { - fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d) microcode 0x%x\n", - family, model, stepping, family, model, stepping, - (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF)); + fprintf(outf, "CPUID(1): family:model:stepping 0x%x:%x:%x (%d:%d:%d)", + family, model, stepping, family, model, stepping); + if (ucode_patch_valid) + fprintf(outf, " microcode 0x%x", (unsigned int)((ucode_patch >> 32) & 0xFFFFFFFF)); + fputc('\n', outf); + fprintf(outf, "CPUID(0x80000000): max_extended_levels: 0x%x\n", max_extended_level); fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n", ecx_flags & (1 << 0) ? 
"SSE3" : "-", @@ -6142,6 +6205,7 @@ void topology_update(void) topo.allowed_packages = 0; for_all_cpus(update_topo, ODD_COUNTERS); } + void setup_all_buffers(bool startup) { topology_probe(startup); @@ -6704,6 +6768,22 @@ void cmdline(int argc, char **argv) } } +void set_rlimit(void) +{ + struct rlimit limit; + + if (getrlimit(RLIMIT_NOFILE, &limit) < 0) + err(1, "Failed to get rlimit"); + + if (limit.rlim_max < MAX_NOFILE) + limit.rlim_max = MAX_NOFILE; + if (limit.rlim_cur < MAX_NOFILE) + limit.rlim_cur = MAX_NOFILE; + + if (setrlimit(RLIMIT_NOFILE, &limit) < 0) + err(1, "Failed to set rlimit"); +} + int main(int argc, char **argv) { int fd, ret; @@ -6729,6 +6809,9 @@ skip_cgroup_setting: probe_sysfs(); + if (!getuid()) + set_rlimit(); + turbostat_init(); msr_sum_record(); diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index 053f4d6da7..cc184e4420 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -2,6 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <sys/syscall.h> +#include <limits.h> #include <test_progs.h> #include "bloom_filter_map.skel.h" @@ -21,6 +22,11 @@ static void test_fail_cases(void) if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value size 0")) close(fd); + /* Invalid value size: too big */ + fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, INT32_MAX, 100, NULL); + if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid value too large")) + close(fd); + /* Invalid max entries size */ fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 0, NULL); if (!ASSERT_LT(fd, 0, "bpf_map_create bloom filter invalid max entries size")) diff --git a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc index 2de7c61d1a..3f74c09c56 100644 --- a/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc +++ b/tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc @@ -24,7 +24,7 @@ echo 0 > events/enable echo "Get the most frequently calling function" sample_events -target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'` +target_func=`cat trace | grep -o 'call_site=\([^+]*\)' | sed 's/call_site=//' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'` if [ -z "$target_func" ]; then exit_fail fi diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 2453add65d..c04a81535e 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -12,7 +12,7 @@ uname_M := $(shell uname -m 2>/dev/null || echo not) else uname_M := $(shell echo $(CROSS_COMPILE) | grep -o '^[a-z0-9]\+') endif -ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/') +ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/powerpc/') endif # Without this, failed build products remain, with up-to-date timestamps, @@ -97,13 +97,13 @@ TEST_GEN_FILES += $(BINARIES_64) endif else -ifneq (,$(findstring $(ARCH),ppc64)) +ifneq (,$(findstring $(ARCH),powerpc)) TEST_GEN_FILES += protection_keys endif endif -ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64)) +ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 powerpc riscv64 s390x sparc64 x86_64)) TEST_GEN_FILES += va_high_addr_switch 
TEST_GEN_FILES += virtual_address_range TEST_GEN_FILES += write_to_hugetlbfs diff --git a/tools/testing/selftests/net/test_bridge_neigh_suppress.sh b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh index 8533393a4f..02b986c9c2 100755 --- a/tools/testing/selftests/net/test_bridge_neigh_suppress.sh +++ b/tools/testing/selftests/net/test_bridge_neigh_suppress.sh @@ -154,17 +154,9 @@ setup_topo() setup_topo_ns $ns done - ip link add name veth0 type veth peer name veth1 - ip link set dev veth0 netns $h1 name eth0 - ip link set dev veth1 netns $sw1 name swp1 - - ip link add name veth0 type veth peer name veth1 - ip link set dev veth0 netns $sw1 name veth0 - ip link set dev veth1 netns $sw2 name veth0 - - ip link add name veth0 type veth peer name veth1 - ip link set dev veth0 netns $h2 name eth0 - ip link set dev veth1 netns $sw2 name swp1 + ip -n $h1 link add name eth0 type veth peer name swp1 netns $sw1 + ip -n $sw1 link add name veth0 type veth peer name veth0 netns $sw2 + ip -n $h2 link add name eth0 type veth peer name swp1 netns $sw2 } setup_host_common() diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c index 48b9a80323..d13ebde203 100644 --- a/tools/testing/selftests/timers/valid-adjtimex.c +++ b/tools/testing/selftests/timers/valid-adjtimex.c @@ -21,9 +21,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ - - - #include <stdio.h> #include <stdlib.h> #include <time.h> @@ -62,45 +59,47 @@ int clear_time_state(void) #define NUM_FREQ_OUTOFRANGE 4 #define NUM_FREQ_INVALID 2 +#define SHIFTED_PPM (1 << 16) + long valid_freq[NUM_FREQ_VALID] = { - -499<<16, - -450<<16, - -400<<16, - -350<<16, - -300<<16, - -250<<16, - -200<<16, - -150<<16, - -100<<16, - -75<<16, - -50<<16, - -25<<16, - -10<<16, - -5<<16, - -1<<16, + -499 * SHIFTED_PPM, + -450 * SHIFTED_PPM, + -400 * SHIFTED_PPM, + -350 * SHIFTED_PPM, + -300 * SHIFTED_PPM, + -250 * SHIFTED_PPM, + -200 * SHIFTED_PPM, + -150 * SHIFTED_PPM, + -100 * SHIFTED_PPM, + -75 * SHIFTED_PPM, + -50 * SHIFTED_PPM, + -25 * SHIFTED_PPM, + -10 * SHIFTED_PPM, + -5 * SHIFTED_PPM, + -1 * SHIFTED_PPM, -1000, - 1<<16, - 5<<16, - 10<<16, - 25<<16, - 50<<16, - 75<<16, - 100<<16, - 150<<16, - 200<<16, - 250<<16, - 300<<16, - 350<<16, - 400<<16, - 450<<16, - 499<<16, + 1 * SHIFTED_PPM, + 5 * SHIFTED_PPM, + 10 * SHIFTED_PPM, + 25 * SHIFTED_PPM, + 50 * SHIFTED_PPM, + 75 * SHIFTED_PPM, + 100 * SHIFTED_PPM, + 150 * SHIFTED_PPM, + 200 * SHIFTED_PPM, + 250 * SHIFTED_PPM, + 300 * SHIFTED_PPM, + 350 * SHIFTED_PPM, + 400 * SHIFTED_PPM, + 450 * SHIFTED_PPM, + 499 * SHIFTED_PPM, }; long outofrange_freq[NUM_FREQ_OUTOFRANGE] = { - -1000<<16, - -550<<16, - 550<<16, - 1000<<16, + -1000 * SHIFTED_PPM, + -550 * SHIFTED_PPM, + 550 * SHIFTED_PPM, + 1000 * SHIFTED_PPM, }; #define LONG_MAX (~0UL>>1) |
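The valid-adjtimex cleanup above introduces SHIFTED_PPM because adjtimex() expresses the frequency offset in parts per million with a 16-bit binary fraction, so 1 ppm corresponds to 1 << 16. The sketch below converts a ppm value both ways; it is a standalone arithmetic demo, not part of the selftest.

#include <stdio.h>

#define SHIFTED_PPM (1 << 16)	/* 1 ppm in adjtimex()'s 2^-16 ppm units */

int main(void)
{
	long ppm = -499;
	long freq = ppm * SHIFTED_PPM;	/* value placed in timex.freq */

	printf("%ld ppm -> freq %ld\n", ppm, freq);
	printf("freq %ld -> %.3f ppm\n", freq, (double)freq / SHIFTED_PPM);
	return 0;
}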