From 9f0fc191371843c4fc000a226b0a26b6c059aacd Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:40:19 +0200
Subject: Merging upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 arch/arm64/include/asm/tlbflush.h | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b149cf9f91..bb2c2833a9 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -105,7 +105,7 @@ static inline unsigned long get_trans_granule(void)
 #define __tlbi_level(op, addr, level) do {				\
 	u64 arg = addr;							\
 									\
-	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
+	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
 	    level) {							\
 		u64 ttl = level & 3;					\
 		ttl |= get_trans_granule() << 2;			\
@@ -284,16 +284,15 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 
 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
 {
-#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
 	/*
 	 * TLB flush deferral is not required on systems which are affected by
 	 * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
 	 * will have two consecutive TLBI instructions with a dsb(ish) in between
 	 * defeating the purpose (i.e save overall 'dsb ish' cost).
 	 */
-	if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI)))
+	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
 		return false;
-#endif
+
 	return true;
 }
 
@@ -333,7 +332,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
-#define MAX_TLBI_OPS	PTRS_PER_PTE
+#define MAX_DVM_OPS	PTRS_PER_PTE
 
 /*
  * __flush_tlb_range_op - Perform TLBI operation upon a range
@@ -413,12 +412,12 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	/*
 	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_TLBI_OPS - 1) pages;
+	 * (MAX_DVM_OPS - 1) pages;
 	 * When uses TLB range ops, we can handle up to
 	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
 	 */
 	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
+	     (end - start) >= (MAX_DVM_OPS * stride)) ||
 	    pages >= MAX_TLBI_RANGE_PAGES) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
@@ -451,7 +450,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 {
 	unsigned long addr;
 
-	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
 		flush_tlb_all();
 		return;
 	}
-- 
cgit v1.2.3