author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:40 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:40 +0000
commit    | 8b0a8165cdad0f4133837d753649ef4682e42c3b (patch)
tree      | 5c58f869f31ddb1f7bd6e8bdea269b680b36c5b6 /arch/sparc/include/asm
parent    | Releasing progress-linux version 6.8.12-1~progress7.99u1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r-- | arch/sparc/include/asm/cachetype.h      | 14
-rw-r--r-- | arch/sparc/include/asm/hypervisor.h     |  6
-rw-r--r-- | arch/sparc/include/asm/ldc.h            |  2
-rw-r--r-- | arch/sparc/include/asm/mmu_context_64.h |  4
-rw-r--r-- | arch/sparc/include/asm/page_32.h        |  2
-rw-r--r-- | arch/sparc/include/asm/page_64.h        |  3
-rw-r--r-- | arch/sparc/include/asm/pgtable_64.h     | 10
-rw-r--r-- | arch/sparc/include/asm/smp_64.h         |  2
-rw-r--r-- | arch/sparc/include/asm/switch_to_64.h   |  2
9 files changed, 29 insertions, 16 deletions
diff --git a/arch/sparc/include/asm/cachetype.h b/arch/sparc/include/asm/cachetype.h
new file mode 100644
index 0000000000..caf1c00458
--- /dev/null
+++ b/arch/sparc/include/asm/cachetype.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SPARC_CACHETYPE_H
+#define __ASM_SPARC_CACHETYPE_H
+
+#include <asm/page.h>
+
+#ifdef CONFIG_SPARC32
+extern int vac_cache_size;
+#define cpu_dcache_is_aliasing()	(vac_cache_size > PAGE_SIZE)
+#else
+#define cpu_dcache_is_aliasing()	(L1DCACHE_SIZE > PAGE_SIZE)
+#endif
+
+#endif
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h
index 08650d503c..f220edcf17 100644
--- a/arch/sparc/include/asm/hypervisor.h
+++ b/arch/sparc/include/asm/hypervisor.h
@@ -430,7 +430,7 @@ unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
  * ERRORS: No errors defined.
  *
  * Return the hypervisor ID handle for the current CPU. Use by a
- * virtual CPU to discover it's own identity.
+ * virtual CPU to discover its own identity.
  */
 #define HV_FAST_CPU_MYID 0x16
@@ -1221,7 +1221,7 @@ unsigned long sun4v_con_write(unsigned long buffer,
  * EBADALIGNED software state description is not correctly
  *             aligned
  *
- * This allows the guest to report it's soft state to the hypervisor. There
+ * This allows the guest to report its soft state to the hypervisor. There
  * are two primary components to this state. The first part states whether
  * the guest software is running or not. The second containts optional
  * details specific to the software.
@@ -1502,7 +1502,7 @@ struct hv_trap_trace_entry {
  * configuration error of some sort.
  *
  * The dump services provide an opaque buffer into which the
- * hypervisor can place it's internal state in order to assist in
+ * hypervisor can place its internal state in order to assist in
  * debugging such situations. The contents are opaque and extremely
  * platform and hypervisor implementation specific. The guest, during
  * a core dump, requests that the hypervisor update any information in
diff --git a/arch/sparc/include/asm/ldc.h b/arch/sparc/include/asm/ldc.h
index ca973955ca..4294738d40 100644
--- a/arch/sparc/include/asm/ldc.h
+++ b/arch/sparc/include/asm/ldc.h
@@ -13,7 +13,7 @@ void ldom_power_off(void);
  * or data becomes available on the receive side.
  *
  * For non-RAW links, if the LDC_EVENT_RESET event arrives the
- * driver should reset all of it's internal state and reinvoke
+ * driver should reset all of its internal state and reinvoke
  * ldc_connect() to try and bring the link up again.
  *
  * For RAW links, ldc_connect() is not used. Instead the driver
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 799e797c5c..08160bf9a0 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -93,7 +93,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	/* We have to be extremely careful here or else we will miss
 	 * a TSB grow if we switch back and forth between a kernel
-	 * thread and an address space which has it's TSB size increased
+	 * thread and an address space which has its TSB size increased
 	 * on another processor.
 	 *
 	 * It is possible to play some games in order to optimize the
@@ -118,7 +118,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 *
 	 * At that point cpu0 continues to use a stale TSB, the one from
 	 * before the TSB grow performed on cpu1. cpu1 did not cross-call
-	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
+	 * cpu0 to update its TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
 	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index 6be6f683f9..9977c77374 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -11,7 +11,7 @@
 #include <linux/const.h>
 
-#define PAGE_SHIFT 12
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 254dffd85f..e9bd24821c 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -4,8 +4,7 @@
 #include <linux/const.h>
 
-#define PAGE_SHIFT 13
-
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
 #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK (~(PAGE_SIZE-1))
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index a8c871b7d7..4d1bafaba9 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -680,8 +680,8 @@ static inline unsigned long pte_special(pte_t pte)
 	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
-#define pmd_leaf pmd_large
-static inline unsigned long pmd_large(pmd_t pmd)
+#define pmd_leaf pmd_leaf
+static inline bool pmd_leaf(pmd_t pmd)
 {
 	pte_t pte = __pte(pmd_val(pmd));
 
@@ -867,8 +867,8 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 /* only used by the stubbed out hugetlb gup code, should never be called */
 #define p4d_page(p4d) NULL
 
-#define pud_leaf pud_large
-static inline unsigned long pud_large(pud_t pud)
+#define pud_leaf pud_leaf
+static inline bool pud_leaf(pud_t pud)
 {
 	pte_t pte = __pte(pud_val(pud));
 
@@ -929,6 +929,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 }
 
+#define PFN_PTE_SHIFT PAGE_SHIFT
+
 static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
 		pte_t *ptep, pte_t pte, unsigned int nr)
 {
diff --git a/arch/sparc/include/asm/smp_64.h b/arch/sparc/include/asm/smp_64.h
index 505b670080..0964fede0b 100644
--- a/arch/sparc/include/asm/smp_64.h
+++ b/arch/sparc/include/asm/smp_64.h
@@ -47,7 +47,6 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-void smp_fill_in_cpu_possible_map(void);
 void smp_fill_in_sib_core_maps(void);
 void __noreturn cpu_play_dead(void);
 
@@ -77,7 +76,6 @@ void __cpu_die(unsigned int cpu);
 #define smp_fill_in_sib_core_maps() do { } while (0)
 #define smp_fetch_global_regs() do { } while (0)
 #define smp_fetch_global_pmu() do { } while (0)
-#define smp_fill_in_cpu_possible_map() do { } while (0)
 #define smp_init_cpu_poke() do { } while (0)
 #define scheduler_poke() do { } while (0)
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 14f3c49bfd..d93963ff7c 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -15,7 +15,7 @@ do {						\
  * for l0/l1. It will use one for 'next' and the other to hold
  * the output value of 'last'. 'next' is not referenced again
  * past the invocation of switch_to in the scheduler, so we need
- * not preserve it's value. Hairy, but it lets us remove 2 loads
+ * not preserve its value. Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path. -DaveM
 */
 #define switch_to(prev, next, last)					\
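
For orientation, the only brand-new API in this diff is cpu_dcache_is_aliasing() from the added cachetype.h: on sparc it reduces to comparing the virtually-indexed data-cache footprint against the page size. The stand-alone C sketch below mirrors that comparison outside the kernel; the helper name dcache_is_aliasing() and the cache/page sizes are illustrative assumptions, not part of the patch.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Minimal sketch of the aliasing test added in asm/cachetype.h:
     * a virtually-indexed data cache can map the same physical page at
     * two different cache indexes once the virtually-indexed span is
     * larger than a page, i.e. "cache size > page size".
     */
    static bool dcache_is_aliasing(unsigned long vac_cache_size,
                                   unsigned long page_size)
    {
            return vac_cache_size > page_size;
    }

    int main(void)
    {
            /* Illustrative numbers only: a 16 KiB cache with 4 KiB pages aliases. */
            printf("16K cache / 4K pages: %s\n",
                   dcache_is_aliasing(16 * 1024, 4 * 1024) ? "aliasing" : "no aliasing");

            /* An 8 KiB cache with 8 KiB pages (sparc64's default page size) does not. */
            printf("8K cache / 8K pages:  %s\n",
                   dcache_is_aliasing(8 * 1024, 8 * 1024) ? "aliasing" : "no aliasing");
            return 0;
    }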