author     Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>	2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /arch/mips/mm
parent     Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/c-r4k.c           |  8 ++------
 arch/mips/mm/cache.c           | 15 +++------------
 arch/mips/mm/cex-gen.S         |  2 +-
 arch/mips/mm/dma-noncoherent.c |  2 +-
 arch/mips/mm/fault.c           |  1 +
 arch/mips/mm/init.c            |  9 +++++++--
 arch/mips/mm/ioremap.c         |  4 ++++
 arch/mips/mm/pgtable-64.c      |  2 ++
 arch/mips/mm/tlb-r3k.c         |  6 +++---
 arch/mips/mm/tlb-r4k.c         |  8 +++++---
 arch/mips/mm/tlbex.c           |  4 ++--
 11 files changed, 31 insertions(+), 30 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 187d1c1636..10413b6f66 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1485,10 +1485,6 @@ static void loongson3_sc_init(void)
 	return;
 }
 
-extern int r5k_sc_init(void);
-extern int rm7k_sc_init(void);
-extern int mips_sc_init(void);
-
 static void setup_scache(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -1654,7 +1650,7 @@ static void coherency_setup(void)
 
 	/*
 	 * c0_status.cu=0 specifies that updates by the sc instruction use
-	 * the coherency mode specified by the TLB; 1 means cachable
+	 * the coherency mode specified by the TLB; 1 means cacheable
 	 * coherent update on write will be used. Not all processors have
 	 * this bit and; some wire it to zero, others like Toshiba had the
 	 * silly idea of putting something else there ...
@@ -1828,7 +1824,7 @@ static struct notifier_block r4k_cache_pm_notifier_block = {
 	.notifier_call = r4k_cache_pm_notifier,
 };
 
-int __init r4k_cache_init_pm(void)
+static int __init r4k_cache_init_pm(void)
 {
 	return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block);
 }
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7f830634db..df1ced4fc3 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -205,22 +205,13 @@ static inline void setup_protection_map(void)
 
 void cpu_cache_init(void)
 {
-	if (cpu_has_3k_cache) {
-		extern void __weak r3k_cache_init(void);
-
+	if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
 		r3k_cache_init();
-	}
 
-	if (cpu_has_4k_cache) {
-		extern void __weak r4k_cache_init(void);
-
+	if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
 		r4k_cache_init();
-	}
-
-	if (cpu_has_octeon_cache) {
-		extern void __weak octeon_cache_init(void);
 
+	if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
 		octeon_cache_init();
-	}
 
 	setup_protection_map();
 }
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index 45dff5cd4b..e528583d13 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -25,7 +25,7 @@
 	 * This is a very bad place to be. Our cache error
 	 * detection has triggered. If we have write-back data
 	 * in the cache, we may not be able to recover. As a
-	 * first-order desperate measure, turn off KSEG0 cacheing.
+	 * first-order desperate measure, turn off KSEG0 caching.
 	 */
 	mfc0	k0,CP0_CONFIG
 	li	k1,~CONF_CM_CMASK
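Note on the cache.c hunk above: function-local `extern void __weak` declarations are replaced by `IS_ENABLED(CONFIG_*)` guards, so the init calls for unselected CPU types are still type-checked but discarded as dead code rather than resolved against weak stubs. A minimal userspace sketch of the trick, reduced from the kernel's <linux/kconfig.h> (CONFIG_DEMO_CACHE and demo_cache_init() are made up for illustration):

```c
#include <stdio.h>

/*
 * Cut-down version of the kernel's IS_ENABLED() from <linux/kconfig.h>:
 * expands to 1 when the CONFIG_ macro is defined to 1, to 0 otherwise
 * (the real helper also handles tristate "=m" options).
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option) ___is_defined(option)

#define CONFIG_DEMO_CACHE 1	/* pretend Kconfig selected this */

static void demo_cache_init(void)
{
	puts("demo cache initialized");
}

int main(void)
{
	/*
	 * Unlike #ifdef, the guarded branch is always parsed and
	 * type-checked; the compiler then drops the call as dead code
	 * when the option is off, so no weak fallback symbol is needed.
	 */
	if (IS_ENABLED(CONFIG_DEMO_CACHE))
		demo_cache_init();
	return 0;
}
```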
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 3c4fc97b9f..0f3cec663a 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -138,7 +138,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-		const struct iommu_ops *iommu, bool coherent)
+		bool coherent)
 {
 	dev->dma_coherent = coherent;
 }
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index d7878208bd..aaa9a242eb 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -26,6 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>		/* For VMALLOC_END */
+#include <asm/traps.h>
 #include <linux/kdebug.h>
 
 int show_unhandled_signals = 1;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6e368a4658..39f129205b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -38,6 +38,7 @@
 #include <asm/dma.h>
 #include <asm/maar.h>
 #include <asm/mmu_context.h>
+#include <asm/mmzone.h>
 #include <asm/sections.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -421,9 +422,13 @@ void __init paging_init(void)
 		       " %ldk highmem ignored\n",
 		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
 		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
-	}
 
-	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
+		max_mapnr = max_low_pfn;
+	} else if (highend_pfn) {
+		max_mapnr = highend_pfn;
+	} else {
+		max_mapnr = max_low_pfn;
+	}
 #else
 	max_mapnr = max_low_pfn;
 #endif
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index b6dad2fd55..d8243d61ef 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -72,6 +72,10 @@ void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
 	    flags == _CACHE_UNCACHED)
 		return (void __iomem *) CKSEG1ADDR(phys_addr);
 
+	/* Early remaps should use the unmapped regions til' VM is available */
+	if (WARN_ON_ONCE(!slab_is_available()))
+		return NULL;
+
 	/*
 	 * Don't allow anybody to remap RAM that may be allocated by the page
 	 * allocator, since that could lead to races & data clobbering.
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index c76d21f7df..1e544827de 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -89,6 +89,7 @@ void pud_init(void *addr)
 }
 #endif
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 pmd_t mk_pmd(struct page *page, pgprot_t prot)
 {
 	pmd_t pmd;
@@ -103,6 +104,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 {
 	*pmdp = pmd;
 }
+#endif
 
 void __init pagetable_init(void)
 {
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 53dfa2b931..173f7b3603 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -23,11 +23,11 @@
 #include <asm/io.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
+#include <asm/setup.h>
+#include <asm/tlbex.h>
 
 #undef DEBUG_TLB
 
-extern void build_tlb_refill_handler(void);
-
 /* CP0 hazard avoidance. */
 #define BARRIER				\
 	__asm__ __volatile__(		\
@@ -183,7 +183,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	int idx, pid;
 
 	/*
-	 * Handle debugger faulting in for debugee.
+	 * Handle debugger faulting in for debuggee.
 	 */
 	if (current->active_mm != vma->vm_mm)
 		return;
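Note on the init.c hunk above: previously `max_mapnr = highend_pfn ? highend_pfn : max_low_pfn` ran unconditionally, so a configuration whose highmem had just been ignored still sized its page array by highend_pfn. The patched code picks the bound per branch. A hypothetical pure-function restatement of that selection (pick_max_mapnr() and the highmem_ignored flag are made up; the pfn names mirror the kernel variables):

```c
#include <assert.h>
#include <stdbool.h>

/* Restates the branch structure of the patched paging_init(). */
static unsigned long pick_max_mapnr(unsigned long highend_pfn,
				    unsigned long max_low_pfn,
				    bool highmem_ignored)
{
	if (highend_pfn && highmem_ignored)
		return max_low_pfn;	/* discarded highmem must not be counted */
	else if (highend_pfn)
		return highend_pfn;
	else
		return max_low_pfn;
}

int main(void)
{
	/* the old unconditional ternary would have yielded 4096 here */
	assert(pick_max_mapnr(4096, 1024, true) == 1024);
	assert(pick_max_mapnr(4096, 1024, false) == 4096);
	assert(pick_max_mapnr(0, 1024, false) == 1024);
	return 0;
}
```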
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 93c2d69558..4106084e57 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -22,9 +22,9 @@
 #include <asm/hazards.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
+#include <asm/tlbex.h>
 #include <asm/tlbmisc.h>
-
-extern void build_tlb_refill_handler(void);
+#include <asm/setup.h>
 
 /*
  * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
@@ -301,7 +301,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	int idx, pid;
 
 	/*
-	 * Handle debugger faulting in for debugee.
+	 * Handle debugger faulting in for debuggee.
 	 */
 	if (current->active_mm != vma->vm_mm)
 		return;
@@ -458,6 +458,7 @@ EXPORT_SYMBOL(has_transparent_hugepage);
 
 int temp_tlb_entry;
 
+#ifndef CONFIG_64BIT
 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 			       unsigned long entryhi, unsigned long pagemask)
 {
@@ -496,6 +497,7 @@ out:
 	local_irq_restore(flags);
 	return ret;
 }
+#endif
 
 static int ntlb;
 static int __init set_ntlb(char *str)
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b4e1c783e6..4017fa0e2f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -789,7 +789,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 
 	if (check_for_high_segbits) {
 		/*
-		 * The kernel currently implicitely assumes that the
+		 * The kernel currently implicitly assumes that the
 		 * MIPS SEGBITS parameter for the processor is
 		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
 		 * allocate virtual addresses outside the maximum
@@ -1715,7 +1715,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
 
 /*
  * Check if PTE is present, if not then jump to LABEL. PTR points to
  * the page table where this PTE is located, PTE will be re-loaded
- * with it's original value.
+ * with its original value.
 */
 static void build_pte_present(u32 **p, struct uasm_reloc **r,
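Note on the tlb-r3k.c and tlb-r4k.c hunks (and the externs dropped from c-r4k.c): per-file `extern` declarations of functions such as build_tlb_refill_handler() give way to shared headers like <asm/tlbex.h>, so the definition and every caller are checked against a single prototype. A generic sketch of the pattern with made-up names (demo.h, demo_handler):

```c
/* demo.h -- the one shared prototype */
#ifndef DEMO_H
#define DEMO_H
void demo_handler(void);
#endif

/* demo.c -- definition; including demo.h lets the compiler (and
 * -Wmissing-prototypes) verify the signature matches the header */
#include "demo.h"
void demo_handler(void)
{
	/* ... build and install the handler ... */
}

/* caller.c -- no local "extern void demo_handler(void);" copy that
 * could silently drift out of sync with the real definition */
#include "demo.h"
void run_demo(void)
{
	demo_handler();
}
```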