Diffstat (limited to 'arch/sh/mm')
-rw-r--r-- | arch/sh/mm/cache-sh4.c    |  7
-rw-r--r-- | arch/sh/mm/cache-shx3.c   |  1
-rw-r--r-- | arch/sh/mm/cache.c        | 16
-rw-r--r-- | arch/sh/mm/hugetlbpage.c  | 10
-rw-r--r-- | arch/sh/mm/mmap.c         |  5
-rw-r--r-- | arch/sh/mm/nommu.c        |  2
-rw-r--r-- | arch/sh/mm/pgtable.c      |  4
-rw-r--r-- | arch/sh/mm/tlbex_32.c     |  1
8 files changed, 13 insertions, 33 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 862046f269..46393b0013 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -241,13 +241,14 @@ static void sh4_flush_cache_page(void *args)
 	if ((vma->vm_mm == current->active_mm))
 		vaddr = NULL;
 	else {
+		struct folio *folio = page_folio(page);
 		/*
 		 * Use kmap_coherent or kmap_atomic to do flushes for
 		 * another ASID than the current one.
 		 */
 		map_coherent = (current_cpu_data.dcache.n_aliases &&
-				test_bit(PG_dcache_clean, &page->flags) &&
-				page_mapcount(page));
+				test_bit(PG_dcache_clean, folio_flags(folio, 0)) &&
+				page_mapped(page));
 		if (map_coherent)
 			vaddr = kmap_coherent(page, address);
 		else
@@ -376,8 +377,6 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
 	} while (--way_count != 0);
 }
 
-extern void __weak sh4__flush_region_init(void);
-
 /*
  * SH-4 has virtually indexed and physically tagged cache.
  */
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index 24c58b7dc0..dec039a756 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <asm/cache.h>
+#include <asm/cacheflush.h>
 
 #define CCR_CACHE_SNM	0x40000		/* Hardware-assisted synonym avoidance */
 #define CCR_CACHE_IBE	0x1000000	/* ICBI broadcast */
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 9bcaa5619e..6ebdeaff30 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -84,7 +84,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 {
 	struct folio *folio = page_folio(page);
 
-	if (boot_cpu_data.dcache.n_aliases && page_mapcount(page) &&
+	if (boot_cpu_data.dcache.n_aliases && folio_mapped(folio) &&
 	    test_bit(PG_dcache_clean, &folio->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
@@ -320,30 +320,20 @@ void __init cpu_cache_init(void)
 		goto skip;
 
 	if (boot_cpu_data.type == CPU_J2) {
-		extern void __weak j2_cache_init(void);
-
 		j2_cache_init();
 	} else if (boot_cpu_data.family == CPU_FAMILY_SH2) {
-		extern void __weak sh2_cache_init(void);
-
 		sh2_cache_init();
 	}
 
 	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
-		extern void __weak sh2a_cache_init(void);
-
 		sh2a_cache_init();
 	}
 
 	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
-		extern void __weak sh3_cache_init(void);
-
 		sh3_cache_init();
 
 		if ((boot_cpu_data.type == CPU_SH7705) &&
 		    (boot_cpu_data.dcache.sets == 512)) {
-			extern void __weak sh7705_cache_init(void);
-
 			sh7705_cache_init();
 		}
 	}
@@ -351,14 +341,10 @@ void __init cpu_cache_init(void)
 	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
-		extern void __weak sh4_cache_init(void);
-
 		sh4_cache_init();
 
 		if ((boot_cpu_data.type == CPU_SH7786) ||
 		    (boot_cpu_data.type == CPU_SHX3)) {
-			extern void __weak shx3_cache_init(void);
-
 			shx3_cache_init();
 		}
 	}
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 6cb0ad73db..ff209b5528 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -70,13 +70,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 
 	return pte;
 }
-
-int pmd_huge(pmd_t pmd)
-{
-	return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-	return 0;
-}
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index b82199878b..bee329d414 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	int do_colour_align;
-	struct vm_unmapped_area_info info;
+	struct vm_unmapped_area_info info = {};
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
@@ -88,7 +88,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		return addr;
 	}
 
-	info.flags = 0;
 	info.length = len;
 	info.low_limit = TASK_UNMAPPED_BASE;
 	info.high_limit = TASK_SIZE;
@@ -106,7 +105,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_colour_align;
-	struct vm_unmapped_area_info info;
+	struct vm_unmapped_area_info info = {};
 
 	if (flags & MAP_FIXED) {
 		/* We do not accept a shared mapping if it would violate
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 78c4b6e6d3..fa3dc9428a 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,6 +10,8 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/mm.h>
+
+#include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/uaccess.h>
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index cf7ce4b573..3a4085ea01 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -2,12 +2,14 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
+#include <asm/pgalloc.h>
+
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
 static struct kmem_cache *pmd_cachep;
 #endif
 
-void pgd_ctor(void *x)
+static void pgd_ctor(void *x)
 {
 	pgd_t *pgd = x;
 
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 1c53868632..7d58578c15 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -14,6 +14,7 @@
 #include <linux/kdebug.h>
 #include <asm/mmu_context.h>
 #include <asm/thread_info.h>
+#include <asm/tlb.h>
 
 /*
  * Called with interrupts disabled.