| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
| commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
| tree | 848558de17fb3008cdf4d861b01ac7781903ce39 /arch/sparc/include/asm/highmem.h | |
| parent | Initial commit. (diff) | |
| download | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip | |
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/sparc/include/asm/highmem.h')
-rw-r--r-- | arch/sparc/include/asm/highmem.h | 61 |
1 file changed, 61 insertions, 0 deletions
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
new file mode 100644
index 000000000..c7b2e2083
--- /dev/null
+++ b/arch/sparc/include/asm/highmem.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * highmem.h: virtual kernel memory mappings for high memory
+ *
+ * Used in CONFIG_HIGHMEM systems for memory pages which
+ * are not addressable by direct kernel virtual addresses.
+ *
+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+ *                    Gerhard.Wichert@pdb.siemens.de
+ *
+ *
+ * Redesigned the x86 32-bit VM architecture to deal with
+ * up to 16 Terrabyte physical memory. With current x86 CPUs
+ * we now support up to 64 Gigabytes physical RAM.
+ *
+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+ */
+
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#ifdef __KERNEL__
+
+#include <linux/interrupt.h>
+#include <linux/pgtable.h>
+#include <asm/vaddrs.h>
+#include <asm/pgtsrmmu.h>
+
+/* declarations for highmem.c */
+extern unsigned long highstart_pfn, highend_pfn;
+
+#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
+extern pte_t *pkmap_page_table;
+
+/*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+ * chunk of RAM. Currently the simplest way to do this is to align the
+ * pkmap region on a pagetable boundary (4MB).
+ */
+#define LAST_PKMAP 1024
+#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
+#define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
+
+#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+#define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
+
+#define flush_cache_kmaps() flush_cache_all()
+
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr) flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval) flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr) flush_tlb_all()
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_HIGHMEM_H */
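The core of this header is the pkmap window arithmetic: LAST_PKMAP slots of one page each (1024 pages, i.e. 4 MB with 4 KB pages), which is why the comment asks for the region to be aligned on a 4 MB pagetable boundary, plus the PKMAP_NR()/PKMAP_ADDR() pair that converts between a virtual address inside the window and its slot index. Below is a minimal user-space sketch of that arithmetic only; the real PKMAP_BASE comes from PMD_ALIGN(SRMMU_NOCACHE_VADDR + ...), which is not available outside the kernel, so the base address and the 4 KB page size used here are stand-in assumptions chosen for illustration.

```c
/*
 * Illustrative sketch of the pkmap window arithmetic from
 * arch/sparc/include/asm/highmem.h. PKMAP_BASE and PAGE_SHIFT
 * below are assumptions, not the kernel's real values.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12UL                 /* assumed 4 KB pages */
#define LAST_PKMAP 1024UL
#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE 0xfc000000UL         /* hypothetical, 4 MB aligned */

#define PKMAP_NR(virt)  (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define PKMAP_END       (PKMAP_ADDR(LAST_PKMAP))

int main(void)
{
	/* 1024 slots of 4 KB each cover exactly one 4 MB pagetable span. */
	printf("PKMAP_SIZE = %lu KB\n", PKMAP_SIZE >> 10);
	printf("PKMAP window: 0x%lx - 0x%lx\n", PKMAP_BASE, PKMAP_END);

	/* Slot -> address -> slot round-trips within the window. */
	for (unsigned long nr = 0; nr < LAST_PKMAP; nr += 257) {
		unsigned long vaddr = PKMAP_ADDR(nr);
		assert(PKMAP_NR(vaddr) == nr);
	}
	printf("round-trip OK\n");
	return 0;
}
```

With these definitions PKMAP_END - PKMAP_BASE equals PKMAP_SIZE, so the whole window fits under the single pte table that the header's comment says is initialized for pkmap_page_table.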