Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/fault.c        |   4
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  |   3
-rw-r--r--  arch/arm64/mm/init.c         | 151
-rw-r--r--  arch/arm64/mm/kasan_init.c   |   6
-rw-r--r--  arch/arm64/mm/mmap.c         |   2
-rw-r--r--  arch/arm64/mm/mmu.c          |   3
-rw-r--r--  arch/arm64/mm/proc.S         |   3
7 files changed, 30 insertions(+), 142 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2e5d1e238a..55f6455a82 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -571,7 +571,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
/* Write implies read */
vm_flags |= VM_WRITE;
/* If EPAN is absent then exec implies read */
- if (!cpus_have_const_cap(ARM64_HAS_EPAN))
+ if (!alternative_has_cap_unlikely(ARM64_HAS_EPAN))
vm_flags |= VM_EXEC;
}
@@ -607,6 +607,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ mm_flags |= FAULT_FLAG_TRIED;
/* Quick path to respond to signals */
if (fault_signal_pending(fault, regs)) {
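
Both hunks above are in do_page_fault(). The EPAN hunk swaps cpus_have_const_cap() for alternative_has_cap_unlikely(), replacing a runtime bitmap test with a branch that the alternatives framework patches once the CPU capabilities are finalized; the same conversion recurs in hugetlbpage.c and mmu.c below. A minimal sketch of the trade-off, using a hypothetical caller rather than the real arm64 headers:

/*
 * Illustrative only: epan_forbids_implicit_read() is not a kernel function,
 * and the comments summarize the documented behaviour of the two helpers
 * rather than their implementation.
 */
static inline bool epan_forbids_implicit_read(void)
{
	/*
	 * cpus_have_cap(): reads the cpu_hwcaps bitmap; usable at any time,
	 * but always pays a load and test.
	 *
	 * alternative_has_cap_unlikely(): compiles to a branch patched at
	 * boot; before patching it reports the cap as absent, which is fine
	 * for code that only runs after capabilities are finalized, such as
	 * the page-fault path.
	 */
	return alternative_has_cap_unlikely(ARM64_HAS_EPAN);
}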
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 13fd592228..f5aae34263 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -544,8 +544,7 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
- if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
- cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable
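
This hunk (and the identical one in mmu.c further down) guards the erratum 2645198 workaround: changing a user mapping from executable to non-executable must go through break-before-make. A schematic of that sequence, assuming the generic ptep_clear_flush()/set_pte_at() helpers rather than the arm64-specific code:

/*
 * Sketch of break-before-make (BBM) for an exec -> non-exec permission
 * change. bbm_change_prot_sketch() is illustrative, not a kernel function.
 */
static void bbm_change_prot_sketch(struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep,
				   pte_t new_pte)
{
	/* "Break": clear the old entry and flush it from the TLBs, so no CPU
	 * can still translate through the executable version. */
	ptep_clear_flush(vma, addr, ptep);

	/* While the entry is invalid, the old and new permissions can never
	 * be observed for the same VA at the same time - the situation the
	 * affected cores cannot tolerate. */

	/* "Make": install the non-executable entry. */
	set_pte_at(vma->vm_mm, addr, ptep, new_pte);
}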
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8a0f860434..74c1db8ce2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -16,6 +16,7 @@
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
+#include <linux/math.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
@@ -64,15 +65,6 @@ EXPORT_SYMBOL(memstart_addr);
*/
phys_addr_t __ro_after_init arm64_dma_phys_limit;
-/* Current arm64 boot protocol requires 2MB alignment */
-#define CRASH_ALIGN SZ_2M
-
-#define CRASH_ADDR_LOW_MAX arm64_dma_phys_limit
-#define CRASH_ADDR_HIGH_MAX (PHYS_MASK + 1)
-#define CRASH_HIGH_SEARCH_BASE SZ_4G
-
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
-
/*
* To make optimal use of block mappings when laying out the linear
* mapping, round down the base of physical memory to a size that can
@@ -100,140 +92,25 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
#endif
-static int __init reserve_crashkernel_low(unsigned long long low_size)
+static void __init arch_reserve_crashkernel(void)
{
- unsigned long long low_base;
-
- low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
- if (!low_base) {
- pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
- return -ENOMEM;
- }
-
- pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
- low_base, low_base + low_size, low_size >> 20);
-
- crashk_low_res.start = low_base;
- crashk_low_res.end = low_base + low_size - 1;
- insert_resource(&iomem_resource, &crashk_low_res);
-
- return 0;
-}
-
-/*
- * reserve_crashkernel() - reserves memory for crash kernel
- *
- * This function reserves memory area given in "crashkernel=" kernel command
- * line parameter. The memory reserved is used by dump capture kernel when
- * primary kernel is crashing.
- */
-static void __init reserve_crashkernel(void)
-{
- unsigned long long crash_low_size = 0, search_base = 0;
- unsigned long long crash_max = CRASH_ADDR_LOW_MAX;
+ unsigned long long low_size = 0;
unsigned long long crash_base, crash_size;
char *cmdline = boot_command_line;
- bool fixed_base = false;
bool high = false;
int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- /* crashkernel=X[@offset] */
ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
- &crash_size, &crash_base);
- if (ret == -ENOENT) {
- ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
- if (ret || !crash_size)
- return;
-
- /*
- * crashkernel=Y,low can be specified or not, but invalid value
- * is not allowed.
- */
- ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
- if (ret == -ENOENT)
- crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
- else if (ret)
- return;
-
- search_base = CRASH_HIGH_SEARCH_BASE;
- crash_max = CRASH_ADDR_HIGH_MAX;
- high = true;
- } else if (ret || !crash_size) {
- /* The specified value is invalid */
- return;
- }
-
- crash_size = PAGE_ALIGN(crash_size);
-
- /* User specifies base address explicitly. */
- if (crash_base) {
- fixed_base = true;
- search_base = crash_base;
- crash_max = crash_base + crash_size;
- }
-
-retry:
- crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
- search_base, crash_max);
- if (!crash_base) {
- /*
- * For crashkernel=size[KMG]@offset[KMG], print out failure
- * message if can't reserve the specified region.
- */
- if (fixed_base) {
- pr_warn("crashkernel reservation failed - memory is in use.\n");
- return;
- }
-
- /*
- * For crashkernel=size[KMG], if the first attempt was for
- * low memory, fall back to high memory, the minimum required
- * low memory will be reserved later.
- */
- if (!high && crash_max == CRASH_ADDR_LOW_MAX) {
- crash_max = CRASH_ADDR_HIGH_MAX;
- search_base = CRASH_ADDR_LOW_MAX;
- crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
- goto retry;
- }
-
- /*
- * For crashkernel=size[KMG],high, if the first attempt was
- * for high memory, fall back to low memory.
- */
- if (high && crash_max == CRASH_ADDR_HIGH_MAX) {
- crash_max = CRASH_ADDR_LOW_MAX;
- search_base = 0;
- goto retry;
- }
- pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
- crash_size);
+ &crash_size, &crash_base,
+ &low_size, &high);
+ if (ret)
return;
- }
-
- if ((crash_base >= CRASH_ADDR_LOW_MAX) && crash_low_size &&
- reserve_crashkernel_low(crash_low_size)) {
- memblock_phys_free(crash_base, crash_size);
- return;
- }
-
- pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- crash_base, crash_base + crash_size, crash_size >> 20);
- /*
- * The crashkernel memory will be removed from the kernel linear
- * map. Inform kmemleak so that it won't try to access it.
- */
- kmemleak_ignore_phys(crash_base);
- if (crashk_low_res.end)
- kmemleak_ignore_phys(crashk_low_res.start);
-
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- insert_resource(&iomem_resource, &crashk_res);
+ reserve_crashkernel_generic(cmdline, crash_size, crash_base,
+ low_size, high);
}
/*
@@ -479,7 +356,7 @@ void __init bootmem_init(void)
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
- reserve_crashkernel();
+ arch_reserve_crashkernel();
memblock_dump_all();
}
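
The bulk of the init.c delta deletes the open-coded crashkernel reservation: arch_reserve_crashkernel() now only parses the command line and hands off to reserve_crashkernel_generic(). The deleted code documents the semantics the generic path is expected to preserve; a hedged summary (sizes below are illustrative, not from this patch):

/*
 * Behaviour the removed reserve_crashkernel() implemented:
 *
 *   crashkernel=256M      try low memory (below arm64_dma_phys_limit) first;
 *                         on failure fall back to high memory and also
 *                         reserve a default 128M low region.
 *   crashkernel=256M@2G   fixed base: reserve exactly that range or warn and
 *                         give up, with no fallback.
 *   crashkernel=2G,high   search above 4G first, fall back to low memory; an
 *                         optional crashkernel=...,low overrides the 128M
 *                         default low reservation.
 *
 * Reservations stay 2M-aligned (the arm64 boot-protocol requirement the
 * deleted CRASH_ALIGN macro encoded) and are hidden from kmemleak, since the
 * crashkernel region is removed from the linear map.
 */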
@@ -493,8 +370,16 @@ void __init mem_init(void)
{
bool swiotlb = max_pfn > PFN_DOWN(arm64_dma_phys_limit);
- if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC))
+ if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) && !swiotlb) {
+ /*
+ * If no bouncing needed for ZONE_DMA, reduce the swiotlb
+ * buffer for kmalloc() bouncing to 1MB per 1GB of RAM.
+ */
+ unsigned long size =
+ DIV_ROUND_UP(memblock_phys_mem_size(), 1024);
+ swiotlb_adjust_size(min(swiotlb_size_or_default(), size));
swiotlb = true;
+ }
swiotlb_init(swiotlb, SWIOTLB_VERBOSE);
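
The mem_init() hunk shrinks the SWIOTLB bounce buffer when it is needed only for kmalloc() misalignment bouncing and not for ZONE_DMA. A worked example of the arithmetic (the 64 MiB figure is the usual SWIOTLB default, not something this patch sets):

/*
 * Example: 4 GiB of RAM, max_pfn within arm64_dma_phys_limit, so swiotlb was
 * false on entry to this block.
 *
 *   memblock_phys_mem_size()      = 4 GiB
 *   DIV_ROUND_UP(4 GiB, 1024)     = 4 MiB   (1 MiB per 1 GiB of RAM)
 *   swiotlb_size_or_default()     = 64 MiB  (typical default)
 *   min(64 MiB, 4 MiB)            = 4 MiB
 *
 * swiotlb_adjust_size(4 MiB) therefore caps the bounce buffer at 4 MiB
 * instead of the 64 MiB default before swiotlb_init() allocates it.
 */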
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index f17d066e85..555285ebd5 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -300,7 +300,11 @@ void __init kasan_init(void)
kasan_init_shadow();
kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
- /* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
+ /*
+ * Generic KASAN is now fully initialized.
+ * Software and Hardware Tag-Based modes still require
+ * kasan_init_sw_tags() and kasan_init_hw_tags() correspondingly.
+ */
pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 8f5b7ce857..645fe60d00 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -68,7 +68,7 @@ static int __init adjust_protection_map(void)
* With Enhanced PAN we can honour the execute-only permissions as
* there is no PAN override with such mappings.
*/
- if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
+ if (cpus_have_cap(ARM64_HAS_EPAN)) {
protection_map[VM_EXEC] = PAGE_EXECONLY;
protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
}
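
adjust_protection_map() is an __init-time initcall that runs once after the CPU capabilities have been finalized, so a plain cpus_have_cap() bitmap test is sufficient and no alternatives patching is needed. For what the PAGE_EXECONLY entries mean in practice, a hedged userspace sketch (not part of this patch; behaviour differs on CPUs without EPAN, where exec still implies read, as the fault.c hunk above handles):

/* Illustrative userspace program: demonstrates what an execute-only mapping
 * means once PROT_EXEC resolves to PAGE_EXECONLY. */
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	/* Ask for a mapping that may be executed but neither read nor written. */
	void *p = mmap(NULL, 4096, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/*
	 * With EPAN the VMA gets PAGE_EXECONLY, so a data access like the one
	 * below is expected to fault (SIGSEGV); on older CPUs the exec
	 * permission also grants read and the load simply returns zero.
	 */
	printf("first byte: %d\n", *(volatile unsigned char *)p);
	return 0;
}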
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 47781bec61..15f6347d23 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1469,8 +1469,7 @@ early_initcall(prevent_bootmem_remove_init);
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
- if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
- cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+ if (alternative_has_cap_unlikely(ARM64_WORKAROUND_2645198)) {
/*
* Break-before-make (BBM) is required for all user space mappings
* when the permission changes from executable to non-executable
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 14fdf645ed..f66c37a161 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -405,8 +405,7 @@ SYM_FUNC_START(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB
dsb nsh
- mov x1, #3 << 20
- msr cpacr_el1, x1 // Enable FP/ASIMD
+ msr cpacr_el1, xzr // Reset cpacr_el1
mov x1, #1 << 12 // Reset mdscr_el1 and disable
msr mdscr_el1, x1 // access to the DCC from EL0
isb // Unmask debug exceptions now,
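
Finally, __cpu_setup() no longer enables FP/ASIMD unconditionally: the old "#3 << 20" literal was CPACR_EL1.FPEN (bits [21:20]) set to 0b11, i.e. no FP/ASIMD trapping at EL0 or EL1, while writing xzr leaves the register at zero so such accesses trap until the kernel enables them explicitly (assumption: done later by the FP/SIMD handling code, not in this file). A small annotated sketch of the field, using illustrative macro names rather than the kernel's own CPACR_EL1 definitions:

/* Illustrative definitions only; the kernel defines its own CPACR_EL1 field
 * macros elsewhere. */
#define EXAMPLE_CPACR_EL1_FPEN_SHIFT	20
#define EXAMPLE_CPACR_EL1_FPEN_NONE	(0x0UL << EXAMPLE_CPACR_EL1_FPEN_SHIFT)	/* trap FP/ASIMD at EL0 and EL1 */
#define EXAMPLE_CPACR_EL1_FPEN_ALL	(0x3UL << EXAMPLE_CPACR_EL1_FPEN_SHIFT)	/* no FP/ASIMD trapping */

/*
 * Old __cpu_setup: wrote EXAMPLE_CPACR_EL1_FPEN_ALL (the "#3 << 20").
 * New __cpu_setup: writes 0 (xzr), leaving FPEN at NONE, so FP/ASIMD use
 * traps until it is explicitly enabled later in boot.
 */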