Diffstat (limited to 'arch/s390/boot')
-rw-r--r--   arch/s390/boot/ipl_parm.c |  10
-rw-r--r--   arch/s390/boot/startup.c  |  49
-rw-r--r--   arch/s390/boot/vmem.c     |  34
3 files changed, 81 insertions, 12 deletions
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 7b75217626..b24de9aabf 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/pgtable.h>
+#include <asm/page-states.h>
 #include <asm/ebcdic.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
@@ -24,6 +25,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
 struct ipl_parameter_block __bootdata_preserved(ipl_block);
 int __bootdata_preserved(ipl_block_valid);
 int __bootdata_preserved(__kaslr_enabled);
+int __bootdata_preserved(cmma_flag) = 1;
 
 unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
 unsigned long memory_limit;
@@ -272,7 +274,7 @@ void parse_boot_command_line(void)
 			memory_limit = round_down(memparse(val, NULL), PAGE_SIZE);
 
 		if (!strcmp(param, "vmalloc") && val) {
-			vmalloc_size = round_up(memparse(val, NULL), PAGE_SIZE);
+			vmalloc_size = round_up(memparse(val, NULL), _SEGMENT_SIZE);
 			vmalloc_size_set = 1;
 		}
 
@@ -295,6 +297,12 @@ void parse_boot_command_line(void)
 		if (!strcmp(param, "nokaslr"))
 			__kaslr_enabled = 0;
 
+		if (!strcmp(param, "cmma")) {
+			rc = kstrtobool(val, &enabled);
+			if (!rc && !enabled)
+				cmma_flag = 0;
+		}
+
 #if IS_ENABLED(CONFIG_KVM)
 		if (!strcmp(param, "prot_virt")) {
 			rc = kstrtobool(val, &enabled);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index d3e48bd9c3..9cc76e6317 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <asm/page-states.h>
 #include <asm/boot_data.h>
 #include <asm/sections.h>
 #include <asm/maccess.h>
@@ -49,7 +50,7 @@ static void detect_facilities(void)
 {
 	if (test_facility(8)) {
 		machine.has_edat1 = 1;
-		__ctl_set_bit(0, 23);
+		local_ctl_set_bit(0, CR0_EDAT_BIT);
 	}
 	if (test_facility(78))
 		machine.has_edat2 = 1;
@@ -57,6 +58,48 @@ static void detect_facilities(void)
 		machine.has_nx = 1;
 }
 
+static int cmma_test_essa(void)
+{
+	unsigned long reg1, reg2, tmp = 0;
+	int rc = 1;
+	psw_t old;
+
+	/* Test ESSA_GET_STATE */
+	asm volatile(
+		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
+		"	epsw	%[reg1],%[reg2]\n"
+		"	st	%[reg1],0(%[psw_pgm])\n"
+		"	st	%[reg2],4(%[psw_pgm])\n"
+		"	larl	%[reg1],1f\n"
+		"	stg	%[reg1],8(%[psw_pgm])\n"
+		"	.insn	rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+		"	la	%[rc],0\n"
+		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
+		: [reg1] "=&d" (reg1),
+		  [reg2] "=&a" (reg2),
+		  [rc] "+&d" (rc),
+		  [tmp] "=&d" (tmp),
+		  "+Q" (S390_lowcore.program_new_psw),
+		  "=Q" (old)
+		: [psw_old] "a" (&old),
+		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+		  [cmd] "i" (ESSA_GET_STATE)
+		: "cc", "memory");
+	return rc;
+}
+
+static void cmma_init(void)
+{
+	if (!cmma_flag)
+		return;
+	if (cmma_test_essa()) {
+		cmma_flag = 0;
+		return;
+	}
+	if (test_facility(147))
+		cmma_flag = 2;
+}
+
 static void setup_lpp(void)
 {
 	S390_lowcore.current_pid = 0;
@@ -212,7 +255,8 @@ static unsigned long setup_kernel_memory_layout(void)
 	VMALLOC_END = MODULES_VADDR;
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
-	vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
+	vsize = round_down(VMALLOC_END / 2, _SEGMENT_SIZE);
+	vmalloc_size = min(vmalloc_size, vsize);
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
 
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
@@ -306,6 +350,7 @@ void startup_kernel(void)
 	setup_boot_command_line();
 	parse_boot_command_line();
 	detect_facilities();
+	cmma_init();
 	sanitize_prot_virt_host();
 	max_physmem_end = detect_max_physmem_end();
 	setup_ident_map_size(max_physmem_end);
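
Note on the new CMMA probe above: cmma_init() leaves cmma_flag at 0 when "cmma=off" was parsed in ipl_parm.c or the ESSA probe takes a program check, keeps the default of 1 when plain ESSA works, and raises it to 2 when facility 147 is also installed. The standalone sketch below models only that decision and is not kernel code; probe_essa_ok() and has_facility() are placeholders for the inline-assembly ESSA probe and test_facility().

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the cmma_init() decision, NOT the kernel code.
 * probe_essa_ok() and has_facility() stand in for the inline-assembly
 * ESSA probe and test_facility() used in startup.c. */
static bool probe_essa_ok(void)  { return true; }        /* assume ESSA works */
static bool has_facility(int nr) { return nr == 147; }   /* assume facility 147 present */

static int cmma_flag = 1;	/* default from ipl_parm.c: CMMA enabled */

static void cmma_init_model(void)
{
	if (!cmma_flag)
		return;			/* disabled via "cmma=off" */
	if (!probe_essa_ok()) {
		cmma_flag = 0;		/* ESSA probe failed: turn CMMA off */
		return;
	}
	if (has_facility(147))
		cmma_flag = 2;		/* facility 147 present: extended mode */
}

int main(void)
{
	cmma_init_model();
	printf("cmma_flag = %d\n", cmma_flag);
	return 0;
}
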
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 442a74f113..e3a4500a5a 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -2,16 +2,18 @@
 #include <linux/sched/task.h>
 #include <linux/pgtable.h>
 #include <linux/kasan.h>
+#include <asm/page-states.h>
 #include <asm/pgalloc.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
+#include <asm/ctlreg.h>
 #include <asm/physmem_info.h>
 #include <asm/maccess.h>
 #include <asm/abs_lowcore.h>
 #include "decompressor.h"
 #include "boot.h"
 
-unsigned long __bootdata_preserved(s390_invalid_asce);
+struct ctlreg __bootdata_preserved(s390_invalid_asce);
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
@@ -69,6 +71,10 @@ static void kasan_populate_shadow(void)
 	crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
 	crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
 	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+	__arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
+	__arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
+	__arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
+	__arch_set_page_dat(kasan_early_shadow_pte, 1);
 
 	/*
 	 * Current memory layout:
@@ -166,8 +172,6 @@ static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr,
 
 static bool kasan_pte_populate_zero_shadow(pte_t *pte, enum populate_mode mode)
 {
-	pte_t entry;
-
 	if (mode == POPULATE_KASAN_ZERO_SHADOW) {
 		set_pte(pte, pte_z);
 		return true;
@@ -224,6 +228,7 @@ static void *boot_crst_alloc(unsigned long val)
 
 	table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
 	crst_table_init(table, val);
+	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
 	return table;
 }
 
@@ -239,6 +244,7 @@ static pte_t *boot_pte_alloc(void)
 	if (!pte_leftover) {
 		pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
 		pte = pte_leftover + _PAGE_TABLE_SIZE;
+		__arch_set_page_dat(pte, 1);
 	} else {
 		pte = pte_leftover;
 		pte_leftover = NULL;
@@ -419,6 +425,14 @@ void setup_vmem(unsigned long asce_limit)
 	unsigned long asce_bits;
 	int i;
 
+	/*
+	 * Mark whole memory as no-dat. This must be done before any
+	 * page tables are allocated, or kernel image builtin pages
+	 * are marked as dat tables.
+	 */
+	for_each_physmem_online_range(i, &start, &end)
+		__arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);
+
 	if (asce_limit == _REGION1_SIZE) {
 		asce_type = _REGION2_ENTRY_EMPTY;
 		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@@ -426,10 +440,12 @@ void setup_vmem(unsigned long asce_limit)
 		asce_type = _REGION3_ENTRY_EMPTY;
 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 	}
-	s390_invalid_asce = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+	s390_invalid_asce.val = invalid_pg_dir | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
 
 	crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
 	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
+	__arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
+	__arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
 
 	/*
 	 * To allow prefixing the lowcore must be mapped with 4KB pages.
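
Note on the page counts passed to __arch_set_page_dat() in the hunks above: CRST (region/segment) tables span 1UL << CRST_ALLOC_ORDER pages, while a PTE table fits in a single page. A rough arithmetic sketch follows, assuming the usual s390 values of PAGE_SHIFT = 12 and CRST_ALLOC_ORDER = 2; these constants are assumptions for illustration, not taken from the kernel headers.

#include <stdio.h>

/* Illustration only: assumed values, not the kernel definitions. */
#define PAGE_SHIFT		12	/* 4KB pages */
#define CRST_ALLOC_ORDER	2	/* assumption: a CRST table is 2^2 pages */

int main(void)
{
	unsigned long crst_pages = 1UL << CRST_ALLOC_ORDER;

	/* Page counts as passed to __arch_set_page_dat() in the patch. */
	printf("CRST table: %lu pages (%lu KB)\n",
	       crst_pages, (crst_pages << PAGE_SHIFT) >> 10);
	printf("PTE table:  1 page (%u KB)\n", (1U << PAGE_SHIFT) >> 10);
	return 0;
}
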
@@ -447,12 +463,12 @@ void setup_vmem(unsigned long asce_limit)
 
 	kasan_populate_shadow();
 
-	S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
+	S390_lowcore.kernel_asce.val = swapper_pg_dir | asce_bits;
 	S390_lowcore.user_asce = s390_invalid_asce;
 
-	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
-	__ctl_load(S390_lowcore.user_asce, 7, 7);
-	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
+	local_ctl_load(1, &S390_lowcore.kernel_asce);
+	local_ctl_load(7, &S390_lowcore.user_asce);
+	local_ctl_load(13, &S390_lowcore.kernel_asce);
 
-	init_mm.context.asce = S390_lowcore.kernel_asce;
+	init_mm.context.asce = S390_lowcore.kernel_asce.val;
 }
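
Note on the last hunk: kernel_asce and s390_invalid_asce are now wrapped in struct ctlreg, so plain assignments gain a .val accessor and the old __ctl_load(value, n, n) calls become pointer-based local_ctl_load(n, &reg). The minimal sketch below only models that wrapper idea; local_ctl_load_model() is a stand-in that prints instead of loading a control register, and the addresses and ASCE bits are placeholders rather than the real s390 definitions.

#include <stdio.h>

/* Minimal model of the struct ctlreg wrapper, not the kernel API. */
struct ctlreg {
	unsigned long val;
};

/* Stand-in for local_ctl_load(): only prints what would be loaded
 * into the given control register. */
static void local_ctl_load_model(unsigned int cr, const struct ctlreg *reg)
{
	printf("load CR%u with %#lx\n", cr, reg->val);
}

int main(void)
{
	unsigned long swapper_pg_dir = 0x100000;	/* placeholder table address */
	unsigned long invalid_pg_dir = 0x200000;	/* placeholder table address */
	unsigned long asce_bits = 0x3b;			/* placeholder ASCE bits */
	struct ctlreg kernel_asce, user_asce;

	kernel_asce.val = swapper_pg_dir | asce_bits;	/* as in setup_vmem() */
	user_asce.val = invalid_pg_dir | asce_bits;

	local_ctl_load_model(1, &kernel_asce);		/* primary ASCE */
	local_ctl_load_model(7, &user_asce);		/* secondary ASCE */
	local_ctl_load_model(13, &kernel_asce);		/* home ASCE */
	return 0;
}
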