author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit     7f3a4257159dea8e7ef66d1a539dc6df708b8ed3 (patch)
tree       bcc69b5f4609f348fac49e2f59e210b29eaea783 /arch/s390/kernel
parent     Adding upstream version 6.9.12. (diff)
download   linux-7f3a4257159dea8e7ef66d1a539dc6df708b8ed3.tar.xz
           linux-7f3a4257159dea8e7ef66d1a539dc6df708b8ed3.zip
Adding upstream version 6.10.3. (upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/s390/kernel')
30 files changed, 315 insertions(+), 511 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index db2d9ba5a8..7241fa1947 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o obj-$(CONFIG_COMPAT) += $(compat-obj-y) obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_KPROBES) += kprobes_insn_page.o obj-$(CONFIG_KPROBES) += mcount.o obj-$(CONFIG_RETHOOK) += rethook.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c index e7bca29f9c..1ac5f707dd 100644 --- a/arch/s390/kernel/alternative.c +++ b/arch/s390/kernel/alternative.c @@ -33,13 +33,6 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start, if (!__test_facility(a->facility, alt_stfle_fac_list)) continue; - - if (unlikely(a->instrlen % 2)) { - WARN_ONCE(1, "cpu alternatives instructions length is " - "odd, skipping patching\n"); - continue; - } - s390_kernel_write(instr, replacement, a->instrlen); } } diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 2f65bca2f3..f55979f64d 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -13,7 +13,6 @@ #include <linux/purgatory.h> #include <linux/pgtable.h> #include <linux/ftrace.h> -#include <asm/idle.h> #include <asm/gmap.h> #include <asm/stacktrace.h> @@ -71,11 +70,6 @@ int main(void) OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address); DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper)); BLANK(); - /* idle data offsets */ - OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); - OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); - OFFSET(__MT_CYCLES_ENTER, s390_idle_data, mt_cycles_enter); - BLANK(); /* hardware defined lowcore locations 0x000 - 0x1ff */ OFFSET(__LC_EXT_PARAMS, lowcore, ext_params); OFFSET(__LC_EXT_CPU_ADDR, lowcore, ext_cpu_addr); diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c index 554447768b..bf983513dd 100644 --- a/arch/s390/kernel/cert_store.c +++ b/arch/s390/kernel/cert_store.c @@ -21,6 +21,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/sysfs.h> +#include <linux/vmalloc.h> #include <crypto/sha2.h> #include <keys/user-type.h> #include <asm/debug.h> diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index d09ebb6f52..edae134161 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -451,7 +451,7 @@ static void *nt_final(void *ptr) /* * Initialize ELF header (new kernel) */ -static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) +static void *ehdr_init(Elf64_Ehdr *ehdr, int phdr_count) { memset(ehdr, 0, sizeof(*ehdr)); memcpy(ehdr->e_ident, ELFMAG, SELFMAG); @@ -465,7 +465,8 @@ static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); - ehdr->e_phnum = mem_chunk_cnt + 1; + /* Number of PT_LOAD program headers plus PT_NOTE program header */ + ehdr->e_phnum = phdr_count + 1; return ehdr + 1; } @@ -499,17 +500,20 @@ static int get_mem_chunk_cnt(void) /* * Initialize ELF loads (new kernel) */ -static void loads_init(Elf64_Phdr *phdr) +static void loads_init(Elf64_Phdr *phdr, bool os_info_has_vm) { + unsigned long old_identity_base = 0; phys_addr_t start, end; u64 idx; + if (os_info_has_vm) + old_identity_base = 
os_info_old_value(OS_INFO_IDENTITY_BASE); for_each_physmem_range(idx, &oldmem_type, &start, &end) { - phdr->p_filesz = end - start; phdr->p_type = PT_LOAD; + phdr->p_vaddr = old_identity_base + start; phdr->p_offset = start; - phdr->p_vaddr = (unsigned long)__va(start); phdr->p_paddr = start; + phdr->p_filesz = end - start; phdr->p_memsz = end - start; phdr->p_flags = PF_R | PF_W | PF_X; phdr->p_align = PAGE_SIZE; @@ -517,6 +521,30 @@ static void loads_init(Elf64_Phdr *phdr) } } +static bool os_info_has_vm(void) +{ + return os_info_old_value(OS_INFO_KASLR_OFFSET); +} + +/* + * Prepare PT_LOAD type program header for kernel image region + */ +static void text_init(Elf64_Phdr *phdr) +{ + unsigned long start_phys = os_info_old_value(OS_INFO_IMAGE_PHYS); + unsigned long start = os_info_old_value(OS_INFO_IMAGE_START); + unsigned long end = os_info_old_value(OS_INFO_IMAGE_END); + + phdr->p_type = PT_LOAD; + phdr->p_vaddr = start; + phdr->p_filesz = end - start; + phdr->p_memsz = end - start; + phdr->p_offset = start_phys; + phdr->p_paddr = start_phys; + phdr->p_flags = PF_R | PF_W | PF_X; + phdr->p_align = PAGE_SIZE; +} + /* * Initialize notes (new kernel) */ @@ -542,7 +570,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) return ptr; } -static size_t get_elfcorehdr_size(int mem_chunk_cnt) +static size_t get_elfcorehdr_size(int phdr_count) { size_t size; @@ -558,7 +586,7 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt) /* nt_final */ size += sizeof(Elf64_Nhdr); /* PT_LOADS */ - size += mem_chunk_cnt * sizeof(Elf64_Phdr); + size += phdr_count * sizeof(Elf64_Phdr); return size; } @@ -568,9 +596,9 @@ static size_t get_elfcorehdr_size(int mem_chunk_cnt) */ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) { - Elf64_Phdr *phdr_notes, *phdr_loads; + Elf64_Phdr *phdr_notes, *phdr_loads, *phdr_text; + int mem_chunk_cnt, phdr_text_cnt; size_t alloc_size; - int mem_chunk_cnt; void *ptr, *hdr; u64 hdr_off; @@ -589,12 +617,14 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) } mem_chunk_cnt = get_mem_chunk_cnt(); + phdr_text_cnt = os_info_has_vm() ? 1 : 0; - alloc_size = get_elfcorehdr_size(mem_chunk_cnt); + alloc_size = get_elfcorehdr_size(mem_chunk_cnt + phdr_text_cnt); hdr = kzalloc(alloc_size, GFP_KERNEL); - /* Without elfcorehdr /proc/vmcore cannot be created. Thus creating + /* + * Without elfcorehdr /proc/vmcore cannot be created. Thus creating * a dump with this crash kernel will fail. Panic now to allow other * dump mechanisms to take over. 
*/ @@ -602,18 +632,25 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size) panic("s390 kdump allocating elfcorehdr failed"); /* Init elf header */ - ptr = ehdr_init(hdr, mem_chunk_cnt); + phdr_notes = ehdr_init(hdr, mem_chunk_cnt + phdr_text_cnt); /* Init program headers */ - phdr_notes = ptr; - ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr)); - phdr_loads = ptr; - ptr = PTR_ADD(ptr, sizeof(Elf64_Phdr) * mem_chunk_cnt); + if (phdr_text_cnt) { + phdr_text = phdr_notes + 1; + phdr_loads = phdr_text + 1; + } else { + phdr_loads = phdr_notes + 1; + } + ptr = PTR_ADD(phdr_loads, sizeof(Elf64_Phdr) * mem_chunk_cnt); /* Init notes */ hdr_off = PTR_DIFF(ptr, hdr); ptr = notes_init(phdr_notes, ptr, ((unsigned long) hdr) + hdr_off); + /* Init kernel text program header */ + if (phdr_text_cnt) + text_init(phdr_text); /* Init loads */ + loads_init(phdr_loads, phdr_text_cnt); + /* Finalize program headers */ hdr_off = PTR_DIFF(ptr, hdr); - loads_init(phdr_loads); *addr = (unsigned long long) hdr; *size = (unsigned long long) hdr_off; BUG_ON(elfcorehdr_size > alloc_size); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 6a1e0fbbaa..60cf917a71 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -441,29 +441,6 @@ INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq /* - * Load idle PSW. - */ -SYM_FUNC_START(psw_idle) - stg %r14,(__SF_GPRS+8*8)(%r15) - stg %r3,__SF_EMPTY(%r15) - larl %r1,psw_idle_exit - stg %r1,__SF_EMPTY+8(%r15) - larl %r1,smp_cpu_mtid - llgf %r1,0(%r1) - ltgr %r1,%r1 - jz .Lpsw_idle_stcctm - .insn rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2) -.Lpsw_idle_stcctm: - oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT - BPON - stckf __CLOCK_IDLE_ENTER(%r2) - stpt __TIMER_IDLE_ENTER(%r2) - lpswe __SF_EMPTY(%r15) -SYM_INNER_LABEL(psw_idle_exit, SYM_L_GLOBAL) - BR_EX %r14 -SYM_FUNC_END(psw_idle) - -/* * Machine check handler routines */ SYM_CODE_START(mcck_int_handler) diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 7f6f8c438c..ddf2ee47cb 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -7,13 +7,13 @@ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#include <linux/moduleloader.h> #include <linux/hardirq.h> #include <linux/uaccess.h> #include <linux/ftrace.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/kprobes.h> +#include <linux/execmem.h> #include <trace/syscall.h> #include <asm/asm-offsets.h> #include <asm/text-patching.h> @@ -220,7 +220,7 @@ static int __init ftrace_plt_init(void) { const char *start, *end; - ftrace_plt = module_alloc(PAGE_SIZE); + ftrace_plt = execmem_alloc(EXECMEM_FTRACE, PAGE_SIZE); if (!ftrace_plt) panic("cannot allocate ftrace plt\n"); diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c index e7239aaf42..af9c97c0ad 100644 --- a/arch/s390/kernel/idle.c +++ b/arch/s390/kernel/idle.c @@ -57,9 +57,13 @@ void noinstr arch_cpu_idle(void) psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; clear_cpu_flag(CIF_NOHZ_DELAY); - - /* psw_idle() returns with interrupts disabled. 
*/ - psw_idle(idle, psw_mask); + set_cpu_flag(CIF_ENABLED_WAIT); + if (smp_cpu_mtid) + stcctm(MT_DIAG, smp_cpu_mtid, (u64 *)&idle->mt_cycles_enter); + idle->clock_idle_enter = get_tod_clock_fast(); + idle->timer_idle_enter = get_cpu_timer(); + bpon(); + __load_psw_mask(psw_mask); } static ssize_t show_idle_count(struct device *dev, diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 469e8d3fbf..3a7d6e1722 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -20,6 +20,7 @@ #include <linux/gfp.h> #include <linux/crash_dump.h> #include <linux/debug_locks.h> +#include <linux/vmalloc.h> #include <asm/asm-extable.h> #include <asm/diag.h> #include <asm/ipl.h> @@ -266,7 +267,11 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ struct kobj_attribute *attr, \ const char *buf, size_t len) \ { \ - strscpy(_value, buf, sizeof(_value)); \ + if (len >= sizeof(_value)) \ + return -E2BIG; \ + len = strscpy(_value, buf, sizeof(_value)); \ + if (len < 0) \ + return len; \ strim(_value); \ return len; \ } \ @@ -275,6 +280,61 @@ static struct kobj_attribute sys_##_prefix##_##_name##_attr = \ sys_##_prefix##_##_name##_show, \ sys_##_prefix##_##_name##_store) +#define IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \ +static ssize_t sys_##_prefix##_scp_data_show(struct file *filp, \ + struct kobject *kobj, \ + struct bin_attribute *attr, \ + char *buf, loff_t off, \ + size_t count) \ +{ \ + size_t size = _ipl_block.scp_data_len; \ + void *scp_data = _ipl_block.scp_data; \ + \ + return memory_read_from_buffer(buf, count, &off, \ + scp_data, size); \ +} + +#define IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\ +static ssize_t sys_##_prefix##_scp_data_store(struct file *filp, \ + struct kobject *kobj, \ + struct bin_attribute *attr, \ + char *buf, loff_t off, \ + size_t count) \ +{ \ + size_t scpdata_len = count; \ + size_t padding; \ + \ + if (off) \ + return -EINVAL; \ + \ + memcpy(_ipl_block.scp_data, buf, count); \ + if (scpdata_len % 8) { \ + padding = 8 - (scpdata_len % 8); \ + memset(_ipl_block.scp_data + scpdata_len, \ + 0, padding); \ + scpdata_len += padding; \ + } \ + \ + _ipl_block_hdr.len = _ipl_bp_len + scpdata_len; \ + _ipl_block.len = _ipl_bp0_len + scpdata_len; \ + _ipl_block.scp_data_len = scpdata_len; \ + \ + return count; \ +} + +#define DEFINE_IPL_ATTR_SCP_DATA_RO(_prefix, _ipl_block, _size) \ +IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \ +static struct bin_attribute sys_##_prefix##_scp_data_attr = \ + __BIN_ATTR(scp_data, 0444, sys_##_prefix##_scp_data_show, \ + NULL, _size) + +#define DEFINE_IPL_ATTR_SCP_DATA_RW(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len, _size)\ +IPL_ATTR_SCP_DATA_SHOW_FN(_prefix, _ipl_block) \ +IPL_ATTR_SCP_DATA_STORE_FN(_prefix, _ipl_block_hdr, _ipl_block, _ipl_bp_len, _ipl_bp0_len)\ +static struct bin_attribute sys_##_prefix##_scp_data_attr = \ + __BIN_ATTR(scp_data, 0644, sys_##_prefix##_scp_data_show, \ + sys_##_prefix##_scp_data_store, _size) + /* * ipl section */ @@ -373,71 +433,38 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj, static struct kobj_attribute sys_ipl_device_attr = __ATTR(device, 0444, sys_ipl_device_show, NULL); -static ssize_t ipl_parameter_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) +static ssize_t sys_ipl_parameter_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) { return 
memory_read_from_buffer(buf, count, &off, &ipl_block, ipl_block.hdr.len); } -static struct bin_attribute ipl_parameter_attr = - __BIN_ATTR(binary_parameter, 0444, ipl_parameter_read, NULL, +static struct bin_attribute sys_ipl_parameter_attr = + __BIN_ATTR(binary_parameter, 0444, sys_ipl_parameter_read, NULL, PAGE_SIZE); -static ssize_t ipl_scp_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - unsigned int size = ipl_block.fcp.scp_data_len; - void *scp_data = &ipl_block.fcp.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static ssize_t ipl_nvme_scp_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - unsigned int size = ipl_block.nvme.scp_data_len; - void *scp_data = &ipl_block.nvme.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static ssize_t ipl_eckd_scp_data_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, char *buf, - loff_t off, size_t count) -{ - unsigned int size = ipl_block.eckd.scp_data_len; - void *scp_data = &ipl_block.eckd.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static struct bin_attribute ipl_scp_data_attr = - __BIN_ATTR(scp_data, 0444, ipl_scp_data_read, NULL, PAGE_SIZE); - -static struct bin_attribute ipl_nvme_scp_data_attr = - __BIN_ATTR(scp_data, 0444, ipl_nvme_scp_data_read, NULL, PAGE_SIZE); - -static struct bin_attribute ipl_eckd_scp_data_attr = - __BIN_ATTR(scp_data, 0444, ipl_eckd_scp_data_read, NULL, PAGE_SIZE); +DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_fcp, ipl_block.fcp, PAGE_SIZE); static struct bin_attribute *ipl_fcp_bin_attrs[] = { - &ipl_parameter_attr, - &ipl_scp_data_attr, + &sys_ipl_parameter_attr, + &sys_ipl_fcp_scp_data_attr, NULL, }; +DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_nvme, ipl_block.nvme, PAGE_SIZE); + static struct bin_attribute *ipl_nvme_bin_attrs[] = { - &ipl_parameter_attr, - &ipl_nvme_scp_data_attr, + &sys_ipl_parameter_attr, + &sys_ipl_nvme_scp_data_attr, NULL, }; +DEFINE_IPL_ATTR_SCP_DATA_RO(ipl_eckd, ipl_block.eckd, PAGE_SIZE); + static struct bin_attribute *ipl_eckd_bin_attrs[] = { - &ipl_parameter_attr, - &ipl_eckd_scp_data_attr, + &sys_ipl_parameter_attr, + &sys_ipl_eckd_scp_data_attr, NULL, }; @@ -776,44 +803,10 @@ static struct kobj_attribute sys_reipl_ccw_vmparm_attr = /* FCP reipl device attributes */ -static ssize_t reipl_fcp_scpdata_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t size = reipl_block_fcp->fcp.scp_data_len; - void *scp_data = reipl_block_fcp->fcp.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t scpdata_len = count; - size_t padding; - - - if (off) - return -EINVAL; - - memcpy(reipl_block_fcp->fcp.scp_data, buf, count); - if (scpdata_len % 8) { - padding = 8 - (scpdata_len % 8); - memset(reipl_block_fcp->fcp.scp_data + scpdata_len, - 0, padding); - scpdata_len += padding; - } - - reipl_block_fcp->hdr.len = IPL_BP_FCP_LEN + scpdata_len; - reipl_block_fcp->fcp.len = IPL_BP0_FCP_LEN + scpdata_len; - reipl_block_fcp->fcp.scp_data_len = scpdata_len; - - return count; -} -static struct bin_attribute sys_reipl_fcp_scp_data_attr = - __BIN_ATTR(scp_data, 0644, 
reipl_fcp_scpdata_read, - reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE); +DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_fcp, reipl_block_fcp->hdr, + reipl_block_fcp->fcp, + IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN, + DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_fcp_bin_attrs[] = { &sys_reipl_fcp_scp_data_attr, @@ -934,44 +927,10 @@ static struct kobj_attribute sys_reipl_fcp_clear_attr = /* NVME reipl device attributes */ -static ssize_t reipl_nvme_scpdata_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t size = reipl_block_nvme->nvme.scp_data_len; - void *scp_data = reipl_block_nvme->nvme.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t scpdata_len = count; - size_t padding; - - if (off) - return -EINVAL; - - memcpy(reipl_block_nvme->nvme.scp_data, buf, count); - if (scpdata_len % 8) { - padding = 8 - (scpdata_len % 8); - memset(reipl_block_nvme->nvme.scp_data + scpdata_len, - 0, padding); - scpdata_len += padding; - } - - reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len; - reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len; - reipl_block_nvme->nvme.scp_data_len = scpdata_len; - - return count; -} - -static struct bin_attribute sys_reipl_nvme_scp_data_attr = - __BIN_ATTR(scp_data, 0644, reipl_nvme_scpdata_read, - reipl_nvme_scpdata_write, DIAG308_SCPDATA_SIZE); +DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_nvme, reipl_block_nvme->hdr, + reipl_block_nvme->nvme, + IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN, + DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_nvme_bin_attrs[] = { &sys_reipl_nvme_scp_data_attr, @@ -1067,44 +1026,10 @@ static struct attribute_group reipl_ccw_attr_group_lpar = { /* ECKD reipl device attributes */ -static ssize_t reipl_eckd_scpdata_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t size = reipl_block_eckd->eckd.scp_data_len; - void *scp_data = reipl_block_eckd->eckd.scp_data; - - return memory_read_from_buffer(buf, count, &off, scp_data, size); -} - -static ssize_t reipl_eckd_scpdata_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) -{ - size_t scpdata_len = count; - size_t padding; - - if (off) - return -EINVAL; - - memcpy(reipl_block_eckd->eckd.scp_data, buf, count); - if (scpdata_len % 8) { - padding = 8 - (scpdata_len % 8); - memset(reipl_block_eckd->eckd.scp_data + scpdata_len, - 0, padding); - scpdata_len += padding; - } - - reipl_block_eckd->hdr.len = IPL_BP_ECKD_LEN + scpdata_len; - reipl_block_eckd->eckd.len = IPL_BP0_ECKD_LEN + scpdata_len; - reipl_block_eckd->eckd.scp_data_len = scpdata_len; - - return count; -} - -static struct bin_attribute sys_reipl_eckd_scp_data_attr = - __BIN_ATTR(scp_data, 0644, reipl_eckd_scpdata_read, - reipl_eckd_scpdata_write, DIAG308_SCPDATA_SIZE); +DEFINE_IPL_ATTR_SCP_DATA_RW(reipl_eckd, reipl_block_eckd->hdr, + reipl_block_eckd->eckd, + IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN, + DIAG308_SCPDATA_SIZE); static struct bin_attribute *reipl_eckd_bin_attrs[] = { &sys_reipl_eckd_scp_data_attr, @@ -1209,8 +1134,8 @@ static struct attribute_group reipl_nss_attr_group = { void set_os_info_reipl_block(void) { - os_info_entry_add(OS_INFO_REIPL_BLOCK, reipl_block_actual, - reipl_block_actual->hdr.len); + 
os_info_entry_add_data(OS_INFO_REIPL_BLOCK, reipl_block_actual, + reipl_block_actual->hdr.len); } /* reipl type */ @@ -1648,6 +1573,11 @@ DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n", DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", dump_block_fcp->fcp.devno); +DEFINE_IPL_ATTR_SCP_DATA_RW(dump_fcp, dump_block_fcp->hdr, + dump_block_fcp->fcp, + IPL_BP_FCP_LEN, IPL_BP0_FCP_LEN, + DIAG308_SCPDATA_SIZE); + static struct attribute *dump_fcp_attrs[] = { &sys_dump_fcp_device_attr.attr, &sys_dump_fcp_wwpn_attr.attr, @@ -1657,9 +1587,15 @@ static struct attribute *dump_fcp_attrs[] = { NULL, }; +static struct bin_attribute *dump_fcp_bin_attrs[] = { + &sys_dump_fcp_scp_data_attr, + NULL, +}; + static struct attribute_group dump_fcp_attr_group = { .name = IPL_FCP_STR, .attrs = dump_fcp_attrs, + .bin_attrs = dump_fcp_bin_attrs, }; /* NVME dump device attributes */ @@ -1672,6 +1608,11 @@ DEFINE_IPL_ATTR_RW(dump_nvme, bootprog, "%lld\n", "%llx\n", DEFINE_IPL_ATTR_RW(dump_nvme, br_lba, "%lld\n", "%llx\n", dump_block_nvme->nvme.br_lba); +DEFINE_IPL_ATTR_SCP_DATA_RW(dump_nvme, dump_block_nvme->hdr, + dump_block_nvme->nvme, + IPL_BP_NVME_LEN, IPL_BP0_NVME_LEN, + DIAG308_SCPDATA_SIZE); + static struct attribute *dump_nvme_attrs[] = { &sys_dump_nvme_fid_attr.attr, &sys_dump_nvme_nsid_attr.attr, @@ -1680,9 +1621,15 @@ static struct attribute *dump_nvme_attrs[] = { NULL, }; +static struct bin_attribute *dump_nvme_bin_attrs[] = { + &sys_dump_nvme_scp_data_attr, + NULL, +}; + static struct attribute_group dump_nvme_attr_group = { .name = IPL_NVME_STR, .attrs = dump_nvme_attrs, + .bin_attrs = dump_nvme_bin_attrs, }; /* ECKD dump device attributes */ @@ -1696,6 +1643,11 @@ IPL_ATTR_BR_CHR_STORE_FN(dump, dump_block_eckd->eckd); static struct kobj_attribute sys_dump_eckd_br_chr_attr = __ATTR(br_chr, 0644, eckd_dump_br_chr_show, eckd_dump_br_chr_store); +DEFINE_IPL_ATTR_SCP_DATA_RW(dump_eckd, dump_block_eckd->hdr, + dump_block_eckd->eckd, + IPL_BP_ECKD_LEN, IPL_BP0_ECKD_LEN, + DIAG308_SCPDATA_SIZE); + static struct attribute *dump_eckd_attrs[] = { &sys_dump_eckd_device_attr.attr, &sys_dump_eckd_bootprog_attr.attr, @@ -1703,9 +1655,15 @@ static struct attribute *dump_eckd_attrs[] = { NULL, }; +static struct bin_attribute *dump_eckd_bin_attrs[] = { + &sys_dump_eckd_scp_data_attr, + NULL, +}; + static struct attribute_group dump_eckd_attr_group = { .name = IPL_ECKD_STR, .attrs = dump_eckd_attrs, + .bin_attrs = dump_eckd_bin_attrs, }; /* CCW dump device attributes */ @@ -1940,7 +1898,7 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) reipl_type == IPL_TYPE_NSS || reipl_type == IPL_TYPE_UNKNOWN) os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR; - os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags)); + os_info_entry_add_data(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags)); csum = (__force unsigned int)cksm(reipl_block_actual, reipl_block_actual->hdr.len, 0); abs_lc = get_abs_lowcore(); abs_lc->ipib = __pa(reipl_block_actual); @@ -1958,11 +1916,13 @@ static struct shutdown_action __refdata dump_reipl_action = { * vmcmd shutdown action: Trigger vm command on shutdown. 
*/ -static char vmcmd_on_reboot[128]; -static char vmcmd_on_panic[128]; -static char vmcmd_on_halt[128]; -static char vmcmd_on_poff[128]; -static char vmcmd_on_restart[128]; +#define VMCMD_MAX_SIZE 240 + +static char vmcmd_on_reboot[VMCMD_MAX_SIZE + 1]; +static char vmcmd_on_panic[VMCMD_MAX_SIZE + 1]; +static char vmcmd_on_halt[VMCMD_MAX_SIZE + 1]; +static char vmcmd_on_poff[VMCMD_MAX_SIZE + 1]; +static char vmcmd_on_restart[VMCMD_MAX_SIZE + 1]; DEFINE_IPL_ATTR_STR_RW(vmcmd, on_reboot, "%s\n", "%s\n", vmcmd_on_reboot); DEFINE_IPL_ATTR_STR_RW(vmcmd, on_panic, "%s\n", "%s\n", vmcmd_on_panic); @@ -2288,8 +2248,8 @@ static int __init vmcmd_on_reboot_setup(char *str) { if (!MACHINE_IS_VM) return 1; - strncpy_skip_quote(vmcmd_on_reboot, str, 127); - vmcmd_on_reboot[127] = 0; + strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE); + vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0; on_reboot_trigger.action = &vmcmd_action; return 1; } @@ -2299,8 +2259,8 @@ static int __init vmcmd_on_panic_setup(char *str) { if (!MACHINE_IS_VM) return 1; - strncpy_skip_quote(vmcmd_on_panic, str, 127); - vmcmd_on_panic[127] = 0; + strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE); + vmcmd_on_panic[VMCMD_MAX_SIZE] = 0; on_panic_trigger.action = &vmcmd_action; return 1; } @@ -2310,8 +2270,8 @@ static int __init vmcmd_on_halt_setup(char *str) { if (!MACHINE_IS_VM) return 1; - strncpy_skip_quote(vmcmd_on_halt, str, 127); - vmcmd_on_halt[127] = 0; + strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE); + vmcmd_on_halt[VMCMD_MAX_SIZE] = 0; on_halt_trigger.action = &vmcmd_action; return 1; } @@ -2321,8 +2281,8 @@ static int __init vmcmd_on_poff_setup(char *str) { if (!MACHINE_IS_VM) return 1; - strncpy_skip_quote(vmcmd_on_poff, str, 127); - vmcmd_on_poff[127] = 0; + strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE); + vmcmd_on_poff[VMCMD_MAX_SIZE] = 0; on_poff_trigger.action = &vmcmd_action; return 1; } diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 6f71b0ce10..9acc6630ab 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c @@ -29,6 +29,7 @@ #include <asm/hw_irq.h> #include <asm/stacktrace.h> #include <asm/softirq_stack.h> +#include <asm/vtime.h> #include "entry.h" DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); @@ -150,6 +151,7 @@ void noinstr do_io_irq(struct pt_regs *regs) if (from_idle) account_idle_time_irq(); + set_cpu_flag(CIF_NOHZ_DELAY); do { regs->tpi_info = S390_lowcore.tpi_info; if (S390_lowcore.tpi_info.adapter_IO) diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index f0cf20d4b3..05c83505e9 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -9,7 +9,6 @@ #define pr_fmt(fmt) "kprobes: " fmt -#include <linux/moduleloader.h> #include <linux/kprobes.h> #include <linux/ptrace.h> #include <linux/preempt.h> @@ -21,10 +20,10 @@ #include <linux/slab.h> #include <linux/hardirq.h> #include <linux/ftrace.h> +#include <linux/execmem.h> #include <asm/set_memory.h> #include <asm/sections.h> #include <asm/dis.h> -#include "kprobes.h" #include "entry.h" DEFINE_PER_CPU(struct kprobe *, current_kprobe); @@ -32,39 +31,17 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); struct kretprobe_blackpoint kretprobe_blacklist[] = { }; -static int insn_page_in_use; - void *alloc_insn_page(void) { void *page; - page = module_alloc(PAGE_SIZE); + page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE); if (!page) return NULL; set_memory_rox((unsigned long)page, 1); return page; } -static void *alloc_s390_insn_page(void) -{ - if (xchg(&insn_page_in_use, 
1) == 1) - return NULL; - return &kprobes_insn_page; -} - -static void free_s390_insn_page(void *page) -{ - xchg(&insn_page_in_use, 0); -} - -struct kprobe_insn_cache kprobe_s390_insn_slots = { - .mutex = __MUTEX_INITIALIZER(kprobe_s390_insn_slots.mutex), - .alloc = alloc_s390_insn_page, - .free = free_s390_insn_page, - .pages = LIST_HEAD_INIT(kprobe_s390_insn_slots.pages), - .insn_size = MAX_INSN_SIZE, -}; - static void copy_instruction(struct kprobe *p) { kprobe_opcode_t insn[MAX_INSN_SIZE]; @@ -78,10 +55,10 @@ static void copy_instruction(struct kprobe *p) if (probe_is_insn_relative_long(&insn[0])) { /* * For pc-relative instructions in RIL-b or RIL-c format patch - * the RI2 displacement field. We have already made sure that - * the insn slot for the patched instruction is within the same - * 2GB area as the original instruction (either kernel image or - * module area). Therefore the new displacement will always fit. + * the RI2 displacement field. The insn slot for the to be + * patched instruction is within the same 4GB area like the + * original instruction. Therefore the new displacement will + * always fit. */ disp = *(s32 *)&insn[1]; addr = (u64)(unsigned long)p->addr; @@ -93,34 +70,6 @@ static void copy_instruction(struct kprobe *p) } NOKPROBE_SYMBOL(copy_instruction); -static int s390_get_insn_slot(struct kprobe *p) -{ - /* - * Get an insn slot that is within the same 2GB area like the original - * instruction. That way instructions with a 32bit signed displacement - * field can be patched and executed within the insn slot. - */ - p->ainsn.insn = NULL; - if (is_kernel((unsigned long)p->addr)) - p->ainsn.insn = get_s390_insn_slot(); - else if (is_module_addr(p->addr)) - p->ainsn.insn = get_insn_slot(); - return p->ainsn.insn ? 0 : -ENOMEM; -} -NOKPROBE_SYMBOL(s390_get_insn_slot); - -static void s390_free_insn_slot(struct kprobe *p) -{ - if (!p->ainsn.insn) - return; - if (is_kernel((unsigned long)p->addr)) - free_s390_insn_slot(p->ainsn.insn, 0); - else - free_insn_slot(p->ainsn.insn, 0); - p->ainsn.insn = NULL; -} -NOKPROBE_SYMBOL(s390_free_insn_slot); - /* Check if paddr is at an instruction boundary */ static bool can_probe(unsigned long paddr) { @@ -174,7 +123,8 @@ int arch_prepare_kprobe(struct kprobe *p) /* Make sure the probe isn't going on a difficult instruction */ if (probe_is_prohibited_opcode(p->addr)) return -EINVAL; - if (s390_get_insn_slot(p)) + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) return -ENOMEM; copy_instruction(p); return 0; @@ -216,7 +166,10 @@ NOKPROBE_SYMBOL(arch_disarm_kprobe); void arch_remove_kprobe(struct kprobe *p) { - s390_free_insn_slot(p); + if (!p->ainsn.insn) + return; + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; } NOKPROBE_SYMBOL(arch_remove_kprobe); diff --git a/arch/s390/kernel/kprobes.h b/arch/s390/kernel/kprobes.h deleted file mode 100644 index dc3ed5098e..0000000000 --- a/arch/s390/kernel/kprobes.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -#ifndef _ARCH_S390_KPROBES_H -#define _ARCH_S390_KPROBES_H - -#include <linux/kprobes.h> - -DEFINE_INSN_CACHE_OPS(s390_insn); - -#endif diff --git a/arch/s390/kernel/kprobes_insn_page.S b/arch/s390/kernel/kprobes_insn_page.S deleted file mode 100644 index 0fe4d725e9..0000000000 --- a/arch/s390/kernel/kprobes_insn_page.S +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#include <linux/linkage.h> - -/* - * insn_page is a special 4k aligned dummy function for kprobes. 
- * It will contain all kprobed instructions that are out-of-line executed. - * The page must be within the kernel image to guarantee that the - * out-of-line instructions are within 2GB distance of their original - * location. Using a dummy function ensures that the insn_page is within - * the text section of the kernel and mapped read-only/executable from - * the beginning on, thus avoiding to split large mappings if the page - * would be in the data section instead. - */ - .section .kprobes.text, "ax" - .balign 4096 -SYM_CODE_START(kprobes_insn_page) - .rept 2048 - .word 0x07fe - .endr -SYM_CODE_END(kprobes_insn_page) - .previous diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c index 42215f9404..91e207b503 100644 --- a/arch/s390/kernel/module.c +++ b/arch/s390/kernel/module.c @@ -21,6 +21,7 @@ #include <linux/moduleloader.h> #include <linux/bug.h> #include <linux/memory.h> +#include <linux/execmem.h> #include <asm/alternative.h> #include <asm/nospec-branch.h> #include <asm/facility.h> @@ -36,47 +37,10 @@ #define PLT_ENTRY_SIZE 22 -static unsigned long get_module_load_offset(void) -{ - static DEFINE_MUTEX(module_kaslr_mutex); - static unsigned long module_load_offset; - - if (!kaslr_enabled()) - return 0; - /* - * Calculate the module_load_offset the first time this code - * is called. Once calculated it stays the same until reboot. - */ - mutex_lock(&module_kaslr_mutex); - if (!module_load_offset) - module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE; - mutex_unlock(&module_kaslr_mutex); - return module_load_offset; -} - -void *module_alloc(unsigned long size) -{ - gfp_t gfp_mask = GFP_KERNEL; - void *p; - - if (PAGE_ALIGN(size) > MODULES_LEN) - return NULL; - p = __vmalloc_node_range(size, MODULE_ALIGN, - MODULES_VADDR + get_module_load_offset(), - MODULES_END, gfp_mask, PAGE_KERNEL, - VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK, - NUMA_NO_NODE, __builtin_return_address(0)); - if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { - vfree(p); - return NULL; - } - return p; -} - #ifdef CONFIG_FUNCTION_TRACER void module_arch_cleanup(struct module *mod) { - module_memfree(mod->arch.trampolines_start); + execmem_free(mod->arch.trampolines_start); } #endif @@ -510,7 +474,7 @@ static int module_alloc_ftrace_hotpatch_trampolines(struct module *me, size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size); numpages = DIV_ROUND_UP(size, PAGE_SIZE); - start = module_alloc(numpages * PAGE_SIZE); + start = execmem_alloc(EXECMEM_FTRACE, numpages * PAGE_SIZE); if (!start) return -ENOMEM; set_memory_rox((unsigned long)start, numpages); diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index c77382a673..230d010bac 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -31,6 +31,7 @@ #include <asm/crw.h> #include <asm/asm-offsets.h> #include <asm/pai.h> +#include <asm/vtime.h> struct mcck_struct { unsigned int kill_task : 1; diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c index d1b16d83e4..9b8c24ebb0 100644 --- a/arch/s390/kernel/nospec-branch.c +++ b/arch/s390/kernel/nospec-branch.c @@ -114,10 +114,10 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end) type = BRASL_EXPOLINE; /* brasl instruction */ else continue; - thunk = instr + (*(int *)(instr + 2)) * 2; + thunk = instr + (long)(*(int *)(instr + 2)) * 2; if (thunk[0] == 0xc6 && thunk[1] == 0x00) /* exrl %r0,<target-br> */ - br = thunk + (*(int *)(thunk + 2)) * 2; + br = thunk + (long)(*(int *)(thunk + 2)) * 2; else continue; if (br[0] != 0x07 
|| (br[1] & 0xf0) != 0xf0) diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index a801e6bd53..b695f980bb 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -15,8 +15,10 @@ #include <asm/checksum.h> #include <asm/abs_lowcore.h> #include <asm/os_info.h> +#include <asm/physmem_info.h> #include <asm/maccess.h> #include <asm/asm-offsets.h> +#include <asm/ipl.h> /* * OS info structure has to be page aligned @@ -43,9 +45,9 @@ void os_info_crashkernel_add(unsigned long base, unsigned long size) } /* - * Add OS info entry and update checksum + * Add OS info data entry and update checksum */ -void os_info_entry_add(int nr, void *ptr, u64 size) +void os_info_entry_add_data(int nr, void *ptr, u64 size) { os_info.entry[nr].addr = __pa(ptr); os_info.entry[nr].size = size; @@ -54,15 +56,36 @@ void os_info_entry_add(int nr, void *ptr, u64 size) } /* + * Add OS info value entry and update checksum + */ +void os_info_entry_add_val(int nr, u64 value) +{ + os_info.entry[nr].val = value; + os_info.entry[nr].size = 0; + os_info.entry[nr].csum = 0; + os_info.csum = os_info_csum(&os_info); +} + +/* * Initialize OS info structure and set lowcore pointer */ void __init os_info_init(void) { struct lowcore *abs_lc; + BUILD_BUG_ON(sizeof(struct os_info) != PAGE_SIZE); os_info.version_major = OS_INFO_VERSION_MAJOR; os_info.version_minor = OS_INFO_VERSION_MINOR; os_info.magic = OS_INFO_MAGIC; + os_info_entry_add_val(OS_INFO_IDENTITY_BASE, __identity_base); + os_info_entry_add_val(OS_INFO_KASLR_OFFSET, kaslr_offset()); + os_info_entry_add_val(OS_INFO_KASLR_OFF_PHYS, __kaslr_offset_phys); + os_info_entry_add_val(OS_INFO_VMEMMAP, (unsigned long)vmemmap); + os_info_entry_add_val(OS_INFO_AMODE31_START, AMODE31_START); + os_info_entry_add_val(OS_INFO_AMODE31_END, AMODE31_END); + os_info_entry_add_val(OS_INFO_IMAGE_START, (unsigned long)_stext); + os_info_entry_add_val(OS_INFO_IMAGE_END, (unsigned long)_end); + os_info_entry_add_val(OS_INFO_IMAGE_PHYS, __pa_symbol(_stext)); os_info.csum = os_info_csum(&os_info); abs_lc = get_abs_lowcore(); abs_lc->os_info = __pa(&os_info); @@ -125,7 +148,7 @@ static void os_info_old_init(void) if (os_info_init) return; - if (!oldmem_data.start) + if (!oldmem_data.start && !is_ipl_type_dump()) goto fail; if (copy_oldmem_kernel(&addr, __LC_OS_INFO, sizeof(addr))) goto fail; diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 41ed6e0f0a..6968be98af 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -428,7 +428,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset) case CPUMF_CTR_SET_CRYPTO: if (cpumf_ctr_info.csvn >= 1 && cpumf_ctr_info.csvn <= 5) ctrset_size = 16; - else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7) + else if (cpumf_ctr_info.csvn >= 6) ctrset_size = 20; break; case CPUMF_CTR_SET_EXT: @@ -556,25 +556,31 @@ static int cfdiag_diffctr(struct cpu_cf_events *cpuhw, unsigned long auth) struct cf_trailer_entry *trailer_start, *trailer_stop; struct cf_ctrset_entry *ctrstart, *ctrstop; size_t offset = 0; + int i; - auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1; - do { + for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) { ctrstart = (struct cf_ctrset_entry *)(cpuhw->start + offset); ctrstop = (struct cf_ctrset_entry *)(cpuhw->stop + offset); + /* Counter set not authorized */ + if (!(auth & cpumf_ctr_ctl[i])) + continue; + /* Counter set size zero was not saved */ + if (!cpum_cf_read_setsize(i)) + continue; + if (memcmp(ctrstop, ctrstart, 
sizeof(*ctrstop))) { pr_err_once("cpum_cf_diag counter set compare error " "in set %i\n", ctrstart->set); return 0; } - auth &= ~cpumf_ctr_ctl[ctrstart->set]; if (ctrstart->def == CF_DIAG_CTRSET_DEF) { cfdiag_diffctrset((u64 *)(ctrstart + 1), (u64 *)(ctrstop + 1), ctrstart->ctr); offset += ctrstart->ctr * sizeof(u64) + sizeof(*ctrstart); } - } while (ctrstart->def && auth); + } /* Save time_stamp from start of event in stop's trailer */ trailer_start = (struct cf_trailer_entry *)(cpuhw->start + offset); diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 0d64aafd15..e4a6bfc910 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -855,16 +855,11 @@ __init const struct attribute_group **cpumf_cf_event_group(void) } /* Determine version specific crypto set */ - switch (ci.csvn) { - case 1 ... 5: + csvn = none; + if (ci.csvn >= 1 && ci.csvn <= 5) csvn = cpumcf_svn_12345_pmu_event_attr; - break; - case 6 ... 7: + else if (ci.csvn >= 6) csvn = cpumcf_svn_67_pmu_event_attr; - break; - default: - csvn = none; - } /* Determine model-specific counter set(s) */ get_cpu_id(&cpu_id); diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index dd456b4758..d8740631df 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -86,11 +86,6 @@ void arch_release_task_struct(struct task_struct *tsk) int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - /* - * Save the floating-point or vector register state of the current - * task and set the TIF_FPU flag to lazy restore the FPU register - * state when returning to user space. - */ save_user_fpu_regs(); *dst = *src; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 7ecd27c62d..610e6f7945 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -146,10 +146,10 @@ static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31; static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31; unsigned long __bootdata_preserved(max_mappable); -unsigned long __bootdata(ident_map_size); struct physmem_info __bootdata(physmem_info); -unsigned long __bootdata_preserved(__kaslr_offset); +struct vm_layout __bootdata_preserved(vm_layout); +EXPORT_SYMBOL(vm_layout); int __bootdata_preserved(__kaslr_enabled); unsigned int __bootdata_preserved(zlib_dfltcc_support); EXPORT_SYMBOL(zlib_dfltcc_support); @@ -765,7 +765,7 @@ static void __init relocate_amode31_section(void) unsigned long amode31_size = __eamode31 - __samode31; long amode31_offset, *ptr; - amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31; + amode31_offset = AMODE31_START - (unsigned long)__samode31; pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); /* Move original AMODE31 section to the new one */ diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c index dc2355c623..50cbcbbaa0 100644 --- a/arch/s390/kernel/syscall.c +++ b/arch/s390/kernel/syscall.c @@ -38,33 +38,6 @@ #include "entry.h" -/* - * Perform the mmap() system call. Linux for S/390 isn't able to handle more - * than 5 system call parameters, so this system call uses a memory block - * for parameter passing. 
- */ - -struct s390_mmap_arg_struct { - unsigned long addr; - unsigned long len; - unsigned long prot; - unsigned long flags; - unsigned long fd; - unsigned long offset; -}; - -SYSCALL_DEFINE1(mmap2, struct s390_mmap_arg_struct __user *, arg) -{ - struct s390_mmap_arg_struct a; - int error = -EFAULT; - - if (copy_from_user(&a, arg, sizeof(a))) - goto out; - error = ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); -out: - return error; -} - #ifdef CONFIG_SYSVIPC /* * sys_ipc() is the de-multiplexer for the SysV IPC calls. diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile index fb85e79794..1bb78b9468 100644 --- a/arch/s390/kernel/syscalls/Makefile +++ b/arch/s390/kernel/syscalls/Makefile @@ -4,8 +4,8 @@ gen := arch/$(ARCH)/include/generated kapi := $(gen)/asm uapi := $(gen)/uapi/asm -syscall := $(srctree)/$(src)/syscall.tbl -systbl := $(srctree)/$(src)/syscalltbl +syscall := $(src)/syscall.tbl +systbl := $(src)/syscalltbl gen-y := $(kapi)/syscall_table.h kapi-hdrs-y := $(kapi)/unistd_nr.h diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index 07a942cab8..0107118276 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -464,3 +464,4 @@ 459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr 460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr 461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules +462 common mseal sys_mseal sys_mseal diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index fc07bc39e6..016993e9eb 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -21,6 +21,7 @@ /* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */ #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST int __bootdata_preserved(prot_virt_guest); +EXPORT_SYMBOL(prot_virt_guest); #endif /* @@ -181,36 +182,36 @@ int uv_convert_owned_from_secure(unsigned long paddr) } /* - * Calculate the expected ref_count for a page that would otherwise have no + * Calculate the expected ref_count for a folio that would otherwise have no * further pins. This was cribbed from similar functions in other places in * the kernel, but with some slight modifications. We know that a secure - * page can not be a huge page for example. + * folio can not be a large folio, for example. */ -static int expected_page_refs(struct page *page) +static int expected_folio_refs(struct folio *folio) { int res; - res = page_mapcount(page); - if (PageSwapCache(page)) { + res = folio_mapcount(folio); + if (folio_test_swapcache(folio)) { res++; - } else if (page_mapping(page)) { + } else if (folio_mapping(folio)) { res++; - if (page_has_private(page)) + if (folio->private) res++; } return res; } -static int make_page_secure(struct page *page, struct uv_cb_header *uvcb) +static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb) { int expected, cc = 0; - if (PageWriteback(page)) + if (folio_test_writeback(folio)) return -EAGAIN; - expected = expected_page_refs(page); - if (!page_ref_freeze(page, expected)) + expected = expected_folio_refs(folio); + if (!folio_ref_freeze(folio, expected)) return -EBUSY; - set_bit(PG_arch_1, &page->flags); + set_bit(PG_arch_1, &folio->flags); /* * If the UVC does not succeed or fail immediately, we don't want to * loop for long, or we might get stall notifications. 
@@ -220,9 +221,9 @@ static int make_page_secure(struct page *page, struct uv_cb_header *uvcb) * -EAGAIN and we let the callers deal with it. */ cc = __uv_call(0, (u64)uvcb); - page_ref_unfreeze(page, expected); + folio_ref_unfreeze(folio, expected); /* - * Return -ENXIO if the page was not mapped, -EINVAL for other errors. + * Return -ENXIO if the folio was not mapped, -EINVAL for other errors. * If busy or partially completed, return -EAGAIN. */ if (cc == UVC_CC_OK) @@ -277,7 +278,7 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb) bool local_drain = false; spinlock_t *ptelock; unsigned long uaddr; - struct page *page; + struct folio *folio; pte_t *ptep; int rc; @@ -306,15 +307,26 @@ again: if (!ptep) goto out; if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) { - page = pte_page(*ptep); + folio = page_folio(pte_page(*ptep)); + rc = -EINVAL; + if (folio_test_large(folio)) + goto unlock; rc = -EAGAIN; - if (trylock_page(page)) { + if (folio_trylock(folio)) { if (should_export_before_import(uvcb, gmap->mm)) - uv_convert_from_secure(page_to_phys(page)); - rc = make_page_secure(page, uvcb); - unlock_page(page); + uv_convert_from_secure(PFN_PHYS(folio_pfn(folio))); + rc = make_folio_secure(folio, uvcb); + folio_unlock(folio); } + + /* + * Once we drop the PTL, the folio may get unmapped and + * freed immediately. We need a temporary reference. + */ + if (rc == -EAGAIN) + folio_get(folio); } +unlock: pte_unmap_unlock(ptep, ptelock); out: mmap_read_unlock(gmap->mm); @@ -324,10 +336,11 @@ out: * If we are here because the UVC returned busy or partial * completion, this is just a useless check, but it is safe. */ - wait_on_page_writeback(page); + folio_wait_writeback(folio); + folio_put(folio); } else if (rc == -EBUSY) { /* - * If we have tried a local drain and the page refcount + * If we have tried a local drain and the folio refcount * still does not match our expected safe value, try with a * system wide drain. This is needed if the pagevecs holding * the page are on a different CPU. @@ -338,7 +351,7 @@ out: return -EAGAIN; } /* - * We are here if the page refcount does not match the + * We are here if the folio refcount does not match the * expected safe value. The main culprits are usually * pagevecs. With lru_add_drain() we drain the pagevecs * on the local CPU so that hopefully the refcount will diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index 4800d80dec..2c5afb88d2 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # List of files in the vdso -KCOV_INSTRUMENT := n - # Include the generic Makefile to check the built vdso. 
include $(srctree)/lib/vdso/Makefile obj-vdso32 = vdso_user_wrapper-32.o note-32.o @@ -34,19 +32,13 @@ obj-y += vdso32_wrapper.o targets += vdso32.lds CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) -# Disable gcov profiling, ubsan and kasan for VDSO code -GCOV_PROFILE := n -UBSAN_SANITIZE := n -KASAN_SANITIZE := n -KCSAN_SANITIZE := n - # Force dependency (incbin is bad) $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so quiet_cmd_vdso_and_check = VDSO $@ cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) -$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE +$(obj)/vdso32.so.dbg: $(obj)/vdso32.lds $(obj-vdso32) FORCE $(call if_changed,vdso_and_check) # strip rule for the .so file @@ -64,7 +56,7 @@ quiet_cmd_vdso32cc = VDSO32C $@ cmd_vdso32cc = $(CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index 2f2e4e9970..ba19c0ca7c 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # List of files in the vdso -KCOV_INSTRUMENT := n - # Include the generic Makefile to check the built vdso. include $(srctree)/lib/vdso/Makefile obj-vdso64 = vdso_user_wrapper.o note.o @@ -39,12 +37,6 @@ obj-y += vdso64_wrapper.o targets += vdso64.lds CPPFLAGS_vdso64.lds += -P -C -U$(ARCH) -# Disable gcov profiling, ubsan and kasan for VDSO code -GCOV_PROFILE := n -UBSAN_SANITIZE := n -KASAN_SANITIZE := n -KCSAN_SANITIZE := n - # Force dependency (incbin is bad) $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so @@ -52,7 +44,7 @@ quiet_cmd_vdso_and_check = VDSO $@ cmd_vdso_and_check = $(cmd_ld); $(cmd_vdso_check) # link rule for the .so file, .lds has to be first -$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE +$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) $(obj-cvdso64) FORCE $(call if_changed,vdso_and_check) # strip rule for the .so file @@ -74,7 +66,7 @@ quiet_cmd_vdso64cc = VDSO64C $@ cmd_vdso64cc = $(CC) $(c_flags) -c -o $@ $< # Generate VDSO offsets using helper script -gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh +gen-vdsosym := $(src)/gen_vdso_offsets.sh quiet_cmd_vdsosym = VDSOSYM $@ cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ diff --git a/arch/s390/kernel/vmcore_info.c b/arch/s390/kernel/vmcore_info.c index d296dfc221..23f7d7619a 100644 --- a/arch/s390/kernel/vmcore_info.c +++ b/arch/s390/kernel/vmcore_info.c @@ -14,7 +14,9 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31); vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31); + vmcoreinfo_append_str("IDENTITYBASE=%lx\n", __identity_base); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); + vmcoreinfo_append_str("KERNELOFFPHYS=%lx\n", __kaslr_offset_phys); abs_lc = get_abs_lowcore(); abs_lc->vmcore_info = paddr_vmcoreinfo_note(); put_abs_lowcore(abs_lc); diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index fb9b32f936..a1ce3925ec 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -39,7 +39,7 @@ PHDRS { SECTIONS { - . = 0x100000; + . 
= __START_KERNEL; .text : { _stext = .; /* Start of text section */ _text = .; /* Text and read-only data */ @@ -183,7 +183,7 @@ SECTIONS .amode31.data : { *(.amode31.data) } - . = ALIGN(PAGE_SIZE); + . = _samode31 + AMODE31_SIZE; _eamode31 = .; /* early.c uses stsi, which requires page aligned data. */ @@ -192,31 +192,6 @@ SECTIONS PERCPU_SECTION(0x100) -#ifdef CONFIG_PIE_BUILD - .dynsym ALIGN(8) : { - __dynsym_start = .; - *(.dynsym) - __dynsym_end = .; - } - .rela.dyn ALIGN(8) : { - __rela_dyn_start = .; - *(.rela*) - __rela_dyn_end = .; - } - .dynamic ALIGN(8) : { - *(.dynamic) - } - .dynstr ALIGN(8) : { - *(.dynstr) - } - .hash ALIGN(8) : { - *(.hash) - } - .gnu.hash ALIGN(8) : { - *(.gnu.hash) - } -#endif - . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ @@ -230,7 +205,6 @@ SECTIONS * it should match struct vmlinux_info */ .vmlinux.info 0 (INFO) : { - QUAD(_stext) /* default_lma */ QUAD(startup_continue) /* entry */ QUAD(__bss_start - _stext) /* image_size */ QUAD(__bss_stop - __bss_start) /* bss_size */ @@ -239,14 +213,8 @@ SECTIONS QUAD(__boot_data_preserved_start) /* bootdata_preserved_off */ QUAD(__boot_data_preserved_end - __boot_data_preserved_start) /* bootdata_preserved_size */ -#ifdef CONFIG_PIE_BUILD - QUAD(__dynsym_start) /* dynsym_start */ - QUAD(__rela_dyn_start) /* rela_dyn_start */ - QUAD(__rela_dyn_end) /* rela_dyn_end */ -#else QUAD(__got_start) /* got_start */ QUAD(__got_end) /* got_end */ -#endif QUAD(_eamode31 - _samode31) /* amode31_size */ QUAD(init_mm) QUAD(swapper_pg_dir) @@ -282,12 +250,10 @@ SECTIONS *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) } ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") -#ifndef CONFIG_PIE_BUILD .rela.dyn : { *(.rela.*) *(.rela_*) } ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!") -#endif /* Sections to be discarded */ DISCARDS diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 24a18e5ef6..ffc1db0cbf 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -33,14 +33,6 @@ static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; static DEFINE_PER_CPU(u64, mt_scaling_jiffies); -static inline u64 get_vtimer(void) -{ - u64 timer; - - asm volatile("stpt %0" : "=Q" (timer)); - return timer; -} - static inline void set_vtimer(u64 expires) { u64 timer; @@ -223,7 +215,7 @@ static u64 vtime_delta(void) { u64 timer = S390_lowcore.last_update_timer; - S390_lowcore.last_update_timer = get_vtimer(); + S390_lowcore.last_update_timer = get_cpu_timer(); return timer - S390_lowcore.last_update_timer; } |