Diffstat (limited to 'arch/sh/kernel/vsyscall')
-rw-r--r--  arch/sh/kernel/vsyscall/.gitignore            |  1
-rw-r--r--  arch/sh/kernel/vsyscall/Makefile              | 37
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-note.S       | 26
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-sigreturn.S  | 75
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-syscall.S    | 11
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall-trapa.S      | 40
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c            | 96
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.lds.S        | 85
8 files changed, 371 insertions(+), 0 deletions(-)
diff --git a/arch/sh/kernel/vsyscall/.gitignore b/arch/sh/kernel/vsyscall/.gitignore
new file mode 100644
index 000000000..40836ad90
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/.gitignore
@@ -0,0 +1 @@
+vsyscall.lds
diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile
new file mode 100644
index 000000000..5db6579bc
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/Makefile
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += vsyscall.o vsyscall-syscall.o vsyscall-syms.o
+
+$(obj)/vsyscall-syscall.o: \
+	$(foreach F,trapa,$(obj)/vsyscall-$F.so)
+
+# Teach kbuild about targets
+targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so)
+targets += vsyscall-note.o vsyscall.lds vsyscall-dummy.o
+
+# The DSO images are built using a special linker script
+quiet_cmd_syscall = SYSCALL $@
+      cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \
+			  -Wl,-T,$(filter-out FORCE,$^) -o $@
+
+export CPPFLAGS_vsyscall.lds += -P -C -Ush
+
+vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 \
+		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+
+SYSCFLAGS_vsyscall-trapa.so	= $(vsyscall-flags)
+
+$(obj)/vsyscall-trapa.so: \
+$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE
+	$(call if_changed,syscall)
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld -R we can then refer to
+# these symbols in the kernel code rather than hand-coded addresses.
+SYSCFLAGS_vsyscall-dummy.o = -r
+$(obj)/vsyscall-dummy.o: $(src)/vsyscall.lds \
+			$(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE
+	$(call if_changed,syscall)
+
+LDFLAGS_vsyscall-syms.o := -r -R
+$(obj)/vsyscall-syms.o: $(obj)/vsyscall-dummy.o FORCE
+	$(call if_changed,ld)
diff --git a/arch/sh/kernel/vsyscall/vsyscall-note.S b/arch/sh/kernel/vsyscall/vsyscall-note.S
new file mode 100644
index 000000000..bb350918b
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-note.S
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+
+#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type)		\
+	.section name, flags;					\
+	.balign 4;						\
+	.long 1f - 0f;		/* name length */		\
+	.long 3f - 2f;		/* data length */		\
+	.long type;		/* note type */			\
+0:	.asciz vendor;		/* vendor name */		\
+1:	.balign 4;						\
+2:
+
+#define ASM_ELF_NOTE_END					\
+3:	.balign 4;		/* pad out section */		\
+	.previous
+
+	ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0)
+	.long LINUX_VERSION_CODE
+	ASM_ELF_NOTE_END
diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
new file mode 100644
index 000000000..bece5fa73
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/unistd.h>
+
+	.text
+	.balign 32
+	.globl __kernel_sigreturn
+	.type __kernel_sigreturn,@function
+__kernel_sigreturn:
+.LSTART_sigreturn:
+	mov.w	1f, r3
+	trapa	#0x10
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+
+1:	.short	__NR_sigreturn
+.LEND_sigreturn:
+	.size __kernel_sigreturn,.-.LSTART_sigreturn
+
+	.balign 32
+	.globl __kernel_rt_sigreturn
+	.type __kernel_rt_sigreturn,@function
+__kernel_rt_sigreturn:
+.LSTART_rt_sigreturn:
+	mov.w	1f, r3
+	trapa	#0x10
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+	or	r0, r0
+
+1:	.short	__NR_rt_sigreturn
+.LEND_rt_sigreturn:
+	.size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+	.previous
+
+	.section .eh_frame,"a",@progbits
+.LCIE1:
+	.ualong	.LCIE1_end - .LCIE1_start
+.LCIE1_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zRS"		/* NUL-terminated augmentation string */
+	.uleb128	0x1	/* Code alignment factor */
+	.sleb128	-4	/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+	.uleb128	0x1	/* Augmentation length and data */
+	.byte	0x1b		/* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc, 0xf, 0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
+
+	.align 2
+.LCIE1_end:
+
+	.ualong	.LFDE0_end-.LFDE0_start	/* Length FDE0 */
+.LFDE0_start:
+	.ualong	.LFDE0_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_sigreturn-.LSTART_sigreturn
+	.uleb128	0		/* Augmentation */
+	.align 2
+.LFDE0_end:
+
+	.ualong	.LFDE1_end-.LFDE1_start	/* Length FDE1 */
+.LFDE1_start:
+	.ualong	.LFDE1_start-.LCIE1	/* CIE pointer */
+	.ualong	.LSTART_rt_sigreturn-.	/* PC-relative start address */
+	.ualong	.LEND_rt_sigreturn-.LSTART_rt_sigreturn
+	.uleb128	0		/* Augmentation */
+	.align 2
+.LFDE1_end:
+
+	.previous
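The __kernel_sigreturn/__kernel_rt_sigreturn stubs above are what the Makefile's vsyscall-syms.o (built with ld -R) exists for: kernel code can take the stubs' addresses symbolically instead of hard-coding offsets into the DSO image. A minimal kernel-side sketch of that usage, assuming a VDSO_SYM()-style helper along the lines of the SH asm/elf.h definitions; the function below is illustrative only and not part of this diff:

	/*
	 * Illustrative only: resolve the signal trampoline inside the
	 * per-process vDSO mapping.  VDSO_BASE/VDSO_SYM are assumed helpers;
	 * context.vdso is set by arch_setup_additional_pages() in vsyscall.c
	 * further down in this diff.
	 */
	#define VDSO_BASE	((unsigned long)current->mm->context.vdso)
	#define VDSO_SYM(x)	(VDSO_BASE + (unsigned long)(x))

	extern char __kernel_sigreturn;		/* resolved via vsyscall-syms.o */
	extern char __kernel_rt_sigreturn;

	static unsigned long sigreturn_trampoline(int rt)
	{
		if (!current->mm->context.vdso)
			return 0;	/* caller falls back to an on-stack trampoline */

		return rt ? VDSO_SYM(&__kernel_rt_sigreturn)
			  : VDSO_SYM(&__kernel_sigreturn);
	}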
diff --git a/arch/sh/kernel/vsyscall/vsyscall-syscall.S b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
new file mode 100644
index 000000000..2aeaa2dde
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-syscall.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/init.h>
+
+__INITDATA
+
+	.globl vsyscall_trapa_start, vsyscall_trapa_end
+vsyscall_trapa_start:
+	.incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so"
+vsyscall_trapa_end:
+
+__FINIT
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
new file mode 100644
index 000000000..854ea3235
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+	.text
+	.globl __kernel_vsyscall
+	.type __kernel_vsyscall,@function
+__kernel_vsyscall:
+.LSTART_vsyscall:
+	trapa #0x10
+	nop
+.LEND_vsyscall:
+	.size __kernel_vsyscall,.-.LSTART_vsyscall
+	.previous
+
+	.section .eh_frame,"a",@progbits
+.LCIE:
+	.ualong	.LCIE_end - .LCIE_start
+.LCIE_start:
+	.ualong	0		/* CIE ID */
+	.byte	0x1		/* Version number */
+	.string	"zR"		/* NUL-terminated augmentation string */
+	.uleb128	0x1	/* Code alignment factor */
+	.sleb128	-4	/* Data alignment factor */
+	.byte	0x11		/* Return address register column */
+	.uleb128	0x1	/* Augmentation length and data */
+	.byte	0x1b		/* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */
+	.byte	0xc,0xf,0x0	/* DW_CFA_def_cfa: r15 ofs 0 */
+	.align 2
+.LCIE_end:
+
+	.ualong	.LFDE_end-.LFDE_start	/* Length FDE */
+.LFDE_start:
+	.ualong	.LFDE_start-.LCIE	/* CIE pointer */
+	.ualong	.LSTART_vsyscall-.	/* PC-relative start address */
+	.ualong	.LEND_vsyscall-.LSTART_vsyscall
+	.uleb128	0		/* Augmentation */
+	.align 2
+.LFDE_end:
+	.previous
+
+/* Get the common code for the sigreturn entry points */
+#include "vsyscall-sigreturn.S"
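Since the __kernel_vsyscall stub is only reachable through the runtime mapping set up in vsyscall.c below, a userspace caller first has to ask whether a vDSO was mapped at all. A small sketch, assuming the base address is advertised through AT_SYSINFO_EHDR in the auxiliary vector (as on most vDSO-capable architectures); locating __kernel_vsyscall itself would additionally require walking the DSO's dynamic symbol table. Not part of this diff; SH is 32-bit, hence the Elf32 types:

	#include <stdio.h>
	#include <string.h>
	#include <elf.h>
	#include <sys/auxv.h>

	int main(void)
	{
		unsigned long base = getauxval(AT_SYSINFO_EHDR);

		if (!base) {
			puts("no vDSO mapping (e.g. booted with vdso=0)");
			return 0;
		}

		/* The mapping begins with an ordinary ELF header. */
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)base;
		printf("vDSO mapped at %#lx (%s)\n", base,
		       memcmp(ehdr->e_ident, ELFMAG, SELFMAG) == 0 ?
		       "valid ELF" : "bad magic");
		return 0;
	}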
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
new file mode 100644
index 000000000..cc0cc5b4f
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -0,0 +1,96 @@
+/*
+ * arch/sh/kernel/vsyscall/vsyscall.c
+ *
+ * Copyright (C) 2006 Paul Mundt
+ *
+ * vDSO randomization
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+	vdso_enabled = simple_strtoul(s, NULL, 0);
+	return 1;
+}
+__setup("vdso=", vdso_setup);
+
+/*
+ * These symbols are defined by vsyscall.o to mark the bounds
+ * of the ELF DSO images included therein.
+ */
+extern const char vsyscall_trapa_start, vsyscall_trapa_end;
+static struct page *syscall_pages[1];
+
+int __init vsyscall_init(void)
+{
+	void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+	syscall_pages[0] = virt_to_page(syscall_page);
+
+	/*
+	 * XXX: Map this page to a fixmap entry if we get around
+	 * to adding the page to ELF core dumps
+	 */
+
+	memcpy(syscall_page,
+	       &vsyscall_trapa_start,
+	       &vsyscall_trapa_end - &vsyscall_trapa_start);
+
+	return 0;
+}
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret;
+
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
+	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+	if (IS_ERR_VALUE(addr)) {
+		ret = addr;
+		goto up_fail;
+	}
+
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+				      VM_READ | VM_EXEC |
+				      VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+				      syscall_pages);
+	if (unlikely(ret))
+		goto up_fail;
+
+	current->mm->context.vdso = (void *)addr;
+
+up_fail:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+		return "[vdso]";
+
+	return NULL;
+}
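install_special_mapping() plus arch_vma_name() above are also what make the page visible as "[vdso]" in /proc/<pid>/maps, which gives a quick way to confirm the mapping from userspace. A minimal sketch, not part of this diff:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[256];
		FILE *maps = fopen("/proc/self/maps", "r");

		if (!maps)
			return 1;

		/* Print the entry arch_vma_name() tags, e.g. "... r-xp ... [vdso]" */
		while (fgets(line, sizeof(line), maps))
			if (strstr(line, "[vdso]"))
				fputs(line, stdout);

		fclose(maps);
		return 0;
	}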
diff --git a/arch/sh/kernel/vsyscall/vsyscall.lds.S b/arch/sh/kernel/vsyscall/vsyscall.lds.S
new file mode 100644
index 000000000..e3582e03c
--- /dev/null
+++ b/arch/sh/kernel/vsyscall/vsyscall.lds.S
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linker script for vsyscall DSO. The vsyscall page is an ELF shared
+ * object prelinked to its virtual address, and with only one read-only
+ * segment (that fits in one page). This script controls its layout.
+ */
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux")
+#else
+OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux")
+#endif
+OUTPUT_ARCH(sh)
+
+/* The ELF entry point can be used to set the AT_SYSINFO value. */
+ENTRY(__kernel_vsyscall);
+
+SECTIONS
+{
+	. = SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	/*
+	 * This linker script is used both with -r and with -shared.
+	 * For the layouts to match, we need to skip more than enough
+	 * space for the dynamic symbol table et al. If this amount
+	 * is insufficient, ld -shared will barf. Just increase it here.
+	 */
+	. = 0x400;
+
+	.text		: { *(.text) }			:text	=0x90909090
+	.note		: { *(.note.*) }		:text	:note
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: {
+		KEEP (*(.eh_frame))
+		LONG (0)
+	}						:text
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+	.useless	: {
+		*(.got.plt) *(.got)
+		*(.data .data.* .gnu.linkonce.d.*)
+		*(.dynbss)
+		*(.bss .bss.* .gnu.linkonce.b.*)
+	}						:text
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME	0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD	FILEHDR PHDRS	FLAGS(5);	/* PF_R|PF_X */
+	dynamic		PT_DYNAMIC		FLAGS(4);	/* PF_R */
+	note		PT_NOTE			FLAGS(4);	/* PF_R */
+	eh_frame_hdr	PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+	LINUX_2.6 {
+	global:
+		__kernel_vsyscall;
+		__kernel_sigreturn;
+		__kernel_rt_sigreturn;
+
+	local: *;
+	};
+}
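The PHDRS block in the linker script above is what keeps the whole DSO in a single read+execute PT_LOAD segment. One way to check the result is to walk the program headers of the mapped image; a sketch, again assuming the base comes from AT_SYSINFO_EHDR as in the earlier example, and not part of this diff:

	#include <stdio.h>
	#include <elf.h>
	#include <sys/auxv.h>

	int main(void)
	{
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)getauxval(AT_SYSINFO_EHDR);
		int i, loads = 0;

		if (!ehdr)
			return 1;

		/* The program header table sits at e_phoff from the mapped ELF header. */
		Elf32_Phdr *phdr = (Elf32_Phdr *)((char *)ehdr + ehdr->e_phoff);
		for (i = 0; i < ehdr->e_phnum; i++)
			if (phdr[i].p_type == PT_LOAD)
				loads++;

		printf("vDSO PT_LOAD segments: %d (expect 1)\n", loads);
		return 0;
	}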