author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-06-19 21:00:30 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-06-19 21:00:30 +0000 |
commit | e54def4ad8144ab15f826416e2e0f290ef1901b4 (patch) | |
tree | 583f8d4bd95cd67c44ff37b878a7eddfca9ab97a /arch/x86/entry/vdso/vma.c | |
parent | Adding upstream version 6.8.12. (diff) | |
download | linux-e54def4ad8144ab15f826416e2e0f290ef1901b4.tar.xz linux-e54def4ad8144ab15f826416e2e0f290ef1901b4.zip | |
Adding upstream version 6.9.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/entry/vdso/vma.c')
-rw-r--r-- | arch/x86/entry/vdso/vma.c | 57 |
---|---|---|

1 file changed, 2 insertions(+), 55 deletions(-)
```diff
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 7645730dc2..6d83ceb7f1 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -274,59 +274,6 @@ up_fail:
 	return ret;
 }
 
-#ifdef CONFIG_X86_64
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset. This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top. This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-	unsigned long addr, end;
-	unsigned offset;
-
-	/*
-	 * Round up the start address. It can start out unaligned as a result
-	 * of stack start randomization.
-	 */
-	start = PAGE_ALIGN(start);
-
-	/* Round the lowest possible end address up to a PMD boundary. */
-	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-	if (end >= DEFAULT_MAP_WINDOW)
-		end = DEFAULT_MAP_WINDOW;
-	end -= len;
-
-	if (end > start) {
-		offset = get_random_u32_below(((end - start) >> PAGE_SHIFT) + 1);
-		addr = start + (offset << PAGE_SHIFT);
-	} else {
-		addr = start;
-	}
-
-	/*
-	 * Forcibly align the final address in case we have a hardware
-	 * issue that requires alignment for performance reasons.
-	 */
-	addr = align_vdso_addr(addr);
-
-	return addr;
-}
-
-static int map_vdso_randomized(const struct vdso_image *image)
-{
-	unsigned long addr = vdso_addr(current->mm->start_stack, image->size-image->sym_vvar_start);
-
-	return map_vdso(image, addr);
-}
-#endif
-
 int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
 	struct mm_struct *mm = current->mm;
@@ -369,7 +316,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	if (!vdso64_enabled)
 		return 0;
 
-	return map_vdso_randomized(&vdso_image_64);
+	return map_vdso(&vdso_image_64, 0);
 }
 
 #ifdef CONFIG_COMPAT
@@ -380,7 +327,7 @@ int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
 	if (x32) {
 		if (!vdso64_enabled)
 			return 0;
-		return map_vdso_randomized(&vdso_image_x32);
+		return map_vdso(&vdso_image_x32, 0);
 	}
 #endif
 #ifdef CONFIG_IA32_EMULATION
```
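For context, the deleted logic computed a randomized vDSO base just above the (already randomized) stack top, constrained so that the mapping stayed within the same 2 MiB PMD region. Below is a minimal userspace sketch of that placement math, assuming x86-64 constants: `random_below()` is a stand-in for the kernel's `get_random_u32_below()`, the `align_vdso_addr()` hardware-alignment step is omitted, and the `stack_top` value in `main()` is a made-up example.

```c
/*
 * Hypothetical userspace reconstruction of the removed vdso_addr()
 * placement math. Kernel constants are stubbed with their x86-64
 * values; this is an illustration, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT         12
#define PAGE_SIZE          (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)      (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PMD_SIZE           (1UL << 21)              /* 2 MiB */
#define PMD_MASK           (~(PMD_SIZE - 1))
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)

/* Stand-in for get_random_u32_below(); rand() is enough for a demo. */
static unsigned int random_below(unsigned int ceil)
{
	return (unsigned int)rand() % ceil;
}

static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned long addr, end;

	/* Stack start randomization can leave the top unaligned. */
	start = PAGE_ALIGN(start);

	/* Lowest possible end address, rounded up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= DEFAULT_MAP_WINDOW)
		end = DEFAULT_MAP_WINDOW;
	end -= len;

	if (end > start) {
		/* Pick a random page-aligned base in [start, end]. */
		unsigned long pages = ((end - start) >> PAGE_SHIFT) + 1;
		addr = start + ((unsigned long)random_below((unsigned int)pages)
				<< PAGE_SHIFT);
	} else {
		addr = start;
	}

	return addr;
}

int main(void)
{
	unsigned long stack_top = 0x7ffd1234a000UL; /* made-up stack top */

	srand((unsigned int)time(NULL));
	printf("vdso would map at %#lx\n",
	       vdso_addr(stack_top, (unsigned int)(3 * PAGE_SIZE)));
	return 0;
}
```

After this change, `arch_setup_additional_pages()` passes `0` to `map_vdso()`, which hands address selection to the generic `get_unmapped_area()` path, so the vDSO is placed like any other mapping in the mmap region (still randomized under ASLR) instead of being pinned just above the stack.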