author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:56:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 02:56:35 +0000
commit     eba0cfa6b0bef4f2e73c8630a7efa3944df8b0f8 (patch)
tree       74c37eede1f0634cc5de1c63c934edaa1630c6bc /kexec/arch/arm64
parent     Initial commit. (diff)
Adding upstream version 1:2.0.27. (upstream/1%2.0.27, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'kexec/arch/arm64')
-rw-r--r--  kexec/arch/arm64/Makefile                |   50
-rw-r--r--  kexec/arch/arm64/crashdump-arm64.c       |  248
-rw-r--r--  kexec/arch/arm64/crashdump-arm64.h       |   29
-rw-r--r--  kexec/arch/arm64/image-header.h          |  147
-rw-r--r--  kexec/arch/arm64/include/arch/options.h  |   43
-rw-r--r--  kexec/arch/arm64/iomem.h                 |   10
-rw-r--r--  kexec/arch/arm64/kexec-arm64.c           | 1365
-rw-r--r--  kexec/arch/arm64/kexec-arm64.h           |   84
-rw-r--r--  kexec/arch/arm64/kexec-elf-arm64.c       |  170
-rw-r--r--  kexec/arch/arm64/kexec-image-arm64.c     |  119
-rw-r--r--  kexec/arch/arm64/kexec-uImage-arm64.c    |   52
-rw-r--r--  kexec/arch/arm64/kexec-vmlinuz-arm64.c   |  110
12 files changed, 2427 insertions(+), 0 deletions(-)
diff --git a/kexec/arch/arm64/Makefile b/kexec/arch/arm64/Makefile
new file mode 100644
index 0000000..59212f1
--- /dev/null
+++ b/kexec/arch/arm64/Makefile
@@ -0,0 +1,50 @@
+
+arm64_FS2DT += kexec/fs2dt.c
+arm64_FS2DT_INCLUDE += \
+ -include $(srcdir)/kexec/arch/arm64/crashdump-arm64.h \
+ -include $(srcdir)/kexec/arch/arm64/kexec-arm64.h
+
+arm64_DT_OPS += kexec/dt-ops.c
+
+arm64_MEM_REGIONS = kexec/mem_regions.c
+
+arm64_CPPFLAGS += -I $(srcdir)/kexec/
+
+arm64_KEXEC_SRCS += \
+ kexec/arch/arm64/crashdump-arm64.c \
+ kexec/arch/arm64/kexec-arm64.c \
+ kexec/arch/arm64/kexec-elf-arm64.c \
+ kexec/arch/arm64/kexec-uImage-arm64.c \
+ kexec/arch/arm64/kexec-image-arm64.c \
+ kexec/arch/arm64/kexec-vmlinuz-arm64.c
+
+arm64_UIMAGE = kexec/kexec-uImage.c
+
+arm64_ARCH_REUSE_INITRD =
+arm64_ADD_SEGMENT =
+arm64_VIRT_TO_PHYS =
+arm64_PHYS_TO_VIRT =
+
+dist += $(arm64_KEXEC_SRCS) \
+ kexec/arch/arm64/include/arch/options.h \
+ kexec/arch/arm64/crashdump-arm64.h \
+ kexec/arch/arm64/image-header.h \
+ kexec/arch/arm64/iomem.h \
+ kexec/arch/arm64/kexec-arm64.h \
+ kexec/arch/arm64/Makefile
+
+ifdef HAVE_LIBFDT
+
+LIBS += -lfdt
+
+else
+
+include $(srcdir)/kexec/libfdt/Makefile.libfdt
+
+libfdt_SRCS += $(LIBFDT_SRCS:%=kexec/libfdt/%)
+
+arm64_CPPFLAGS += -I$(srcdir)/kexec/libfdt
+
+arm64_KEXEC_SRCS += $(libfdt_SRCS)
+
+endif
diff --git a/kexec/arch/arm64/crashdump-arm64.c b/kexec/arch/arm64/crashdump-arm64.c
new file mode 100644
index 0000000..3098315
--- /dev/null
+++ b/kexec/arch/arm64/crashdump-arm64.c
@@ -0,0 +1,248 @@
+/*
+ * ARM64 crashdump.
+ * partly derived from arm implementation
+ *
+ * Copyright (c) 2014-2017 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <linux/elf.h>
+
+#include "kexec.h"
+#include "crashdump.h"
+#include "crashdump-arm64.h"
+#include "iomem.h"
+#include "kexec-arm64.h"
+#include "kexec-elf.h"
+#include "mem_regions.h"
+
+/* memory ranges of crashed kernel */
+static struct memory_ranges system_memory_rgns;
+
+/* memory range reserved for crashkernel */
+struct memory_range crash_reserved_mem[CRASH_MAX_RESERVED_RANGES];
+struct memory_ranges usablemem_rgns = {
+ .size = 0,
+ .max_size = CRASH_MAX_RESERVED_RANGES,
+ .ranges = crash_reserved_mem,
+};
+
+struct memory_range elfcorehdr_mem;
+
+static struct crash_elf_info elf_info = {
+ .class = ELFCLASS64,
+#if (__BYTE_ORDER == __LITTLE_ENDIAN)
+ .data = ELFDATA2LSB,
+#else
+ .data = ELFDATA2MSB,
+#endif
+ .machine = EM_AARCH64,
+};
+
+/*
+ * iomem_range_callback() - callback called for each iomem region
+ * @data: not used
+ * @nr: not used
+ * @str: name of the memory region
+ * @base: start address of the memory region
+ * @length: size of the memory region
+ *
+ * This function is called once for each memory region found in /proc/iomem.
+ * It locates System RAM and crashkernel reserved memory and stores them in
+ * the system_memory_rgns and usablemem_rgns variables, respectively.
+ */
+
+static int iomem_range_callback(void *UNUSED(data), int UNUSED(nr),
+ char *str, unsigned long long base,
+ unsigned long long length)
+{
+ if (strncmp(str, CRASH_KERNEL, strlen(CRASH_KERNEL)) == 0)
+ return mem_regions_alloc_and_add(&usablemem_rgns,
+ base, length, RANGE_RAM);
+ else if (strncmp(str, SYSTEM_RAM, strlen(SYSTEM_RAM)) == 0)
+ return mem_regions_alloc_and_add(&system_memory_rgns,
+ base, length, RANGE_RAM);
+ else if (strncmp(str, KERNEL_CODE, strlen(KERNEL_CODE)) == 0) {
+
+ unsigned long long kva_text = get_kernel_sym("_text");
+ unsigned long long kva_stext = get_kernel_sym("_stext");
+ unsigned long long kva_text_end = get_kernel_sym("__init_begin");
+
+ /*
+ * old: kernel_code.start = __pa_symbol(_text);
+ * new: kernel_code.start = __pa_symbol(_stext);
+ *
+		 * For compatibility, deduce by comparing the gap "__init_begin - _stext"
+		 * with the resource size of "Kernel code" in /proc/iomem
+ */
+ if (kva_text_end - kva_stext == length)
+ elf_info.kern_paddr_start = base - (kva_stext - kva_text);
+ else
+ elf_info.kern_paddr_start = base;
+ }
+ else if (strncmp(str, KERNEL_DATA, strlen(KERNEL_DATA)) == 0)
+ elf_info.kern_size = base + length - elf_info.kern_paddr_start;
+
+ return 0;
+}
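
For reference, each /proc/iomem line has the form "start-end : name", and the callback above simply dispatches on the name prefix. Below is a minimal, self-contained sketch of the same classification; the parsing loop stands in for kexec_iomem_for_each_line(), and the printed labels are arbitrary.

/* Sketch: classify /proc/iomem lines the way iomem_range_callback() does. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *fp = fopen("/proc/iomem", "r");

	if (!fp)
		return 1;

	while (fgets(line, sizeof(line), fp)) {
		unsigned long long start, end;
		int consumed;

		if (sscanf(line, "%llx-%llx : %n", &start, &end, &consumed) != 2)
			continue;

		if (!strncmp(line + consumed, "Crash kernel", 12))
			printf("crashkernel: %#llx-%#llx\n", start, end);
		else if (!strncmp(line + consumed, "System RAM", 10))
			printf("system RAM:  %#llx-%#llx\n", start, end);
	}

	fclose(fp);
	return 0;
}
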
+
+int is_crashkernel_mem_reserved(void)
+{
+ if (!usablemem_rgns.size)
+ kexec_iomem_for_each_line(NULL, iomem_range_callback, NULL);
+
+ return usablemem_rgns.size;
+}
+
+/*
+ * crash_get_memory_ranges() - read system physical memory
+ *
+ * Function reads through system physical memory and stores found memory
+ * regions in system_memory_ranges.
+ * Regions are sorted in ascending order.
+ *
+ * Returns 0 in case of success and a negative value otherwise.
+ */
+static int crash_get_memory_ranges(void)
+{
+ int i;
+
+ /*
+ * First read all memory regions that can be considered as
+ * system memory including the crash area.
+ */
+ if (!usablemem_rgns.size)
+ kexec_iomem_for_each_line(NULL, iomem_range_callback, NULL);
+
+ /* allow one or two regions for crash dump kernel */
+ if (!usablemem_rgns.size)
+ return -EINVAL;
+
+ dbgprint_mem_range("Reserved memory range",
+ usablemem_rgns.ranges, usablemem_rgns.size);
+
+ for (i = 0; i < usablemem_rgns.size; i++) {
+ if (mem_regions_alloc_and_exclude(&system_memory_rgns,
+ &crash_reserved_mem[i])) {
+ fprintf(stderr, "Cannot allocate memory for ranges\n");
+ return -ENOMEM;
+ }
+ }
+
+ /*
+ * Make sure that the memory regions are sorted.
+ */
+ mem_regions_sort(&system_memory_rgns);
+
+ dbgprint_mem_range("Coredump memory ranges",
+ system_memory_rgns.ranges, system_memory_rgns.size);
+
+ /*
+ * For additional kernel code/data segment.
+ * kern_paddr_start/kern_size are determined in iomem_range_callback
+ */
+ elf_info.kern_vaddr_start = get_kernel_sym("_text");
+ if (!elf_info.kern_vaddr_start)
+ elf_info.kern_vaddr_start = UINT64_MAX;
+
+ return 0;
+}
+
+/*
+ * load_crashdump_segments() - load the elf core header
+ * @info: kexec info structure
+ *
+ * This function creates and loads an additional segment of elf core header
+ * which is used to construct /proc/vmcore on the crash dump kernel.
+ *
+ * Return 0 in case of success and -1 in case of error.
+ */
+
+int load_crashdump_segments(struct kexec_info *info)
+{
+ unsigned long elfcorehdr;
+ unsigned long bufsz;
+ void *buf;
+ int err;
+
+ /*
+ * First fetch all the memory (RAM) ranges that we are going to
+ * pass to the crash dump kernel during panic.
+ */
+
+ err = crash_get_memory_ranges();
+
+ if (err)
+ return EFAILED;
+
+ get_page_offset((unsigned long *)&elf_info.page_offset);
+ dbgprintf("%s: page_offset: %016llx\n", __func__,
+ elf_info.page_offset);
+
+ err = crash_create_elf64_headers(info, &elf_info,
+ system_memory_rgns.ranges, system_memory_rgns.size,
+ &buf, &bufsz, ELF_CORE_HEADER_ALIGN);
+
+ if (err)
+ return EFAILED;
+
+ elfcorehdr = add_buffer_phys_virt(info, buf, bufsz, bufsz, 0,
+ crash_reserved_mem[usablemem_rgns.size - 1].start,
+ crash_reserved_mem[usablemem_rgns.size - 1].end,
+ -1, 0);
+
+ elfcorehdr_mem.start = elfcorehdr;
+ elfcorehdr_mem.end = elfcorehdr + bufsz - 1;
+
+ dbgprintf("%s: elfcorehdr 0x%llx-0x%llx\n", __func__,
+ elfcorehdr_mem.start, elfcorehdr_mem.end);
+
+ return 0;
+}
+
+/*
+ * e_entry and p_paddr are actually in virtual address space.
+ * Those values will be translated to physical addresses by using
+ * virt_to_phys() in add_segment().
+ * So let's fix up those values for later use so the memory base
+ * (arm64_mm.phys_offset) will be correctly replaced with
+ * crash_reserved_mem[usablemem_rgns.size - 1].start.
+ */
+void fixup_elf_addrs(struct mem_ehdr *ehdr)
+{
+ struct mem_phdr *phdr;
+ int i;
+
+ ehdr->e_entry += -arm64_mem.phys_offset +
+ crash_reserved_mem[usablemem_rgns.size - 1].start;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &ehdr->e_phdr[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+ phdr->p_paddr +=
+ (-arm64_mem.phys_offset +
+ crash_reserved_mem[usablemem_rgns.size - 1].start);
+ }
+}
+
+int get_crash_kernel_load_range(uint64_t *start, uint64_t *end)
+{
+ if (!usablemem_rgns.size)
+ kexec_iomem_for_each_line(NULL, iomem_range_callback, NULL);
+
+ if (!usablemem_rgns.size)
+ return -1;
+
+ *start = crash_reserved_mem[usablemem_rgns.size - 1].start;
+ *end = crash_reserved_mem[usablemem_rgns.size - 1].end;
+
+ return 0;
+}
diff --git a/kexec/arch/arm64/crashdump-arm64.h b/kexec/arch/arm64/crashdump-arm64.h
new file mode 100644
index 0000000..82fa69b
--- /dev/null
+++ b/kexec/arch/arm64/crashdump-arm64.h
@@ -0,0 +1,29 @@
+/*
+ * ARM64 crashdump.
+ *
+ * Copyright (c) 2014-2017 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef CRASHDUMP_ARM64_H
+#define CRASHDUMP_ARM64_H
+
+#include "kexec.h"
+
+#define CRASH_MAX_MEMORY_RANGES 32768
+
+/* The crash dump kernel supports at most two regions: the low region and the high region. */
+#define CRASH_MAX_RESERVED_RANGES 2
+
+extern struct memory_ranges usablemem_rgns;
+extern struct memory_range crash_reserved_mem[];
+extern struct memory_range elfcorehdr_mem;
+
+extern int load_crashdump_segments(struct kexec_info *info);
+extern void fixup_elf_addrs(struct mem_ehdr *ehdr);
+
+#endif /* CRASHDUMP_ARM64_H */
diff --git a/kexec/arch/arm64/image-header.h b/kexec/arch/arm64/image-header.h
new file mode 100644
index 0000000..26bb02f
--- /dev/null
+++ b/kexec/arch/arm64/image-header.h
@@ -0,0 +1,147 @@
+/*
+ * ARM64 binary image header.
+ */
+
+#if !defined(__ARM64_IMAGE_HEADER_H)
+#define __ARM64_IMAGE_HEADER_H
+
+#include <endian.h>
+#include <stdint.h>
+
+/**
+ * struct arm64_image_header - arm64 kernel image header.
+ *
+ * @pe_sig: Optional PE format 'MZ' signature.
+ * @branch_code: Reserved for instructions to branch to stext.
+ * @text_offset: The image load offset in LSB byte order.
+ * @image_size: An estimated size of the memory image, in LSB byte order.
+ * @flags: Bit flags in LSB byte order:
+ * Bit 0: Image byte order: 1=MSB.
+ *   Bits 1-2: Kernel page size: 1=4K, 2=16K, 3=64K.
+ * Bit 3: Image placement: 0=low.
+ * @reserved_1: Reserved.
+ * @magic: Magic number, "ARM\x64".
+ * @pe_header: Optional offset to a PE format header.
+ **/
+
+struct arm64_image_header {
+ uint8_t pe_sig[2];
+ uint16_t branch_code[3];
+ uint64_t text_offset;
+ uint64_t image_size;
+ uint64_t flags;
+ uint64_t reserved_1[3];
+ uint8_t magic[4];
+ uint32_t pe_header;
+};
+
+static const uint8_t arm64_image_magic[4] = {'A', 'R', 'M', 0x64U};
+static const uint8_t arm64_image_pe_sig[2] = {'M', 'Z'};
+static const uint8_t arm64_pe_machtype[6] = {'P','E', 0x0, 0x0, 0x64, 0xAA};
+static const uint64_t arm64_image_flag_be = (1UL << 0);
+static const uint64_t arm64_image_flag_page_size = (3UL << 1);
+static const uint64_t arm64_image_flag_placement = (1UL << 3);
+
+/**
+ * enum arm64_header_page_size
+ */
+
+enum arm64_header_page_size {
+ arm64_header_page_size_invalid = 0,
+ arm64_header_page_size_4k,
+ arm64_header_page_size_16k,
+ arm64_header_page_size_64k
+};
+
+/**
+ * arm64_header_check_magic - Helper to check the arm64 image header.
+ *
+ * Returns non-zero if header is OK.
+ */
+
+static inline int arm64_header_check_magic(const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (h->magic[0] == arm64_image_magic[0]
+ && h->magic[1] == arm64_image_magic[1]
+ && h->magic[2] == arm64_image_magic[2]
+ && h->magic[3] == arm64_image_magic[3]);
+}
+
+/**
+ * arm64_header_check_pe_sig - Helper to check the arm64 image header.
+ *
+ * Returns non-zero if 'MZ' signature is found.
+ */
+
+static inline int arm64_header_check_pe_sig(const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (h->pe_sig[0] == arm64_image_pe_sig[0]
+ && h->pe_sig[1] == arm64_image_pe_sig[1]);
+}
+
+/**
+ * arm64_header_check_msb - Helper to check the arm64 image header.
+ *
+ * Returns non-zero if the image was built as big endian.
+ */
+
+static inline int arm64_header_check_msb(const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (le64toh(h->flags) & arm64_image_flag_be) >> 0;
+}
+
+/**
+ * arm64_header_page_size
+ */
+
+static inline enum arm64_header_page_size arm64_header_page_size(
+ const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (le64toh(h->flags) & arm64_image_flag_page_size) >> 1;
+}
+
+/**
+ * arm64_header_placement
+ *
+ * Returns non-zero if the image has no physical placement restrictions.
+ */
+
+static inline int arm64_header_placement(const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (le64toh(h->flags) & arm64_image_flag_placement) >> 3;
+}
+
+static inline uint64_t arm64_header_text_offset(
+ const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return le64toh(h->text_offset);
+}
+
+static inline uint64_t arm64_header_image_size(
+ const struct arm64_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return le64toh(h->image_size);
+}
+
+#endif
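
A small usage sketch for the helpers above: read the first bytes of an Image file into the header struct and decode its fields. The "Image" path is only a placeholder.

/* Sketch: decode an arm64 Image header with the helpers from image-header.h. */
#include <stdio.h>
#include "image-header.h"

int main(void)
{
	struct arm64_image_header h;
	FILE *fp = fopen("Image", "rb");	/* placeholder path */

	if (!fp || fread(&h, sizeof(h), 1, fp) != 1)
		return 1;

	if (!arm64_header_check_magic(&h)) {
		fprintf(stderr, "not an arm64 Image\n");
		return 1;
	}

	printf("text_offset: %#llx\n",
	       (unsigned long long)arm64_header_text_offset(&h));
	printf("image_size:  %#llx\n",
	       (unsigned long long)arm64_header_image_size(&h));
	printf("big-endian:  %d\n", arm64_header_check_msb(&h));
	printf("page size:   %d (1=4K, 2=16K, 3=64K)\n",
	       (int)arm64_header_page_size(&h));

	fclose(fp);
	return 0;
}
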
diff --git a/kexec/arch/arm64/include/arch/options.h b/kexec/arch/arm64/include/arch/options.h
new file mode 100644
index 0000000..8c695f3
--- /dev/null
+++ b/kexec/arch/arm64/include/arch/options.h
@@ -0,0 +1,43 @@
+#if !defined(KEXEC_ARCH_ARM64_OPTIONS_H)
+#define KEXEC_ARCH_ARM64_OPTIONS_H
+
+#define OPT_APPEND ((OPT_MAX)+0)
+#define OPT_DTB ((OPT_MAX)+1)
+#define OPT_INITRD ((OPT_MAX)+2)
+#define OPT_REUSE_CMDLINE ((OPT_MAX)+3)
+#define OPT_SERIAL ((OPT_MAX)+4)
+#define OPT_ARCH_MAX ((OPT_MAX)+5)
+
+#define KEXEC_ARCH_OPTIONS \
+ KEXEC_OPTIONS \
+ { "append", 1, NULL, OPT_APPEND }, \
+ { "command-line", 1, NULL, OPT_APPEND }, \
+ { "dtb", 1, NULL, OPT_DTB }, \
+ { "initrd", 1, NULL, OPT_INITRD }, \
+ { "serial", 1, NULL, OPT_SERIAL }, \
+ { "ramdisk", 1, NULL, OPT_INITRD }, \
+ { "reuse-cmdline", 0, NULL, OPT_REUSE_CMDLINE }, \
+
+#define KEXEC_ARCH_OPT_STR KEXEC_OPT_STR /* Only accept long arch options. */
+#define KEXEC_ALL_OPTIONS KEXEC_ARCH_OPTIONS
+#define KEXEC_ALL_OPT_STR KEXEC_ARCH_OPT_STR
+
+static const char arm64_opts_usage[] __attribute__ ((unused)) =
+" --append=STRING Set the kernel command line to STRING.\n"
+" --command-line=STRING Set the kernel command line to STRING.\n"
+" --dtb=FILE Use FILE as the device tree blob.\n"
+" --initrd=FILE Use FILE as the kernel initial ramdisk.\n"
+" --serial=STRING Name of console used for purgatory printing. (e.g. ttyAMA0)\n"
+" --ramdisk=FILE Use FILE as the kernel initial ramdisk.\n"
+" --reuse-cmdline Use kernel command line from running system.\n";
+
+struct arm64_opts {
+ const char *command_line;
+ const char *dtb;
+ const char *initrd;
+ const char *console;
+};
+
+extern struct arm64_opts arm64_opts;
+
+#endif
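
For context, a typical warm-reboot load with these options looks something like kexec -l Image --dtb=board.dtb --initrd=initrd.img --append="console=ttyAMA0 root=/dev/vda1" followed by kexec -e; the file names and command line here are placeholders.
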
diff --git a/kexec/arch/arm64/iomem.h b/kexec/arch/arm64/iomem.h
new file mode 100644
index 0000000..d4864bb
--- /dev/null
+++ b/kexec/arch/arm64/iomem.h
@@ -0,0 +1,10 @@
+#ifndef IOMEM_H
+#define IOMEM_H
+
+#define SYSTEM_RAM "System RAM\n"
+#define KERNEL_CODE "Kernel code\n"
+#define KERNEL_DATA "Kernel data\n"
+#define CRASH_KERNEL "Crash kernel\n"
+#define IOMEM_RESERVED "reserved\n"
+
+#endif
diff --git a/kexec/arch/arm64/kexec-arm64.c b/kexec/arch/arm64/kexec-arm64.c
new file mode 100644
index 0000000..4a67b0d
--- /dev/null
+++ b/kexec/arch/arm64/kexec-arm64.c
@@ -0,0 +1,1365 @@
+/*
+ * ARM64 kexec.
+ */
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <errno.h>
+#include <getopt.h>
+#include <inttypes.h>
+#include <libfdt.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <linux/elf-em.h>
+#include <elf.h>
+#include <elf_info.h>
+
+#include <unistd.h>
+#include <syscall.h>
+#include <errno.h>
+#include <linux/random.h>
+
+#include "kexec.h"
+#include "kexec-arm64.h"
+#include "crashdump.h"
+#include "crashdump-arm64.h"
+#include "dt-ops.h"
+#include "fs2dt.h"
+#include "iomem.h"
+#include "kexec-syscall.h"
+#include "mem_regions.h"
+#include "arch/options.h"
+
+#define ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#define ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
+#define PROP_ADDR_CELLS "#address-cells"
+#define PROP_SIZE_CELLS "#size-cells"
+#define PROP_ELFCOREHDR "linux,elfcorehdr"
+#define PROP_USABLE_MEM_RANGE "linux,usable-memory-range"
+
+#define PAGE_OFFSET_36 ((0xffffffffffffffffUL) << 36)
+#define PAGE_OFFSET_39 ((0xffffffffffffffffUL) << 39)
+#define PAGE_OFFSET_42 ((0xffffffffffffffffUL) << 42)
+#define PAGE_OFFSET_47 ((0xffffffffffffffffUL) << 47)
+#define PAGE_OFFSET_48 ((0xffffffffffffffffUL) << 48)
+
+/* Global flag which indicates that we have tried reading
+ * PHYS_OFFSET from 'kcore' already.
+ */
+static bool try_read_phys_offset_from_kcore = false;
+
+/* Machine specific details. */
+static int va_bits = -1;
+static unsigned long page_offset;
+
+/* Global variables the core kexec routines expect. */
+
+unsigned char reuse_initrd;
+
+off_t initrd_base;
+off_t initrd_size;
+
+const struct arch_map_entry arches[] = {
+ { "aarch64", KEXEC_ARCH_ARM64 },
+ { "aarch64_be", KEXEC_ARCH_ARM64 },
+ { NULL, 0 },
+};
+
+struct file_type file_type[] = {
+ {"vmlinux", elf_arm64_probe, elf_arm64_load, elf_arm64_usage},
+ {"Image", image_arm64_probe, image_arm64_load, image_arm64_usage},
+ {"uImage", uImage_arm64_probe, uImage_arm64_load, uImage_arm64_usage},
+ {"vmlinuz", pez_arm64_probe, pez_arm64_load, pez_arm64_usage},
+};
+
+int file_types = sizeof(file_type) / sizeof(file_type[0]);
+
+/* arm64 global variables. */
+
+struct arm64_opts arm64_opts;
+struct arm64_mem arm64_mem = {
+ .phys_offset = arm64_mem_ngv,
+ .vp_offset = arm64_mem_ngv,
+};
+
+uint64_t get_phys_offset(void)
+{
+ assert(arm64_mem.phys_offset != arm64_mem_ngv);
+ return arm64_mem.phys_offset;
+}
+
+uint64_t get_vp_offset(void)
+{
+ assert(arm64_mem.vp_offset != arm64_mem_ngv);
+ return arm64_mem.vp_offset;
+}
+
+/**
+ * arm64_process_image_header - Process the arm64 image header.
+ *
+ * Make a guess that KERNEL_IMAGE_SIZE will be enough for older kernels.
+ */
+
+int arm64_process_image_header(const struct arm64_image_header *h)
+{
+#if !defined(KERNEL_IMAGE_SIZE)
+# define KERNEL_IMAGE_SIZE MiB(16)
+#endif
+
+ if (!arm64_header_check_magic(h))
+ return EFAILED;
+
+ if (h->image_size) {
+ arm64_mem.text_offset = arm64_header_text_offset(h);
+ arm64_mem.image_size = arm64_header_image_size(h);
+ } else {
+ /* For 3.16 and older kernels. */
+ arm64_mem.text_offset = 0x80000;
+ arm64_mem.image_size = KERNEL_IMAGE_SIZE;
+ fprintf(stderr,
+ "kexec: %s: Warning: Kernel image size set to %lu MiB.\n"
+			"  Please verify compatibility with the loaded kernel.\n",
+ __func__, KERNEL_IMAGE_SIZE / 1024UL / 1024UL);
+ }
+
+ return 0;
+}
+
+void arch_usage(void)
+{
+ printf(arm64_opts_usage);
+}
+
+int arch_process_options(int argc, char **argv)
+{
+ static const char short_options[] = KEXEC_OPT_STR "";
+ static const struct option options[] = {
+ KEXEC_ARCH_OPTIONS
+ { 0 }
+ };
+ int opt;
+ char *cmdline = NULL;
+ const char *append = NULL;
+ int do_kexec_file_syscall = 0;
+
+ for (opt = 0; opt != -1; ) {
+ opt = getopt_long(argc, argv, short_options, options, 0);
+
+ switch (opt) {
+ case OPT_APPEND:
+ append = optarg;
+ break;
+ case OPT_REUSE_CMDLINE:
+ cmdline = get_command_line();
+ break;
+ case OPT_DTB:
+ arm64_opts.dtb = optarg;
+ break;
+ case OPT_INITRD:
+ arm64_opts.initrd = optarg;
+ break;
+ case OPT_KEXEC_FILE_SYSCALL:
+			do_kexec_file_syscall = 1;
+			break;
+ case OPT_SERIAL:
+ arm64_opts.console = optarg;
+ break;
+ default:
+ break; /* Ignore core and unknown options. */
+ }
+ }
+
+ arm64_opts.command_line = concat_cmdline(cmdline, append);
+
+ dbgprintf("%s:%d: command_line: %s\n", __func__, __LINE__,
+ arm64_opts.command_line);
+ dbgprintf("%s:%d: initrd: %s\n", __func__, __LINE__,
+ arm64_opts.initrd);
+ dbgprintf("%s:%d: dtb: %s\n", __func__, __LINE__,
+ (do_kexec_file_syscall && arm64_opts.dtb ? "(ignored)" :
+ arm64_opts.dtb));
+ dbgprintf("%s:%d: console: %s\n", __func__, __LINE__,
+ arm64_opts.console);
+
+ if (do_kexec_file_syscall)
+ arm64_opts.dtb = NULL;
+
+ return 0;
+}
+
+/**
+ * find_purgatory_sink - Find a sink for purgatory output.
+ */
+
+static uint64_t find_purgatory_sink(const char *console)
+{
+ int fd, ret;
+ char device[255], mem[255];
+ struct stat sb;
+ char buffer[10];
+ uint64_t iomem = 0x0;
+
+ if (!console)
+ return 0;
+
+ ret = snprintf(device, sizeof(device), "/sys/class/tty/%s", console);
+ if (ret < 0 || ret >= sizeof(device)) {
+ fprintf(stderr, "snprintf failed: %s\n", strerror(errno));
+ return 0;
+ }
+
+ if (stat(device, &sb) || !S_ISDIR(sb.st_mode)) {
+ fprintf(stderr, "kexec: %s: No valid console found for %s\n",
+ __func__, device);
+ return 0;
+ }
+
+ ret = snprintf(mem, sizeof(mem), "%s%s", device, "/iomem_base");
+ if (ret < 0 || ret >= sizeof(mem)) {
+ fprintf(stderr, "snprintf failed: %s\n", strerror(errno));
+ return 0;
+ }
+
+ printf("console memory read from %s\n", mem);
+
+ fd = open(mem, O_RDONLY);
+ if (fd < 0) {
+		fprintf(stderr, "kexec: %s: Not able to open %s\n",
+ __func__, mem);
+ return 0;
+ }
+
+ memset(buffer, '\0', sizeof(buffer));
+ ret = read(fd, buffer, sizeof(buffer));
+ if (ret < 0) {
+ fprintf(stderr, "kexec: %s: not able to read fd\n", __func__);
+ close(fd);
+ return 0;
+ }
+
+ sscanf(buffer, "%lx", &iomem);
+ printf("console memory is at %#lx\n", iomem);
+
+ close(fd);
+ return iomem;
+}
+
+/**
+ * struct dtb - Info about a binary device tree.
+ *
+ * @buf: Device tree data.
+ * @size: Device tree data size.
+ * @name: Shorthand name of this dtb for messages.
+ * @path: Filesystem path.
+ */
+
+struct dtb {
+ char *buf;
+ off_t size;
+ const char *name;
+ const char *path;
+};
+
+/**
+ * dump_reservemap - Dump the dtb's reservemap.
+ */
+
+static void dump_reservemap(const struct dtb *dtb)
+{
+ int i;
+
+ for (i = 0; ; i++) {
+ uint64_t address;
+ uint64_t size;
+
+ fdt_get_mem_rsv(dtb->buf, i, &address, &size);
+
+ if (!size)
+ break;
+
+ dbgprintf("%s: %s {%" PRIx64 ", %" PRIx64 "}\n", __func__,
+ dtb->name, address, size);
+ }
+}
+
+/**
+ * set_bootargs - Set the dtb's bootargs.
+ */
+
+static int set_bootargs(struct dtb *dtb, const char *command_line)
+{
+ int result;
+
+ if (!command_line || !command_line[0])
+ return 0;
+
+ result = dtb_set_bootargs(&dtb->buf, &dtb->size, command_line);
+
+ if (result) {
+ fprintf(stderr,
+ "kexec: Set device tree bootargs failed.\n");
+ return EFAILED;
+ }
+
+ return 0;
+}
+
+/**
+ * read_proc_dtb - Read /proc/device-tree.
+ */
+
+static int read_proc_dtb(struct dtb *dtb)
+{
+ int result;
+ struct stat s;
+ static const char path[] = "/proc/device-tree";
+
+ result = stat(path, &s);
+
+ if (result) {
+ dbgprintf("%s: %s\n", __func__, strerror(errno));
+ return EFAILED;
+ }
+
+ dtb->path = path;
+ create_flatten_tree((char **)&dtb->buf, &dtb->size, NULL);
+
+ return 0;
+}
+
+/**
+ * read_sys_dtb - Read /sys/firmware/fdt.
+ */
+
+static int read_sys_dtb(struct dtb *dtb)
+{
+ int result;
+ struct stat s;
+ static const char path[] = "/sys/firmware/fdt";
+
+ result = stat(path, &s);
+
+ if (result) {
+ dbgprintf("%s: %s\n", __func__, strerror(errno));
+ return EFAILED;
+ }
+
+ dtb->path = path;
+ dtb->buf = slurp_file(path, &dtb->size);
+
+ return 0;
+}
+
+/**
+ * read_1st_dtb - Read the 1st stage kernel's dtb.
+ */
+
+static int read_1st_dtb(struct dtb *dtb)
+{
+ int result;
+
+ dtb->name = "dtb_sys";
+ result = read_sys_dtb(dtb);
+
+ if (!result)
+ goto on_success;
+
+ dtb->name = "dtb_proc";
+ result = read_proc_dtb(dtb);
+
+ if (!result)
+ goto on_success;
+
+ dbgprintf("%s: not found\n", __func__);
+ return EFAILED;
+
+on_success:
+ dbgprintf("%s: found %s\n", __func__, dtb->path);
+ return 0;
+}
+
+static int get_cells_size(void *fdt, uint32_t *address_cells,
+ uint32_t *size_cells)
+{
+ int nodeoffset;
+ const uint32_t *prop = NULL;
+ int prop_len;
+
+ /* default values */
+ *address_cells = ROOT_NODE_ADDR_CELLS_DEFAULT;
+ *size_cells = ROOT_NODE_SIZE_CELLS_DEFAULT;
+
+ /* under root node */
+ nodeoffset = fdt_path_offset(fdt, "/");
+ if (nodeoffset < 0)
+ goto on_error;
+
+ prop = fdt_getprop(fdt, nodeoffset, PROP_ADDR_CELLS, &prop_len);
+ if (prop) {
+ if (prop_len == sizeof(*prop))
+ *address_cells = fdt32_to_cpu(*prop);
+ else
+ goto on_error;
+ }
+
+ prop = fdt_getprop(fdt, nodeoffset, PROP_SIZE_CELLS, &prop_len);
+ if (prop) {
+ if (prop_len == sizeof(*prop))
+ *size_cells = fdt32_to_cpu(*prop);
+ else
+ goto on_error;
+ }
+
+ dbgprintf("%s: #address-cells:%d #size-cells:%d\n", __func__,
+ *address_cells, *size_cells);
+ return 0;
+
+on_error:
+ return EFAILED;
+}
+
+static bool cells_size_fitted(uint32_t address_cells, uint32_t size_cells,
+ struct memory_range *range)
+{
+ dbgprintf("%s: %llx-%llx\n", __func__, range->start, range->end);
+
+ /* if *_cells >= 2, cells can hold 64-bit values anyway */
+ if ((address_cells == 1) && (range->start >= (1ULL << 32)))
+ return false;
+
+ if ((size_cells == 1) &&
+ ((range->end - range->start + 1) >= (1ULL << 32)))
+ return false;
+
+ return true;
+}
+
+static void fill_property(void *buf, uint64_t val, uint32_t cells)
+{
+ uint32_t val32;
+ int i;
+
+ if (cells == 1) {
+ val32 = cpu_to_fdt32((uint32_t)val);
+ memcpy(buf, &val32, sizeof(uint32_t));
+ } else {
+ for (i = 0;
+ i < (cells * sizeof(uint32_t) - sizeof(uint64_t)); i++)
+ *(char *)buf++ = 0;
+
+ val = cpu_to_fdt64(val);
+ memcpy(buf, &val, sizeof(uint64_t));
+ }
+}
+
+static int fdt_setprop_ranges(void *fdt, int nodeoffset, const char *name,
+ struct memory_range *ranges, int nr_ranges, bool reverse,
+ uint32_t address_cells, uint32_t size_cells)
+{
+ void *buf, *prop;
+ size_t buf_size;
+ int i, result;
+ struct memory_range *range;
+
+ buf_size = (address_cells + size_cells) * sizeof(uint32_t) * nr_ranges;
+ prop = buf = xmalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_ranges; i++) {
+ if (reverse)
+ range = ranges + (nr_ranges - 1 - i);
+ else
+ range = ranges + i;
+
+ fill_property(prop, range->start, address_cells);
+ prop += address_cells * sizeof(uint32_t);
+
+ fill_property(prop, range->end - range->start + 1, size_cells);
+ prop += size_cells * sizeof(uint32_t);
+ }
+
+ result = fdt_setprop(fdt, nodeoffset, name, buf, buf_size);
+
+ free(buf);
+
+ return result;
+}
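
To make the cell encoding concrete, here is a minimal sketch of how one range is laid out when #address-cells = 2 and #size-cells = 2: each 64-bit value becomes two big-endian 32-bit cells, which is what fill_property() produces via cpu_to_fdt64(). The start and size values are arbitrary examples.

/* Sketch: encode one memory range as DT cells (#address-cells=2, #size-cells=2). */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t start = 0x880000000ULL;	/* example values only */
	uint64_t size  = 0x80000000ULL;
	uint32_t cells[4];
	uint64_t be;
	int i;

	be = htobe64(start);			/* cpu_to_fdt64() equivalent */
	memcpy(&cells[0], &be, sizeof(be));
	be = htobe64(size);
	memcpy(&cells[2], &be, sizeof(be));

	for (i = 0; i < 4; i++)
		printf("cell[%d] = 0x%08x\n", i, be32toh(cells[i]));
	/* Prints: 0x00000008 0x80000000 0x00000000 0x80000000 */
	return 0;
}
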
+
+/**
+ * setup_2nd_dtb - Setup the 2nd stage kernel's dtb.
+ */
+
+static int setup_2nd_dtb(struct dtb *dtb, char *command_line, int on_crash)
+{
+ uint32_t address_cells, size_cells;
+ uint64_t fdt_val64;
+ uint64_t *prop;
+ char *new_buf = NULL;
+ int len, range_len;
+ int nodeoffset;
+ int new_size;
+ int i, result, kaslr_seed;
+
+ result = fdt_check_header(dtb->buf);
+
+ if (result) {
+ fprintf(stderr, "kexec: Invalid 2nd device tree.\n");
+ return EFAILED;
+ }
+
+ result = set_bootargs(dtb, command_line);
+ if (result) {
+ fprintf(stderr, "kexec: cannot set bootargs.\n");
+ result = -EINVAL;
+ goto on_error;
+ }
+
+ /* determine #address-cells and #size-cells */
+ result = get_cells_size(dtb->buf, &address_cells, &size_cells);
+ if (result) {
+ fprintf(stderr, "kexec: cannot determine cells-size.\n");
+ result = -EINVAL;
+ goto on_error;
+ }
+
+ if (!cells_size_fitted(address_cells, size_cells,
+ &elfcorehdr_mem)) {
+ fprintf(stderr, "kexec: elfcorehdr doesn't fit cells-size.\n");
+ result = -EINVAL;
+ goto on_error;
+ }
+
+ for (i = 0; i < usablemem_rgns.size; i++) {
+ if (!cells_size_fitted(address_cells, size_cells,
+ &crash_reserved_mem[i])) {
+ fprintf(stderr, "kexec: usable memory range doesn't fit cells-size.\n");
+ result = -EINVAL;
+ goto on_error;
+ }
+ }
+
+ /* duplicate dt blob */
+ range_len = sizeof(uint32_t) * (address_cells + size_cells);
+ new_size = fdt_totalsize(dtb->buf)
+ + fdt_prop_len(PROP_ELFCOREHDR, range_len)
+ + fdt_prop_len(PROP_USABLE_MEM_RANGE, range_len * usablemem_rgns.size);
+
+ new_buf = xmalloc(new_size);
+ result = fdt_open_into(dtb->buf, new_buf, new_size);
+ if (result) {
+ dbgprintf("%s: fdt_open_into failed: %s\n", __func__,
+ fdt_strerror(result));
+ result = -ENOSPC;
+ goto on_error;
+ }
+
+ /* fixup 'kaslr-seed' with a random value, if supported */
+ nodeoffset = fdt_path_offset(new_buf, "/chosen");
+ prop = fdt_getprop_w(new_buf, nodeoffset,
+ "kaslr-seed", &len);
+ if (!prop || len != sizeof(uint64_t)) {
+ dbgprintf("%s: no kaslr-seed found\n",
+ __func__);
+ /* for kexec warm reboot case, we don't need to fixup
+ * other dtb properties
+ */
+ if (!on_crash) {
+ dump_reservemap(dtb);
+ if (new_buf)
+ free(new_buf);
+
+ return result;
+ }
+ } else {
+ kaslr_seed = fdt64_to_cpu(*prop);
+
+ /* kaslr_seed must be wiped clean by primary
+ * kernel during boot
+ */
+ if (kaslr_seed != 0) {
+ dbgprintf("%s: kaslr-seed is not wiped to 0.\n",
+ __func__);
+ result = -EINVAL;
+ goto on_error;
+ }
+
+ /*
+ * Invoke the getrandom system call with
+ * GRND_NONBLOCK, to make sure we
+ * have a valid random seed to pass to the
+ * secondary kernel.
+ */
+ result = syscall(SYS_getrandom, &fdt_val64,
+ sizeof(fdt_val64),
+ GRND_NONBLOCK);
+
+ if(result == -1) {
+ fprintf(stderr, "%s: Reading random bytes failed.\n",
+ __func__);
+
+ /* Currently on some arm64 platforms this
+ * 'getrandom' system call fails while booting
+ * the platform.
+ *
+			 * In case this happens, at best we can set
+			 * the 'kaslr_seed' to 0, indicating that the
+ * 2nd kernel will be booted with a 'nokaslr'
+ * like behaviour.
+ */
+ fdt_val64 = 0UL;
+ dbgprintf("%s: Disabling KASLR in secondary kernel.\n",
+ __func__);
+ }
+
+ nodeoffset = fdt_path_offset(new_buf, "/chosen");
+ result = fdt_setprop_inplace(new_buf,
+ nodeoffset, "kaslr-seed",
+ &fdt_val64, sizeof(fdt_val64));
+ if (result) {
+ dbgprintf("%s: fdt_setprop failed: %s\n",
+ __func__, fdt_strerror(result));
+ result = -EINVAL;
+ goto on_error;
+ }
+ }
+
+ if (on_crash) {
+ /* add linux,elfcorehdr */
+ nodeoffset = fdt_path_offset(new_buf, "/chosen");
+ result = fdt_setprop_ranges(new_buf, nodeoffset,
+ PROP_ELFCOREHDR, &elfcorehdr_mem, 1, false,
+ address_cells, size_cells);
+ if (result) {
+ dbgprintf("%s: fdt_setprop failed: %s\n", __func__,
+ fdt_strerror(result));
+ result = -EINVAL;
+ goto on_error;
+ }
+
+ /*
+ * add linux,usable-memory-range
+ *
+		 * The crash dump kernel supports one or two regions; for
+		 * compatibility with existing user-space and older kdump,
+		 * the low region is always the last one.
+ */
+ nodeoffset = fdt_path_offset(new_buf, "/chosen");
+ result = fdt_setprop_ranges(new_buf, nodeoffset,
+ PROP_USABLE_MEM_RANGE,
+ usablemem_rgns.ranges, usablemem_rgns.size, true,
+ address_cells, size_cells);
+ if (result) {
+ dbgprintf("%s: fdt_setprop failed: %s\n", __func__,
+ fdt_strerror(result));
+ result = -EINVAL;
+ goto on_error;
+ }
+ }
+
+ fdt_pack(new_buf);
+ dtb->buf = new_buf;
+ dtb->size = fdt_totalsize(new_buf);
+
+ dump_reservemap(dtb);
+
+ return result;
+
+on_error:
+ fprintf(stderr, "kexec: %s failed.\n", __func__);
+ if (new_buf)
+ free(new_buf);
+
+ return result;
+}
+
+unsigned long arm64_locate_kernel_segment(struct kexec_info *info)
+{
+ unsigned long hole;
+
+ if (info->kexec_flags & KEXEC_ON_CRASH) {
+ unsigned long hole_end;
+
+ hole = (crash_reserved_mem[usablemem_rgns.size - 1].start < mem_min ?
+ mem_min : crash_reserved_mem[usablemem_rgns.size - 1].start);
+ hole = _ALIGN_UP(hole, MiB(2));
+ hole_end = hole + arm64_mem.text_offset + arm64_mem.image_size;
+
+ if ((hole_end > mem_max) ||
+ (hole_end > crash_reserved_mem[usablemem_rgns.size - 1].end)) {
+ dbgprintf("%s: Crash kernel out of range\n", __func__);
+ hole = ULONG_MAX;
+ }
+ } else {
+ hole = locate_hole(info,
+ arm64_mem.text_offset + arm64_mem.image_size,
+ MiB(2), 0, ULONG_MAX, 1);
+
+ if (hole == ULONG_MAX)
+ dbgprintf("%s: locate_hole failed\n", __func__);
+ }
+
+ return hole;
+}
+
+/**
+ * arm64_load_other_segments - Prepare the dtb, initrd and purgatory segments.
+ */
+
+int arm64_load_other_segments(struct kexec_info *info,
+ unsigned long image_base)
+{
+ int result;
+ unsigned long dtb_base;
+ unsigned long hole_min;
+ unsigned long hole_max;
+ unsigned long initrd_end;
+ uint64_t purgatory_sink;
+ char *initrd_buf = NULL;
+ struct dtb dtb;
+ char command_line[COMMAND_LINE_SIZE] = "";
+
+ if (arm64_opts.command_line) {
+ if (strlen(arm64_opts.command_line) >
+ sizeof(command_line) - 1) {
+ fprintf(stderr,
+ "Kernel command line too long for kernel!\n");
+ return EFAILED;
+ }
+
+ strncpy(command_line, arm64_opts.command_line,
+ sizeof(command_line) - 1);
+ command_line[sizeof(command_line) - 1] = 0;
+ }
+
+ purgatory_sink = find_purgatory_sink(arm64_opts.console);
+
+ dbgprintf("%s:%d: purgatory sink: 0x%" PRIx64 "\n", __func__, __LINE__,
+ purgatory_sink);
+
+ if (arm64_opts.dtb) {
+ dtb.name = "dtb_user";
+ dtb.buf = slurp_file(arm64_opts.dtb, &dtb.size);
+ } else {
+ result = read_1st_dtb(&dtb);
+
+ if (result) {
+ fprintf(stderr,
+ "kexec: Error: No device tree available.\n");
+ return EFAILED;
+ }
+ }
+
+ result = setup_2nd_dtb(&dtb, command_line,
+ info->kexec_flags & KEXEC_ON_CRASH);
+
+ if (result)
+ return EFAILED;
+
+ /* Put the other segments after the image. */
+
+ hole_min = image_base + arm64_mem.image_size;
+ if (info->kexec_flags & KEXEC_ON_CRASH)
+ hole_max = crash_reserved_mem[usablemem_rgns.size - 1].end;
+ else
+ hole_max = ULONG_MAX;
+
+ if (arm64_opts.initrd) {
+ initrd_buf = slurp_file(arm64_opts.initrd, &initrd_size);
+
+ if (!initrd_buf)
+ fprintf(stderr, "kexec: Empty ramdisk file.\n");
+ else {
+ /* Put the initrd after the kernel. */
+
+ initrd_base = add_buffer_phys_virt(info, initrd_buf,
+ initrd_size, initrd_size, 0,
+ hole_min, hole_max, 1, 0);
+
+ initrd_end = initrd_base + initrd_size;
+
+ /* Check limits as specified in booting.txt.
+ * The kernel may have as little as 32 GB of address space to map
+			 * system memory and both kernel and initrd must be 1GB aligned.
+ */
+
+ if (_ALIGN_UP(initrd_end, GiB(1)) - _ALIGN_DOWN(image_base, GiB(1)) > GiB(32)) {
+ fprintf(stderr, "kexec: Error: image + initrd too big.\n");
+ return EFAILED;
+ }
+
+ dbgprintf("initrd: base %lx, size %lxh (%ld)\n",
+ initrd_base, initrd_size, initrd_size);
+
+ result = dtb_set_initrd((char **)&dtb.buf,
+ &dtb.size, initrd_base,
+ initrd_base + initrd_size);
+
+ if (result)
+ return EFAILED;
+ }
+ }
+
+ if (!initrd_buf) {
+ /* Don't reuse the initrd addresses from 1st DTB */
+ dtb_clear_initrd((char **)&dtb.buf, &dtb.size);
+ }
+
+ /* Check size limit as specified in booting.txt. */
+
+ if (dtb.size > MiB(2)) {
+ fprintf(stderr, "kexec: Error: dtb too big.\n");
+ return EFAILED;
+ }
+
+ dtb_base = add_buffer_phys_virt(info, dtb.buf, dtb.size, dtb.size,
+ 0, hole_min, hole_max, 1, 0);
+
+ /* dtb_base is valid if we got here. */
+
+ dbgprintf("dtb: base %lx, size %lxh (%ld)\n", dtb_base, dtb.size,
+ dtb.size);
+
+ elf_rel_build_load(info, &info->rhdr, purgatory, purgatory_size,
+ hole_min, hole_max, 1, 0);
+
+ info->entry = (void *)elf_rel_get_addr(&info->rhdr, "purgatory_start");
+
+ elf_rel_set_symbol(&info->rhdr, "arm64_sink", &purgatory_sink,
+ sizeof(purgatory_sink));
+
+ elf_rel_set_symbol(&info->rhdr, "arm64_kernel_entry", &image_base,
+ sizeof(image_base));
+
+ elf_rel_set_symbol(&info->rhdr, "arm64_dtb_addr", &dtb_base,
+ sizeof(dtb_base));
+
+ return 0;
+}
+
+/**
+ * virt_to_phys - For processing elf file values.
+ */
+
+unsigned long virt_to_phys(unsigned long v)
+{
+ unsigned long p;
+
+ p = v - get_vp_offset() + get_phys_offset();
+
+ return p;
+}
+
+/**
+ * phys_to_virt - For crashdump setup.
+ */
+
+unsigned long phys_to_virt(struct crash_elf_info *elf_info,
+ unsigned long long p)
+{
+ unsigned long v;
+
+ v = p - get_phys_offset() + elf_info->page_offset;
+
+ return v;
+}
+
+/**
+ * add_segment - Use virt_to_phys when loading elf files.
+ */
+
+void add_segment(struct kexec_info *info, const void *buf, size_t bufsz,
+ unsigned long base, size_t memsz)
+{
+ add_segment_phys_virt(info, buf, bufsz, base, memsz, 1);
+}
+
+static inline void set_phys_offset(int64_t v, char *set_method)
+{
+ if (arm64_mem.phys_offset == arm64_mem_ngv
+ || v < arm64_mem.phys_offset) {
+ arm64_mem.phys_offset = v;
+ dbgprintf("%s: phys_offset : %016lx (method : %s)\n",
+ __func__, arm64_mem.phys_offset,
+ set_method);
+ }
+}
+
+/**
+ * get_va_bits - Helper for getting VA_BITS
+ */
+
+static int get_va_bits(void)
+{
+ unsigned long long stext_sym_addr;
+
+ /*
+ * if already got from kcore
+ */
+ if (va_bits != -1)
+ goto out;
+
+
+ /* For kernel older than v4.19 */
+ fprintf(stderr, "Warning, can't get the VA_BITS from kcore\n");
+ stext_sym_addr = get_kernel_sym("_stext");
+
+ if (stext_sym_addr == 0) {
+ fprintf(stderr, "Can't get the symbol of _stext.\n");
+ return -1;
+ }
+
+ /* Derive va_bits as per arch/arm64/Kconfig */
+ if ((stext_sym_addr & PAGE_OFFSET_36) == PAGE_OFFSET_36) {
+ va_bits = 36;
+ } else if ((stext_sym_addr & PAGE_OFFSET_39) == PAGE_OFFSET_39) {
+ va_bits = 39;
+ } else if ((stext_sym_addr & PAGE_OFFSET_42) == PAGE_OFFSET_42) {
+ va_bits = 42;
+ } else if ((stext_sym_addr & PAGE_OFFSET_47) == PAGE_OFFSET_47) {
+ va_bits = 47;
+ } else if ((stext_sym_addr & PAGE_OFFSET_48) == PAGE_OFFSET_48) {
+ va_bits = 48;
+ } else {
+ fprintf(stderr,
+ "Cannot find a proper _stext for calculating VA_BITS\n");
+ return -1;
+ }
+
+out:
+ dbgprintf("va_bits : %d\n", va_bits);
+
+ return 0;
+}
+
+/**
+ * get_page_offset - Helper for getting PAGE_OFFSET
+ */
+
+int get_page_offset(unsigned long *page_offset)
+{
+ unsigned long long text_sym_addr, kernel_va_mid;
+ int ret;
+
+ text_sym_addr = get_kernel_sym("_text");
+ if (text_sym_addr == 0) {
+ fprintf(stderr, "Can't get the symbol of _text to calculate page_offset.\n");
+ return -1;
+ }
+
+ ret = get_va_bits();
+ if (ret < 0)
+ return ret;
+
+ /* Since kernel 5.4, kernel image is put above
+ * UINT64_MAX << (va_bits - 1)
+ */
+ kernel_va_mid = UINT64_MAX << (va_bits - 1);
+ /* older kernel */
+ if (text_sym_addr < kernel_va_mid)
+ *page_offset = UINT64_MAX << (va_bits - 1);
+ else
+ *page_offset = UINT64_MAX << va_bits;
+
+ dbgprintf("page_offset : %lx\n", *page_offset);
+
+ return 0;
+}
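
A worked example of the selection above, assuming va_bits = 48 (normally read from the VMCOREINFO note in /proc/kcore) and a made-up _text address from a v5.4+ kernel:

/* Sketch: page_offset selection for a post-5.4, VA_BITS=48 kernel. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int va_bits = 48;
	uint64_t text = 0xffff800010080000ULL;		/* hypothetical _text */
	uint64_t mid = UINT64_MAX << (va_bits - 1);	/* 0xffff800000000000 */
	uint64_t page_offset;

	if (text < mid)				/* pre-5.4 layout */
		page_offset = UINT64_MAX << (va_bits - 1);
	else					/* 5.4+ flipped layout */
		page_offset = UINT64_MAX << va_bits;

	printf("page_offset = %#llx\n",
	       (unsigned long long)page_offset);	/* 0xffff000000000000 */
	return 0;
}
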
+
+static void arm64_scan_vmcoreinfo(char *pos)
+{
+ const char *str;
+
+ str = "NUMBER(VA_BITS)=";
+ if (memcmp(str, pos, strlen(str)) == 0)
+ va_bits = strtoul(pos + strlen(str), NULL, 10);
+}
+
+/**
+ * get_phys_offset_from_vmcoreinfo_pt_note - Helper for getting PHYS_OFFSET (and va_bits)
+ * from VMCOREINFO note inside 'kcore'.
+ */
+
+static int get_phys_offset_from_vmcoreinfo_pt_note(long *phys_offset)
+{
+ int fd, ret = 0;
+
+ if ((fd = open("/proc/kcore", O_RDONLY)) < 0) {
+ fprintf(stderr, "Can't open (%s).\n", "/proc/kcore");
+ return EFAILED;
+ }
+
+ arch_scan_vmcoreinfo = arm64_scan_vmcoreinfo;
+ ret = read_phys_offset_elf_kcore(fd, phys_offset);
+
+ close(fd);
+ return ret;
+}
+
+/**
+ * get_phys_base_from_pt_load - Helper for getting PHYS_OFFSET
+ * from PT_LOADs inside 'kcore'.
+ */
+
+int get_phys_base_from_pt_load(long *phys_offset)
+{
+ int i, fd, ret;
+ unsigned long long phys_start;
+ unsigned long long virt_start;
+
+ ret = get_page_offset(&page_offset);
+ if (ret < 0)
+ return ret;
+
+ if ((fd = open("/proc/kcore", O_RDONLY)) < 0) {
+ fprintf(stderr, "Can't open (%s).\n", "/proc/kcore");
+ return EFAILED;
+ }
+
+ read_elf(fd);
+
+ for (i = 0; get_pt_load(i,
+ &phys_start, NULL, &virt_start, NULL);
+ i++) {
+ if (virt_start != NOT_KV_ADDR
+ && virt_start >= page_offset
+ && phys_start != NOT_PADDR)
+ *phys_offset = phys_start -
+ (virt_start & ~page_offset);
+ }
+
+ close(fd);
+ return 0;
+}
+
+static bool to_be_excluded(char *str, unsigned long long start, unsigned long long end)
+{
+ if (!strncmp(str, CRASH_KERNEL, strlen(CRASH_KERNEL))) {
+ uint64_t load_start, load_end;
+
+ if (!get_crash_kernel_load_range(&load_start, &load_end) &&
+ (load_start == start) && (load_end == end))
+ return false;
+
+ return true;
+ }
+
+ if (!strncmp(str, SYSTEM_RAM, strlen(SYSTEM_RAM)) ||
+ !strncmp(str, KERNEL_CODE, strlen(KERNEL_CODE)) ||
+ !strncmp(str, KERNEL_DATA, strlen(KERNEL_DATA)))
+ return false;
+ else
+ return true;
+}
+
+/**
+ * get_memory_ranges - Try to get the memory ranges from
+ * /proc/iomem.
+ */
+int get_memory_ranges(struct memory_range **range, int *ranges,
+ unsigned long kexec_flags)
+{
+ long phys_offset = -1;
+ FILE *fp;
+ const char *iomem = proc_iomem();
+ char line[MAX_LINE], *str;
+ unsigned long long start, end;
+ int n, consumed;
+ struct memory_ranges memranges;
+ struct memory_range *last, excl_range;
+ int ret;
+
+ if (!try_read_phys_offset_from_kcore) {
+ /* Since kernel version 4.19, 'kcore' contains
+ * a new PT_NOTE which carries the VMCOREINFO
+ * information.
+		 * If it is available, prefer it for retrieving the
+		 * 'PHYS_OFFSET' value exported by the kernel, as this
+		 * is now the standard interface exposed by the kernel
+		 * for sharing machine-specific details with userland.
+ */
+ ret = get_phys_offset_from_vmcoreinfo_pt_note(&phys_offset);
+ if (!ret) {
+ if (phys_offset != -1)
+ set_phys_offset(phys_offset,
+ "vmcoreinfo pt_note");
+ } else {
+			/* If we are running on an older kernel,
+ * try to retrieve the 'PHYS_OFFSET' value
+ * exported by the kernel in the 'kcore'
+ * file by reading the PT_LOADs and determining
+ * the correct combination.
+ */
+ ret = get_phys_base_from_pt_load(&phys_offset);
+ if (!ret)
+ if (phys_offset != -1)
+ set_phys_offset(phys_offset,
+ "pt_load");
+ }
+
+ try_read_phys_offset_from_kcore = true;
+ }
+
+ fp = fopen(iomem, "r");
+ if (!fp)
+ die("Cannot open %s\n", iomem);
+
+ memranges.ranges = NULL;
+ memranges.size = memranges.max_size = 0;
+
+ while (fgets(line, sizeof(line), fp) != 0) {
+ n = sscanf(line, "%llx-%llx : %n", &start, &end, &consumed);
+ if (n != 2)
+ continue;
+ str = line + consumed;
+
+ if (!strncmp(str, SYSTEM_RAM, strlen(SYSTEM_RAM))) {
+ ret = mem_regions_alloc_and_add(&memranges,
+ start, end - start + 1, RANGE_RAM);
+ if (ret) {
+ fprintf(stderr,
+ "Cannot allocate memory for ranges\n");
+ fclose(fp);
+ return -ENOMEM;
+ }
+
+ dbgprintf("%s:+[%d] %016llx - %016llx\n", __func__,
+ memranges.size - 1,
+ memranges.ranges[memranges.size - 1].start,
+ memranges.ranges[memranges.size - 1].end);
+ } else if (to_be_excluded(str, start, end)) {
+ if (!memranges.size)
+ continue;
+
+ /*
+			 * Note: mem_regions_exclude() doesn't guarantee
+			 * that the ranges stay sorted, but as long as we
+			 * only deal with /proc/iomem we operate solely on
+			 * the last entry, so this is safe.
+ */
+
+ /* The last System RAM range */
+ last = &memranges.ranges[memranges.size - 1];
+
+ if (last->end < start)
+ /* New resource outside of System RAM */
+ continue;
+ if (end < last->start)
+ /* Already excluded by parent resource */
+ continue;
+
+ excl_range.start = start;
+ excl_range.end = end;
+ ret = mem_regions_alloc_and_exclude(&memranges, &excl_range);
+ if (ret) {
+ fprintf(stderr,
+ "Cannot allocate memory for ranges (exclude)\n");
+ fclose(fp);
+ return -ENOMEM;
+ }
+ dbgprintf("%s:- %016llx - %016llx\n",
+ __func__, start, end);
+ }
+ }
+
+ fclose(fp);
+
+ *range = memranges.ranges;
+ *ranges = memranges.size;
+
+ /* As a fallback option, we can try determining the PHYS_OFFSET
+ * value from the '/proc/iomem' entries as well.
+ *
+ * But note that this can be flaky, as on certain arm64
+ * platforms, it has been noticed that due to a hole at the
+ * start of physical ram exposed to kernel
+ * (i.e. it doesn't start from address 0), the kernel still
+ * calculates the 'memstart_addr' kernel variable as 0.
+ *
+ * Whereas the SYSTEM_RAM or IOMEM_RESERVED range in
+ * '/proc/iomem' would carry a first entry whose start address
+ * is non-zero (as the physical ram exposed to the kernel
+ * starts from a non-zero address).
+ *
+ * In such cases, if we rely on '/proc/iomem' entries to
+ * calculate the phys_offset, then we will have mismatch
+ * between the user-space and kernel space 'PHYS_OFFSET'
+ * value.
+ */
+ if (memranges.size)
+ set_phys_offset(memranges.ranges[0].start, "iomem");
+
+	dbgprint_mem_range("System RAM ranges:",
+ memranges.ranges, memranges.size);
+
+ return 0;
+}
+
+int arch_compat_trampoline(struct kexec_info *info)
+{
+ return 0;
+}
+
+int machine_verify_elf_rel(struct mem_ehdr *ehdr)
+{
+ return (ehdr->e_machine == EM_AARCH64);
+}
+
+enum aarch64_rel_type {
+ R_AARCH64_NONE = 0,
+ R_AARCH64_ABS64 = 257,
+ R_AARCH64_PREL32 = 261,
+ R_AARCH64_MOVW_UABS_G0_NC = 264,
+ R_AARCH64_MOVW_UABS_G1_NC = 266,
+ R_AARCH64_MOVW_UABS_G2_NC = 268,
+	R_AARCH64_MOVW_UABS_G3 = 269,
+ R_AARCH64_LD_PREL_LO19 = 273,
+ R_AARCH64_ADR_PREL_LO21 = 274,
+ R_AARCH64_ADR_PREL_PG_HI21 = 275,
+ R_AARCH64_ADD_ABS_LO12_NC = 277,
+ R_AARCH64_JUMP26 = 282,
+ R_AARCH64_CALL26 = 283,
+ R_AARCH64_LDST64_ABS_LO12_NC = 286,
+ R_AARCH64_LDST128_ABS_LO12_NC = 299
+};
+
+static uint32_t get_bits(uint32_t value, int start, int end)
+{
+ uint32_t mask = ((uint32_t)1 << (end + 1 - start)) - 1;
+ return (value >> start) & mask;
+}
+
+void machine_apply_elf_rel(struct mem_ehdr *ehdr, struct mem_sym *UNUSED(sym),
+ unsigned long r_type, void *ptr, unsigned long address,
+ unsigned long value)
+{
+ uint64_t *loc64;
+ uint32_t *loc32;
+ uint64_t *location = (uint64_t *)ptr;
+ uint64_t data = *location;
+ uint64_t imm;
+ const char *type = NULL;
+
+ switch((enum aarch64_rel_type)r_type) {
+ case R_AARCH64_ABS64:
+ type = "ABS64";
+ loc64 = ptr;
+ *loc64 = cpu_to_elf64(ehdr, value);
+ break;
+ case R_AARCH64_PREL32:
+ type = "PREL32";
+ loc32 = ptr;
+ *loc32 = cpu_to_elf32(ehdr, value - address);
+ break;
+
+ /* Set a MOV[KZ] immediate field to bits [15:0] of X. No overflow check */
+ case R_AARCH64_MOVW_UABS_G0_NC:
+ type = "MOVW_UABS_G0_NC";
+ loc32 = ptr;
+ imm = get_bits(value, 0, 15);
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32) + (imm << 5));
+ break;
+ /* Set a MOV[KZ] immediate field to bits [31:16] of X. No overflow check */
+ case R_AARCH64_MOVW_UABS_G1_NC:
+ type = "MOVW_UABS_G1_NC";
+ loc32 = ptr;
+ imm = get_bits(value, 16, 31);
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32) + (imm << 5));
+ break;
+ /* Set a MOV[KZ] immediate field to bits [47:32] of X. No overflow check */
+ case R_AARCH64_MOVW_UABS_G2_NC:
+ type = "MOVW_UABS_G2_NC";
+ loc32 = ptr;
+ imm = get_bits(value, 32, 47);
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32) + (imm << 5));
+ break;
+ /* Set a MOV[KZ] immediate field to bits [63:48] of X */
+ case R_AARCH64_MOVW_UABS_G3:
+ type = "MOVW_UABS_G3";
+ loc32 = ptr;
+ imm = get_bits(value, 48, 63);
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32) + (imm << 5));
+ break;
+
+ case R_AARCH64_LD_PREL_LO19:
+ type = "LD_PREL_LO19";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + (((value - address) << 3) & 0xffffe0));
+ break;
+ case R_AARCH64_ADR_PREL_LO21:
+ if (value & 3)
+ die("%s: ERROR Unaligned value: %lx\n", __func__,
+ value);
+ type = "ADR_PREL_LO21";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + (((value - address) << 3) & 0xffffe0));
+ break;
+ case R_AARCH64_ADR_PREL_PG_HI21:
+ type = "ADR_PREL_PG_HI21";
+ imm = ((value & ~0xfff) - (address & ~0xfff)) >> 12;
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + ((imm & 3) << 29) + ((imm & 0x1ffffc) << (5 - 2)));
+ break;
+ case R_AARCH64_ADD_ABS_LO12_NC:
+ type = "ADD_ABS_LO12_NC";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + ((value & 0xfff) << 10));
+ break;
+ case R_AARCH64_JUMP26:
+ type = "JUMP26";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + (((value - address) >> 2) & 0x3ffffff));
+ break;
+ case R_AARCH64_CALL26:
+ type = "CALL26";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + (((value - address) >> 2) & 0x3ffffff));
+ break;
+ /* encode imm field with bits [11:3] of value */
+ case R_AARCH64_LDST64_ABS_LO12_NC:
+ if (value & 7)
+ die("%s: ERROR Unaligned value: %lx\n", __func__,
+ value);
+ type = "LDST64_ABS_LO12_NC";
+ loc32 = ptr;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32)
+ + ((value & 0xff8) << (10 - 3)));
+ break;
+
+ /* encode imm field with bits [11:4] of value */
+ case R_AARCH64_LDST128_ABS_LO12_NC:
+ if (value & 15)
+ die("%s: ERROR Unaligned value: %lx\n", __func__,
+ value);
+ type = "LDST128_ABS_LO12_NC";
+ loc32 = ptr;
+ imm = value & 0xff0;
+ *loc32 = cpu_to_le32(le32_to_cpu(*loc32) + (imm << (10 - 4)));
+ break;
+ default:
+ die("%s: ERROR Unknown type: %lu\n", __func__, r_type);
+ break;
+ }
+
+ dbgprintf("%s: %s %016lx->%016lx\n", __func__, type, data, *location);
+}
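
As an illustration of the MOVW_UABS_* cases above: a MOVZ/MOVK instruction keeps its 16-bit immediate in bits [20:5], so the relocation extracts the relevant 16 bits of the symbol value and adds them into the instruction word shifted left by 5. A minimal sketch with made-up values:

/* Sketch: apply an R_AARCH64_MOVW_UABS_G0_NC-style patch to one instruction. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_bits(uint32_t value, int start, int end)
{
	uint32_t mask = ((uint32_t)1 << (end + 1 - start)) - 1;
	return (value >> start) & mask;
}

int main(void)
{
	uint32_t insn = 0xd2800000;		/* movz x0, #0 (example) */
	uint64_t value = 0x0000ffff12345678ULL;	/* hypothetical symbol value */
	uint32_t imm = get_bits((uint32_t)value, 0, 15);	/* 0x5678 */

	insn += imm << 5;			/* place into insn bits [20:5] */
	printf("patched insn: 0x%08x\n", insn);	/* 0xd28acf00 = movz x0, #0x5678 */
	return 0;
}
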
+
+void arch_reuse_initrd(void)
+{
+ reuse_initrd = 1;
+}
+
+void arch_update_purgatory(struct kexec_info *UNUSED(info))
+{
+}
diff --git a/kexec/arch/arm64/kexec-arm64.h b/kexec/arch/arm64/kexec-arm64.h
new file mode 100644
index 0000000..95fb5c2
--- /dev/null
+++ b/kexec/arch/arm64/kexec-arm64.h
@@ -0,0 +1,84 @@
+/*
+ * ARM64 kexec.
+ */
+
+#if !defined(KEXEC_ARM64_H)
+#define KEXEC_ARM64_H
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+#include "image-header.h"
+#include "kexec.h"
+
+#define KEXEC_SEGMENT_MAX 64
+
+#define BOOT_BLOCK_VERSION 17
+#define BOOT_BLOCK_LAST_COMP_VERSION 16
+#define COMMAND_LINE_SIZE 2048 /* from kernel */
+
+#define KiB(x) ((x) * 1024UL)
+#define MiB(x) (KiB(x) * 1024UL)
+#define GiB(x) (MiB(x) * 1024UL)
+
+#define ULONGLONG_MAX (~0ULL)
+
+/*
+ * Incorrect address
+ */
+#define NOT_KV_ADDR (0x0)
+#define NOT_PADDR (ULONGLONG_MAX)
+
+int elf_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int elf_arm64_load(int argc, char **argv, const char *kernel_buf,
+ off_t kernel_size, struct kexec_info *info);
+void elf_arm64_usage(void);
+
+int image_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int image_arm64_load(int argc, char **argv, const char *kernel_buf,
+ off_t kernel_size, struct kexec_info *info);
+void image_arm64_usage(void);
+
+int uImage_arm64_probe(const char *buf, off_t len);
+int uImage_arm64_load(int argc, char **argv, const char *buf, off_t len,
+ struct kexec_info *info);
+void uImage_arm64_usage(void);
+
+int pez_arm64_probe(const char *kernel_buf, off_t kernel_size);
+int pez_arm64_load(int argc, char **argv, const char *buf, off_t len,
+ struct kexec_info *info);
+void pez_arm64_usage(void);
+
+
+extern off_t initrd_base;
+extern off_t initrd_size;
+
+/**
+ * struct arm64_mem - Memory layout info.
+ */
+
+struct arm64_mem {
+ int64_t phys_offset;
+ uint64_t text_offset;
+ uint64_t image_size;
+ uint64_t vp_offset;
+};
+
+#define arm64_mem_ngv UINT64_MAX
+extern struct arm64_mem arm64_mem;
+
+uint64_t get_phys_offset(void);
+uint64_t get_vp_offset(void);
+int get_page_offset(unsigned long *offset);
+
+static inline void reset_vp_offset(void)
+{
+ arm64_mem.vp_offset = arm64_mem_ngv;
+}
+
+int arm64_process_image_header(const struct arm64_image_header *h);
+unsigned long arm64_locate_kernel_segment(struct kexec_info *info);
+int arm64_load_other_segments(struct kexec_info *info,
+ unsigned long image_base);
+
+#endif
diff --git a/kexec/arch/arm64/kexec-elf-arm64.c b/kexec/arch/arm64/kexec-elf-arm64.c
new file mode 100644
index 0000000..e14f8e9
--- /dev/null
+++ b/kexec/arch/arm64/kexec-elf-arm64.c
@@ -0,0 +1,170 @@
+/*
+ * ARM64 kexec elf support.
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <linux/elf.h>
+
+#include "arch/options.h"
+#include "crashdump-arm64.h"
+#include "kexec-arm64.h"
+#include "kexec-elf.h"
+#include "kexec-syscall.h"
+
+int elf_arm64_probe(const char *kernel_buf, off_t kernel_size)
+{
+ struct mem_ehdr ehdr;
+ int result;
+
+ result = build_elf_exec_info(kernel_buf, kernel_size, &ehdr, 0);
+
+ if (result < 0) {
+ dbgprintf("%s: Not an ELF executable.\n", __func__);
+ goto on_exit;
+ }
+
+ if (ehdr.e_machine != EM_AARCH64) {
+ dbgprintf("%s: Not an AARCH64 ELF executable.\n", __func__);
+ result = -1;
+ goto on_exit;
+ }
+
+ result = 0;
+on_exit:
+ free_elf_info(&ehdr);
+ return result;
+}
+
+int elf_arm64_load(int argc, char **argv, const char *kernel_buf,
+ off_t kernel_size, struct kexec_info *info)
+{
+ const struct arm64_image_header *header = NULL;
+ unsigned long kernel_segment;
+ struct mem_ehdr ehdr;
+ int result;
+ int i;
+
+ if (info->file_mode) {
+ fprintf(stderr,
+ "ELF executable is not supported in kexec_file\n");
+
+ return EFAILED;
+ }
+
+ result = build_elf_exec_info(kernel_buf, kernel_size, &ehdr, 0);
+
+ if (result < 0) {
+ dbgprintf("%s: build_elf_exec_info failed\n", __func__);
+ goto exit;
+ }
+
+ /* Find and process the arm64 image header. */
+
+ for (i = 0; i < ehdr.e_phnum; i++) {
+ struct mem_phdr *phdr = &ehdr.e_phdr[i];
+ unsigned long header_offset;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ /*
+ * When CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET=y the image header
+ * could be offset in the elf segment. The linker script sets
+ * ehdr.e_entry to the start of text.
+ */
+
+ header_offset = ehdr.e_entry - phdr->p_vaddr;
+
+ header = (const struct arm64_image_header *)(
+ kernel_buf + phdr->p_offset + header_offset);
+
+ if (!arm64_process_image_header(header)) {
+ dbgprintf("%s: e_entry: %016llx\n", __func__,
+ ehdr.e_entry);
+ dbgprintf("%s: p_vaddr: %016llx\n", __func__,
+ phdr->p_vaddr);
+ dbgprintf("%s: header_offset: %016lx\n", __func__,
+ header_offset);
+
+ break;
+ }
+ }
+
+ if (i == ehdr.e_phnum) {
+ dbgprintf("%s: Valid arm64 header not found\n", __func__);
+ result = EFAILED;
+ goto exit;
+ }
+
+ kernel_segment = arm64_locate_kernel_segment(info);
+
+ if (kernel_segment == ULONG_MAX) {
+ dbgprintf("%s: Kernel segment is not allocated\n", __func__);
+ result = EFAILED;
+ goto exit;
+ }
+
+ arm64_mem.vp_offset = _ALIGN_DOWN(ehdr.e_entry, MiB(2));
+ if (!(info->kexec_flags & KEXEC_ON_CRASH))
+ arm64_mem.vp_offset -= kernel_segment - get_phys_offset();
+
+ dbgprintf("%s: kernel_segment: %016lx\n", __func__, kernel_segment);
+ dbgprintf("%s: text_offset: %016lx\n", __func__,
+ arm64_mem.text_offset);
+ dbgprintf("%s: image_size: %016lx\n", __func__,
+ arm64_mem.image_size);
+ dbgprintf("%s: phys_offset: %016lx\n", __func__,
+ arm64_mem.phys_offset);
+ dbgprintf("%s: vp_offset: %016lx\n", __func__,
+ arm64_mem.vp_offset);
+ dbgprintf("%s: PE format: %s\n", __func__,
+ (arm64_header_check_pe_sig(header) ? "yes" : "no"));
+
+ /* create and initialize elf core header segment */
+ if (info->kexec_flags & KEXEC_ON_CRASH) {
+ result = load_crashdump_segments(info);
+ if (result) {
+			dbgprintf("%s: Creating elfcorehdr failed.\n",
+ __func__);
+ goto exit;
+ }
+ }
+
+ /* load the kernel */
+ if (info->kexec_flags & KEXEC_ON_CRASH)
+ /*
+ * offset addresses in elf header in order to load
+ * vmlinux (elf_exec) into crash kernel's memory
+ */
+ fixup_elf_addrs(&ehdr);
+
+ result = elf_exec_load(&ehdr, info);
+
+ if (result) {
+ dbgprintf("%s: elf_exec_load failed\n", __func__);
+ goto exit;
+ }
+
+ /* load additional data */
+ result = arm64_load_other_segments(info, kernel_segment
+ + arm64_mem.text_offset);
+
+exit:
+ reset_vp_offset();
+ free_elf_info(&ehdr);
+ if (result)
+ fprintf(stderr, "kexec: Bad elf image file, load failed.\n");
+ return result;
+}
+
+void elf_arm64_usage(void)
+{
+ printf(
+" An ARM64 ELF image, big or little endian.\n"
+" Typically vmlinux or a stripped version of vmlinux.\n\n");
+}
diff --git a/kexec/arch/arm64/kexec-image-arm64.c b/kexec/arch/arm64/kexec-image-arm64.c
new file mode 100644
index 0000000..a196747
--- /dev/null
+++ b/kexec/arch/arm64/kexec-image-arm64.c
@@ -0,0 +1,119 @@
+/*
+ * ARM64 kexec binary image support.
+ */
+
+#define _GNU_SOURCE
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include "crashdump-arm64.h"
+#include "image-header.h"
+#include "kexec.h"
+#include "kexec-arm64.h"
+#include "kexec-syscall.h"
+#include "arch/options.h"
+
+int image_arm64_probe(const char *kernel_buf, off_t kernel_size)
+{
+ const struct arm64_image_header *h;
+
+ if (kernel_size < sizeof(struct arm64_image_header)) {
+ dbgprintf("%s: No arm64 image header.\n", __func__);
+ return -1;
+ }
+
+ h = (const struct arm64_image_header *)(kernel_buf);
+
+ if (!arm64_header_check_magic(h)) {
+ dbgprintf("%s: Bad arm64 image header.\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+int image_arm64_load(int argc, char **argv, const char *kernel_buf,
+ off_t kernel_size, struct kexec_info *info)
+{
+ const struct arm64_image_header *header;
+ unsigned long kernel_segment;
+ int result;
+
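+	/*
+	 * With kexec_file_load() the kernel parses the Image itself; user
+	 * space only hands over the initrd file descriptor and the
+	 * command line.
+	 */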
+ if (info->file_mode) {
+ if (arm64_opts.initrd) {
+ info->initrd_fd = open(arm64_opts.initrd, O_RDONLY);
+ if (info->initrd_fd == -1) {
+ fprintf(stderr,
+ "Could not open initrd file %s:%s\n",
+ arm64_opts.initrd, strerror(errno));
+ result = EFAILED;
+ goto exit;
+ }
+ }
+
+ if (arm64_opts.command_line) {
+ info->command_line = (char *)arm64_opts.command_line;
+ info->command_line_len =
+ strlen(arm64_opts.command_line) + 1;
+ }
+
+ return 0;
+ }
+
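+	/* Read text_offset and image_size from the arm64 Image header. */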
+ header = (const struct arm64_image_header *)(kernel_buf);
+
+ if (arm64_process_image_header(header))
+ return EFAILED;
+
+ kernel_segment = arm64_locate_kernel_segment(info);
+
+ if (kernel_segment == ULONG_MAX) {
+ dbgprintf("%s: Kernel segment is not allocated\n", __func__);
+ result = EFAILED;
+ goto exit;
+ }
+
+ dbgprintf("%s: kernel_segment: %016lx\n", __func__, kernel_segment);
+ dbgprintf("%s: text_offset: %016lx\n", __func__,
+ arm64_mem.text_offset);
+ dbgprintf("%s: image_size: %016lx\n", __func__,
+ arm64_mem.image_size);
+ dbgprintf("%s: phys_offset: %016lx\n", __func__,
+ arm64_mem.phys_offset);
+ dbgprintf("%s: vp_offset: %016lx\n", __func__,
+ arm64_mem.vp_offset);
+ dbgprintf("%s: PE format: %s\n", __func__,
+ (arm64_header_check_pe_sig(header) ? "yes" : "no"));
+
+ /* create and initialize elf core header segment */
+ if (info->kexec_flags & KEXEC_ON_CRASH) {
+ result = load_crashdump_segments(info);
+ if (result) {
+			dbgprintf("%s: Creating elfcorehdr failed.\n",
+ __func__);
+ goto exit;
+ }
+ }
+
+ /* load the kernel */
+ add_segment_phys_virt(info, kernel_buf, kernel_size,
+ kernel_segment + arm64_mem.text_offset,
+ arm64_mem.image_size, 0);
+
+ /* load additional data */
+ result = arm64_load_other_segments(info, kernel_segment
+ + arm64_mem.text_offset);
+
+exit:
+ if (result)
+ fprintf(stderr, "kexec: load failed.\n");
+ return result;
+}
+
+void image_arm64_usage(void)
+{
+ printf(
+" An ARM64 binary image, compressed or not, big or little endian.\n"
+" Typically an Image file.\n\n");
+}
diff --git a/kexec/arch/arm64/kexec-uImage-arm64.c b/kexec/arch/arm64/kexec-uImage-arm64.c
new file mode 100644
index 0000000..c466913
--- /dev/null
+++ b/kexec/arch/arm64/kexec-uImage-arm64.c
@@ -0,0 +1,52 @@
+/*
+ * uImage support added by David Woodhouse <dwmw2@infradead.org>
+ */
+#include <stdint.h>
+#include <string.h>
+#include <sys/types.h>
+#include <image.h>
+#include <kexec-uImage.h>
+#include "../../kexec.h"
+#include "kexec-arm64.h"
+
+int uImage_arm64_probe(const char *buf, off_t len)
+{
+ int ret;
+
+ ret = uImage_probe_kernel(buf, len, IH_ARCH_ARM64);
+
+	/*
+	 * uImage_probe_kernel() returns:
+	 *   0 - valid uImage
+	 *  -1 - uImage is corrupted
+	 *   1 - image is not a uImage
+	 */
+ if (!ret)
+ return 0;
+ else
+ return -1;
+}
+
+int uImage_arm64_load(int argc, char **argv, const char *buf, off_t len,
+ struct kexec_info *info)
+{
+ struct Image_info img;
+ int ret;
+
+ if (info->file_mode) {
+ fprintf(stderr,
+ "uImage is not supported in kexec_file\n");
+
+ return EFAILED;
+ }
+
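+	/*
+	 * Strip the U-Boot wrapper; the embedded Image payload is then
+	 * loaded exactly like a plain Image file.
+	 */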
+ ret = uImage_load(buf, len, &img);
+ if (ret)
+ return ret;
+
+ return image_arm64_load(argc, argv, img.buf, img.len, info);
+}
+
+void uImage_arm64_usage(void)
+{
+ printf(
+" An ARM64 U-boot uImage file, compressed or not, big or little endian.\n\n");
+}
diff --git a/kexec/arch/arm64/kexec-vmlinuz-arm64.c b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
new file mode 100644
index 0000000..c0ee47c
--- /dev/null
+++ b/kexec/arch/arm64/kexec-vmlinuz-arm64.c
@@ -0,0 +1,110 @@
+/*
+ * ARM64 PE compressed Image (vmlinuz, ZBOOT) support.
+ *
+ * Several distros use the 'make zinstall' rule in
+ * 'arch/arm64/boot/Makefile' to install the arm64
+ * ZBOOT compressed file into the boot destination
+ * directory (e.g. /boot).
+ *
+ * Currently we cannot use kexec_file_load() to load vmlinuz
+ * PE images that self decompress.
+ *
+ * To support ZBOOT, we should:
+ * a). Copy the compressed contents of vmlinuz to a temporary file.
+ * b). Decompress (gunzip-decompress) the contents inside the
+ * temporary file.
+ * c). Validate the resulting image and write it back to the
+ * temporary file.
+ * d). Pass the 'fd' of the temporary file to the kernel space.
+ *
+ * Note: this module does not implement its own loader; its _load()
+ * hook simply relies on image_arm64_load() to load the resulting
+ * decompressed image.
+ *
+ * In short, the kernel still receives a fully decompressed kernel
+ * image from kexec-tools.
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include "kexec-arm64.h"
+#include <kexec-pe-zboot.h>
+#include "arch/options.h"
+
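+/*
+ * File descriptor of the temporary file that holds the decompressed
+ * kernel image; set up by pez_arm64_probe() and consumed by
+ * pez_arm64_load().
+ */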
+static int kernel_fd = -1;
+
+/*
+ * Returns:
+ *  0 : valid PE + compressed ZBOOT image.
+ * -1 : error or invalid format (not a valid PE + compressed ZBOOT image).
+ */
+int pez_arm64_probe(const char *kernel_buf, off_t kernel_size)
+{
+ int ret = -1;
+ const struct arm64_image_header *h;
+ char *buf;
+ off_t buf_sz;
+
+ buf = (char *)kernel_buf;
+ buf_sz = kernel_size;
+ if (!buf)
+ return -1;
+ h = (const struct arm64_image_header *)buf;
+
+ dbgprintf("%s: PROBE.\n", __func__);
+ if (buf_sz < sizeof(struct arm64_image_header)) {
+ dbgprintf("%s: Not large enough to be a PE image.\n", __func__);
+ return -1;
+ }
+ if (!arm64_header_check_pe_sig(h)) {
+		dbgprintf("%s: Not a PE image.\n", __func__);
+ return -1;
+ }
+
+ if (buf_sz < sizeof(struct arm64_image_header) + h->pe_header) {
+		dbgprintf("%s: PE header offset larger than image.\n", __func__);
+ return -1;
+ }
+
+ if (memcmp(&buf[h->pe_header],
+ arm64_pe_machtype, sizeof(arm64_pe_machtype))) {
+ dbgprintf("%s: PE header doesn't match machine type.\n", __func__);
+ return -1;
+ }
+
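+	/*
+	 * Decompress the ZBOOT payload into a temporary file; on success
+	 * its file descriptor is returned through kernel_fd.
+	 */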
+ ret = pez_prepare(buf, buf_sz, &kernel_fd);
+
+ if (!ret) {
+ /* validate the arm64 specific header */
+ struct arm64_image_header hdr_check;
+ if (read(kernel_fd, &hdr_check, sizeof(hdr_check)) != sizeof(hdr_check))
+ goto bad_header;
+
+ lseek(kernel_fd, 0, SEEK_SET);
+
+ if (!arm64_header_check_magic(&hdr_check)) {
+ dbgprintf("%s: Bad arm64 image header.\n", __func__);
+ goto bad_header;
+ }
+ }
+
+ return ret;
+bad_header:
+ close(kernel_fd);
+ return -1;
+}
+
+int pez_arm64_load(int argc, char **argv, const char *buf, off_t len,
+ struct kexec_info *info)
+{
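+	/*
+	 * Hand the decompressed image prepared by pez_arm64_probe() to the
+	 * kernel for kexec_file_load(); the in-memory buffer path is
+	 * handled by image_arm64_load().
+	 */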
+ info->kernel_fd = kernel_fd;
+ return image_arm64_load(argc, argv, buf, len, info);
+}
+
+void pez_arm64_usage(void)
+{
+ printf(
+" An ARM64 vmlinuz, PE image of a compressed, little endian.\n"
+" kernel, built with ZBOOT enabled.\n\n");
+}