path: root/arch/arm/boot/compressed
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree       a94efe259b9009378be6d90eb30d2b019d95c194  /arch/arm/boot/compressed
parent     Initial commit.
Adding upstream version 5.10.209. (upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm/boot/compressed')
-rw-r--r--  arch/arm/boot/compressed/.gitignore      |    9
-rw-r--r--  arch/arm/boot/compressed/Makefile        |  200
-rw-r--r--  arch/arm/boot/compressed/atags_to_fdt.c  |  217
-rw-r--r--  arch/arm/boot/compressed/big-endian.S    |   14
-rw-r--r--  arch/arm/boot/compressed/debug.S         |   48
-rw-r--r--  arch/arm/boot/compressed/decompress.c    |   65
-rw-r--r--  arch/arm/boot/compressed/efi-header.S    |  136
-rw-r--r--  arch/arm/boot/compressed/fdt.c           |    2
-rw-r--r--  arch/arm/boot/compressed/fdt_ro.c        |    2
-rw-r--r--  arch/arm/boot/compressed/fdt_rw.c        |    2
-rw-r--r--  arch/arm/boot/compressed/fdt_wip.c       |    2
-rw-r--r--  arch/arm/boot/compressed/head-sa1100.S   |   49
-rw-r--r--  arch/arm/boot/compressed/head-sharpsl.S  |  151
-rw-r--r--  arch/arm/boot/compressed/head-xscale.S   |   35
-rw-r--r--  arch/arm/boot/compressed/head.S          | 1519
-rw-r--r--  arch/arm/boot/compressed/ll_char_wr.S    |  131
-rw-r--r--  arch/arm/boot/compressed/misc.c          |  167
-rw-r--r--  arch/arm/boot/compressed/misc.h          |   10
-rw-r--r--  arch/arm/boot/compressed/piggy.S         |    7
-rw-r--r--  arch/arm/boot/compressed/string.c        |  142
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S   |  141
21 files changed, 3049 insertions(+), 0 deletions(-)
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
new file mode 100644
index 000000000..60606b0f3
--- /dev/null
+++ b/arch/arm/boot/compressed/.gitignore
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ashldi3.S
+bswapsdi2.S
+font.c
+lib1funcs.S
+hyp-stub.S
+piggy_data
+vmlinux
+vmlinux.lds
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
new file mode 100644
index 000000000..175213d7a
--- /dev/null
+++ b/arch/arm/boot/compressed/Makefile
@@ -0,0 +1,200 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# linux/arch/arm/boot/compressed/Makefile
+#
+# create a compressed vmlinuz image from the original vmlinux
+#
+
+OBJS =
+
+HEAD = head.o
+OBJS += misc.o decompress.o
+ifeq ($(CONFIG_DEBUG_UNCOMPRESS),y)
+OBJS += debug.o
+AFLAGS_head.o += -DDEBUG
+endif
+FONTC = $(srctree)/lib/fonts/font_acorn_8x8.c
+
+# string library code (-Os is enforced to keep it much smaller)
+OBJS += string.o
+CFLAGS_string.o := -Os
+
+ifeq ($(CONFIG_ARM_VIRT_EXT),y)
+OBJS += hyp-stub.o
+endif
+
+GCOV_PROFILE := n
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
+#
+# Architecture dependencies
+#
+ifeq ($(CONFIG_ARCH_ACORN),y)
+OBJS += ll_char_wr.o font.o
+endif
+
+ifeq ($(CONFIG_ARCH_SA1100),y)
+OBJS += head-sa1100.o
+endif
+
+ifeq ($(CONFIG_CPU_XSCALE),y)
+OBJS += head-xscale.o
+endif
+
+ifeq ($(CONFIG_PXA_SHARPSL_DETECT_MACH_ID),y)
+OBJS += head-sharpsl.o
+endif
+
+ifeq ($(CONFIG_CPU_ENDIAN_BE32),y)
+ifeq ($(CONFIG_CPU_CP15),y)
+OBJS += big-endian.o
+else
+# The endianness should be set by the h/w design.
+endif
+endif
+
+#
+# We now have a PIC decompressor implementation. Decompressors running
+# from RAM should not define ZTEXTADDR. Decompressors running directly
+# from ROM or Flash must define ZTEXTADDR (preferably via the config)
+# FIXME: Previous assignment to ztextaddr-y is lost here. See SHARK
+ifeq ($(CONFIG_ZBOOT_ROM),y)
+ZTEXTADDR := $(CONFIG_ZBOOT_ROM_TEXT)
+ZBSSADDR := $(CONFIG_ZBOOT_ROM_BSS)
+else
+ZTEXTADDR := 0
+ZBSSADDR := ALIGN(8)
+endif
+
+MALLOC_SIZE := 65536
+
+AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) -DMALLOC_SIZE=$(MALLOC_SIZE)
+CPPFLAGS_vmlinux.lds := -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)"
+CPPFLAGS_vmlinux.lds += -DTEXT_OFFSET="$(TEXT_OFFSET)"
+CPPFLAGS_vmlinux.lds += -DMALLOC_SIZE="$(MALLOC_SIZE)"
+
+compress-$(CONFIG_KERNEL_GZIP) = gzip
+compress-$(CONFIG_KERNEL_LZO) = lzo
+compress-$(CONFIG_KERNEL_LZMA) = lzma
+compress-$(CONFIG_KERNEL_XZ) = xzkern
+compress-$(CONFIG_KERNEL_LZ4) = lz4
+
+libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
+
+ifeq ($(CONFIG_ARM_ATAG_DTB_COMPAT),y)
+CFLAGS_REMOVE_atags_to_fdt.o += -Wframe-larger-than=${CONFIG_FRAME_WARN}
+CFLAGS_atags_to_fdt.o += -Wframe-larger-than=1280
+OBJS += $(libfdt_objs) atags_to_fdt.o
+endif
+
+# -fstack-protector-strong triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+$(foreach o, $(libfdt_objs) atags_to_fdt.o, \
+ $(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
+
+# These were previously generated C files. When you are building the kernel
+# with O=, make sure to remove the stale files in the output tree. Otherwise,
+# the build system wrongly compiles the stale ones.
+ifdef building_out_of_srctree
+$(shell rm -f $(addprefix $(obj)/, fdt_rw.c fdt_ro.c fdt_wip.c fdt.c))
+endif
+
+targets := vmlinux vmlinux.lds piggy_data piggy.o \
+ lib1funcs.o ashldi3.o bswapsdi2.o \
+ head.o $(OBJS)
+
+clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S
+
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
+ -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += -pg
+asflags-y := -DZIMAGE
+
+# Supply kernel BSS size to the decompressor via a linker symbol.
+KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
+ sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
+ -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
+LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
+# Supply ZRELADDR to the decompressor via a linker symbol.
+ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+LDFLAGS_vmlinux += --defsym zreladdr=$(ZRELADDR)
+endif
+ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
+LDFLAGS_vmlinux += --be8
+endif
+# Report unresolved symbol references
+LDFLAGS_vmlinux += --no-undefined
+# Delete all temporary local symbols
+LDFLAGS_vmlinux += -X
+# Report orphan sections
+ifdef CONFIG_LD_ORPHAN_WARN
+LDFLAGS_vmlinux += --orphan-handling=warn
+endif
+# Next argument is a linker script
+LDFLAGS_vmlinux += -T
+
+# For __aeabi_uidivmod
+lib1funcs = $(obj)/lib1funcs.o
+
+$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S
+ $(call cmd,shipped)
+
+# For __aeabi_llsl
+ashldi3 = $(obj)/ashldi3.o
+
+$(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S
+ $(call cmd,shipped)
+
+# For __bswapsi2, __bswapdi2
+bswapsdi2 = $(obj)/bswapsdi2.o
+
+$(obj)/bswapsdi2.S: $(srctree)/arch/$(SRCARCH)/lib/bswapsdi2.S
+ $(call cmd,shipped)
+
+# We need to prevent any GOTOFF relocs being used with references
+# to symbols in the .bss section since we cannot relocate them
+# independently from the rest at run time. This can be achieved by
+# ensuring that no private .bss symbols exist, as global symbols
+# always have a GOT entry which is what we need.
+# The .data section is already discarded by the linker script so no need
+# to bother about it here.
+check_for_bad_syms = \
+bad_syms=$$($(NM) $@ | sed -n 's/^.\{8\} [bc] \(.*\)/\1/p') && \
+[ -z "$$bad_syms" ] || \
+ ( echo "following symbols must have non local/private scope:" >&2; \
+ echo "$$bad_syms" >&2; false )
+
+check_for_multiple_zreladdr = \
+if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \
+ echo 'multiple zreladdrs: $(ZRELADDR)'; \
+ echo 'This needs CONFIG_AUTO_ZRELADDR to be set'; \
+ false; \
+fi
+
+efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.o \
+ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
+ $(bswapsdi2) $(efi-obj-y) FORCE
+ @$(check_for_multiple_zreladdr)
+ $(call if_changed,ld)
+ @$(check_for_bad_syms)
+
+$(obj)/piggy_data: $(obj)/../Image FORCE
+ $(call if_changed,$(compress-y))
+
+$(obj)/piggy.o: $(obj)/piggy_data
+
+CFLAGS_font.o := -Dstatic=
+
+$(obj)/font.c: $(FONTC)
+ $(call cmd,shipped)
+
+AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
+
+$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
+ $(call cmd,shipped)
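
The check_for_bad_syms rule above enforces the .bss/GOTOFF constraint explained in the comment before it. A minimal, illustrative C sketch of the distinction it checks (the names are made up; nm marks file-local .bss data with a lowercase 'b', which the rule rejects, and global .bss data with an uppercase 'B'):

/* Illustrative only - not part of this patch. */
static int local_state;		/* local .bss symbol: 'b' in nm, may be
				 * addressed via GOTOFF, so it is rejected */
int global_state;		/* global .bss symbol: 'B' in nm, always
				 * reached through a GOT entry, so it is fine */

int main(void)
{
	return local_state + global_state;
}
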
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
new file mode 100644
index 000000000..31927d2fe
--- /dev/null
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/libfdt_env.h>
+#include <asm/setup.h>
+#include <libfdt.h>
+
+#if defined(CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND)
+#define do_extend_cmdline 1
+#else
+#define do_extend_cmdline 0
+#endif
+
+#define NR_BANKS 16
+
+static int node_offset(void *fdt, const char *node_path)
+{
+ int offset = fdt_path_offset(fdt, node_path);
+ if (offset == -FDT_ERR_NOTFOUND)
+ /* Add the node to root if not found, dropping the leading '/' */
+ offset = fdt_add_subnode(fdt, 0, node_path + 1);
+ return offset;
+}
+
+static int setprop(void *fdt, const char *node_path, const char *property,
+ void *val_array, int size)
+{
+ int offset = node_offset(fdt, node_path);
+ if (offset < 0)
+ return offset;
+ return fdt_setprop(fdt, offset, property, val_array, size);
+}
+
+static int setprop_string(void *fdt, const char *node_path,
+ const char *property, const char *string)
+{
+ int offset = node_offset(fdt, node_path);
+ if (offset < 0)
+ return offset;
+ return fdt_setprop_string(fdt, offset, property, string);
+}
+
+static int setprop_cell(void *fdt, const char *node_path,
+ const char *property, uint32_t val)
+{
+ int offset = node_offset(fdt, node_path);
+ if (offset < 0)
+ return offset;
+ return fdt_setprop_cell(fdt, offset, property, val);
+}
+
+static const void *getprop(const void *fdt, const char *node_path,
+ const char *property, int *len)
+{
+ int offset = fdt_path_offset(fdt, node_path);
+
+ if (offset == -FDT_ERR_NOTFOUND)
+ return NULL;
+
+ return fdt_getprop(fdt, offset, property, len);
+}
+
+static uint32_t get_cell_size(const void *fdt)
+{
+ int len;
+ uint32_t cell_size = 1;
+ const __be32 *size_len = getprop(fdt, "/", "#size-cells", &len);
+
+ if (size_len)
+ cell_size = fdt32_to_cpu(*size_len);
+ return cell_size;
+}
+
+static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
+{
+ char cmdline[COMMAND_LINE_SIZE];
+ const char *fdt_bootargs;
+ char *ptr = cmdline;
+ int len = 0;
+
+ /* copy the fdt command line into the buffer */
+ fdt_bootargs = getprop(fdt, "/chosen", "bootargs", &len);
+ if (fdt_bootargs)
+ if (len < COMMAND_LINE_SIZE) {
+ memcpy(ptr, fdt_bootargs, len);
+ /* len is the length of the string
+ * including the NULL terminator */
+ ptr += len - 1;
+ }
+
+ /* and append the ATAG_CMDLINE */
+ if (fdt_cmdline) {
+ len = strlen(fdt_cmdline);
+ if (ptr - cmdline + len + 2 < COMMAND_LINE_SIZE) {
+ *ptr++ = ' ';
+ memcpy(ptr, fdt_cmdline, len);
+ ptr += len;
+ }
+ }
+ *ptr = '\0';
+
+ setprop_string(fdt, "/chosen", "bootargs", cmdline);
+}
+
+static void hex_str(char *out, uint32_t value)
+{
+ uint32_t digit;
+ int idx;
+
+ for (idx = 7; idx >= 0; idx--) {
+ digit = value >> 28;
+ value <<= 4;
+ digit &= 0xf;
+ if (digit < 10)
+ digit += '0';
+ else
+ digit += 'A'-10;
+ *out++ = digit;
+ }
+ *out = '\0';
+}
+
+/*
+ * Convert and fold provided ATAGs into the provided FDT.
+ *
+ * Return values:
+ * = 0 -> pretend success
+ * = 1 -> bad ATAG (may retry with another possible ATAG pointer)
+ * < 0 -> error from libfdt
+ */
+int atags_to_fdt(void *atag_list, void *fdt, int total_space)
+{
+ struct tag *atag = atag_list;
+ /* In the case of a 64-bit memory size, we need to reserve 2 cells
+  * for the address and 2 for the size of each bank */
+ __be32 mem_reg_property[2 * 2 * NR_BANKS];
+ int memcount = 0;
+ int ret, memsize;
+
+ /* make sure we've got an aligned pointer */
+ if ((u32)atag_list & 0x3)
+ return 1;
+
+ /* if we get a DTB here we're done already */
+ if (*(__be32 *)atag_list == cpu_to_fdt32(FDT_MAGIC))
+ return 0;
+
+ /* validate the ATAG */
+ if (atag->hdr.tag != ATAG_CORE ||
+ (atag->hdr.size != tag_size(tag_core) &&
+ atag->hdr.size != 2))
+ return 1;
+
+ /* let's give it all the room it could need */
+ ret = fdt_open_into(fdt, fdt, total_space);
+ if (ret < 0)
+ return ret;
+
+ for_each_tag(atag, atag_list) {
+ if (atag->hdr.tag == ATAG_CMDLINE) {
+ /* Append the ATAGS command line to the device tree
+ * command line.
+ * NB: This means that if the same parameter is set in
+ * the device tree and in the tags, the one from the
+ * tags will be chosen.
+ */
+ if (do_extend_cmdline)
+ merge_fdt_bootargs(fdt,
+ atag->u.cmdline.cmdline);
+ else
+ setprop_string(fdt, "/chosen", "bootargs",
+ atag->u.cmdline.cmdline);
+ } else if (atag->hdr.tag == ATAG_MEM) {
+ if (memcount >= sizeof(mem_reg_property)/4)
+ continue;
+ if (!atag->u.mem.size)
+ continue;
+ memsize = get_cell_size(fdt);
+
+ if (memsize == 2) {
+ /* if memsize is 2, each value needs 2 cells
+  * of 32 bits, i.e. the start address and
+  * size are 64 bits wide */
+ __be64 *mem_reg_prop64 =
+ (__be64 *)mem_reg_property;
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.start);
+ mem_reg_prop64[memcount++] =
+ cpu_to_fdt64(atag->u.mem.size);
+ } else {
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.start);
+ mem_reg_property[memcount++] =
+ cpu_to_fdt32(atag->u.mem.size);
+ }
+
+ } else if (atag->hdr.tag == ATAG_INITRD2) {
+ uint32_t initrd_start, initrd_size;
+ initrd_start = atag->u.initrd.start;
+ initrd_size = atag->u.initrd.size;
+ setprop_cell(fdt, "/chosen", "linux,initrd-start",
+ initrd_start);
+ setprop_cell(fdt, "/chosen", "linux,initrd-end",
+ initrd_start + initrd_size);
+ } else if (atag->hdr.tag == ATAG_SERIAL) {
+ char serno[16+2];
+ hex_str(serno, atag->u.serialnr.high);
+ hex_str(serno+8, atag->u.serialnr.low);
+ setprop_string(fdt, "/", "serial-number", serno);
+ }
+ }
+
+ if (memcount) {
+ setprop(fdt, "/memory", "reg", mem_reg_property,
+ 4 * memcount * memsize);
+ }
+
+ return fdt_pack(fdt);
+}
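
A hedged sketch of the calling contract documented above atags_to_fdt(): a return of 1 means the pointer did not reference a usable ATAG list and the caller may retry elsewhere, which is what head.S later does with a fallback address 0x100 past the start of RAM. The wrapper below is illustrative, not part of this patch:

/* Illustrative caller only. */
static int fold_atags_into_fdt(void *atags, void *fallback_atags,
			       void *fdt, int fdt_space)
{
	int ret = atags_to_fdt(atags, fdt, fdt_space);

	if (ret == 1)	/* no valid ATAGs at 'atags': retry the fallback */
		ret = atags_to_fdt(fallback_atags, fdt, fdt_space);

	return ret;	/* 0 on success, < 0 on a libfdt error */
}
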
diff --git a/arch/arm/boot/compressed/big-endian.S b/arch/arm/boot/compressed/big-endian.S
new file mode 100644
index 000000000..0e092c36d
--- /dev/null
+++ b/arch/arm/boot/compressed/big-endian.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/arch/arm/boot/compressed/big-endian.S
+ *
+ * Switch CPU into big endian mode.
+ * Author: Nicolas Pitre
+ */
+
+ .section ".start", "ax"
+
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ orr r0, r0, #(1 << 7) @ enable big endian mode
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
new file mode 100644
index 000000000..fac40a717
--- /dev/null
+++ b/arch/arm/boot/compressed/debug.S
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
+#include CONFIG_DEBUG_LL_INCLUDE
+
+ENTRY(putc)
+ addruart r1, r2, r3
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+ waituartcts r3, r1
+#endif
+ waituarttxrdy r3, r1
+ senduart r0, r1
+ busyuart r3, r1
+ mov pc, lr
+ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+ adr r1, 1f
+ ldmia r1, {r2, r3}
+ add r2, r2, r1
+ ldr r1, [r2, r3]
+ strb r0, [r1]
+ mov r0, #0x03 @ SYS_WRITEC
+ ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
+ THUMB( svc #0xab )
+#endif
+ mov pc, lr
+ .align 2
+1: .word _GLOBAL_OFFSET_TABLE_ - .
+ .word semi_writec_buf(GOT)
+ENDPROC(putc)
+
+ .bss
+ .global semi_writec_buf
+ .type semi_writec_buf, %object
+semi_writec_buf:
+ .space 4
+ .size semi_writec_buf, 4
+
+#endif
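
For reference, a hedged C rendering of the semihosting branch above: operation 0x03 is SYS_WRITEC and r1 must point at the character, which the assembly arranges through semi_writec_buf. Only the ARM-state trap is shown; Thumb builds use svc/bkpt 0xab as in the code:

/* Illustrative only - the decompressor uses the assembly version above. */
static void semihost_putc(char c)
{
	register unsigned int op asm("r0") = 0x03;	/* SYS_WRITEC */
	register char *chp asm("r1") = &c;

	asm volatile("svc 0x123456" : : "r" (op), "r" (chp) : "memory");
}
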
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
new file mode 100644
index 000000000..74255e819
--- /dev/null
+++ b/arch/arm/boot/compressed/decompress.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _LINUX_STRING_H_
+
+#include <linux/compiler.h> /* for inline */
+#include <linux/types.h> /* for size_t */
+#include <linux/stddef.h> /* for NULL */
+#include <linux/linkage.h>
+#include <asm/string.h>
+#include "misc.h"
+
+#define STATIC static
+#define STATIC_RW_DATA /* non-static please */
+
+/* Diagnostic functions */
+#ifdef DEBUG
+# define Assert(cond,msg) {if(!(cond)) error(msg);}
+# define Trace(x) fprintf x
+# define Tracev(x) {if (verbose) fprintf x ;}
+# define Tracevv(x) {if (verbose>1) fprintf x ;}
+# define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
+# define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
+#else
+# define Assert(cond,msg)
+# define Trace(x)
+# define Tracev(x)
+# define Tracevv(x)
+# define Tracec(c,x)
+# define Tracecv(c,x)
+#endif
+
+/* Not needed, but used in some headers pulled in by decompressors */
+extern char * strstr(const char * s1, const char *s2);
+extern size_t strlen(const char *s);
+extern int memcmp(const void *cs, const void *ct, size_t count);
+extern char * strchrnul(const char *, int);
+
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
+
+#ifdef CONFIG_KERNEL_XZ
+/* Prevent KASAN override of string helpers in decompressor */
+#undef memmove
+#define memmove memmove
+#undef memcpy
+#define memcpy memcpy
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
+int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+{
+ return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
+}
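
A hedged sketch of how do_decompress() is expected to be driven by misc.c (added by this commit but not shown in this hunk); input_data, input_data_end and the error() stub are assumptions based on the usual piggy.S layout, not definitions from this file:

extern unsigned char input_data[], input_data_end[];	/* assumed, from piggy.S */

static void error(char *x)	/* illustrative stub: report x and halt */
{
	(void)x;
	for (;;)
		;
}

static void decompress_image(unsigned char *output)
{
	int len = input_data_end - input_data;

	if (do_decompress(input_data, len, output, error))
		error("decompressor failed");
}
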
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
new file mode 100644
index 000000000..230030c13
--- /dev/null
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-2017 Linaro Ltd
+ * Authors: Roy Franz <roy.franz@linaro.org>
+ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __nop
+ AR_CLASS( mov r0, r0 )
+ M_CLASS( nop.w )
+ .endm
+
+ .macro __initial_nops
+#ifdef CONFIG_EFI_STUB
+ @ This is a two-instruction NOP, which happens to bear the
+ @ PE/COFF signature "MZ" in the first two bytes, so the kernel
+ @ is accepted as an EFI binary. Booting via the UEFI stub
+ @ will not execute those instructions, but the ARM/Linux
+ @ boot protocol does, so we need some NOPs here.
+ .inst MZ_MAGIC | (0xe225 << 16) @ eor r5, r5, 0x4d000
+ eor r5, r5, 0x4d000 @ undo previous insn
+#else
+ __nop
+ __nop
+#endif
+ .endm
+
+ .macro __EFI_HEADER
+#ifdef CONFIG_EFI_STUB
+ .set start_offset, __efi_start - start
+ .org start + 0x3c
+ @
+ @ The PE header can be anywhere in the file, but for
+ @ simplicity we keep it together with the MSDOS header
+ @ The offset to the PE/COFF header needs to be at offset
+ @ 0x3C in the MSDOS header.
+ @ The only 2 fields of the MSDOS header that are used are this
+ @ PE/COFF offset, and the "MZ" bytes at offset 0x0.
+ @
+ .long pe_header - start @ Offset to the PE header.
+
+pe_header:
+ .long PE_MAGIC
+
+coff_header:
+ .short IMAGE_FILE_MACHINE_THUMB @ Machine
+ .short section_count @ NumberOfSections
+ .long 0 @ TimeDateStamp
+ .long 0 @ PointerToSymbolTable
+ .long 0 @ NumberOfSymbols
+ .short section_table - optional_header @ SizeOfOptionalHeader
+ .short IMAGE_FILE_32BIT_MACHINE | \
+ IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED @ Characteristics
+
+#define __pecoff_code_size (__pecoff_data_start - __efi_start)
+
+optional_header:
+ .short PE_OPT_MAGIC_PE32 @ PE32 format
+ .byte 0x02 @ MajorLinkerVersion
+ .byte 0x14 @ MinorLinkerVersion
+ .long __pecoff_code_size @ SizeOfCode
+ .long __pecoff_data_size @ SizeOfInitializedData
+ .long 0 @ SizeOfUninitializedData
+ .long efi_pe_entry - start @ AddressOfEntryPoint
+ .long start_offset @ BaseOfCode
+ .long __pecoff_data_start - start @ BaseOfData
+
+extra_header_fields:
+ .long 0 @ ImageBase
+ .long SZ_4K @ SectionAlignment
+ .long SZ_512 @ FileAlignment
+ .short 0 @ MajorOsVersion
+ .short 0 @ MinorOsVersion
+ .short LINUX_EFISTUB_MAJOR_VERSION @ MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION @ MinorImageVersion
+ .short 0 @ MajorSubsystemVersion
+ .short 0 @ MinorSubsystemVersion
+ .long 0 @ Win32VersionValue
+
+ .long __pecoff_end - start @ SizeOfImage
+ .long start_offset @ SizeOfHeaders
+ .long 0 @ CheckSum
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION @ Subsystem
+ .short 0 @ DllCharacteristics
+ .long 0 @ SizeOfStackReserve
+ .long 0 @ SizeOfStackCommit
+ .long 0 @ SizeOfHeapReserve
+ .long 0 @ SizeOfHeapCommit
+ .long 0 @ LoaderFlags
+ .long (section_table - .) / 8 @ NumberOfRvaAndSizes
+
+ .quad 0 @ ExportTable
+ .quad 0 @ ImportTable
+ .quad 0 @ ResourceTable
+ .quad 0 @ ExceptionTable
+ .quad 0 @ CertificateTable
+ .quad 0 @ BaseRelocationTable
+
+section_table:
+ .ascii ".text\0\0\0"
+ .long __pecoff_code_size @ VirtualSize
+ .long __efi_start @ VirtualAddress
+ .long __pecoff_code_size @ SizeOfRawData
+ .long __efi_start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE @ Characteristics
+
+ .ascii ".data\0\0\0"
+ .long __pecoff_data_size @ VirtualSize
+ .long __pecoff_data_start - start @ VirtualAddress
+ .long __pecoff_data_rawsize @ SizeOfRawData
+ .long __pecoff_data_start - start @ PointerToRawData
+ .long 0 @ PointerToRelocations
+ .long 0 @ PointerToLineNumbers
+ .short 0 @ NumberOfRelocations
+ .short 0 @ NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE @ Characteristics
+
+ .set section_count, (. - section_table) / 40
+
+ .align 12
+__efi_start:
+#endif
+ .endm
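
A small, hedged sketch of what a PE loader does with the layout emitted above: the first two bytes of the image read "MZ", and the 32-bit little-endian word at offset 0x3c gives the file offset of the "PE\0\0" signature written at pe_header:

#include <stdint.h>

static uint32_t read_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
}

/* buf points at the start of the zImage; illustrative only */
static int zimage_has_pe_header(const uint8_t *buf)
{
	if (buf[0] != 'M' || buf[1] != 'Z')	/* MZ_MAGIC in the first insn */
		return 0;
	return read_le32(buf + read_le32(buf + 0x3c)) == 0x00004550; /* "PE\0\0" */
}
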
diff --git a/arch/arm/boot/compressed/fdt.c b/arch/arm/boot/compressed/fdt.c
new file mode 100644
index 000000000..f8ea7a201
--- /dev/null
+++ b/arch/arm/boot/compressed/fdt.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/fdt.c"
diff --git a/arch/arm/boot/compressed/fdt_ro.c b/arch/arm/boot/compressed/fdt_ro.c
new file mode 100644
index 000000000..93970a4ad
--- /dev/null
+++ b/arch/arm/boot/compressed/fdt_ro.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/fdt_ro.c"
diff --git a/arch/arm/boot/compressed/fdt_rw.c b/arch/arm/boot/compressed/fdt_rw.c
new file mode 100644
index 000000000..f7c6b8b7e
--- /dev/null
+++ b/arch/arm/boot/compressed/fdt_rw.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/fdt_rw.c"
diff --git a/arch/arm/boot/compressed/fdt_wip.c b/arch/arm/boot/compressed/fdt_wip.c
new file mode 100644
index 000000000..048d2c7a0
--- /dev/null
+++ b/arch/arm/boot/compressed/fdt_wip.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/fdt_wip.c"
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
new file mode 100644
index 000000000..95abdd850
--- /dev/null
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/arch/arm/boot/compressed/head-sa1100.S
+ *
+ * Copyright (C) 1999 Nicolas Pitre <nico@fluxnic.net>
+ *
+ * SA1100 specific tweaks. This is merged into head.S by the linker.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/mach-types.h>
+
+ .section ".start", "ax"
+ .arch armv4
+
+__SA1100_start:
+
+ @ Preserve r8/r7 i.e. kernel entry values
+#ifdef CONFIG_SA1100_COLLIE
+ mov r7, #MACH_TYPE_COLLIE
+#endif
+#ifdef CONFIG_SA1100_SIMPAD
+ @ UNTIL we've something like an open bootldr
+ mov r7, #MACH_TYPE_SIMPAD @should be 87
+#endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ ands r0, r0, #0x0d
+ beq 99f
+
+ @ Data cache might be active.
+ @ Be sure to flush kernel binary out of the cache,
+ @ whatever state it is, before it is turned off.
+ @ This is done by fetching through currently executed
+ @ memory to be sure we hit the same cache.
+ bic r2, pc, #0x1f
+ add r3, r2, #0x4000 @ 16 kb is quite enough...
+1: ldr r0, [r2], #32
+ teq r2, r3
+ bne 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches
+
+ @ disabling MMU and caches
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #0x0d @ clear WB, DC, MMU
+ bic r0, r0, #0x1000 @ clear Icache
+ mcr p15, 0, r0, c1, c0, 0
+99:
diff --git a/arch/arm/boot/compressed/head-sharpsl.S b/arch/arm/boot/compressed/head-sharpsl.S
new file mode 100644
index 000000000..992e78450
--- /dev/null
+++ b/arch/arm/boot/compressed/head-sharpsl.S
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/arch/arm/boot/compressed/head-sharpsl.S
+ *
+ * Copyright (C) 2004-2005 Richard Purdie <rpurdie@rpsys.net>
+ *
+ * Sharp's bootloader doesn't pass any kind of machine ID
+ * so we have to figure out the machine for ourselves...
+ *
+ * Support for Poodle, Corgi (SL-C700), Shepherd (SL-C750)
+ * Husky (SL-C760), Tosa (SL-C6000), Spitz (SL-C3000),
+ * Akita (SL-C1000) and Borzoi (SL-C3100).
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/mach-types.h>
+
+#ifndef CONFIG_PXA_SHARPSL
+#error What am I doing here...
+#endif
+
+ .section ".start", "ax"
+
+__SharpSL_start:
+
+/* Check for TC6393 - if found we have a Tosa */
+ ldr r7, .TOSAID
+ mov r1, #0x10000000 @ Base address of TC6393 chip
+ mov r6, #0x03
+ ldrh r3, [r1, #8] @ Load TC6393XB Revision: This is 0x0003
+ cmp r6, r3
+ beq .SHARPEND @ Success -> tosa
+
+/* Check for pxa270 - if found, branch */
+ mrc p15, 0, r4, c0, c0 @ Get Processor ID
+ and r4, r4, #0xffffff00
+ ldr r3, .PXA270ID
+ cmp r4, r3
+ beq .PXA270
+
+/* Check for w100 - if not found we have a Poodle */
+ ldr r1, .W100ADDR @ Base address of w100 chip + regs offset
+
+ mov r6, #0x31 @ Load Magic Init value
+ str r6, [r1, #0x280] @ to SCRATCH_UMSK
+ mov r5, #0x3000
+.W100LOOP:
+ subs r5, r5, #1
+ bne .W100LOOP
+ mov r6, #0x30 @ Load 2nd Magic Init value
+ str r6, [r1, #0x280] @ to SCRATCH_UMSK
+
+ ldr r6, [r1, #0] @ Load Chip ID
+ ldr r3, .W100ID
+ ldr r7, .POODLEID
+ cmp r6, r3
+ bne .SHARPEND @ We have no w100 - Poodle
+
+/* Check for pxa250 - if found we have a Corgi */
+ ldr r7, .CORGIID
+ ldr r3, .PXA255ID
+ cmp r4, r3
+ blo .SHARPEND @ We have a PXA250 - Corgi
+
+/* Check for 64MiB flash - if found we have a Shepherd */
+ bl get_flash_ids
+ ldr r7, .SHEPHERDID
+ cmp r3, #0x76 @ 64MiB flash
+ beq .SHARPEND @ We have Shepherd
+
+/* Must be a Husky */
+ ldr r7, .HUSKYID @ Must be Husky
+ b .SHARPEND
+
+.PXA270:
+/* Check for 16MiB flash - if found we have Spitz */
+ bl get_flash_ids
+ ldr r7, .SPITZID
+ cmp r3, #0x73 @ 16MiB flash
+ beq .SHARPEND @ We have Spitz
+
+/* Check for a second SCOOP chip - if found we have Borzoi */
+ ldr r1, .SCOOP2ADDR
+ ldr r7, .BORZOIID
+ mov r6, #0x0140
+ strh r6, [r1]
+ ldrh r6, [r1]
+ cmp r6, #0x0140
+ beq .SHARPEND @ We have Borzoi
+
+/* Must be Akita */
+ ldr r7, .AKITAID
+ b .SHARPEND @ We have Akita
+
+.PXA255ID:
+ .word 0x69052d00 @ PXA255 Processor ID
+.PXA270ID:
+ .word 0x69054100 @ PXA270 Processor ID
+.W100ID:
+ .word 0x57411002 @ w100 Chip ID
+.W100ADDR:
+ .word 0x08010000 @ w100 Chip ID Reg Address
+.SCOOP2ADDR:
+ .word 0x08800040
+.POODLEID:
+ .word MACH_TYPE_POODLE
+.CORGIID:
+ .word MACH_TYPE_CORGI
+.SHEPHERDID:
+ .word MACH_TYPE_SHEPHERD
+.HUSKYID:
+ .word MACH_TYPE_HUSKY
+.TOSAID:
+ .word MACH_TYPE_TOSA
+.SPITZID:
+ .word MACH_TYPE_SPITZ
+.AKITAID:
+ .word MACH_TYPE_AKITA
+.BORZOIID:
+ .word MACH_TYPE_BORZOI
+
+/*
+ * Return: r2 - NAND Manufacturer ID
+ * r3 - NAND Chip ID
+ * Corrupts: r1
+ */
+get_flash_ids:
+ mov r1, #0x0c000000 @ Base address of NAND chip
+ ldrb r3, [r1, #24] @ Load FLASHCTL
+ bic r3, r3, #0x11 @ SET NCE
+ orr r3, r3, #0x0a @ SET CLR + FLWP
+ strb r3, [r1, #24] @ Save to FLASHCTL
+ mov r2, #0x90 @ Command "readid"
+ strb r2, [r1, #20] @ Save to FLASHIO
+ bic r3, r3, #2 @ CLR CLE
+ orr r3, r3, #4 @ SET ALE
+ strb r3, [r1, #24] @ Save to FLASHCTL
+ mov r2, #0 @ Address 0x00
+ strb r2, [r1, #20] @ Save to FLASHIO
+ bic r3, r3, #4 @ CLR ALE
+ strb r3, [r1, #24] @ Save to FLASHCTL
+.fids1:
+ ldrb r3, [r1, #24] @ Load FLASHCTL
+ tst r3, #32 @ Is chip ready?
+ beq .fids1
+ ldrb r2, [r1, #20] @ NAND Manufacturer ID
+ ldrb r3, [r1, #20] @ NAND Chip ID
+ mov pc, lr
+
+.SHARPEND:
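
A hedged C rendering of the get_flash_ids sequence above, i.e. a standard NAND READID (command 0x90, one zero address cycle) issued through Sharp's FLASHCTL/FLASHIO registers. Register offsets and bit meanings are taken from the assembly comments and should be treated as assumptions, not a verified driver:

#include <stdint.h>

#define NAND_BASE	0x0c000000u
#define FLASHIO		(*(volatile uint8_t *)(NAND_BASE + 0x14))
#define FLASHCTL	(*(volatile uint8_t *)(NAND_BASE + 0x18))

static void nand_read_ids(uint8_t *maker, uint8_t *device)
{
	uint8_t ctl = FLASHCTL;

	ctl = (ctl & ~0x11) | 0x0a;	/* select the chip, CLE + FLWP */
	FLASHCTL = ctl;
	FLASHIO  = 0x90;		/* READID command */
	ctl = (ctl & ~0x02) | 0x04;	/* CLE off, ALE on */
	FLASHCTL = ctl;
	FLASHIO  = 0x00;		/* single address cycle of 0x00 */
	FLASHCTL = ctl & ~0x04;		/* ALE off */
	while (!(FLASHCTL & 0x20))	/* wait for the ready bit */
		;
	*maker  = FLASHIO;		/* manufacturer ID */
	*device = FLASHIO;		/* device/chip ID */
}
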
diff --git a/arch/arm/boot/compressed/head-xscale.S b/arch/arm/boot/compressed/head-xscale.S
new file mode 100644
index 000000000..20fa44d59
--- /dev/null
+++ b/arch/arm/boot/compressed/head-xscale.S
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/arch/arm/boot/compressed/head-xscale.S
+ *
+ * XScale specific tweaks. This is merged into head.S by the linker.
+ *
+ */
+
+#include <linux/linkage.h>
+
+ .section ".start", "ax"
+
+__XScale_start:
+
+ @ Preserve r8/r7 i.e. kernel entry values
+
+ @ Data cache might be active.
+ @ Be sure to flush kernel binary out of the cache,
+ @ whatever state it is, before it is turned off.
+ @ This is done by fetching through currently executed
+ @ memory to be sure we hit the same cache.
+ bic r2, pc, #0x1f
+ add r3, r2, #0x10000 @ 64 kb is quite enough...
+1: ldr r0, [r2], #32
+ teq r2, r3
+ bne 1b
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mcr p15, 0, r0, c7, c7, 0 @ flush I & D caches
+
+ @ disabling MMU and caches
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #0x05 @ clear DC, MMU
+ bic r0, r0, #0x1000 @ clear Icache
+ mcr p15, 0, r0, c1, c0, 0
+
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
new file mode 100644
index 000000000..7a38c63b6
--- /dev/null
+++ b/arch/arm/boot/compressed/head.S
@@ -0,0 +1,1519 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/boot/compressed/head.S
+ *
+ * Copyright (C) 1996-2002 Russell King
+ * Copyright (C) 2004 Hyok S. Choi (MPU support)
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/v7m.h>
+
+#include "efi-header.S"
+
+ AR_CLASS( .arch armv7-a )
+ M_CLASS( .arch armv7-m )
+
+/*
+ * Debugging stuff
+ *
+ * Note that these macros must not contain any code which is not
+ * 100% relocatable. Any attempt to do so will result in a crash.
+ * Please select one of the following when turning on debugging.
+ */
+#ifdef DEBUG
+
+#if defined(CONFIG_DEBUG_ICEDCC)
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
+ .macro loadsp, rb, tmp1, tmp2
+ .endm
+ .macro writeb, ch, rb, tmp
+ mcr p14, 0, \ch, c0, c5, 0
+ .endm
+#elif defined(CONFIG_CPU_XSCALE)
+ .macro loadsp, rb, tmp1, tmp2
+ .endm
+ .macro writeb, ch, rb, tmp
+ mcr p14, 0, \ch, c8, c0, 0
+ .endm
+#else
+ .macro loadsp, rb, tmp1, tmp2
+ .endm
+ .macro writeb, ch, rb, tmp
+ mcr p14, 0, \ch, c1, c0, 0
+ .endm
+#endif
+
+#else
+
+#include CONFIG_DEBUG_LL_INCLUDE
+
+ .macro writeb, ch, rb, tmp
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+ waituartcts \tmp, \rb
+#endif
+ waituarttxrdy \tmp, \rb
+ senduart \ch, \rb
+ busyuart \tmp, \rb
+ .endm
+
+#if defined(CONFIG_ARCH_SA1100)
+ .macro loadsp, rb, tmp1, tmp2
+ mov \rb, #0x80000000 @ physical base address
+#ifdef CONFIG_DEBUG_LL_SER3
+ add \rb, \rb, #0x00050000 @ Ser3
+#else
+ add \rb, \rb, #0x00010000 @ Ser1
+#endif
+ .endm
+#else
+ .macro loadsp, rb, tmp1, tmp2
+ addruart \rb, \tmp1, \tmp2
+ .endm
+#endif
+#endif
+#endif
+
+ .macro kputc,val
+ mov r0, \val
+ bl putc
+ .endm
+
+ .macro kphex,val,len
+ mov r0, \val
+ mov r1, #\len
+ bl phex
+ .endm
+
+ /*
+ * Debug kernel copy by printing the memory addresses involved
+ */
+ .macro dbgkc, begin, end, cbegin, cend
+#ifdef DEBUG
+ kputc #'C'
+ kputc #':'
+ kputc #'0'
+ kputc #'x'
+ kphex \begin, 8 /* Start of compressed kernel */
+ kputc #'-'
+ kputc #'0'
+ kputc #'x'
+ kphex \end, 8 /* End of compressed kernel */
+ kputc #'-'
+ kputc #'>'
+ kputc #'0'
+ kputc #'x'
+ kphex \cbegin, 8 /* Start of kernel copy */
+ kputc #'-'
+ kputc #'0'
+ kputc #'x'
+ kphex \cend, 8 /* End of kernel copy */
+ kputc #'\n'
+#endif
+ .endm
+
+ /*
+ * Debug print of the final appended DTB location
+ */
+ .macro dbgadtb, begin, size
+#ifdef DEBUG
+ kputc #'D'
+ kputc #'T'
+ kputc #'B'
+ kputc #':'
+ kputc #'0'
+ kputc #'x'
+ kphex \begin, 8 /* Start of appended DTB */
+ kputc #' '
+ kputc #'('
+ kputc #'0'
+ kputc #'x'
+ kphex \size, 8 /* Size of appended DTB */
+ kputc #')'
+ kputc #'\n'
+#endif
+ .endm
+
+ .macro enable_cp15_barriers, reg
+ mrc p15, 0, \reg, c1, c0, 0 @ read SCTLR
+ tst \reg, #(1 << 5) @ CP15BEN bit set?
+ bne .L_\@
+ orr \reg, \reg, #(1 << 5) @ CP15 barrier instructions
+ mcr p15, 0, \reg, c1, c0, 0 @ write SCTLR
+ ARM( .inst 0xf57ff06f @ v7+ isb )
+ THUMB( isb )
+.L_\@:
+ .endm
+
+ /*
+ * The kernel build system appends the size of the
+ * decompressed kernel at the end of the compressed data
+ * in little-endian form.
+ */
+ .macro get_inflated_image_size, res:req, tmp1:req, tmp2:req
+ adr \res, .Linflated_image_size_offset
+ ldr \tmp1, [\res]
+ add \tmp1, \tmp1, \res @ address of inflated image size
+
+ ldrb \res, [\tmp1] @ get_unaligned_le32
+ ldrb \tmp2, [\tmp1, #1]
+ orr \res, \res, \tmp2, lsl #8
+ ldrb \tmp2, [\tmp1, #2]
+ ldrb \tmp1, [\tmp1, #3]
+ orr \res, \res, \tmp2, lsl #16
+ orr \res, \res, \tmp1, lsl #24
+ .endm
+
+ .section ".start", "ax"
+/*
+ * sort out different calling conventions
+ */
+ .align
+ /*
+ * Always enter in ARM state for CPUs that support the ARM ISA.
+ * As of today (2014) that's exactly the members of the A and R
+ * classes.
+ */
+ AR_CLASS( .arm )
+start:
+ .type start,#function
+ /*
+ * These 7 nops along with the 1 nop immediately below for
+ * !THUMB2 form 8 nops that make the compressed kernel bootable
+ * on legacy ARM systems that were assuming the kernel in a.out
+ * binary format. The boot loaders on these systems would
+ * jump 32 bytes into the image to skip the a.out header.
+ * With these 8 nops filling exactly 32 bytes, things still
+ * work as expected on these legacy systems. Thumb2 mode keeps
+ * 7 of the nops as it turns out that some boot loaders
+ * were patching the initial instructions of the kernel, i.e.
+ * had started to exploit this "patch area".
+ */
+ __initial_nops
+ .rept 5
+ __nop
+ .endr
+#ifndef CONFIG_THUMB2_KERNEL
+ __nop
+#else
+ AR_CLASS( sub pc, pc, #3 ) @ A/R: switch to Thumb2 mode
+ M_CLASS( nop.w ) @ M: already in Thumb2 mode
+ .thumb
+#endif
+ W(b) 1f
+
+ .word _magic_sig @ Magic numbers to help the loader
+ .word _magic_start @ absolute load/run zImage address
+ .word _magic_end @ zImage end address
+ .word 0x04030201 @ endianness flag
+ .word 0x45454545 @ another magic number to indicate
+ .word _magic_table @ additional data table
+
+ __EFI_HEADER
+1:
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
+ AR_CLASS( mrs r9, cpsr )
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install @ get into SVC mode, reversibly
+#endif
+ mov r7, r1 @ save architecture ID
+ mov r8, r2 @ save atags pointer
+
+#ifndef CONFIG_CPU_V7M
+ /*
+ * Booting from Angel - need to enter SVC mode and disable
+ * FIQs/IRQs (numeric definitions from angel arm.h source).
+ * We only do this if we were in user mode on entry.
+ */
+ mrs r2, cpsr @ get current mode
+ tst r2, #3 @ not user?
+ bne not_angel
+ mov r0, #0x17 @ angel_SWIreason_EnterSVC
+ ARM( swi 0x123456 ) @ angel_SWI_ARM
+ THUMB( svc 0xab ) @ angel_SWI_THUMB
+not_angel:
+ safe_svcmode_maskall r0
+ msr spsr_cxsf, r9 @ Save the CPU boot mode in
+ @ SPSR
+#endif
+ /*
+ * Note that some cache flushing and other stuff may
+ * be needed here - is there an Angel SWI call for this?
+ */
+
+ /*
+ * some architecture specific code can be inserted
+ * by the linker here, but it should preserve r7, r8, and r9.
+ */
+
+ .text
+
+#ifdef CONFIG_AUTO_ZRELADDR
+ /*
+ * Find the start of physical memory. As we are executing
+ * without the MMU on, we are in the physical address space.
+ * We just need to get rid of any offset by aligning the
+ * address.
+ *
+ * This alignment is a balance between the requirements of
+ * different platforms - we have chosen 128MB to allow
+ * platforms which align the start of their physical memory
+ * to 128MB to use this feature, while allowing the zImage
+ * to be placed within the first 128MB of memory on other
+ * platforms. Increasing the alignment means we place
+ * stricter alignment requirements on the start of physical
+ * memory, but relaxing it means that we break people who
+ * are already placing their zImage in (eg) the top 64MB
+ * of this range.
+ */
+ mov r4, pc
+ and r4, r4, #0xf8000000
+ /* Determine final kernel image address. */
+ add r4, r4, #TEXT_OFFSET
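+ /*
+  * Illustrative example: executing at 0x80408000 with a typical
+  * TEXT_OFFSET of 0x8000, the mask leaves 0x80000000 and r4
+  * becomes 0x80008000 as the final kernel address.
+  */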
+#else
+ ldr r4, =zreladdr
+#endif
+
+ /*
+ * Set up a page table only if it won't overwrite ourself.
+ * That means r4 < pc || r4 - 16k page directory > &_end.
+ * Given that r4 > &_end is most infrequent, we add a rough
+ * additional 1MB of room for a possible appended DTB.
+ */
+ mov r0, pc
+ cmp r0, r4
+ ldrcc r0, .Lheadroom
+ addcc r0, r0, pc
+ cmpcc r4, r0
+ orrcc r4, r4, #1 @ remember we skipped cache_on
+ blcs cache_on
+
+restart: adr r0, LC1
+ ldr sp, [r0]
+ ldr r6, [r0, #4]
+ add sp, sp, r0
+ add r6, r6, r0
+
+ get_inflated_image_size r9, r10, lr
+
+#ifndef CONFIG_ZBOOT_ROM
+ /* malloc space is above the relocated stack (64k max) */
+ add r10, sp, #MALLOC_SIZE
+#else
+ /*
+ * With ZBOOT_ROM the bss/stack is non relocatable,
+ * but someone could still run this code from RAM,
+ * in which case our reference is _edata.
+ */
+ mov r10, r6
+#endif
+
+ mov r5, #0 @ init dtb size to 0
+#ifdef CONFIG_ARM_APPENDED_DTB
+/*
+ * r4 = final kernel address (possibly with LSB set)
+ * r5 = appended dtb size (still unknown)
+ * r6 = _edata
+ * r7 = architecture ID
+ * r8 = atags/device tree pointer
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non XIP
+ * sp = stack pointer
+ *
+ * if there are device trees (dtb) appended to zImage, advance r10 so that the
+ * dtb data will get relocated along with the kernel if necessary.
+ */
+
+ ldr lr, [r6, #0]
+#ifndef __ARMEB__
+ ldr r1, =0xedfe0dd0 @ sig is 0xd00dfeed big endian
+#else
+ ldr r1, =0xd00dfeed
+#endif
+ cmp lr, r1
+ bne dtb_check_done @ not found
+
+#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
+ /*
+ * OK... Let's do some funky business here.
+ * If we do have a DTB appended to zImage, and we do have
+ * an ATAG list around, we want the latter to be translated
+ * and folded into the former here. No GOT fixup has occurred
+ * yet, but none of the code we're about to call uses any
+ * global variable.
+ */
+
+ /* Get the initial DTB size */
+ ldr r5, [r6, #4]
+#ifndef __ARMEB__
+ /* convert to little endian */
+ eor r1, r5, r5, ror #16
+ bic r1, r1, #0x00ff0000
+ mov r5, r5, ror #8
+ eor r5, r5, r1, lsr #8
+#endif
+ dbgadtb r6, r5
+ /* 50% DTB growth should be good enough */
+ add r5, r5, r5, lsr #1
+ /* preserve 64-bit alignment */
+ add r5, r5, #7
+ bic r5, r5, #7
+ /* clamp to 32KB min and 1MB max */
+ cmp r5, #(1 << 15)
+ movlo r5, #(1 << 15)
+ cmp r5, #(1 << 20)
+ movhi r5, #(1 << 20)
+ /* temporarily relocate the stack past the DTB work space */
+ add sp, sp, r5
+
+ mov r0, r8
+ mov r1, r6
+ mov r2, r5
+ bl atags_to_fdt
+
+ /*
+ * If returned value is 1, there is no ATAG at the location
+ * pointed by r8. Try the typical 0x100 offset from start
+ * of RAM and hope for the best.
+ */
+ cmp r0, #1
+ sub r0, r4, #TEXT_OFFSET
+ bic r0, r0, #1
+ add r0, r0, #0x100
+ mov r1, r6
+ mov r2, r5
+ bleq atags_to_fdt
+
+ sub sp, sp, r5
+#endif
+
+ mov r8, r6 @ use the appended device tree
+
+ /*
+ * Make sure that the DTB doesn't end up in the final
+ * kernel's .bss area. To do so, we adjust the decompressed
+ * kernel size to compensate if that .bss size is larger
+ * than the relocated code.
+ */
+ ldr r5, =_kernel_bss_size
+ adr r1, wont_overwrite
+ sub r1, r6, r1
+ subs r1, r5, r1
+ addhi r9, r9, r1
+
+ /* Get the current DTB size */
+ ldr r5, [r6, #4]
+#ifndef __ARMEB__
+ /* convert r5 (dtb size) to little endian */
+ eor r1, r5, r5, ror #16
+ bic r1, r1, #0x00ff0000
+ mov r5, r5, ror #8
+ eor r5, r5, r1, lsr #8
+#endif
+
+ /* preserve 64-bit alignment */
+ add r5, r5, #7
+ bic r5, r5, #7
+
+ /* relocate some pointers past the appended dtb */
+ add r6, r6, r5
+ add r10, r10, r5
+ add sp, sp, r5
+dtb_check_done:
+#endif
+
+/*
+ * Check to see if we will overwrite ourselves.
+ * r4 = final kernel address (possibly with LSB set)
+ * r9 = size of decompressed image
+ * r10 = end of this image, including bss/stack/malloc space if non XIP
+ * We basically want:
+ * r4 - 16k page directory >= r10 -> OK
+ * r4 + image length <= address of wont_overwrite -> OK
+ * Note: the possible LSB in r4 is harmless here.
+ */
+ add r10, r10, #16384
+ cmp r4, r10
+ bhs wont_overwrite
+ add r10, r4, r9
+ adr r9, wont_overwrite
+ cmp r10, r9
+ bls wont_overwrite
+
+/*
+ * Relocate ourselves past the end of the decompressed kernel.
+ * r6 = _edata
+ * r10 = end of the decompressed kernel
+ * Because we always copy ahead, we need to do it from the end and go
+ * backward in case the source and destination overlap.
+ */
+ /*
+ * Bump to the next 256-byte boundary with the size of
+ * the relocation code added. This avoids overwriting
+ * ourself when the offset is small.
+ */
+ add r10, r10, #((reloc_code_end - restart + 256) & ~255)
+ bic r10, r10, #255
+
+ /* Get start of code we want to copy and align it down. */
+ adr r5, restart
+ bic r5, r5, #31
+
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE
+ bne 1f
+
+ /*
+ * Compute the address of the hyp vectors after relocation.
+ * This requires some arithmetic since we cannot directly
+ * reference __hyp_stub_vectors in a PC-relative way.
+ * Call __hyp_set_vectors with the new address so that we
+ * can HVC again after the copy.
+ */
+0: adr r0, 0b
+ movw r1, #:lower16:__hyp_stub_vectors - 0b
+ movt r1, #:upper16:__hyp_stub_vectors - 0b
+ add r0, r0, r1
+ sub r0, r0, r5
+ add r0, r0, r10
+ bl __hyp_set_vectors
+1:
+#endif
+
+ sub r9, r6, r5 @ size to copy
+ add r9, r9, #31 @ rounded up to a multiple
+ bic r9, r9, #31 @ ... of 32 bytes
+ add r6, r9, r5
+ add r9, r9, r10
+
+#ifdef DEBUG
+ sub r10, r6, r5
+ sub r10, r9, r10
+ /*
+ * We are about to copy the kernel to a new memory area.
+ * The boundaries of the new memory area can be found in
+ * r10 and r9, whilst r5 and r6 contain the boundaries
+ * of the memory we are going to copy.
+ * Calling dbgkc will help with the printing of this
+ * information.
+ */
+ dbgkc r5, r6, r10, r9
+#endif
+
+1: ldmdb r6!, {r0 - r3, r10 - r12, lr}
+ cmp r6, r5
+ stmdb r9!, {r0 - r3, r10 - r12, lr}
+ bhi 1b
+
+ /* Preserve offset to relocated code. */
+ sub r6, r9, r6
+
+ mov r0, r9 @ start of relocated zImage
+ add r1, sp, r6 @ end of relocated zImage
+ bl cache_clean_flush
+
+ badr r0, restart
+ add r0, r0, r6
+ mov pc, r0
+
+wont_overwrite:
+ adr r0, LC0
+ ldmia r0, {r1, r2, r3, r11, r12}
+ sub r0, r0, r1 @ calculate the delta offset
+
+/*
+ * If delta is zero, we are running at the address we were linked at.
+ * r0 = delta
+ * r2 = BSS start
+ * r3 = BSS end
+ * r4 = kernel execution address (possibly with LSB set)
+ * r5 = appended dtb size (0 if not present)
+ * r7 = architecture ID
+ * r8 = atags pointer
+ * r11 = GOT start
+ * r12 = GOT end
+ * sp = stack pointer
+ */
+ orrs r1, r0, r5
+ beq not_relocated
+
+ add r11, r11, r0
+ add r12, r12, r0
+
+#ifndef CONFIG_ZBOOT_ROM
+ /*
+ * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
+ * we need to fix up pointers into the BSS region.
+ * Note that the stack pointer has already been fixed up.
+ */
+ add r2, r2, r0
+ add r3, r3, r0
+
+ /*
+ * Relocate all entries in the GOT table.
+ * Bump bss entries to _edata + dtb size
+ */
+1: ldr r1, [r11, #0] @ relocate entries in the GOT
+ add r1, r1, r0 @ This fixes up C references
+ cmp r1, r2 @ if entry >= bss_start &&
+ cmphs r3, r1 @ bss_end > entry
+ addhi r1, r1, r5 @ entry += dtb size
+ str r1, [r11], #4 @ next entry
+ cmp r11, r12
+ blo 1b
+
+ /* bump our bss pointers too */
+ add r2, r2, r5
+ add r3, r3, r5
+
+#else
+
+ /*
+ * Relocate entries in the GOT table. We only relocate
+ * the entries that are outside the (relocated) BSS region.
+ */
+1: ldr r1, [r11, #0] @ relocate entries in the GOT
+ cmp r1, r2 @ entry < bss_start ||
+ cmphs r3, r1 @ _end < entry
+ addlo r1, r1, r0 @ table. This fixes up the
+ str r1, [r11], #4 @ C references.
+ cmp r11, r12
+ blo 1b
+#endif
+
+not_relocated: mov r0, #0
+1: str r0, [r2], #4 @ clear bss
+ str r0, [r2], #4
+ str r0, [r2], #4
+ str r0, [r2], #4
+ cmp r2, r3
+ blo 1b
+
+ /*
+ * Did we skip the cache setup earlier?
+ * That is indicated by the LSB in r4.
+ * Do it now if so.
+ */
+ tst r4, #1
+ bic r4, r4, #1
+ blne cache_on
+
+/*
+ * The C runtime environment should now be setup sufficiently.
+ * Set up some pointers, and start decompressing.
+ * r4 = kernel execution address
+ * r7 = architecture ID
+ * r8 = atags pointer
+ */
+ mov r0, r4
+ mov r1, sp @ malloc space above stack
+ add r2, sp, #MALLOC_SIZE @ 64k max
+ mov r3, r7
+ bl decompress_kernel
+
+ get_inflated_image_size r1, r2, r3
+
+ mov r0, r4 @ start of inflated image
+ add r1, r1, r0 @ end of inflated image
+ bl cache_clean_flush
+ bl cache_off
+
+#ifdef CONFIG_ARM_VIRT_EXT
+ mrs r0, spsr @ Get saved CPU boot mode
+ and r0, r0, #MODE_MASK
+ cmp r0, #HYP_MODE @ if not booted in HYP mode...
+ bne __enter_kernel @ boot kernel directly
+
+ adr r12, .L__hyp_reentry_vectors_offset
+ ldr r0, [r12]
+ add r0, r0, r12
+
+ bl __hyp_set_vectors
+ __HVC(0) @ otherwise bounce to hyp mode
+
+ b . @ should never be reached
+
+ .align 2
+.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - .
+#else
+ b __enter_kernel
+#endif
+
+ .align 2
+ .type LC0, #object
+LC0: .word LC0 @ r1
+ .word __bss_start @ r2
+ .word _end @ r3
+ .word _got_start @ r11
+ .word _got_end @ ip
+ .size LC0, . - LC0
+
+ .type LC1, #object
+LC1: .word .L_user_stack_end - LC1 @ sp
+ .word _edata - LC1 @ r6
+ .size LC1, . - LC1
+
+.Lheadroom:
+ .word _end - restart + 16384 + 1024*1024
+
+.Linflated_image_size_offset:
+ .long (input_data_end - 4) - .
+
+#ifdef CONFIG_ARCH_RPC
+ .globl params
+params: ldr r0, =0x10000100 @ params_phys for RPC
+ mov pc, lr
+ .ltorg
+ .align
+#endif
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register
+ * on ARMv7.
+ */
+ .macro dcache_line_size, reg, tmp
+#ifdef CONFIG_CPU_V7M
+ movw \tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ movt \tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+ ldr \tmp, [\tmp]
+#else
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+#endif
+ lsr \tmp, \tmp, #16
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
+ mov \reg, \reg, lsl \tmp @ actual cache line size
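+ @ e.g. a CTR DminLine field of 3 gives 4 << 3 = 32-byte cache lines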
+ .endm
+
+/*
+ * Turn on the cache. We need to setup some page tables so that we
+ * can have both the I and D caches on.
+ *
+ * We place the page tables 16k down from the kernel execution address,
+ * and we hope that nothing else is using it. If we're using it, we
+ * will go pop!
+ *
+ * On entry,
+ * r4 = kernel execution address
+ * r7 = architecture number
+ * r8 = atags pointer
+ * On exit,
+ * r0, r1, r2, r3, r9, r10, r12 corrupted
+ * This routine must preserve:
+ * r4, r7, r8
+ */
+ .align 5
+cache_on: mov r3, #8 @ cache_on function
+ b call_cache_fn
+
+/*
+ * Initialize the highest priority protection region, PR7
+ * to cover all 32bit address and cacheable and bufferable.
+ */
+__armv4_mpu_cache_on:
+ mov r0, #0x3f @ 4G, the whole
+ mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
+ mcr p15, 0, r0, c6, c7, 1
+
+ mov r0, #0x80 @ PR7
+ mcr p15, 0, r0, c2, c0, 0 @ D-cache on
+ mcr p15, 0, r0, c2, c0, 1 @ I-cache on
+ mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
+
+ mov r0, #0xc000
+ mcr p15, 0, r0, c5, c0, 1 @ I-access permission
+ mcr p15, 0, r0, c5, c0, 0 @ D-access permission
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
+ mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ @ ...I .... ..D. WC.M
+ orr r0, r0, #0x002d @ .... .... ..1. 11.1
+ orr r0, r0, #0x1000 @ ...1 .... .... ....
+
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
+ mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
+ mov pc, lr
+
+__armv3_mpu_cache_on:
+ mov r0, #0x3f @ 4G, the whole
+ mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
+
+ mov r0, #0x80 @ PR7
+ mcr p15, 0, r0, c2, c0, 0 @ cache on
+ mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
+
+ mov r0, #0xc000
+ mcr p15, 0, r0, c5, c0, 0 @ access permission
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ /*
+ * ?? ARMv3 MMU does not allow reading the control register,
+ * does this really work on ARMv3 MPU?
+ */
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ @ .... .... .... WC.M
+ orr r0, r0, #0x000d @ .... .... .... 11.1
+ /* ?? this overwrites the value constructed above? */
+ mov r0, #0
+ mcr p15, 0, r0, c1, c0, 0 @ write control reg
+
+ /* ?? invalidate for the second time? */
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ mov pc, lr
+
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
+__setup_mmu: sub r3, r4, #16384 @ Page directory size
+ bic r3, r3, #0xff @ Align the pointer
+ bic r3, r3, #0x3f00
+/*
+ * Initialise the page tables, turning on the cacheable and bufferable
+ * bits for the RAM area only.
+ */
+ mov r0, r3
+ mov r9, r0, lsr #18
+ mov r9, r9, lsl #18 @ start of RAM
+ add r10, r9, #0x10000000 @ a reasonable RAM size
+ mov r1, #0x12 @ XN|U + section mapping
+ orr r1, r1, #3 << 10 @ AP=11
+ add r2, r3, #16384
+1: cmp r1, r9 @ if virt > start of RAM
+ cmphs r10, r1 @ && end of RAM > virt
+ bic r1, r1, #0x1c @ clear XN|U + C + B
+ orrlo r1, r1, #0x10 @ Set XN|U for non-RAM
+ orrhs r1, r1, r6 @ set RAM section settings
+ str r1, [r0], #4 @ 1:1 mapping
+ add r1, r1, #1048576
+ teq r0, r2
+ bne 1b
+/*
+ * If ever we are running from Flash, then we surely want the cache
+ * to be enabled also for our execution instance... We map 2MB of it
+ * so there is no map overlap problem for up to 1 MB compressed kernel.
+ * If the execution is in RAM then we would only be duplicating the above.
+ */
+ orr r1, r6, #0x04 @ ensure B is set for this
+ orr r1, r1, #3 << 10
+ mov r2, pc
+ mov r2, r2, lsr #20
+ orr r1, r1, r2, lsl #20
+ add r0, r3, r2, lsl #2
+ str r1, [r0], #4
+ add r1, r1, #1048576
+ str r1, [r0]
+ mov pc, lr
+ENDPROC(__setup_mmu)
+
+@ Enable unaligned access on v6, to allow better code generation
+@ for the decompressor C code:
+__armv6_mmu_cache_on:
+ mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ mcr p15, 0, r0, c1, c0, 0 @ write SCTLR
+ b __armv4_mmu_cache_on
+
+__arm926ejs_mmu_cache_on:
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+ mov r0, #4 @ put dcache in WT mode
+ mcr p15, 7, r0, c15, c0, 0
+#endif
+
+__armv4_mmu_cache_on:
+ mov r12, lr
+#ifdef CONFIG_MMU
+ mov r6, #CB_BITS | 0x12 @ U
+ bl __setup_mmu
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
+ orr r0, r0, #0x0030
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
+ bl __common_mmu_cache_on
+ mov r0, #0
+ mcr p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+#endif
+ mov pc, r12
+
+__armv7_mmu_cache_on:
+ enable_cp15_barriers r11
+ mov r12, lr
+#ifdef CONFIG_MMU
+ mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0
+ tst r11, #0xf @ VMSA
+ movne r6, #CB_BITS | 0x02 @ !XN
+ blne __setup_mmu
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ tst r11, #0xf @ VMSA
+ mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs
+#endif
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ bic r0, r0, #1 << 28 @ clear SCTLR.TRE
+ orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement
+ orr r0, r0, #0x003c @ write buffer
+ bic r0, r0, #2 @ A (no unaligned access fault)
+ orr r0, r0, #1 << 22 @ U (v6 unaligned access model)
+ @ (needed for ARM1176)
+#ifdef CONFIG_MMU
+ ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables
+ mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg
+ orrne r0, r0, #1 @ MMU enabled
+ movne r1, #0xfffffffd @ domain 0 = client
+ bic r6, r6, #1 << 31 @ 32-bit translation system
+ bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0
+ mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
+ mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
+ mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
+#endif
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
+ mcr p15, 0, r0, c1, c0, 0 @ load control register
+ mrc p15, 0, r0, c1, c0, 0 @ and read it back
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
+ mov pc, r12
+
+__fa526_cache_on:
+ mov r12, lr
+ mov r6, #CB_BITS | 0x12 @ U
+ bl __setup_mmu
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
+ mrc p15, 0, r0, c1, c0, 0 @ read control reg
+ orr r0, r0, #0x1000 @ I-cache enable
+ bl __common_mmu_cache_on
+ mov r0, #0
+ mcr p15, 0, r0, c8, c7, 0 @ flush UTLB
+ mov pc, r12
+
+__common_mmu_cache_on:
+#ifndef CONFIG_THUMB2_KERNEL
+#ifndef DEBUG
+ orr r0, r0, #0x000d @ Write buffer, mmu
+#endif
+ mov r1, #-1
+ mcr p15, 0, r3, c2, c0, 0 @ load page table pointer
+ mcr p15, 0, r1, c3, c0, 0 @ load domain access control
+ b 1f
+ .align 5 @ cache line aligned
+1: mcr p15, 0, r0, c1, c0, 0 @ load control register
+ mrc p15, 0, r0, c1, c0, 0 @ and read it back to
+ sub pc, lr, r0, lsr #32 @ properly flush pipeline
+#endif
+
+#define PROC_ENTRY_SIZE (4*5)
+
+/*
+ * Here follow the relocatable cache support functions for the
+ * various processors. This is a generic hook for locating an
+ * entry and jumping to an instruction at the specified offset
+ * from the start of the block. Please note this is all position
+ * independent code.
+ *
+ * r1 = corrupted
+ * r2 = corrupted
+ * r3 = block offset
+ * r9 = corrupted
+ * r12 = corrupted
+ */
+
+call_cache_fn: adr r12, proc_types
+#ifdef CONFIG_CPU_CP15
+ mrc p15, 0, r9, c0, c0 @ get processor ID
+#elif defined(CONFIG_CPU_V7M)
+ /*
+ * On v7-M the processor id is located in the V7M_SCB_CPUID
+ * register, but as cache handling is IMPLEMENTATION DEFINED on
+ * v7-M (if existent at all) we just return early here.
+ * If V7M_SCB_CPUID were used the cpu ID functions (i.e.
+ * __armv7_mmu_cache_{on,off,flush}) would be selected which
+ * use cp15 registers that are not implemented on v7-M.
+ */
+ bx lr
+#else
+ ldr r9, =CONFIG_PROCESSOR_ID
+#endif
+1: ldr r1, [r12, #0] @ get value
+ ldr r2, [r12, #4] @ get mask
+ eor r1, r1, r9 @ (real ^ match)
+ tst r1, r2 @ & mask
+ ARM( addeq pc, r12, r3 ) @ call cache function
+ THUMB( addeq r12, r3 )
+ THUMB( moveq pc, r12 ) @ call cache function
+ add r12, r12, #PROC_ENTRY_SIZE
+ b 1b
+
+/*
+ * Table for cache operations. This is basically:
+ * - CPU ID match
+ * - CPU ID mask
+ * - 'cache on' method instruction
+ * - 'cache off' method instruction
+ * - 'cache flush' method instruction
+ *
+ * We match an entry using: ((real_id ^ match) & mask) == 0
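+ * (e.g. the sa110/sa1100 entry below, value 0x4401a100 with mask
+ * 0xffffffe0, accepts any ID that differs only in the low five
+ * revision bits)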
+ *
+ * Writethrough caches generally only need 'on' and 'off'
+ * methods. Writeback caches _must_ have the flush method
+ * defined.
+ */
+ .align 2
+ .type proc_types,#object
+proc_types:
+ .word 0x41000000 @ old ARM ID
+ .word 0xff00f000
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+
+ .word 0x41007000 @ ARM7/710
+ .word 0xfff8fe00
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+
+ .word 0x41807200 @ ARM720T (writethrough)
+ .word 0xffffff00
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ mov pc, lr
+ THUMB( nop )
+
+ .word 0x41007400 @ ARM74x
+ .word 0xff00ff00
+ W(b) __armv3_mpu_cache_on
+ W(b) __armv3_mpu_cache_off
+ W(b) __armv3_mpu_cache_flush
+
+ .word 0x41009400 @ ARM94x
+ .word 0xff00ff00
+ W(b) __armv4_mpu_cache_on
+ W(b) __armv4_mpu_cache_off
+ W(b) __armv4_mpu_cache_flush
+
+ .word 0x41069260 @ ARM926EJ-S (v5TEJ)
+ .word 0xff0ffff0
+ W(b) __arm926ejs_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
+ .word 0x00007000 @ ARM7 IDs
+ .word 0x0000f000
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+
+ @ Everything from here on will be the new ID system.
+
+ .word 0x4401a100 @ sa110 / sa1100
+ .word 0xffffffe0
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
+
+ .word 0x6901b110 @ sa1110
+ .word 0xfffffff0
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
+
+ .word 0x56056900
+ .word 0xffffff00 @ PXA9xx
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
+
+ .word 0x56158000 @ PXA168
+ .word 0xfffff000
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
+ .word 0x56050000 @ Feroceon
+ .word 0xff0f0000
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
+#ifdef CONFIG_CPU_FEROCEON_OLD_ID
+ /* this conflicts with the standard ARMv5TE entry */
+ .long 0x41009260 @ Old Feroceon
+ .long 0xff00fff0
+ b __armv4_mmu_cache_on
+ b __armv4_mmu_cache_off
+ b __armv5tej_mmu_cache_flush
+#endif
+
+ .word 0x66015261 @ FA526
+ .word 0xff01fff1
+ W(b) __fa526_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __fa526_cache_flush
+
+ @ These match on the architecture ID
+
+ .word 0x00020000 @ ARMv4T
+ .word 0x000f0000
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
+
+ .word 0x00050000 @ ARMv5TE
+ .word 0x000f0000
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv4_mmu_cache_flush
+
+ .word 0x00060000 @ ARMv5TEJ
+ .word 0x000f0000
+ W(b) __armv4_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv5tej_mmu_cache_flush
+
+ .word 0x0007b000 @ ARMv6
+ .word 0x000ff000
+ W(b) __armv6_mmu_cache_on
+ W(b) __armv4_mmu_cache_off
+ W(b) __armv6_mmu_cache_flush
+
+ .word 0x000f0000 @ new CPU Id
+ .word 0x000f0000
+ W(b) __armv7_mmu_cache_on
+ W(b) __armv7_mmu_cache_off
+ W(b) __armv7_mmu_cache_flush
+
+ .word 0 @ unrecognised type
+ .word 0
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+ mov pc, lr
+ THUMB( nop )
+
+ .size proc_types, . - proc_types
+
+ /*
+ * If you get a "non-constant expression in ".if" statement"
+ * error from the assembler on this line, check that you have
+ * not accidentally written a "b" instruction where you should
+ * have written W(b).
+ */
+ .if (. - proc_types) % PROC_ENTRY_SIZE != 0
+ .error "The size of one or more proc_types entries is wrong."
+ .endif
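(Editorial aside, not part of the diff above.) Each proc_types entry is exactly PROC_ENTRY_SIZE bytes: a CPU ID match word, a mask word, and three instruction slots for the 'cache on', 'cache off' and 'cache flush' methods, and call_cache_fn walks the table with the ((real_id ^ match) & mask) == 0 test until the all-zero catch-all entry matches. The C fragment below is only a sketch of that lookup for readers who prefer C to assembler; struct proc_entry and find_proc_entry are invented names, and function pointers stand in for the branch instructions stored in the real table.

    #include <stdint.h>

    /* Sketch only: mirrors one five-word proc_types entry on 32-bit ARM,
     * where three branch instructions follow the match/mask pair. */
    struct proc_entry {
            uint32_t match;                 /* CPU ID value to match          */
            uint32_t mask;                  /* which ID bits are significant  */
            void (*cache_on)(void);         /* stands in for 'W(b) ..._on'    */
            void (*cache_off)(void);        /* stands in for 'W(b) ..._off'   */
            void (*cache_flush)(void);      /* stands in for 'W(b) ..._flush' */
    };

    /* Same matching rule as call_cache_fn: an entry applies when
     * ((real_id ^ match) & mask) == 0; the terminating { 0, 0, ... }
     * entry matches any ID, so the scan always ends. */
    static const struct proc_entry *find_proc_entry(const struct proc_entry *p,
                                                    uint32_t real_id)
    {
            while (((real_id ^ p->match) & p->mask) != 0)
                    p++;
            return p;
    }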
+
+/*
+ * Turn off the Cache and MMU. ARMv3 does not support
+ * reading the control register, but ARMv4 does.
+ *
+ * On exit,
+ * r0, r1, r2, r3, r9, r12 corrupted
+ * This routine must preserve:
+ * r4, r7, r8
+ */
+ .align 5
+cache_off: mov r3, #12 @ cache_off function
+ b call_cache_fn
+
+__armv4_mpu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ bic r0, r0, #0x000d
+ mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
+ mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
+ mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
+ mov pc, lr
+
+__armv3_mpu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ bic r0, r0, #0x000d
+ mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
+ mov pc, lr
+
+__armv4_mmu_cache_off:
+#ifdef CONFIG_MMU
+ mrc p15, 0, r0, c1, c0
+ bic r0, r0, #0x000d
+ mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
+ mov r0, #0
+ mcr p15, 0, r0, c7, c7 @ invalidate whole cache v4
+ mcr p15, 0, r0, c8, c7 @ invalidate whole TLB v4
+#endif
+ mov pc, lr
+
+__armv7_mmu_cache_off:
+ mrc p15, 0, r0, c1, c0
+#ifdef CONFIG_MMU
+ bic r0, r0, #0x0005
+#else
+ bic r0, r0, #0x0004
+#endif
+ mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
+ mov r0, #0
+#ifdef CONFIG_MMU
+ mcr p15, 0, r0, c8, c7, 0 @ invalidate whole TLB
+#endif
+ mcr p15, 0, r0, c7, c5, 6 @ invalidate BTC
+ mcr p15, 0, r0, c7, c10, 4 @ DSB
+ mcr p15, 0, r0, c7, c5, 4 @ ISB
+ mov pc, lr
+
+/*
+ * Clean and flush the cache to maintain consistency.
+ *
+ * On entry,
+ * r0 = start address
+ * r1 = end address (exclusive)
+ * On exit,
+ * r1, r2, r3, r9, r10, r11, r12 corrupted
+ * This routine must preserve:
+ * r4, r6, r7, r8
+ */
+ .align 5
+cache_clean_flush:
+ mov r3, #16
+ mov r11, r1
+ b call_cache_fn
+
+__armv4_mpu_cache_flush:
+ tst r4, #1
+ movne pc, lr
+ mov r2, #1
+ mov r3, #0
+ mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
+ mov r1, #7 << 5 @ 8 segments
+1: orr r3, r1, #63 << 26 @ 64 entries
+2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
+ subs r3, r3, #1 << 26
+ bcs 2b @ entries 63 to 0
+ subs r1, r1, #1 << 5
+ bcs 1b @ segments 7 to 0
+
+ teq r2, #0
+ mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
+ mcr p15, 0, ip, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+__fa526_cache_flush:
+ tst r4, #1
+ movne pc, lr
+ mov r1, #0
+ mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
+ mcr p15, 0, r1, c7, c5, 0 @ flush I cache
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+__armv6_mmu_cache_flush:
+ mov r1, #0
+ tst r4, #1
+ mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
+ mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
+ mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+__armv7_mmu_cache_flush:
+ enable_cp15_barriers r10
+ tst r4, #1
+ bne iflush
+ mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
+ tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
+ mov r10, #0
+ beq hierarchical
+ mcr p15, 0, r10, c7, c14, 0 @ clean+invalidate D
+ b iflush
+hierarchical:
+ dcache_line_size r1, r2 @ r1 := dcache min line size
+ sub r2, r1, #1 @ r2 := line size mask
+ bic r0, r0, r2 @ round down start to line size
+ sub r11, r11, #1 @ end address is exclusive
+ bic r11, r11, r2 @ round down end to line size
+0: cmp r0, r11 @ finished?
+ bgt iflush
+ mcr p15, 0, r0, c7, c14, 1 @ Dcache clean/invalidate by VA
+ add r0, r0, r1
+ b 0b
+iflush:
+ mcr p15, 0, r10, c7, c10, 4 @ DSB
+ mcr p15, 0, r10, c7, c5, 0 @ invalidate I+BTB
+ mcr p15, 0, r10, c7, c10, 4 @ DSB
+ mcr p15, 0, r10, c7, c5, 4 @ ISB
+ mov pc, lr
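(Editorial aside, not part of the diff.) In the 'hierarchical' path above, __armv7_mmu_cache_flush cleans the [r0, r11) range by virtual address: it rounds the start down to the minimum D-cache line size, turns the exclusive end into the address of the last line touched, and issues one clean+invalidate per line. A hedged C sketch of that address arithmetic follows; dcache_clean_inval_line() is an invented stand-in for the 'mcr p15, 0, rX, c7, c14, 1' instruction.

    #include <stdint.h>

    static void dcache_clean_inval_line(uintptr_t va)
    {
            /* Stand-in for 'mcr p15, 0, va, c7, c14, 1' (clean+invalidate by VA). */
            (void)va;
    }

    /* Clean+invalidate [start, end) line by line; line_size must be a
     * power of two, as the dcache_line_size macro guarantees. */
    static void clean_flush_range(uintptr_t start, uintptr_t end,
                                  uintptr_t line_size)
    {
            uintptr_t mask = line_size - 1;
            uintptr_t va   = start & ~mask;         /* round start down   */
            uintptr_t last = (end - 1) & ~mask;     /* last line in range */

            for (; va <= last; va += line_size)
                    dcache_clean_inval_line(va);
    }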
+
+__armv5tej_mmu_cache_flush:
+ tst r4, #1
+ movne pc, lr
+1: mrc p15, 0, APSR_nzcv, c7, c14, 3 @ test,clean,invalidate D cache
+ bne 1b
+ mcr p15, 0, r0, c7, c5, 0 @ flush I cache
+ mcr p15, 0, r0, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+__armv4_mmu_cache_flush:
+ tst r4, #1
+ movne pc, lr
+ mov r2, #64*1024 @ default: 32K dcache size (*2)
+ mov r11, #32 @ default: 32 byte line size
+ mrc p15, 0, r3, c0, c0, 1 @ read cache type
+ teq r3, r9 @ cache ID register present?
+ beq no_cache_id
+ mov r1, r3, lsr #18
+ and r1, r1, #7
+ mov r2, #1024
+ mov r2, r2, lsl r1 @ base dcache size *2
+ tst r3, #1 << 14 @ test M bit
+ addne r2, r2, r2, lsr #1 @ +1/2 size if M == 1
+ mov r3, r3, lsr #12
+ and r3, r3, #3
+ mov r11, #8
+ mov r11, r11, lsl r3 @ cache line size in bytes
+no_cache_id:
+ mov r1, pc
+ bic r1, r1, #63 @ align to longest cache line
+ add r2, r1, r2
+1:
+ ARM( ldr r3, [r1], r11 ) @ s/w flush D cache
+ THUMB( ldr r3, [r1] ) @ s/w flush D cache
+ THUMB( add r1, r1, r11 )
+ teq r1, r2
+ bne 1b
+
+ mcr p15, 0, r1, c7, c5, 0 @ flush I cache
+ mcr p15, 0, r1, c7, c6, 0 @ flush D cache
+ mcr p15, 0, r1, c7, c10, 4 @ drain WB
+ mov pc, lr
+
+__armv3_mmu_cache_flush:
+__armv3_mpu_cache_flush:
+ tst r4, #1
+ movne pc, lr
+ mov r1, #0
+ mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
+ mov pc, lr
+
+/*
+ * Various debugging routines for printing hex characters and
+ * memory, which again must be relocatable.
+ */
+#ifdef DEBUG
+ .align 2
+ .type phexbuf,#object
+phexbuf: .space 12
+ .size phexbuf, . - phexbuf
+
+@ phex corrupts {r0, r1, r2, r3}
+phex: adr r3, phexbuf
+ mov r2, #0
+ strb r2, [r3, r1]
+1: subs r1, r1, #1
+ movmi r0, r3
+ bmi puts
+ and r2, r0, #15
+ mov r0, r0, lsr #4
+ cmp r2, #10
+ addge r2, r2, #7
+ add r2, r2, #'0'
+ strb r2, [r3, r1]
+ b 1b
+
+@ puts corrupts {r0, r1, r2, r3}
+puts: loadsp r3, r2, r1
+1: ldrb r2, [r0], #1
+ teq r2, #0
+ moveq pc, lr
+2: writeb r2, r3, r1
+ mov r1, #0x00020000
+3: subs r1, r1, #1
+ bne 3b
+ teq r2, #'\n'
+ moveq r2, #'\r'
+ beq 2b
+ teq r0, #0
+ bne 1b
+ mov pc, lr
+@ putc corrupts {r0, r1, r2, r3}
+putc:
+ mov r2, r0
+ loadsp r3, r1, r0
+ mov r0, #0
+ b 2b
+
+@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
+memdump: mov r12, r0
+ mov r10, lr
+ mov r11, #0
+2: mov r0, r11, lsl #2
+ add r0, r0, r12
+ mov r1, #8
+ bl phex
+ mov r0, #':'
+ bl putc
+1: mov r0, #' '
+ bl putc
+ ldr r0, [r12, r11, lsl #2]
+ mov r1, #8
+ bl phex
+ and r0, r11, #7
+ teq r0, #3
+ moveq r0, #' '
+ bleq putc
+ and r0, r11, #7
+ add r11, r11, #1
+ teq r0, #7
+ bne 1b
+ mov r0, #'\n'
+ bl putc
+ cmp r11, #64
+ blt 2b
+ mov pc, r10
+#endif
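(Editorial aside, not part of the diff.) phex above converts a value to ASCII hex by peeling off the low nibble, adding 7 before '0' for values 10..15 so that 10 maps to 'A', and storing digits from the least significant end of the buffer backwards. A small illustrative C equivalent, with phex_sketch an invented name:

    /* Write 'digits' upper-case hex digits of 'val' into 'buf',
     * NUL-terminated, mirroring the nibble encoding used by phex. */
    static void phex_sketch(unsigned long val, int digits, char *buf)
    {
            buf[digits] = '\0';
            while (digits-- > 0) {
                    unsigned int nib = val & 15;

                    if (nib >= 10)
                            nib += 7;       /* 10 + '0' + 7 == 'A' */
                    buf[digits] = nib + '0';
                    val >>= 4;
            }
    }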
+
+ .ltorg
+
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+ W(b) . @ reset
+ W(b) . @ undef
+#ifdef CONFIG_EFI_STUB
+ W(b) __enter_kernel_from_hyp @ hvc from HYP
+#else
+ W(b) . @ svc
+#endif
+ W(b) . @ pabort
+ W(b) . @ dabort
+ W(b) __enter_kernel @ hyp
+ W(b) . @ irq
+ W(b) . @ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+ mov r0, #0 @ must be 0
+ mov r1, r7 @ restore architecture number
+ mov r2, r8 @ restore atags pointer
+ ARM( mov pc, r4 ) @ call kernel
+ M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class
+ THUMB( bx r4 ) @ entry point is always ARM for A/R classes
+
+reloc_code_end:
+
+#ifdef CONFIG_EFI_STUB
+__enter_kernel_from_hyp:
+ mrc p15, 4, r0, c1, c0, 0 @ read HSCTLR
+ bic r0, r0, #0x5 @ disable MMU and caches
+ mcr p15, 4, r0, c1, c0, 0 @ write HSCTLR
+ isb
+ b __enter_kernel
+
+ENTRY(efi_enter_kernel)
+ mov r4, r0 @ preserve image base
+ mov r8, r1 @ preserve DT pointer
+
+ adr_l r0, call_cache_fn
+ adr r1, 0f @ clean the region of code we
+ bl cache_clean_flush @ may run with the MMU off
+
+#ifdef CONFIG_ARM_VIRT_EXT
+ @
+ @ The EFI spec does not support booting on ARM in HYP mode,
+ @ since it mandates that the MMU and caches are on, with all
+ @ 32-bit addressable DRAM mapped 1:1 using short descriptors.
+ @
+ @ While the EDK2 reference implementation adheres to this,
+ @ U-Boot might decide to enter the EFI stub in HYP mode
+ @ anyway, with the MMU and caches either on or off.
+ @
+ mrs r0, cpsr @ get the current mode
+ msr spsr_cxsf, r0 @ record boot mode
+ and r0, r0, #MODE_MASK @ are we running in HYP mode?
+ cmp r0, #HYP_MODE
+ bne .Lefi_svc
+
+ mrc p15, 4, r1, c1, c0, 0 @ read HSCTLR
+ tst r1, #0x1 @ MMU enabled at HYP?
+ beq 1f
+
+ @
+ @ When running in HYP mode with the caches on, we're better
+ @ off just carrying on using the cached 1:1 mapping that the
+ @ firmware provided. Set up the HYP vectors so HVC instructions
+ @ issued from HYP mode take us to the correct handler code. We
+ @ will disable the MMU before jumping to the kernel proper.
+ @
+ ARM( bic r1, r1, #(1 << 30) ) @ clear HSCTLR.TE
+ THUMB( orr r1, r1, #(1 << 30) ) @ set HSCTLR.TE
+ mcr p15, 4, r1, c1, c0, 0
+ adr r0, __hyp_reentry_vectors
+ mcr p15, 4, r0, c12, c0, 0 @ set HYP vector base (HVBAR)
+ isb
+ b .Lefi_hyp
+
+ @
+ @ When running in HYP mode with the caches off, we need to drop
+ @ into SVC mode now, and let the decompressor set up its cached
+ @ 1:1 mapping as usual.
+ @
+1: mov r9, r4 @ preserve image base
+ bl __hyp_stub_install @ install HYP stub vectors
+ safe_svcmode_maskall r1 @ drop to SVC mode
+ msr spsr_cxsf, r0 @ record boot mode
+ orr r4, r9, #1 @ restore image base and set LSB
+ b .Lefi_hyp
+.Lefi_svc:
+#endif
+ mrc p15, 0, r0, c1, c0, 0 @ read SCTLR
+ tst r0, #0x1 @ MMU enabled?
+ orreq r4, r4, #1 @ set LSB if not
+
+.Lefi_hyp:
+ mov r0, r8 @ DT start
+ add r1, r8, r2 @ DT end
+ bl cache_clean_flush
+
+ adr r0, 0f @ switch to our stack
+ ldr sp, [r0]
+ add sp, sp, r0
+
+ mov r5, #0 @ appended DTB size
+ mov r7, #0xFFFFFFFF @ machine ID
+ b wont_overwrite
+ENDPROC(efi_enter_kernel)
+0: .long .L_user_stack_end - .
+#endif
+
+ .align
+ .section ".stack", "aw", %nobits
+.L_user_stack: .space 4096
+.L_user_stack_end:
diff --git a/arch/arm/boot/compressed/ll_char_wr.S b/arch/arm/boot/compressed/ll_char_wr.S
new file mode 100644
index 000000000..1ec8cb289
--- /dev/null
+++ b/arch/arm/boot/compressed/ll_char_wr.S
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * linux/arch/arm/lib/ll_char_wr.S
+ *
+ * Copyright (C) 1995, 1996 Russell King.
+ *
+ * Speedups & 1bpp code (C) 1996 Philip Blundell & Russell King.
+ *
+ * 10-04-96 RMK Various cleanups & reduced register usage.
+ * 08-04-98 RMK Shifts re-ordered
+ */
+
+@ Regs: [] = corruptible
+@ {} = used
+@ () = do not use
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+ .text
+
+LC0: .word LC0
+ .word bytes_per_char_h
+ .word video_size_row
+ .word acorndata_8x8
+ .word con_charconvtable
+
+/*
+ * r0 = ptr
+ * r1 = char
+ * r2 = white
+ */
+ENTRY(ll_write_char)
+ stmfd sp!, {r4 - r7, lr}
+@
+@ Smashable regs: {r0 - r3}, [r4 - r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+ /*
+ * calculate offset into character table
+ */
+ mov r1, r1, lsl #3
+ /*
+ * calculate offset required for each row.
+ */
+ adr ip, LC0
+ ldmia ip, {r3, r4, r5, r6, lr}
+ sub ip, ip, r3
+ add r6, r6, ip
+ add lr, lr, ip
+ ldr r4, [r4, ip]
+ ldr r5, [r5, ip]
+ /*
+ * Go to resolution-dependent routine...
+ */
+ cmp r4, #4
+ blt Lrow1bpp
+ add r0, r0, r5, lsl #3 @ Move to bottom of character
+ orr r1, r1, #7
+ ldrb r7, [r6, r1]
+ teq r4, #8
+ beq Lrow8bpplp
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+Lrow4bpplp:
+ ldr r7, [lr, r7, lsl #2]
+ mul r7, r2, r7
+ sub r1, r1, #1 @ avoid using r7 directly after
+ str r7, [r0, -r5]!
+ ldrb r7, [r6, r1]
+ ldr r7, [lr, r7, lsl #2]
+ mul r7, r2, r7
+ tst r1, #7 @ avoid using r7 directly after
+ str r7, [r0, -r5]!
+ subne r1, r1, #1
+ ldrbne r7, [r6, r1]
+ bne Lrow4bpplp
+ ldmfd sp!, {r4 - r7, pc}
+
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5 - r7}, (r8 - fp), [ip], (sp), {lr}, (pc)
+@
+Lrow8bpplp:
+ mov ip, r7, lsr #4
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15 @ avoid r4
+ ldr ip, [lr, ip, lsl #2] @ avoid r4
+ mul ip, r2, ip @ avoid r4
+ sub r1, r1, #1 @ avoid ip
+ sub r0, r0, r5 @ avoid ip
+ stmia r0, {r4, ip}
+ ldrb r7, [r6, r1]
+ mov ip, r7, lsr #4
+ ldr ip, [lr, ip, lsl #2]
+ mul r4, r2, ip
+ and ip, r7, #15 @ avoid r4
+ ldr ip, [lr, ip, lsl #2] @ avoid r4
+ mul ip, r2, ip @ avoid r4
+ tst r1, #7 @ avoid ip
+ sub r0, r0, r5 @ avoid ip
+ stmia r0, {r4, ip}
+ subne r1, r1, #1
+ ldrbne r7, [r6, r1]
+ bne Lrow8bpplp
+ ldmfd sp!, {r4 - r7, pc}
+
+@
+@ Smashable regs: {r0 - r3}, [r4], {r5, r6}, [r7], (r8 - fp), [ip], (sp), [lr], (pc)
+@
+Lrow1bpp:
+ add r6, r6, r1
+ ldmia r6, {r4, r7}
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ mov r4, r4, lsr #8
+ strb r4, [r0], r5
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ mov r7, r7, lsr #8
+ strb r7, [r0], r5
+ ldmfd sp!, {r4 - r7, pc}
+
+ .bss
+ENTRY(con_charconvtable)
+ .space 1024
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
new file mode 100644
index 000000000..e1e9a5dde
--- /dev/null
+++ b/arch/arm/boot/compressed/misc.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * misc.c
+ *
+ * This is a collection of several routines from gzip-1.0.3
+ * adapted for Linux.
+ *
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ *
+ * Modified for ARM Linux by Russell King
+ *
+ * Nicolas Pitre <nico@visuaide.com> 1999/04/14 :
+ * For this code to run directly from Flash, all constant variables must
+ * be marked with 'const' and all other variables initialized at run-time
+ * only. This way all non constant variables will end up in the bss segment,
+ * which should point to addresses in RAM and cleared to 0 on start.
+ * This allows for a much quicker boot time.
+ */
+
+unsigned int __machine_arch_type;
+
+#include <linux/compiler.h> /* for inline */
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include "misc.h"
+
+static void putstr(const char *ptr);
+
+#include CONFIG_UNCOMPRESS_INCLUDE
+
+#ifdef CONFIG_DEBUG_ICEDCC
+
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
+
+static void icedcc_putc(int ch)
+{
+ int status, i = 0x4000000;
+
+ do {
+ if (--i < 0)
+ return;
+
+ asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status));
+ } while (status & (1 << 29));
+
+ asm("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
+}
+
+
+#elif defined(CONFIG_CPU_XSCALE)
+
+static void icedcc_putc(int ch)
+{
+ int status, i = 0x4000000;
+
+ do {
+ if (--i < 0)
+ return;
+
+ asm volatile ("mrc p14, 0, %0, c14, c0, 0" : "=r" (status));
+ } while (status & (1 << 28));
+
+ asm("mcr p14, 0, %0, c8, c0, 0" : : "r" (ch));
+}
+
+#else
+
+static void icedcc_putc(int ch)
+{
+ int status, i = 0x4000000;
+
+ do {
+ if (--i < 0)
+ return;
+
+ asm volatile ("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
+ } while (status & 2);
+
+ asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch));
+}
+
+#endif
+
+#define putc(ch) icedcc_putc(ch)
+#endif
+
+static void putstr(const char *ptr)
+{
+ char c;
+
+ while ((c = *ptr++) != '\0') {
+ if (c == '\n')
+ putc('\r');
+ putc(c);
+ }
+
+ flush();
+}
+
+/*
+ * gzip declarations
+ */
+extern char input_data[];
+extern char input_data_end[];
+
+unsigned char *output_data;
+
+unsigned long free_mem_ptr;
+unsigned long free_mem_end_ptr;
+
+#ifndef arch_error
+#define arch_error(x)
+#endif
+
+void error(char *x)
+{
+ arch_error(x);
+
+ putstr("\n\n");
+ putstr(x);
+ putstr("\n\n -- System halted");
+
+ while(1); /* Halt */
+}
+
+asmlinkage void __div0(void)
+{
+ error("Attempting division by 0!");
+}
+
+const unsigned long __stack_chk_guard = 0x000a0dff;
+
+void __stack_chk_fail(void)
+{
+ error("stack-protector: Kernel stack is corrupted\n");
+}
+
+extern int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x));
+
+
+void
+decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
+ unsigned long free_mem_ptr_end_p,
+ int arch_id)
+{
+ int ret;
+
+ output_data = (unsigned char *)output_start;
+ free_mem_ptr = free_mem_ptr_p;
+ free_mem_end_ptr = free_mem_ptr_end_p;
+ __machine_arch_type = arch_id;
+
+ arch_decomp_setup();
+
+ putstr("Uncompressing Linux...");
+ ret = do_decompress(input_data, input_data_end - input_data,
+ output_data, error);
+ if (ret)
+ error("decompressor returned an error");
+ else
+ putstr(" done, booting the kernel.\n");
+}
+
+void fortify_panic(const char *name)
+{
+ error("detected buffer overflow");
+}
diff --git a/arch/arm/boot/compressed/misc.h b/arch/arm/boot/compressed/misc.h
new file mode 100644
index 000000000..c958dccd1
--- /dev/null
+++ b/arch/arm/boot/compressed/misc.h
@@ -0,0 +1,10 @@
+#ifndef MISC_H
+#define MISC_H
+
+#include <linux/compiler.h>
+
+void error(char *x) __noreturn;
+extern unsigned long free_mem_ptr;
+extern unsigned long free_mem_end_ptr;
+
+#endif
diff --git a/arch/arm/boot/compressed/piggy.S b/arch/arm/boot/compressed/piggy.S
new file mode 100644
index 000000000..27577644e
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+ .section .piggydata, "a"
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy_data"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c
new file mode 100644
index 000000000..ade5079be
--- /dev/null
+++ b/arch/arm/boot/compressed/string.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/arm/boot/compressed/string.c
+ *
+ * Small subset of simple string routines
+ */
+
+#include <linux/string.h>
+
+void *memcpy(void *__dest, __const void *__src, size_t __n)
+{
+ int i = 0;
+ unsigned char *d = (unsigned char *)__dest, *s = (unsigned char *)__src;
+
+ for (i = __n >> 3; i > 0; i--) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1 << 2) {
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1 << 1) {
+ *d++ = *s++;
+ *d++ = *s++;
+ }
+
+ if (__n & 1)
+ *d++ = *s++;
+
+ return __dest;
+}
+
+void *memmove(void *__dest, __const void *__src, size_t count)
+{
+ unsigned char *d = __dest;
+ const unsigned char *s = __src;
+
+ if (__dest == __src)
+ return __dest;
+
+ if (__dest < __src)
+ return memcpy(__dest, __src, count);
+
+ while (count--)
+ d[count] = s[count];
+ return __dest;
+}
+
+size_t strlen(const char *s)
+{
+ const char *sc = s;
+
+ while (*sc != '\0')
+ sc++;
+ return sc - s;
+}
+
+size_t strnlen(const char *s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
+
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1 = cs, *su2 = ct, *end = su1 + count;
+ int res = 0;
+
+ while (su1 < end) {
+ res = *su1++ - *su2++;
+ if (res)
+ break;
+ }
+ return res;
+}
+
+int strcmp(const char *cs, const char *ct)
+{
+ unsigned char c1, c2;
+ int res = 0;
+
+ do {
+ c1 = *cs++;
+ c2 = *ct++;
+ res = c1 - c2;
+ if (res)
+ break;
+ } while (c1);
+ return res;
+}
+
+void *memchr(const void *s, int c, size_t count)
+{
+ const unsigned char *p = s;
+
+ while (count--)
+ if ((unsigned char)c == *p++)
+ return (void *)(p - 1);
+ return NULL;
+}
+
+char *strchr(const char *s, int c)
+{
+ while (*s != (char)c)
+ if (*s++ == '\0')
+ return NULL;
+ return (char *)s;
+}
+
+char *strrchr(const char *s, int c)
+{
+ const char *last = NULL;
+ do {
+ if (*s == (char)c)
+ last = s;
+ } while (*s++);
+ return (char *)last;
+}
+
+#undef memset
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+ while (count--)
+ *xs++ = c;
+ return s;
+}
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
new file mode 100644
index 000000000..1bcb68ac4
--- /dev/null
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2000 Russell King
+ */
+#include <asm/vmlinux.lds.h>
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
+ (((x) >> 8) & 0x0000ff00) | \
+ (((x) << 8) & 0x00ff0000) | \
+ (((x) << 24) & 0xff000000) )
+#else
+#define ZIMAGE_MAGIC(x) (x)
+#endif
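(Editorial aside, not part of the diff.) On CONFIG_CPU_ENDIAN_BE8 builds the linker writes words out big-endian, so ZIMAGE_MAGIC() pre-swaps the 32-bit header constants and they land in the zImage in the same byte order as on a little-endian build. A hedged, self-contained C illustration of the same swap (zimage_magic_swap is an invented name):

    #include <stdint.h>
    #include <stdio.h>

    /* Same byte swap the ZIMAGE_MAGIC() macro performs on BE8 builds. */
    static uint32_t zimage_magic_swap(uint32_t x)
    {
            return ((x >> 24) & 0x000000ffu) |
                   ((x >>  8) & 0x0000ff00u) |
                   ((x <<  8) & 0x00ff0000u) |
                   ((x << 24) & 0xff000000u);
    }

    int main(void)
    {
            /* 0x016f2818 is the magic assigned to _magic_sig further down. */
            printf("0x%08x\n",
                   (unsigned int)zimage_magic_swap(0x016f2818)); /* 0x18286f01 */
            return 0;
    }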
+
+OUTPUT_ARCH(arm)
+ENTRY(_start)
+SECTIONS
+{
+ /DISCARD/ : {
+ COMMON_DISCARDS
+ *(.ARM.exidx*)
+ *(.ARM.extab*)
+ *(.note.*)
+ *(.rel.*)
+ /*
+ * Discard any r/w data - this produces a link error if we have any,
+ * which is required for PIC decompression. Local data generates
+ * GOTOFF relocations, which prevents it from being relocated independently
+ * of the text/got segments.
+ */
+ *(.data)
+ }
+
+ . = TEXT_START;
+ _text = .;
+
+ .text : {
+ _start = .;
+ *(.start)
+ *(.text)
+ *(.text.*)
+ ARM_STUBS_TEXT
+ }
+ .table : ALIGN(4) {
+ _table_start = .;
+ LONG(ZIMAGE_MAGIC(6))
+ LONG(ZIMAGE_MAGIC(0x5a534c4b))
+ LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start))
+ LONG(ZIMAGE_MAGIC(_kernel_bss_size))
+ LONG(ZIMAGE_MAGIC(TEXT_OFFSET))
+ LONG(ZIMAGE_MAGIC(MALLOC_SIZE))
+ LONG(0)
+ _table_end = .;
+ }
+ .rodata : {
+ *(.rodata)
+ *(.rodata.*)
+ *(.data.rel.ro)
+ }
+ .piggydata : {
+ *(.piggydata)
+ __piggy_size_addr = . - 4;
+ }
+
+ . = ALIGN(4);
+ _etext = .;
+
+ .got.plt : { *(.got.plt) }
+#ifndef CONFIG_EFI_STUB
+ _got_start = .;
+ .got : { *(.got) }
+ _got_end = .;
+#endif
+
+ /* ensure the zImage file size is always a multiple of 64 bits */
+ /* (without a dummy byte, ld just ignores the empty section) */
+ .pad : { BYTE(0); . = ALIGN(8); }
+
+#ifdef CONFIG_EFI_STUB
+ .data : ALIGN(4096) {
+ __pecoff_data_start = .;
+ _got_start = .;
+ *(.got)
+ _got_end = .;
+ /*
+ * The EFI stub always executes from RAM, and runs strictly before the
+ * decompressor, so we can make an exception for its r/w data, and keep it
+ */
+ *(.data.efistub .bss.efistub)
+ __pecoff_data_end = .;
+
+ /*
+ * PE/COFF mandates a file size which is a multiple of 512 bytes if the
+ * section size equals or exceeds 4 KB
+ */
+ . = ALIGN(512);
+ }
+ __pecoff_data_rawsize = . - ADDR(.data);
+#endif
+
+ _edata = .;
+
+ /*
+ * The image_end section appears after any additional loadable sections
+ * that the linker may decide to insert in the binary image. Having
+ * this symbol allows further debugging in the near future.
+ */
+ .image_end (NOLOAD) : {
+ /*
+ * EFI requires that the image is aligned to 512 bytes, and appended
+ * DTB requires that we know where the end of the image is. Ensure
+ * that both are satisfied by ensuring that there are no additional
+ * sections emitted into the decompressor image.
+ */
+ _edata_real = .;
+ }
+
+ _magic_sig = ZIMAGE_MAGIC(0x016f2818);
+ _magic_start = ZIMAGE_MAGIC(_start);
+ _magic_end = ZIMAGE_MAGIC(_edata);
+ _magic_table = ZIMAGE_MAGIC(_table_start - _start);
+
+ . = BSS_START;
+ __bss_start = .;
+ .bss : { *(.bss) }
+ _end = .;
+
+ . = ALIGN(8); /* the stack must be 64-bit aligned */
+ .stack : { *(.stack) }
+
+ PROVIDE(__pecoff_data_size = ALIGN(512) - ADDR(.data));
+ PROVIDE(__pecoff_end = ALIGN(512));
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ARM_DETAILS
+
+ ARM_ASSERTS
+}
+ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");