From 8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 20:50:12 +0200
Subject: Merging upstream version 6.8.9.

Signed-off-by: Daniel Baumann
---
 arch/riscv/Makefile | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index a74be78678..0b7d109258 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -43,8 +43,7 @@ else
 	KBUILD_LDFLAGS += -melf32lriscv
 endif
 
-ifeq ($(CONFIG_LD_IS_LLD),y)
-ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y)
+ifndef CONFIG_RISCV_USE_LINKER_RELAXATION
 	KBUILD_CFLAGS += -mno-relax
 	KBUILD_AFLAGS += -mno-relax
 ifndef CONFIG_AS_IS_LLVM
@@ -52,7 +51,6 @@ ifndef CONFIG_AS_IS_LLVM
 	KBUILD_AFLAGS += -Wa,-mno-relax
 endif
 endif
-endif
 
 ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
 	KBUILD_LDFLAGS += --no-relax-gp
@@ -108,7 +106,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
 # unaligned accesses. While unaligned accesses are explicitly allowed in the
 # RISC-V ISA, they're emulated by machine mode traps on all extant
 # architectures. It's faster to have GCC emit only aligned accesses.
+ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
 KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+endif
 
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
@@ -163,6 +163,8 @@ BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi
 
 all:	$(notdir $(KBUILD_IMAGE))
 
+loader.bin: loader
+Image.gz loader vmlinuz.efi: Image
 $(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 	@$(kecho) '  Kernel: $(boot)/$@ is ready'
--
cgit v1.2.3