author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:30 +0000
commit    e54def4ad8144ab15f826416e2e0f290ef1901b4 (patch)
tree      583f8d4bd95cd67c44ff37b878a7eddfca9ab97a /arch/sh
parent    Adding upstream version 6.8.12. (diff)
download  linux-e54def4ad8144ab15f826416e2e0f290ef1901b4.tar.xz
          linux-e54def4ad8144ab15f826416e2e0f290ef1901b4.zip

Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>

Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                        |  1
-rw-r--r--  arch/sh/boot/compressed/Makefile       |  1
-rw-r--r--  arch/sh/cchips/hd6446x/hd64461.c       |  2
-rw-r--r--  arch/sh/drivers/dma/dma-sysfs.c        |  2
-rw-r--r--  arch/sh/include/asm/cachetype.h        |  9
-rw-r--r--  arch/sh/include/asm/page.h             | 13
-rw-r--r--  arch/sh/include/asm/word-at-a-time.h   |  2
-rw-r--r--  arch/sh/kernel/Makefile                |  1
-rw-r--r--  arch/sh/kernel/kprobes.c               |  7
-rw-r--r--  arch/sh/kernel/machine_kexec.c         | 14
-rw-r--r--  arch/sh/kernel/setup.c                 |  2
-rw-r--r--  arch/sh/kernel/vmcore_info.c           | 15
-rw-r--r--  arch/sh/lib/checksum.S                 | 67
-rw-r--r--  arch/sh/mm/Kconfig                     | 42
14 files changed, 103 insertions, 75 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 7500521b2b..2ad3e29f0e 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -2,6 +2,7 @@
config SUPERH
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_HAS_CPU_CACHE_ALIASING
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU
select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index b5e29f99c0..6c6c791a1d 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -12,6 +12,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo $(OBJECTS)
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
#
# IMAGE_OFFSET is the load offset of the compression loader
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index f3fba96744..81764882d8 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -72,7 +72,7 @@ static void hd64461_irq_demux(struct irq_desc *desc)
}
}
-int __init setup_hd64461(void)
+static int __init setup_hd64461(void)
{
int irq_base, i;
diff --git a/arch/sh/drivers/dma/dma-sysfs.c b/arch/sh/drivers/dma/dma-sysfs.c
index 431bc18f0a..9f666280d8 100644
--- a/arch/sh/drivers/dma/dma-sysfs.c
+++ b/arch/sh/drivers/dma/dma-sysfs.c
@@ -15,7 +15,7 @@
#include <linux/string.h>
#include <asm/dma.h>
-static struct bus_type dma_subsys = {
+static const struct bus_type dma_subsys = {
.name = "dma",
.dev_name = "dma",
};
diff --git a/arch/sh/include/asm/cachetype.h b/arch/sh/include/asm/cachetype.h
new file mode 100644
index 0000000000..a5fffe5360
--- /dev/null
+++ b/arch/sh/include/asm/cachetype.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_SH_CACHETYPE_H
+#define __ASM_SH_CACHETYPE_H
+
+#include <linux/types.h>
+
+#define cpu_dcache_is_aliasing() true
+
+#endif
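
Note: this new header backs the ARCH_HAS_CPU_CACHE_ALIASING selection added in the Kconfig hunk above. SuperH declares that its data cache may be virtually aliasing, so generic code that cannot tolerate aliasing D-caches can detect this at run time. A minimal sketch of how a caller might gate on it; the function and its surroundings are illustrative, not part of this patch:

    /* Illustrative sketch only, not part of this patch. */
    #include <linux/types.h>
    #include <asm/cachetype.h>  /* cpu_dcache_is_aliasing(), always true on sh */

    static bool can_rely_on_non_aliasing_dcache(void)
    {
            /* Back off when user and kernel mappings of the same physical
             * page may land in different cache lines (virtual aliasing). */
            if (cpu_dcache_is_aliasing())
                    return false;
            return true;
    }
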
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 62f4b9edcb..f780b467e7 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -9,18 +9,7 @@
#include <linux/const.h>
/* PAGE_SHIFT determines the page size */
-#if defined(CONFIG_PAGE_SIZE_4KB)
-# define PAGE_SHIFT 12
-#elif defined(CONFIG_PAGE_SIZE_8KB)
-# define PAGE_SHIFT 13
-#elif defined(CONFIG_PAGE_SIZE_16KB)
-# define PAGE_SHIFT 14
-#elif defined(CONFIG_PAGE_SIZE_64KB)
-# define PAGE_SHIFT 16
-#else
-# error "Bogus kernel page size?"
-#endif
-
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PTE_MASK PAGE_MASK
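
Note: the open-coded PAGE_SHIFT ladder is replaced by the generic CONFIG_PAGE_SHIFT, which Kconfig derives from the HAVE_PAGE_SIZE_* selections added in the arch/sh/mm/Kconfig hunk further down. Assuming the default 4 KiB configuration (the concrete values below are an illustration, not part of the patch), the macros expand exactly as before:

    /* Example expansion with CONFIG_PAGE_SHIFT=12, i.e. 4 KiB pages: */
    #define PAGE_SHIFT CONFIG_PAGE_SHIFT            /* 12 */
    #define PAGE_SIZE  (_AC(1, UL) << PAGE_SHIFT)   /* 0x1000UL */
    #define PAGE_MASK  (~(PAGE_SIZE - 1))           /* 0xfffff000 on 32-bit sh */
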
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
index 4aa398455b..95100ce128 100644
--- a/arch/sh/include/asm/word-at-a-time.h
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -5,6 +5,8 @@
#ifdef CONFIG_CPU_BIG_ENDIAN
# include <asm-generic/word-at-a-time.h>
#else
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
/*
* Little-endian version cribbed from x86.
*/
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 2d7e70537d..ba917008d6 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_IO_TRAPPED) += io_trapped.o
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 74051b8ddf..aed1ea8e2c 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -44,12 +44,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (OPCODE_RTE(opcode))
return -EFAULT; /* Bad breakpoint */
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
p->opcode = opcode;
return 0;
}
+void __kprobes arch_copy_kprobe(struct kprobe *p)
+{
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ p->opcode = *p->addr;
+}
+
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index fa3a7b3619..8321b31d2e 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -137,22 +137,14 @@ void machine_kexec(struct kimage *image)
__ftrace_enabled_restore(save_ftrace_enabled);
}
-void arch_crash_save_vmcoreinfo(void)
-{
-#ifdef CONFIG_NUMA
- VMCOREINFO_SYMBOL(node_data);
- VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
-#endif
-#ifdef CONFIG_X2TLB
- VMCOREINFO_CONFIG(X2TLB);
-#endif
-}
-
void __init reserve_crashkernel(void)
{
unsigned long long crash_size, crash_base;
int ret;
+ if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
+ return;
+
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base, NULL, NULL);
if (ret == 0 && crash_size > 0) {
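
Note: the vmcoreinfo export above moves to the new arch/sh/kernel/vmcore_info.c, and reserve_crashkernel() now bails out early unless CONFIG_CRASH_RESERVE is set, the option the 6.9 crash-code split uses for crashkernel memory reservation. Using IS_ENABLED() instead of an #ifdef keeps the body compiled and type-checked in every configuration while the optimizer drops it when the option is off. A generic sketch of the pattern, using a made-up CONFIG_FOO:

    /* Sketch of the IS_ENABLED() pattern; CONFIG_FOO is hypothetical. */
    #include <linux/init.h>
    #include <linux/kconfig.h>

    void __init foo_reserve(void)
    {
            if (!IS_ENABLED(CONFIG_FOO))    /* becomes "if (!0)" when CONFIG_FOO=n */
                    return;
            /* ... do the reservation work ... */
    }
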
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index d3175f09b3..620e5cf8ae 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -220,7 +220,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
request_resource(res, &code_resource);
request_resource(res, &data_resource);
request_resource(res, &bss_resource);
-#ifdef CONFIG_KEXEC_CORE
+#ifdef CONFIG_CRASH_RESERVE
request_resource(res, &crashk_res);
#endif
diff --git a/arch/sh/kernel/vmcore_info.c b/arch/sh/kernel/vmcore_info.c
new file mode 100644
index 0000000000..a244a204a1
--- /dev/null
+++ b/arch/sh/kernel/vmcore_info.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/vmcore_info.h>
+#include <linux/mm.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_NUMA
+ VMCOREINFO_SYMBOL(node_data);
+ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
+#endif
+#ifdef CONFIG_X2TLB
+ VMCOREINFO_CONFIG(X2TLB);
+#endif
+}
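
Note: this is the arch_crash_save_vmcoreinfo() body removed from machine_kexec.c above, moved verbatim into its own translation unit and built through the new obj-$(CONFIG_VMCORE_INFO) rule in arch/sh/kernel/Makefile, so the vmcoreinfo ELF note no longer depends on the kexec code being built. For illustration only (not in this patch), further values could be exported from inside the same function with the other helpers declared alongside those used above:

    /* Hypothetical additions inside arch_crash_save_vmcoreinfo(): */
    VMCOREINFO_NUMBER(PAGE_SHIFT);          /* export a numeric constant */
    VMCOREINFO_OFFSET(list_head, next);     /* export a struct field offset */
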
diff --git a/arch/sh/lib/checksum.S b/arch/sh/lib/checksum.S
index 06fed5a21e..3e07074e00 100644
--- a/arch/sh/lib/checksum.S
+++ b/arch/sh/lib/checksum.S
@@ -33,8 +33,7 @@
*/
/*
- * unsigned int csum_partial(const unsigned char *buf, int len,
- * unsigned int sum);
+ * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
*/
.text
@@ -46,11 +45,31 @@ ENTRY(csum_partial)
* Fortunately, it is easy to convert 2-byte alignment to 4-byte
* alignment for the unrolled loop.
*/
- mov r5, r1
mov r4, r0
- tst #2, r0 ! Check alignment.
- bt 2f ! Jump if alignment is ok.
+ tst #3, r0 ! Check alignment.
+ bt/s 2f ! Jump if alignment is ok.
+ mov r4, r7 ! Keep a copy to check for alignment
!
+ tst #1, r0 ! Check alignment.
+ bt 21f ! Jump if alignment is boundary of 2bytes.
+
+ ! buf is odd
+ tst r5, r5
+ add #-1, r5
+ bt 9f
+ mov.b @r4+, r0
+ extu.b r0, r0
+ addc r0, r6 ! t=0 from previous tst
+ mov r6, r0
+ shll8 r6
+ shlr16 r0
+ shlr8 r0
+ or r0, r6
+ mov r4, r0
+ tst #2, r0
+ bt 2f
+21:
+ ! buf is 2 byte aligned (len could be 0)
add #-2, r5 ! Alignment uses up two bytes.
cmp/pz r5 !
bt/s 1f ! Jump if we had at least two bytes.
@@ -58,16 +77,17 @@ ENTRY(csum_partial)
bra 6f
add #2, r5 ! r5 was < 2. Deal with it.
1:
- mov r5, r1 ! Save new len for later use.
mov.w @r4+, r0
extu.w r0, r0
addc r0, r6
bf 2f
add #1, r6
2:
+ ! buf is 4 byte aligned (len could be 0)
+ mov r5, r1
mov #-5, r0
- shld r0, r5
- tst r5, r5
+ shld r0, r1
+ tst r1, r1
bt/s 4f ! if it's =0, go to 4f
clrt
.align 2
@@ -89,30 +109,31 @@ ENTRY(csum_partial)
addc r0, r6
addc r2, r6
movt r0
- dt r5
+ dt r1
bf/s 3b
cmp/eq #1, r0
- ! here, we know r5==0
- addc r5, r6 ! add carry to r6
+ ! here, we know r1==0
+ addc r1, r6 ! add carry to r6
4:
- mov r1, r0
+ mov r5, r0
and #0x1c, r0
tst r0, r0
- bt/s 6f
- mov r0, r5
- shlr2 r5
+ bt 6f
+ ! 4 bytes or more remaining
+ mov r0, r1
+ shlr2 r1
mov #0, r2
5:
addc r2, r6
mov.l @r4+, r2
movt r0
- dt r5
+ dt r1
bf/s 5b
cmp/eq #1, r0
addc r2, r6
- addc r5, r6 ! r5==0 here, so it means add carry-bit
+ addc r1, r6 ! r1==0 here, so it means add carry-bit
6:
- mov r1, r5
+ ! 3 bytes or less remaining
mov #3, r0
and r0, r5
tst r5, r5
@@ -138,6 +159,16 @@ ENTRY(csum_partial)
mov #0, r0
addc r0, r6
9:
+ ! Check if the buffer was misaligned, if so realign sum
+ mov r7, r0
+ tst #1, r0
+ bt 10f
+ mov r6, r0
+ shll8 r6
+ shlr16 r0
+ shlr8 r0
+ or r0, r6
+10:
rts
mov r6, r0
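
Note: the rewritten prologue lets csum_partial() accept buffers that start on an odd address: the first byte is added on its own and the 32-bit accumulator is byte-rotated before the word loop, and the epilogue at labels 9:/10: applies the same rotation again when the saved original pointer (r7) was odd, so that after the usual 16-bit fold the result matches the checksum computed from an aligned start (the standard byte-rotation property of the ones'-complement sum, cf. RFC 1071). A small C model of the shll8/shlr16/shlr8/or sequence used in both places, offered as a sketch rather than kernel code:

    #include <stdint.h>

    /* Rotate the running 32-bit checksum left by 8 bits, mirroring the
     * "shll8 r6; shlr16 r0; shlr8 r0; or r0, r6" sequence above. */
    static uint32_t csum_rotl8(uint32_t sum)
    {
            return (sum << 8) | (sum >> 24);
    }
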
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 455311d9a5..f32a1963ff 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -4,6 +4,9 @@ menu "Memory management options"
config MMU
bool "Support for memory management hardware"
depends on !CPU_SH2
+ select HAVE_PAGE_SIZE_4KB
+ select HAVE_PAGE_SIZE_8KB if X2TLB
+ select HAVE_PAGE_SIZE_64KB if CPU_SH4
default y
help
Some SH processors (such as SH-2/SH-2A) lack an MMU. In order to
@@ -13,6 +16,15 @@ config MMU
turning this off will boot the kernel on these machines with the
MMU implicitly switched off.
+config NOMMU
+ def_bool !MMU
+ select HAVE_PAGE_SIZE_4KB
+ select HAVE_PAGE_SIZE_8KB
+ select HAVE_PAGE_SIZE_16KB
+ select HAVE_PAGE_SIZE_64KB
+ help
+ On MMU-less systems, any of these page sizes can be selected
+
config PAGE_OFFSET
hex
default "0x80000000" if MMU
@@ -148,36 +160,6 @@ config HAVE_SRAM_POOL
select GENERIC_ALLOCATOR
choice
- prompt "Kernel page size"
- default PAGE_SIZE_4KB
-
-config PAGE_SIZE_4KB
- bool "4kB"
- help
- This is the default page size used by all SuperH CPUs.
-
-config PAGE_SIZE_8KB
- bool "8kB"
- depends on !MMU || X2TLB
- help
- This enables 8kB pages as supported by SH-X2 and later MMUs.
-
-config PAGE_SIZE_16KB
- bool "16kB"
- depends on !MMU
- help
- This enables 16kB pages on MMU-less SH systems.
-
-config PAGE_SIZE_64KB
- bool "64kB"
- depends on !MMU || CPU_SH4
- help
- This enables support for 64kB pages, possible on all SH-4
- CPUs and later.
-
-endchoice
-
-choice
prompt "HugeTLB page size"
depends on HUGETLB_PAGE
default HUGETLB_PAGE_SIZE_1MB if PAGE_SIZE_64KB