Diffstat
175 files changed, 36440 insertions, 0 deletions
diff --git a/arch/sh/kernel/.gitignore b/arch/sh/kernel/.gitignore new file mode 100644 index 000000000..bbb90f92d --- /dev/null +++ b/arch/sh/kernel/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vmlinux.lds diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile new file mode 100644 index 000000000..69cd9ac4b --- /dev/null +++ b/arch/sh/kernel/Makefile @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH kernel. +# + +extra-y := vmlinux.lds + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_ftrace.o = -pg +endif + +CFLAGS_REMOVE_return_address.o = -pg + +obj-y := head_32.o debugtraps.o dumpstack.o \ + idle.o io.o irq.o irq_32.o kdebugfs.o \ + machvec.o nmi_debug.o process.o \ + process_32.o ptrace.o ptrace_32.o \ + reboot.o return_address.o \ + setup.o signal_32.o sys_sh.o \ + syscalls_32.o time.o topology.o traps.o \ + traps_32.o unwinder.o + +ifndef CONFIG_GENERIC_IOMAP +obj-y += iomap.o +obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o +endif + +obj-y += sys_sh32.o +obj-y += cpu/ +obj-$(CONFIG_VSYSCALL) += vsyscall/ +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_IO_TRAPPED) += io_trapped.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_DUMP_CODE) += disassemble.o +obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o +obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_callchain.o +obj-$(CONFIG_DMA_NONCOHERENT) += dma-coherent.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c new file mode 100644 index 000000000..a0322e832 --- /dev/null +++ b/arch/sh/kernel/asm-offsets.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is used to generate definitions needed by + * assembly language modules. + * + * We use the technique used in the OSF Mach kernel code: + * generate asm statements containing #defines, + * compile this file to assembler, and then extract the + * #defines from the assembly-language output. 
+ */ + +#include <linux/stddef.h> +#include <linux/types.h> +#include <linux/mm.h> +#include <linux/kbuild.h> +#include <linux/suspend.h> + +#include <asm/thread_info.h> +#include <asm/suspend.h> + +int main(void) +{ + /* offsets into the thread_info struct */ + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); + DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_SIZE, sizeof(struct thread_info)); + +#ifdef CONFIG_HIBERNATION + DEFINE(PBE_ADDRESS, offsetof(struct pbe, address)); + DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address)); + DEFINE(PBE_NEXT, offsetof(struct pbe, next)); + DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs)); +#endif + + DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode)); + DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre)); + DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post)); + DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume)); + DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr)); + DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc)); + DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr)); + DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp)); + DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr)); + DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data)); + DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr)); + DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar)); + DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh)); + DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel)); + DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb)); + DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea)); + DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr)); + DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea)); + DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr)); + DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr)); + DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr)); + DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr)); + return 0; +} diff --git a/arch/sh/kernel/cpu/Makefile b/arch/sh/kernel/cpu/Makefile new file mode 100644 index 000000000..46118236b --- /dev/null +++ b/arch/sh/kernel/cpu/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH CPU-specific backends. +# + +obj-$(CONFIG_CPU_SH2) = sh2/ +obj-$(CONFIG_CPU_SH2A) = sh2a/ +obj-$(CONFIG_CPU_SH3) = sh3/ +obj-$(CONFIG_CPU_SH4) = sh4/ + +# Special cases for family ancestry. + +obj-$(CONFIG_CPU_SH4A) += sh4a/ +obj-$(CONFIG_ARCH_SHMOBILE) += shmobile/ + +# Common interfaces. 
+ +obj-$(CONFIG_SH_ADC) += adc.o +obj-$(CONFIG_SH_CLK_CPG_LEGACY) += clock-cpg.o + +obj-y += irq/ init.o clock.o fpu.o pfc.o proc.o diff --git a/arch/sh/kernel/cpu/adc.c b/arch/sh/kernel/cpu/adc.c new file mode 100644 index 000000000..509136715 --- /dev/null +++ b/arch/sh/kernel/cpu/adc.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sh/kernel/adc.c -- SH3 on-chip ADC support + * + * Copyright (C) 2004 Andriy Skulysh <askulysh@image.kiev.ua> + */ + +#include <linux/module.h> +#include <asm/adc.h> +#include <asm/io.h> + + +int adc_single(unsigned int channel) +{ + int off; + unsigned char csr; + + if (channel >= 8) return -1; + + off = (channel & 0x03) << 2; + + csr = __raw_readb(ADCSR); + csr = channel | ADCSR_ADST | ADCSR_CKS; + __raw_writeb(csr, ADCSR); + + do { + csr = __raw_readb(ADCSR); + } while ((csr & ADCSR_ADF) == 0); + + csr &= ~(ADCSR_ADF | ADCSR_ADST); + __raw_writeb(csr, ADCSR); + + return (((__raw_readb(ADDRAH + off) << 8) | + __raw_readb(ADDRAL + off)) >> 6); +} + +EXPORT_SYMBOL(adc_single); diff --git a/arch/sh/kernel/cpu/clock-cpg.c b/arch/sh/kernel/cpu/clock-cpg.c new file mode 100644 index 000000000..5b75a384c --- /dev/null +++ b/arch/sh/kernel/cpu/clock-cpg.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk.h> +#include <linux/compiler.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> + +static struct clk master_clk = { + .flags = CLK_ENABLE_ON_INIT, + .rate = CONFIG_SH_PCLK_FREQ, +}; + +static struct clk peripheral_clk = { + .parent = &master_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk bus_clk = { + .parent = &master_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk cpu_clk = { + .parent = &master_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +/* + * The ordering of these clocks matters, do not change it. + */ +static struct clk *onchip_clocks[] = { + &master_clk, + &peripheral_clk, + &bus_clk, + &cpu_clk, +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("master_clk", &master_clk), + CLKDEV_CON_ID("peripheral_clk", &peripheral_clk), + CLKDEV_CON_ID("bus_clk", &bus_clk), + CLKDEV_CON_ID("cpu_clk", &cpu_clk), +}; + +int __init __deprecated cpg_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) { + struct clk *clk = onchip_clocks[i]; + arch_init_clk_ops(&clk->ops, i); + if (clk->ops) + ret |= clk_register(clk); + } + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + clk_add_alias("fck", "sh-tmu-sh3.0", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-tmu.0", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-tmu.1", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-tmu.2", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-mtu2", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-cmt-16.0", "peripheral_clk", NULL); + clk_add_alias("fck", "sh-cmt-32.0", "peripheral_clk", NULL); + + return ret; +} + +/* + * Placeholder for compatibility, until the lazy CPUs do this + * on their own. 
+ */ +int __init __weak arch_clk_init(void) +{ + return cpg_clk_init(); +} diff --git a/arch/sh/kernel/cpu/clock.c b/arch/sh/kernel/cpu/clock.c new file mode 100644 index 000000000..6fb34410d --- /dev/null +++ b/arch/sh/kernel/cpu/clock.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/clock.c - SuperH clock framework + * + * Copyright (C) 2005 - 2009 Paul Mundt + * + * This clock framework is derived from the OMAP version by: + * + * Copyright (C) 2004 - 2008 Nokia Corporation + * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com> + * + * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com> + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/clk.h> +#include <asm/clock.h> +#include <asm/machvec.h> + +int __init clk_init(void) +{ + int ret; + +#ifndef CONFIG_COMMON_CLK + ret = arch_clk_init(); + if (unlikely(ret)) { + pr_err("%s: CPU clock registration failed.\n", __func__); + return ret; + } +#endif + + if (sh_mv.mv_clk_init) { + ret = sh_mv.mv_clk_init(); + if (unlikely(ret)) { + pr_err("%s: machvec clock initialization failed.\n", + __func__); + return ret; + } + } + +#ifndef CONFIG_COMMON_CLK + /* Kick the child clocks.. */ + recalculate_root_clocks(); + + /* Enable the necessary init clocks */ + clk_enable_init_clocks(); +#endif + + return ret; +} + + diff --git a/arch/sh/kernel/cpu/fpu.c b/arch/sh/kernel/cpu/fpu.c new file mode 100644 index 000000000..fd6db0ab1 --- /dev/null +++ b/arch/sh/kernel/cpu/fpu.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/sched/signal.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/slab.h> +#include <asm/processor.h> +#include <asm/fpu.h> +#include <asm/traps.h> +#include <asm/ptrace.h> + +int init_fpu(struct task_struct *tsk) +{ + if (tsk_used_math(tsk)) { + if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current) + unlazy_fpu(tsk, task_pt_regs(tsk)); + return 0; + } + + /* + * Memory allocation at the first usage of the FPU and other state. + */ + if (!tsk->thread.xstate) { + tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep, + GFP_KERNEL); + if (!tsk->thread.xstate) + return -ENOMEM; + } + + if (boot_cpu_data.flags & CPU_HAS_FPU) { + struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu; + memset(fp, 0, xstate_size); + fp->fpscr = FPSCR_INIT; + } else { + struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu; + memset(fp, 0, xstate_size); + fp->fpscr = FPSCR_INIT; + } + + set_stopped_child_used_math(tsk); + return 0; +} + +#ifdef CONFIG_SH_FPU +void __fpu_state_restore(void) +{ + struct task_struct *tsk = current; + + restore_fpu(tsk); + + task_thread_info(tsk)->status |= TS_USEDFPU; + tsk->thread.fpu_counter++; +} + +void fpu_state_restore(struct pt_regs *regs) +{ + struct task_struct *tsk = current; + + if (unlikely(!user_mode(regs))) { + printk(KERN_ERR "BUG: FPU is used in kernel mode.\n"); + BUG(); + return; + } + + if (!tsk_used_math(tsk)) { + int ret; + /* + * does a slab alloc which can sleep + */ + local_irq_enable(); + ret = init_fpu(tsk); + local_irq_disable(); + if (ret) { + /* + * ran out of memory! 
+ */ + force_sig(SIGKILL); + return; + } + } + + grab_fpu(regs); + + __fpu_state_restore(); +} + +BUILD_TRAP_HANDLER(fpu_state_restore) +{ + TRAP_HANDLER_DECL; + + fpu_state_restore(regs); +} +#endif /* CONFIG_SH_FPU */ diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c new file mode 100644 index 000000000..1d0087458 --- /dev/null +++ b/arch/sh/kernel/cpu/init.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/init.c + * + * CPU init code + * + * Copyright (C) 2002 - 2009 Paul Mundt + * Copyright (C) 2003 Richard Curnow + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/log2.h> +#include <asm/mmu_context.h> +#include <asm/processor.h> +#include <linux/uaccess.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/cache.h> +#include <asm/elf.h> +#include <asm/io.h> +#include <asm/smp.h> +#include <asm/sh_bios.h> +#include <asm/setup.h> + +#ifdef CONFIG_SH_FPU +#define cpu_has_fpu 1 +#else +#define cpu_has_fpu 0 +#endif + +#ifdef CONFIG_SH_DSP +#define cpu_has_dsp 1 +#else +#define cpu_has_dsp 0 +#endif + +/* + * Generic wrapper for command line arguments to disable on-chip + * peripherals (nofpu, nodsp, and so forth). + */ +#define onchip_setup(x) \ +static int x##_disabled = !cpu_has_##x; \ + \ +static int x##_setup(char *opts) \ +{ \ + x##_disabled = 1; \ + return 1; \ +} \ +__setup("no" __stringify(x), x##_setup); + +onchip_setup(fpu); +onchip_setup(dsp); + +#ifdef CONFIG_SPECULATIVE_EXECUTION +#define CPUOPM 0xff2f0000 +#define CPUOPM_RABD (1 << 5) + +static void speculative_execution_init(void) +{ + /* Clear RABD */ + __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); + + /* Flush the update */ + (void)__raw_readl(CPUOPM); + ctrl_barrier(); +} +#else +#define speculative_execution_init() do { } while (0) +#endif + +#ifdef CONFIG_CPU_SH4A +#define EXPMASK 0xff2f0004 +#define EXPMASK_RTEDS (1 << 0) +#define EXPMASK_BRDSSLP (1 << 1) +#define EXPMASK_MMCAW (1 << 4) + +static void expmask_init(void) +{ + unsigned long expmask = __raw_readl(EXPMASK); + + /* + * Future proofing. + * + * Disable support for slottable sleep instruction, non-nop + * instructions in the rte delay slot, and associative writes to + * the memory-mapped cache array. + */ + expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP | EXPMASK_MMCAW); + + __raw_writel(expmask, EXPMASK); + ctrl_barrier(); +} +#else +#define expmask_init() do { } while (0) +#endif + +/* 2nd-level cache init */ +void __attribute__ ((weak)) l2_cache_init(void) +{ +} + +/* + * Generic first-level cache init + */ +#if !defined(CONFIG_CPU_J2) +static void cache_init(void) +{ + unsigned long ccr, flags; + + jump_to_uncached(); + ccr = __raw_readl(SH_CCR); + + /* + * At this point we don't know whether the cache is enabled or not - a + * bootloader may have enabled it. There are at least 2 things that + * could be dirty in the cache at this point: + * 1. kernel command line set up by boot loader + * 2. spilled registers from the prolog of this function + * => before re-initialising the cache, we must do a purge of the whole + * cache out to memory for safety. As long as nothing is spilled + * during the loop to lines that have already been done, this is safe. + * - RPC + */ + if (ccr & CCR_CACHE_ENABLE) { + unsigned long ways, waysize, addrstart; + + waysize = current_cpu_data.dcache.sets; + +#ifdef CCR_CACHE_ORA + /* + * If the OC is already in RAM mode, we only have + * half of the entries to flush.. 
+ */ + if (ccr & CCR_CACHE_ORA) + waysize >>= 1; +#endif + + waysize <<= current_cpu_data.dcache.entry_shift; + +#ifdef CCR_CACHE_EMODE + /* If EMODE is not set, we only have 1 way to flush. */ + if (!(ccr & CCR_CACHE_EMODE)) + ways = 1; + else +#endif + ways = current_cpu_data.dcache.ways; + + addrstart = CACHE_OC_ADDRESS_ARRAY; + do { + unsigned long addr; + + for (addr = addrstart; + addr < addrstart + waysize; + addr += current_cpu_data.dcache.linesz) + __raw_writel(0, addr); + + addrstart += current_cpu_data.dcache.way_incr; + } while (--ways); + } + + /* + * Default CCR values .. enable the caches + * and invalidate them immediately.. + */ + flags = CCR_CACHE_ENABLE | CCR_CACHE_INVALIDATE; + +#ifdef CCR_CACHE_EMODE + /* Force EMODE if possible */ + if (current_cpu_data.dcache.ways > 1) + flags |= CCR_CACHE_EMODE; + else + flags &= ~CCR_CACHE_EMODE; +#endif + +#if defined(CONFIG_CACHE_WRITETHROUGH) + /* Write-through */ + flags |= CCR_CACHE_WT; +#elif defined(CONFIG_CACHE_WRITEBACK) + /* Write-back */ + flags |= CCR_CACHE_CB; +#else + /* Off */ + flags &= ~CCR_CACHE_ENABLE; +#endif + + l2_cache_init(); + + __raw_writel(flags, SH_CCR); + back_to_cached(); +} +#else +#define cache_init() do { } while (0) +#endif + +#define CSHAPE(totalsize, linesize, assoc) \ + ((totalsize & ~0xff) | (linesize << 4) | assoc) + +#define CACHE_DESC_SHAPE(desc) \ + CSHAPE((desc).way_size * (desc).ways, ilog2((desc).linesz), (desc).ways) + +static void detect_cache_shape(void) +{ + l1d_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.dcache); + + if (current_cpu_data.dcache.flags & SH_CACHE_COMBINED) + l1i_cache_shape = l1d_cache_shape; + else + l1i_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.icache); + + if (current_cpu_data.flags & CPU_HAS_L2_CACHE) + l2_cache_shape = CACHE_DESC_SHAPE(current_cpu_data.scache); + else + l2_cache_shape = -1; /* No S-cache */ +} + +static void fpu_init(void) +{ + /* Disable the FPU */ + if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { + printk("FPU Disabled\n"); + current_cpu_data.flags &= ~CPU_HAS_FPU; + } + + disable_fpu(); + clear_used_math(); +} + +#ifdef CONFIG_SH_DSP +static void release_dsp(void) +{ + unsigned long sr; + + /* Clear SR.DSP bit */ + __asm__ __volatile__ ( + "stc\tsr, %0\n\t" + "and\t%1, %0\n\t" + "ldc\t%0, sr\n\t" + : "=&r" (sr) + : "r" (~SR_DSP) + ); +} + +static void dsp_init(void) +{ + unsigned long sr; + + /* + * Set the SR.DSP bit, wait for one instruction, and then read + * back the SR value. + */ + __asm__ __volatile__ ( + "stc\tsr, %0\n\t" + "or\t%1, %0\n\t" + "ldc\t%0, sr\n\t" + "nop\n\t" + "stc\tsr, %0\n\t" + : "=&r" (sr) + : "r" (SR_DSP) + ); + + /* If the DSP bit is still set, this CPU has a DSP */ + if (sr & SR_DSP) + current_cpu_data.flags |= CPU_HAS_DSP; + + /* Disable the DSP */ + if (dsp_disabled && (current_cpu_data.flags & CPU_HAS_DSP)) { + printk("DSP Disabled\n"); + current_cpu_data.flags &= ~CPU_HAS_DSP; + } + + /* Now that we've determined the DSP status, clear the DSP bit. */ + release_dsp(); +} +#else +static inline void dsp_init(void) { } +#endif /* CONFIG_SH_DSP */ + +/** + * cpu_init + * + * This is our initial entry point for each CPU, and is invoked on the + * boot CPU prior to calling start_kernel(). For SMP, a combination of + * this and start_secondary() will bring up each processor to a ready + * state prior to hand forking the idle loop. + * + * We do all of the basic processor init here, including setting up + * the caches, FPU, DSP, etc. 
By the time start_kernel() is hit (and + * subsequently platform_setup()) things like determining the CPU + * subtype and initial configuration will all be done. + * + * Each processor family is still responsible for doing its own probing + * and cache configuration in cpu_probe(). + */ +asmlinkage void cpu_init(void) +{ + current_thread_info()->cpu = hard_smp_processor_id(); + + /* First, probe the CPU */ + cpu_probe(); + + if (current_cpu_data.type == CPU_SH_NONE) + panic("Unknown CPU"); + + /* First setup the rest of the I-cache info */ + current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr - + current_cpu_data.icache.linesz; + + current_cpu_data.icache.way_size = current_cpu_data.icache.sets * + current_cpu_data.icache.linesz; + + /* And the D-cache too */ + current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr - + current_cpu_data.dcache.linesz; + + current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets * + current_cpu_data.dcache.linesz; + + /* Init the cache */ + cache_init(); + + if (raw_smp_processor_id() == 0) { +#ifdef CONFIG_MMU + shm_align_mask = max_t(unsigned long, + current_cpu_data.dcache.way_size - 1, + PAGE_SIZE - 1); +#else + shm_align_mask = PAGE_SIZE - 1; +#endif + + /* Boot CPU sets the cache shape */ + detect_cache_shape(); + } + + fpu_init(); + dsp_init(); + + /* + * Initialize the per-CPU ASID cache very early, since the + * TLB flushing routines depend on this being setup. + */ + current_cpu_data.asid_cache = NO_CONTEXT; + + current_cpu_data.phys_bits = __in_29bit_mode() ? 29 : 32; + + speculative_execution_init(); + expmask_init(); + + /* Do the rest of the boot processor setup */ + if (raw_smp_processor_id() == 0) { + /* Save off the BIOS VBR, if there is one */ + sh_bios_vbr_init(); + + /* + * Setup VBR for boot CPU. Secondary CPUs do this through + * start_secondary(). + */ + per_cpu_trap_init(); + + /* + * Boot processor to setup the FP and extended state + * context info. + */ + init_thread_xstate(); + } +} diff --git a/arch/sh/kernel/cpu/irq/Makefile b/arch/sh/kernel/cpu/irq/Makefile new file mode 100644 index 000000000..e4578cde4 --- /dev/null +++ b/arch/sh/kernel/cpu/irq/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH CPU-specific IRQ handlers. +# +obj-y += imask.o +obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o diff --git a/arch/sh/kernel/cpu/irq/imask.c b/arch/sh/kernel/cpu/irq/imask.c new file mode 100644 index 000000000..572585c3f --- /dev/null +++ b/arch/sh/kernel/cpu/irq/imask.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/irq/imask.c + * + * Copyright (C) 1999, 2000 Niibe Yutaka + * + * Simple interrupt handling using IMASK of SR register. + * + */ +/* NOTE: Will not work on level 15 */ +#include <linux/ptrace.h> +#include <linux/errno.h> +#include <linux/kernel_stat.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/spinlock.h> +#include <linux/cache.h> +#include <linux/irq.h> +#include <linux/bitmap.h> +#include <asm/irq.h> + +/* Bitmap of IRQ masked */ +#define IMASK_PRIORITY 15 + +static DECLARE_BITMAP(imask_mask, IMASK_PRIORITY); +static int interrupt_priority; + +static inline void set_interrupt_registers(int ip) +{ + unsigned long __dummy; + + asm volatile( +#ifdef CONFIG_CPU_HAS_SR_RB + "ldc %2, r6_bank\n\t" +#endif + "stc sr, %0\n\t" + "and #0xf0, %0\n\t" + "shlr2 %0\n\t" + "cmp/eq #0x3c, %0\n\t" + "bt/s 1f ! 
CLI-ed\n\t" + " stc sr, %0\n\t" + "and %1, %0\n\t" + "or %2, %0\n\t" + "ldc %0, sr\n" + "1:" + : "=&z" (__dummy) + : "r" (~0xf0), "r" (ip << 4) + : "t"); +} + +static void mask_imask_irq(struct irq_data *data) +{ + unsigned int irq = data->irq; + + clear_bit(irq, imask_mask); + if (interrupt_priority < IMASK_PRIORITY - irq) + interrupt_priority = IMASK_PRIORITY - irq; + set_interrupt_registers(interrupt_priority); +} + +static void unmask_imask_irq(struct irq_data *data) +{ + unsigned int irq = data->irq; + + set_bit(irq, imask_mask); + interrupt_priority = IMASK_PRIORITY - + find_first_zero_bit(imask_mask, IMASK_PRIORITY); + set_interrupt_registers(interrupt_priority); +} + +static struct irq_chip imask_irq_chip = { + .name = "SR.IMASK", + .irq_mask = mask_imask_irq, + .irq_unmask = unmask_imask_irq, + .irq_mask_ack = mask_imask_irq, +}; + +void make_imask_irq(unsigned int irq) +{ + irq_set_chip_and_handler_name(irq, &imask_irq_chip, handle_level_irq, + "level"); +} diff --git a/arch/sh/kernel/cpu/irq/ipr.c b/arch/sh/kernel/cpu/irq/ipr.c new file mode 100644 index 000000000..d41bce71f --- /dev/null +++ b/arch/sh/kernel/cpu/irq/ipr.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Interrupt handling for IPR-based IRQ. + * + * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi + * Copyright (C) 2000 Kazumoto Kojima + * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp> + * Copyright (C) 2006 Paul Mundt + * + * Supported system: + * On-chip supporting modules (TMU, RTC, etc.). + * On-chip supporting modules for SH7709/SH7709A/SH7729. + * Hitachi SolutionEngine external I/O: + * MS7709SE01, MS7709ASE01, and MS7750SE01 + */ +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/topology.h> + +static inline struct ipr_desc *get_ipr_desc(struct irq_data *data) +{ + struct irq_chip *chip = irq_data_get_irq_chip(data); + return container_of(chip, struct ipr_desc, chip); +} + +static void disable_ipr_irq(struct irq_data *data) +{ + struct ipr_data *p = irq_data_get_irq_chip_data(data); + unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx]; + /* Set the priority in IPR to 0 */ + __raw_writew(__raw_readw(addr) & (0xffff ^ (0xf << p->shift)), addr); + (void)__raw_readw(addr); /* Read back to flush write posting */ +} + +static void enable_ipr_irq(struct irq_data *data) +{ + struct ipr_data *p = irq_data_get_irq_chip_data(data); + unsigned long addr = get_ipr_desc(data)->ipr_offsets[p->ipr_idx]; + /* Set priority in IPR back to original value */ + __raw_writew(__raw_readw(addr) | (p->priority << p->shift), addr); +} + +/* + * The shift value is now the number of bits to shift, not the number of + * bits/4. This is to make it easier to read the value directly from the + * datasheets. The IPR address is calculated using the ipr_offset table. 
+ */ +void register_ipr_controller(struct ipr_desc *desc) +{ + int i; + + desc->chip.irq_mask = disable_ipr_irq; + desc->chip.irq_unmask = enable_ipr_irq; + + for (i = 0; i < desc->nr_irqs; i++) { + struct ipr_data *p = desc->ipr_data + i; + int res; + + BUG_ON(p->ipr_idx >= desc->nr_offsets); + BUG_ON(!desc->ipr_offsets[p->ipr_idx]); + + res = irq_alloc_desc_at(p->irq, numa_node_id()); + if (unlikely(res != p->irq && res != -EEXIST)) { + printk(KERN_INFO "can not get irq_desc for %d\n", + p->irq); + continue; + } + + disable_irq_nosync(p->irq); + irq_set_chip_and_handler_name(p->irq, &desc->chip, + handle_level_irq, "level"); + irq_set_chip_data(p->irq, p); + disable_ipr_irq(irq_get_irq_data(p->irq)); + } +} +EXPORT_SYMBOL(register_ipr_controller); diff --git a/arch/sh/kernel/cpu/pfc.c b/arch/sh/kernel/cpu/pfc.c new file mode 100644 index 000000000..062056ede --- /dev/null +++ b/arch/sh/kernel/cpu/pfc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH Pin Function Control Initialization + * + * Copyright (C) 2012 Renesas Solutions Corp. + */ + +#include <linux/init.h> +#include <linux/platform_device.h> + +#include <cpu/pfc.h> + +static struct platform_device sh_pfc_device = { + .id = -1, +}; + +int __init sh_pfc_register(const char *name, + struct resource *resource, u32 num_resources) +{ + sh_pfc_device.name = name; + sh_pfc_device.num_resources = num_resources; + sh_pfc_device.resource = resource; + + return platform_device_register(&sh_pfc_device); +} diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c new file mode 100644 index 000000000..a306bcd6b --- /dev/null +++ b/arch/sh/kernel/cpu/proc.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/seq_file.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/machvec.h> +#include <asm/processor.h> + +static const char *cpu_name[] = { + [CPU_SH7201] = "SH7201", + [CPU_SH7203] = "SH7203", [CPU_SH7263] = "SH7263", + [CPU_SH7264] = "SH7264", [CPU_SH7269] = "SH7269", + [CPU_SH7206] = "SH7206", [CPU_SH7619] = "SH7619", + [CPU_SH7705] = "SH7705", [CPU_SH7706] = "SH7706", + [CPU_SH7707] = "SH7707", [CPU_SH7708] = "SH7708", + [CPU_SH7709] = "SH7709", [CPU_SH7710] = "SH7710", + [CPU_SH7712] = "SH7712", [CPU_SH7720] = "SH7720", + [CPU_SH7721] = "SH7721", [CPU_SH7729] = "SH7729", + [CPU_SH7750] = "SH7750", [CPU_SH7750S] = "SH7750S", + [CPU_SH7750R] = "SH7750R", [CPU_SH7751] = "SH7751", + [CPU_SH7751R] = "SH7751R", [CPU_SH7760] = "SH7760", + [CPU_SH4_202] = "SH4-202", [CPU_SH4_501] = "SH4-501", + [CPU_SH7763] = "SH7763", [CPU_SH7770] = "SH7770", + [CPU_SH7780] = "SH7780", [CPU_SH7781] = "SH7781", + [CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785", + [CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757", + [CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3", + [CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723", + [CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724", + [CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734", + [CPU_J2] = "J2", + [CPU_SH_NONE] = "Unknown" +}; + +const char *get_cpu_subtype(struct sh_cpuinfo *c) +{ + return cpu_name[c->type]; +} +EXPORT_SYMBOL(get_cpu_subtype); + +#ifdef CONFIG_PROC_FS +/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */ +static const char *cpu_flags[] = { + "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr", + "ptea", "llsc", "l2", "op32", "pteaex", NULL +}; + +static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c) +{ + unsigned long i; + + seq_printf(m, "cpu flags\t:"); + + if (!c->flags) { + seq_printf(m, " %s\n", cpu_flags[0]); + 
return; + } + + for (i = 0; cpu_flags[i]; i++) + if ((c->flags & (1 << i))) + seq_printf(m, " %s", cpu_flags[i+1]); + + seq_printf(m, "\n"); +} + +static void show_cacheinfo(struct seq_file *m, const char *type, + struct cache_info info) +{ + unsigned int cache_size; + + cache_size = info.ways * info.sets * info.linesz; + + seq_printf(m, "%s size\t: %2dKiB (%d-way)\n", + type, cache_size >> 10, info.ways); +} + +/* + * Get CPU information for use by the procfs. + */ +static int show_cpuinfo(struct seq_file *m, void *v) +{ + struct sh_cpuinfo *c = v; + unsigned int cpu = c - cpu_data; + + if (!cpu_online(cpu)) + return 0; + + if (cpu == 0) + seq_printf(m, "machine\t\t: %s\n", get_system_type()); + else + seq_printf(m, "\n"); + + seq_printf(m, "processor\t: %d\n", cpu); + seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine); + seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c)); + if (c->cut_major == -1) + seq_printf(m, "cut\t\t: unknown\n"); + else if (c->cut_minor == -1) + seq_printf(m, "cut\t\t: %d.x\n", c->cut_major); + else + seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor); + + show_cpuflags(m, c); + + seq_printf(m, "cache type\t: "); + + /* + * Check for what type of cache we have, we support both the + * unified cache on the SH-2 and SH-3, as well as the harvard + * style cache on the SH-4. + */ + if (c->icache.flags & SH_CACHE_COMBINED) { + seq_printf(m, "unified\n"); + show_cacheinfo(m, "cache", c->icache); + } else { + seq_printf(m, "split (harvard)\n"); + show_cacheinfo(m, "icache", c->icache); + show_cacheinfo(m, "dcache", c->dcache); + } + + /* Optional secondary cache */ + if (c->flags & CPU_HAS_L2_CACHE) + show_cacheinfo(m, "scache", c->scache); + + seq_printf(m, "address sizes\t: %u bits physical\n", c->phys_bits); + + seq_printf(m, "bogomips\t: %lu.%02lu\n", + c->loops_per_jiffy/(500000/HZ), + (c->loops_per_jiffy/(5000/HZ)) % 100); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < NR_CPUS ? cpu_data + *pos : NULL; +} +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} +static void c_stop(struct seq_file *m, void *v) +{ +} +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; +#endif /* CONFIG_PROC_FS */ diff --git a/arch/sh/kernel/cpu/sh2/Makefile b/arch/sh/kernel/cpu/sh2/Makefile new file mode 100644 index 000000000..214c3a5b1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-2 backends. 
+# + +obj-y := ex.o probe.o entry.o + +obj-$(CONFIG_CPU_SUBTYPE_SH7619) += setup-sh7619.o clock-sh7619.o + +# SMP setup +smp-$(CONFIG_CPU_J2) := smp-j2.o +obj-$(CONFIG_SMP) += $(smp-y) diff --git a/arch/sh/kernel/cpu/sh2/clock-sh7619.c b/arch/sh/kernel/cpu/sh2/clock-sh7619.c new file mode 100644 index 000000000..d66d194c7 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/clock-sh7619.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2/clock-sh7619.c + * + * SH7619 support for the clock framework + * + * Copyright (C) 2006 Yoshinori Sato + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/processor.h> + +static const int pll1rate[] = {1,2}; +static const int pfc_divisors[] = {1,2,0,4}; +static unsigned int pll2_mult; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 7]; +} + +static struct sh_clk_ops sh7619_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7619_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 7]; +} + +static struct sh_clk_ops sh7619_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static struct sh_clk_ops sh7619_cpu_clk_ops = { + .recalc = followparent_recalc, +}; + +static struct sh_clk_ops *sh7619_clk_ops[] = { + &sh7619_master_clk_ops, + &sh7619_module_clk_ops, + &sh7619_bus_clk_ops, + &sh7619_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (test_mode_pin(MODE_PIN2 | MODE_PIN0) || + test_mode_pin(MODE_PIN2 | MODE_PIN1)) + pll2_mult = 2; + else if (test_mode_pin(MODE_PIN0) || test_mode_pin(MODE_PIN1)) + pll2_mult = 4; + + BUG_ON(!pll2_mult); + + if (idx < ARRAY_SIZE(sh7619_clk_ops)) + *ops = sh7619_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S new file mode 100644 index 000000000..0a1c2bf21 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/entry.S @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh2/entry.S + * + * The SH-2 exception entry + * + * Copyright (C) 2005-2008 Yoshinori Sato + * Copyright (C) 2005 AXE,Inc. + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <cpu/mmu_context.h> +#include <asm/unistd.h> +#include <asm/errno.h> +#include <asm/page.h> + +/* Offsets to the stack */ +OFF_R0 = 0 /* Return value. New ABI also arg4 */ +OFF_R1 = 4 /* New ABI: arg5 */ +OFF_R2 = 8 /* New ABI: arg6 */ +OFF_R3 = 12 /* New ABI: syscall_nr */ +OFF_R4 = 16 /* New ABI: arg0 */ +OFF_R5 = 20 /* New ABI: arg1 */ +OFF_R6 = 24 /* New ABI: arg2 */ +OFF_R7 = 28 /* New ABI: arg3 */ +OFF_SP = (15*4) +OFF_PC = (16*4) +OFF_SR = (16*4+2*4) +OFF_TRA = (16*4+6*4) + +#include <asm/entry-macros.S> + +ENTRY(exception_handler) + ! stack + ! r0 <- point sp + ! r1 + ! pc + ! sr + ! r0 = temporary + ! r1 = vector (pseudo EXPEVT / INTEVT / TRA) + mov.l r2,@-sp + mov.l r3,@-sp + cli + mov.l $cpu_mode,r2 +#ifdef CONFIG_SMP + mov.l $cpuid,r3 + mov.l @r3,r3 + mov.l @r3,r3 + shll2 r3 + add r3,r2 +#endif + mov.l @r2,r0 + mov.l @(5*4,r15),r3 ! previous SR + or r0,r3 ! 
set MD + tst r0,r0 + bf/s 1f ! previous mode check + mov.l r3,@(5*4,r15) ! update SR + ! switch to kernel mode + mov.l __md_bit,r0 + mov.l r0,@r2 ! enter kernel mode + mov.l $current_thread_info,r2 +#ifdef CONFIG_SMP + mov.l $cpuid,r0 + mov.l @r0,r0 + mov.l @r0,r0 + shll2 r0 + add r0,r2 +#endif + mov.l @r2,r2 + mov #(THREAD_SIZE >> 8),r0 + shll8 r0 + add r2,r0 + mov r15,r2 ! r2 = user stack top + mov r0,r15 ! switch kernel stack + mov.l r1,@-r15 ! TRA + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + mov.l @(5*4,r2),r0 + mov.l r0,@-r15 ! original SR + sts.l pr,@-r15 + mov.l @(4*4,r2),r0 + mov.l r0,@-r15 ! original PC + mov r2,r3 + add #(4+2)*4,r3 ! rewind r0 - r3 + exception frame + mov.l r3,@-r15 ! original SP + mov.l r14,@-r15 + mov.l r13,@-r15 + mov.l r12,@-r15 + mov.l r11,@-r15 + mov.l r10,@-r15 + mov.l r9,@-r15 + mov.l r8,@-r15 + mov.l r7,@-r15 + mov.l r6,@-r15 + mov.l r5,@-r15 + mov.l r4,@-r15 + mov r1,r9 ! save TRA + mov r2,r8 ! copy user -> kernel stack + mov.l @(0,r8),r3 + mov.l r3,@-r15 + mov.l @(4,r8),r2 + mov.l r2,@-r15 + mov.l @(12,r8),r1 + mov.l r1,@-r15 + mov.l @(8,r8),r0 + bra 2f + mov.l r0,@-r15 +1: + ! in kernel exception + mov #(22-4-4-1)*4+4,r0 + mov r15,r2 + sub r0,r15 + mov.l @r2+,r0 ! old R3 + mov.l r0,@-r15 + mov.l @r2+,r0 ! old R2 + mov.l r0,@-r15 + mov.l @(4,r2),r0 ! old R1 + mov.l r0,@-r15 + mov.l @r2,r0 ! old R0 + mov.l r0,@-r15 + add #8,r2 + mov.l @r2+,r3 ! old PC + mov.l @r2+,r0 ! old SR + add #-4,r2 ! exception frame stub (sr) + mov.l r1,@-r2 ! TRA + sts.l macl, @-r2 + sts.l mach, @-r2 + stc.l gbr, @-r2 + mov.l r0,@-r2 ! save old SR + sts.l pr,@-r2 + mov.l r3,@-r2 ! save old PC + mov r2,r0 + add #8*4,r0 + mov.l r0,@-r2 ! save old SP + mov.l r14,@-r2 + mov.l r13,@-r2 + mov.l r12,@-r2 + mov.l r11,@-r2 + mov.l r10,@-r2 + mov.l r9,@-r2 + mov.l r8,@-r2 + mov.l r7,@-r2 + mov.l r6,@-r2 + mov.l r5,@-r2 + mov.l r4,@-r2 + mov r1,r9 + mov.l @(OFF_R0,r15),r0 + mov.l @(OFF_R1,r15),r1 + mov.l @(OFF_R2,r15),r2 + mov.l @(OFF_R3,r15),r3 +2: + mov #64,r8 + cmp/hs r8,r9 + bt interrupt_entry ! vec >= 64 is interrupt + mov #31,r8 + cmp/hs r8,r9 + bt trap_entry ! 64 > vec >= 31 is trap +#ifdef CONFIG_CPU_J2 + mov #16,r8 + cmp/hs r8,r9 + bt interrupt_entry ! 31 > vec >= 16 is interrupt +#endif + + mov.l 4f,r8 + mov r9,r4 + shll2 r9 + add r9,r8 + mov.l @r8,r8 ! exception handler address + tst r8,r8 + bf 3f + mov.l 8f,r8 ! unhandled exception +3: + mov.l 5f,r10 + jmp @r8 + lds r10,pr + +interrupt_entry: + mov r9,r4 + mov r15,r5 + mov.l 6f,r9 + mov.l 7f,r8 + jmp @r8 + lds r9,pr + + .align 2 +4: .long exception_handling_table +5: .long ret_from_exception +6: .long ret_from_irq +7: .long do_IRQ +8: .long exception_error + +trap_entry: + mov #0x30,r8 + cmp/ge r8,r9 ! vector 0x1f-0x2f is systemcall + bt 1f + mov #0x1f,r9 ! convert to unified SH2/3/4 trap number +1: + shll2 r9 ! TRA + bra system_call ! 
jump common systemcall entry + mov r9,r8 + +#if defined(CONFIG_SH_STANDARD_BIOS) + /* Unwind the stack and jmp to the debug entry */ +ENTRY(sh_bios_handler) + mov r15,r0 + add #(22-4)*4-4,r0 + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + mov r15,r0 + mov.l @(OFF_SP,r0),r1 + mov #OFF_SR,r2 + mov.l @(r0,r2),r3 + mov.l r3,@-r1 + mov #OFF_SP,r2 + mov.l @(r0,r2),r3 + mov.l r3,@-r1 + mov r15,r0 + add #(22-4)*4-8,r0 + mov.l 1f,r2 + mov.l @r2,r2 + stc sr,r3 + mov.l r2,@r0 + mov.l r3,@(4,r0) + mov.l r1,@(8,r0) + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + add #8,r15 + lds.l @r15+, pr + mov.l @r15+,r15 + rte + nop + .align 2 +1: .long gdb_vbr_vector +#endif /* CONFIG_SH_STANDARD_BIOS */ + +ENTRY(address_error_trap_handler) + mov r15,r4 ! regs + mov #OFF_PC,r0 + mov.l @(r0,r15),r6 ! pc + mov.l 1f,r0 + jmp @r0 + mov #0,r5 ! writeaccess is unknown + + .align 2 +1: .long do_address_error + +restore_all: + stc sr,r0 + or #0xf0,r0 + ldc r0,sr ! all interrupt block (same BL = 1) + ! restore special register + ! overlap exception frame + mov r15,r0 + add #17*4,r0 + lds.l @r0+,pr + add #4,r0 + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + mov r15,r0 + mov.l $cpu_mode,r2 +#ifdef CONFIG_SMP + mov.l $cpuid,r3 + mov.l @r3,r3 + mov.l @r3,r3 + shll2 r3 + add r3,r2 +#endif + mov #OFF_SR,r3 + mov.l @(r0,r3),r1 + mov.l __md_bit,r3 + and r1,r3 ! copy MD bit + mov.l r3,@r2 + shll2 r1 ! clear MD bit + shlr2 r1 + mov.l @(OFF_SP,r0),r2 + add #-8,r2 + mov.l r2,@(OFF_SP,r0) ! point exception frame top + mov.l r1,@(4,r2) ! set sr + mov #OFF_PC,r3 + mov.l @(r0,r3),r1 + mov.l r1,@r2 ! set pc + get_current_thread_info r0, r1 + mov.l $current_thread_info,r1 +#ifdef CONFIG_SMP + mov.l $cpuid,r3 + mov.l @r3,r3 + mov.l @r3,r3 + shll2 r3 + add r3,r1 +#endif + mov.l r0,@r1 + mov.l @r15+,r0 + mov.l @r15+,r1 + mov.l @r15+,r2 + mov.l @r15+,r3 + mov.l @r15+,r4 + mov.l @r15+,r5 + mov.l @r15+,r6 + mov.l @r15+,r7 + mov.l @r15+,r8 + mov.l @r15+,r9 + mov.l @r15+,r10 + mov.l @r15+,r11 + mov.l @r15+,r12 + mov.l @r15+,r13 + mov.l @r15+,r14 + mov.l @r15,r15 + rte + nop + + .align 2 +__md_bit: + .long 0x40000000 +$current_thread_info: + .long __current_thread_info +$cpu_mode: + .long __cpu_mode +#ifdef CONFIG_SMP +$cpuid: + .long sh2_cpuid_addr +#endif + +! common exception handler +#include "../../entry-common.S" + +#ifdef CONFIG_NR_CPUS +#define NR_CPUS CONFIG_NR_CPUS +#else +#define NR_CPUS 1 +#endif + + .data +! cpu operation mode +! bit30 = MD (compatible SH3/4) +__cpu_mode: + .rept NR_CPUS + .long 0x40000000 + .endr + +#ifdef CONFIG_SMP +.global sh2_cpuid_addr +sh2_cpuid_addr: + .long dummy_cpuid +dummy_cpuid: + .long 0 +#endif + + .section .bss +__current_thread_info: + .rept NR_CPUS + .long 0 + .endr + +ENTRY(exception_handling_table) + .space 4*32 diff --git a/arch/sh/kernel/cpu/sh2/ex.S b/arch/sh/kernel/cpu/sh2/ex.S new file mode 100644 index 000000000..dd0cc887a --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/ex.S @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh2/ex.S + * + * The SH-2 exception vector table + * + * Copyright (C) 2005 Yoshinori Sato + */ + +#include <linux/linkage.h> + +! +! convert Exception Vector to Exception Number +! 
+exception_entry: +no = 0 + .rept 256 + mov.l r1,@-sp + bra exception_trampoline + mov #no,r1 +no = no + 1 + .endr +exception_trampoline: + mov.l r0,@-sp + mov.l $exception_handler,r0 + extu.b r1,r1 + jmp @r0 + extu.w r1,r1 + + .align 2 +$exception_entry: + .long exception_entry +$exception_handler: + .long exception_handler +! +! Exception Vector Base +! + .align 2 +ENTRY(vbr_base) +vector = 0 + .rept 256 + .long exception_entry + vector * 6 +vector = vector + 1 + .endr diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c new file mode 100644 index 000000000..70a07f4f2 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/probe.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2/probe.c + * + * CPU Subtype Probing for SH-2. + * + * Copyright (C) 2002 Paul Mundt + */ +#include <linux/init.h> +#include <linux/of_fdt.h> +#include <linux/smp.h> +#include <linux/io.h> +#include <asm/processor.h> +#include <asm/cache.h> + +#if defined(CONFIG_CPU_J2) +extern u32 __iomem *j2_ccr_base; +static int __init scan_cache(unsigned long node, const char *uname, + int depth, void *data) +{ + if (!of_flat_dt_is_compatible(node, "jcore,cache")) + return 0; + + j2_ccr_base = ioremap(of_flat_dt_translate_address(node), 4); + + return 1; +} +#endif + +void __ref cpu_probe(void) +{ +#if defined(CONFIG_CPU_SUBTYPE_SH7619) + boot_cpu_data.type = CPU_SH7619; + boot_cpu_data.dcache.ways = 4; + boot_cpu_data.dcache.way_incr = (1<<12); + boot_cpu_data.dcache.sets = 256; + boot_cpu_data.dcache.entry_shift = 4; + boot_cpu_data.dcache.linesz = L1_CACHE_BYTES; + boot_cpu_data.dcache.flags = 0; +#endif + +#if defined(CONFIG_CPU_J2) +#if defined(CONFIG_SMP) + unsigned cpu = hard_smp_processor_id(); +#else + unsigned cpu = 0; +#endif + if (cpu == 0) of_scan_flat_dt(scan_cache, NULL); + if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu); + if (cpu != 0) return; + boot_cpu_data.type = CPU_J2; + + /* These defaults are appropriate for the original/current + * J2 cache. Once there is a proper framework for getting cache + * info from device tree, we should switch to that. 
*/ + boot_cpu_data.dcache.ways = 1; + boot_cpu_data.dcache.sets = 256; + boot_cpu_data.dcache.entry_shift = 5; + boot_cpu_data.dcache.linesz = 32; + boot_cpu_data.dcache.flags = 0; + + boot_cpu_data.flags |= CPU_HAS_CAS_L; +#else + /* + * SH-2 doesn't have separate caches + */ + boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; +#endif + boot_cpu_data.icache = boot_cpu_data.dcache; + boot_cpu_data.family = CPU_FAMILY_SH2; +} diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c new file mode 100644 index 000000000..b1c877b6a --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7619 Setup + * + * Copyright (C) 2006 Yoshinori Sato + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_eth.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + WDT, EDMAC, CMT0, CMT1, + SCIF0, SCIF1, SCIF2, + HIF_HIFI, HIF_HIFBI, + DMAC0, DMAC1, DMAC2, DMAC3, + SIOF, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 80), INTC_IRQ(IRQ5, 81), + INTC_IRQ(IRQ6, 82), INTC_IRQ(IRQ7, 83), + INTC_IRQ(WDT, 84), INTC_IRQ(EDMAC, 85), + INTC_IRQ(CMT0, 86), INTC_IRQ(CMT1, 87), + INTC_IRQ(SCIF0, 88), INTC_IRQ(SCIF0, 89), + INTC_IRQ(SCIF0, 90), INTC_IRQ(SCIF0, 91), + INTC_IRQ(SCIF1, 92), INTC_IRQ(SCIF1, 93), + INTC_IRQ(SCIF1, 94), INTC_IRQ(SCIF1, 95), + INTC_IRQ(SCIF2, 96), INTC_IRQ(SCIF2, 97), + INTC_IRQ(SCIF2, 98), INTC_IRQ(SCIF2, 99), + INTC_IRQ(HIF_HIFI, 100), INTC_IRQ(HIF_HIFBI, 101), + INTC_IRQ(DMAC0, 104), INTC_IRQ(DMAC1, 105), + INTC_IRQ(DMAC2, 106), INTC_IRQ(DMAC3, 107), + INTC_IRQ(SIOF, 108), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xf8140006, 0, 16, 4, /* IPRA */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xf8140008, 0, 16, 4, /* IPRB */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xf8080000, 0, 16, 4, /* IPRC */ { WDT, EDMAC, CMT0, CMT1 } }, + { 0xf8080002, 0, 16, 4, /* IPRD */ { SCIF0, SCIF1, SCIF2 } }, + { 0xf8080004, 0, 16, 4, /* IPRE */ { HIF_HIFI, HIF_HIFBI } }, + { 0xf8080006, 0, 16, 4, /* IPRF */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, + { 0xf8080008, 0, 16, 4, /* IPRG */ { SIOF } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7619", vectors, NULL, + NULL, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xf8400000, 0x100), + DEFINE_RES_IRQ(88), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xf8410000, 0x100), + DEFINE_RES_IRQ(92), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + 
+static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xf8420000, 0x100), + DEFINE_RES_IRQ(96), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct sh_eth_plat_data eth_platform_data = { + .phy = 1, + .phy_interface = PHY_INTERFACE_MODE_MII, +}; + +static struct resource eth_resources[] = { + [0] = { + .start = 0xfb000000, + .end = 0xfb0001c7, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 85, + .end = 85, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device eth_device = { + .name = "sh7619-ether", + .id = -1, + .dev = { + .platform_data = ð_platform_data, + }, + .num_resources = ARRAY_SIZE(eth_resources), + .resource = eth_resources, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 3, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0xf84a0070, 0x10), + DEFINE_RES_IRQ(86), + DEFINE_RES_IRQ(87), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-16", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct platform_device *sh7619_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + ð_device, + &cmt_device, +}; + +static int __init sh7619_devices_setup(void) +{ + return platform_add_devices(sh7619_devices, + ARRAY_SIZE(sh7619_devices)); +} +arch_initcall(sh7619_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7619_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &cmt_device, +}; + +#define STBCR3 0xf80a0000 + +void __init plat_early_device_setup(void) +{ + /* enable CMT clock */ + __raw_writeb(__raw_readb(STBCR3) & ~0x10, STBCR3); + + sh_early_platform_add_devices(sh7619_early_devices, + ARRAY_SIZE(sh7619_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh2/smp-j2.c b/arch/sh/kernel/cpu/sh2/smp-j2.c new file mode 100644 index 000000000..d0d5d8145 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2/smp-j2.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SMP support for J2 processor + * + * Copyright (C) 2015-2016 Smart Energy Instruments, Inc. 
+ */ + +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <asm/cmpxchg.h> + +DEFINE_PER_CPU(unsigned, j2_ipi_messages); + +extern u32 *sh2_cpuid_addr; +static u32 *j2_ipi_trigger; +static int j2_ipi_irq; + +static irqreturn_t j2_ipi_interrupt_handler(int irq, void *arg) +{ + unsigned cpu = hard_smp_processor_id(); + volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu); + unsigned messages, i; + + do messages = *pmsg; + while (cmpxchg(pmsg, messages, 0) != messages); + + if (!messages) return IRQ_NONE; + + for (i=0; i<SMP_MSG_NR; i++) + if (messages & (1U<<i)) + smp_message_recv(i); + + return IRQ_HANDLED; +} + +static void j2_smp_setup(void) +{ +} + +static void j2_prepare_cpus(unsigned int max_cpus) +{ + struct device_node *np; + unsigned i, max = 1; + + np = of_find_compatible_node(NULL, NULL, "jcore,ipi-controller"); + if (!np) + goto out; + + j2_ipi_irq = irq_of_parse_and_map(np, 0); + j2_ipi_trigger = of_iomap(np, 0); + if (!j2_ipi_irq || !j2_ipi_trigger) + goto out; + + np = of_find_compatible_node(NULL, NULL, "jcore,cpuid-mmio"); + if (!np) + goto out; + + sh2_cpuid_addr = of_iomap(np, 0); + if (!sh2_cpuid_addr) + goto out; + + if (request_irq(j2_ipi_irq, j2_ipi_interrupt_handler, IRQF_PERCPU, + "ipi", (void *)j2_ipi_interrupt_handler) != 0) + goto out; + + max = max_cpus; +out: + /* Disable any cpus past max_cpus, or all secondaries if we didn't + * get the necessary resources to support SMP. */ + for (i=max; i<NR_CPUS; i++) { + set_cpu_possible(i, false); + set_cpu_present(i, false); + } +} + +static void j2_start_cpu(unsigned int cpu, unsigned long entry_point) +{ + struct device_node *np; + u32 regs[2]; + void __iomem *release, *initpc; + + if (!cpu) return; + + np = of_get_cpu_node(cpu, NULL); + if (!np) return; + + if (of_property_read_u32_array(np, "cpu-release-addr", regs, 2)) return; + release = ioremap(regs[0], sizeof(u32)); + initpc = ioremap(regs[1], sizeof(u32)); + + __raw_writel(entry_point, initpc); + __raw_writel(1, release); + + iounmap(initpc); + iounmap(release); + + pr_info("J2 SMP: requested start of cpu %u\n", cpu); +} + +static unsigned int j2_smp_processor_id(void) +{ + return __raw_readl(sh2_cpuid_addr); +} + +static void j2_send_ipi(unsigned int cpu, unsigned int message) +{ + volatile unsigned *pmsg; + unsigned old; + unsigned long val; + + /* There is only one IPI interrupt shared by all messages, so + * we keep a separate interrupt flag per message type in sw. */ + pmsg = &per_cpu(j2_ipi_messages, cpu); + do old = *pmsg; + while (cmpxchg(pmsg, old, old|(1U<<message)) != old); + + /* Generate the actual interrupt by writing to CCRn bit 28. */ + val = __raw_readl(j2_ipi_trigger + cpu); + __raw_writel(val | (1U<<28), j2_ipi_trigger + cpu); +} + +static struct plat_smp_ops j2_smp_ops = { + .smp_setup = j2_smp_setup, + .prepare_cpus = j2_prepare_cpus, + .start_cpu = j2_start_cpu, + .smp_processor_id = j2_smp_processor_id, + .send_ipi = j2_send_ipi, + .cpu_die = native_cpu_die, + .cpu_disable = native_cpu_disable, + .play_dead = native_play_dead, +}; + +CPU_METHOD_OF_DECLARE(j2_cpu_method, "jcore,spin-table", &j2_smp_ops); diff --git a/arch/sh/kernel/cpu/sh2a/Makefile b/arch/sh/kernel/cpu/sh2a/Makefile new file mode 100644 index 000000000..2a7515b65 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/Makefile @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-2A backends. 
+# + +obj-y := common.o probe.o opcode_helper.o + +common-y += ex.o entry.o + +obj-$(CONFIG_SH_FPU) += fpu.o + +obj-$(CONFIG_CPU_SUBTYPE_SH7201) += setup-sh7201.o clock-sh7201.o +obj-$(CONFIG_CPU_SUBTYPE_SH7203) += setup-sh7203.o clock-sh7203.o +obj-$(CONFIG_CPU_SUBTYPE_SH7263) += setup-sh7203.o clock-sh7203.o +obj-$(CONFIG_CPU_SUBTYPE_SH7264) += setup-sh7264.o clock-sh7264.o +obj-$(CONFIG_CPU_SUBTYPE_SH7206) += setup-sh7206.o clock-sh7206.o +obj-$(CONFIG_CPU_SUBTYPE_SH7269) += setup-sh7269.o clock-sh7269.o +obj-$(CONFIG_CPU_SUBTYPE_MXG) += setup-mxg.o clock-sh7206.o + +# Pinmux setup +pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o + +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c new file mode 100644 index 000000000..5a5daaafb --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7201.c + * + * SH7201 support for the clock framework + * + * Copyright (C) 2008 Peter Griffin <pgriffin@mpc-data.co.uk> + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static const int pll1rate[]={1,2,3,4,6,8}; +static const int pfc_divisors[]={1,2,3,4,6,8,12}; +#define ifc_divisors pfc_divisors + +static unsigned int pll2_mult; + +static void master_clk_init(struct clk *clk) +{ + clk->rate = 10000000 * pll2_mult * + pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; +} + +static struct sh_clk_ops sh7201_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7201_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7201_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readw(FREQCR) >> 4) & 0x0007); + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7201_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7201_clk_ops[] = { + &sh7201_master_clk_ops, + &sh7201_module_clk_ops, + &sh7201_bus_clk_ops, + &sh7201_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (test_mode_pin(MODE_PIN1 | MODE_PIN0)) + pll2_mult = 1; + else if (test_mode_pin(MODE_PIN1)) + pll2_mult = 2; + else + pll2_mult = 4; + + if (idx < ARRAY_SIZE(sh7201_clk_ops)) + *ops = sh7201_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7203.c b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c new file mode 100644 index 000000000..c62053945 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7203.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7203.c + * + * SH7203 support for the clock framework + * + * Copyright (C) 2007 Kieran Bingham (MPC-Data Ltd) + * + * Based on clock-sh7263.c + * Copyright (C) 2006 Yoshinori Sato + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> 
+#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static const int pll1rate[]={8,12,16,0}; +static const int pfc_divisors[]={1,2,3,4,6,8,12}; +#define ifc_divisors pfc_divisors + +static unsigned int pll2_mult; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0003] * pll2_mult; +} + +static struct sh_clk_ops sh7203_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7203_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx-2]; +} + +static struct sh_clk_ops sh7203_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static struct sh_clk_ops sh7203_cpu_clk_ops = { + .recalc = followparent_recalc, +}; + +static struct sh_clk_ops *sh7203_clk_ops[] = { + &sh7203_master_clk_ops, + &sh7203_module_clk_ops, + &sh7203_bus_clk_ops, + &sh7203_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (test_mode_pin(MODE_PIN1)) + pll2_mult = 4; + else if (test_mode_pin(MODE_PIN0)) + pll2_mult = 2; + else + pll2_mult = 1; + + if (idx < ARRAY_SIZE(sh7203_clk_ops)) + *ops = sh7203_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7206.c b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c new file mode 100644 index 000000000..d286d7b91 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7206.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7206.c + * + * SH7206 support for the clock framework + * + * Copyright (C) 2006 Yoshinori Sato + * + * Based on clock-sh4.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static const int pll1rate[]={1,2,3,4,6,8}; +static const int pfc_divisors[]={1,2,3,4,6,8,12}; +#define ifc_divisors pfc_divisors + +static unsigned int pll2_mult; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pll2_mult * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; +} + +static struct sh_clk_ops sh7206_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7206_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + return clk->parent->rate / pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007]; +} + +static struct sh_clk_ops sh7206_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FREQCR) & 0x0007); + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7206_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7206_clk_ops[] = { + &sh7206_master_clk_ops, + &sh7206_module_clk_ops, + &sh7206_bus_clk_ops, + &sh7206_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (test_mode_pin(MODE_PIN2 | MODE_PIN1 | MODE_PIN0)) + pll2_mult = 1; + else if (test_mode_pin(MODE_PIN2 | MODE_PIN1)) + pll2_mult = 2; + else if (test_mode_pin(MODE_PIN1)) + pll2_mult = 4; + + if (idx < 
ARRAY_SIZE(sh7206_clk_ops)) + *ops = sh7206_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7264.c b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c new file mode 100644 index 000000000..d9acc1ed7 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7264.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7264.c + * + * SH7264 clock framework support + * + * Copyright (C) 2012 Phil Edworthy + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> + +/* SH7264 registers */ +#define FRQCR 0xfffe0010 +#define STBCR3 0xfffe0408 +#define STBCR4 0xfffe040c +#define STBCR5 0xfffe0410 +#define STBCR6 0xfffe0414 +#define STBCR7 0xfffe0418 +#define STBCR8 0xfffe041c + +static const unsigned int pll1rate[] = {8, 12}; + +static unsigned int pll1_div; + +/* Fixed 32 KHz root clock for RTC */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 18000000, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long rate = clk->parent->rate / pll1_div; + return rate * pll1rate[(__raw_readw(FRQCR) >> 8) & 1]; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &pll_clk, +}; + +static int div2[] = { 1, 2, 3, 4, 6, 8, 12 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_P, + DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +/* The mask field specifies the div2 entries that are valid */ +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 4, 0x7, CLK_ENABLE_REG_16BIT + | CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCR, 0, 0x78, CLK_ENABLE_REG_16BIT), +}; + +enum { MSTP77, MSTP74, MSTP72, + MSTP60, + MSTP35, MSTP34, MSTP33, MSTP32, MSTP30, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + [MSTP77] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 7, 0), /* SCIF */ + [MSTP74] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 4, 0), /* VDC */ + [MSTP72] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR7, 2, 0), /* CMT */ + [MSTP60] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR6, 0, 0), /* USB */ + [MSTP35] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 6, 0), /* MTU2 */ + [MSTP34] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 4, 0), /* SDHI0 */ + [MSTP33] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 3, 0), /* SDHI1 */ + [MSTP32] = SH_CLK_MSTP8(&div4_clks[DIV4_P], STBCR3, 2, 0), /* ADC */ + [MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */ +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + + /* MSTP clocks */ + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.4", 
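The clkdev table being built here is how drivers find these clocks by name: CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP77]) means that when the sh-sci.0 device asks for its "fck" clock it is handed the SCIF module-stop gate, which SH_CLK_MSTP8() ties to bit 7 of STBCR7. A minimal consumer-side sketch of that lookup, assuming a driver probe path with a struct device *dev (illustrative only, the function name is made up and nothing here is taken from this file):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_fck(struct device *dev)
{
	struct clk *fck = clk_get(dev, "fck");	/* resolved through the clkdev table */

	if (IS_ERR(fck))
		return PTR_ERR(fck);

	return clk_enable(fck);	/* ungates the module by clearing its STBCR stop bit */
}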
&mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.6", &mstp_clks[MSTP77]), + CLKDEV_ICK_ID("fck", "sh-sci.7", &mstp_clks[MSTP77]), + CLKDEV_CON_ID("vdc3", &mstp_clks[MSTP74]), + CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]), + CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]), + CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]), + CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP34]), + CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP33]), + CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]), + CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + if (test_mode_pin(MODE_PIN0)) { + if (test_mode_pin(MODE_PIN1)) + pll1_div = 3; + else + pll1_div = 4; + } else + pll1_div = 1; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7269.c b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c new file mode 100644 index 000000000..c17ab0d76 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/clock-sh7269.c @@ -0,0 +1,181 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/clock-sh7269.c + * + * SH7269 clock framework support + * + * Copyright (C) 2012 Phil Edworthy + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> + +/* SH7269 registers */ +#define FRQCR 0xfffe0010 +#define STBCR3 0xfffe0408 +#define STBCR4 0xfffe040c +#define STBCR5 0xfffe0410 +#define STBCR6 0xfffe0414 +#define STBCR7 0xfffe0418 + +#define PLL_RATE 20 + +/* Fixed 32 KHz root clock for RTC */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. 
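For reference, the fixed numbers in this file work out as follows: with the default 13.34 MHz EXTAL below, pll_recalc() gives 13.34 MHz x 20 (PLL_RATE) = 266.8 MHz, peripheral0_clk is an eighth of that (about 33.4 MHz) and peripheral1_clk a quarter (about 66.7 MHz); the CPU and bus clocks are then derived from the FRQCR divisor fields through the div4 table.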
+ */ +static struct clk extal_clk = { + .rate = 13340000, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + return clk->parent->rate * PLL_RATE; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long peripheral0_recalc(struct clk *clk) +{ + return clk->parent->rate / 8; +} + +static struct sh_clk_ops peripheral0_clk_ops = { + .recalc = peripheral0_recalc, +}; + +static struct clk peripheral0_clk = { + .ops = &peripheral0_clk_ops, + .parent = &pll_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long peripheral1_recalc(struct clk *clk) +{ + return clk->parent->rate / 4; +} + +static struct sh_clk_ops peripheral1_clk_ops = { + .recalc = peripheral1_recalc, +}; + +static struct clk peripheral1_clk = { + .ops = &peripheral1_clk_ops, + .parent = &pll_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &pll_clk, + &peripheral0_clk, + &peripheral1_clk, +}; + +static int div2[] = { 1, 2, 0, 4 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_B, + DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +/* The mask field specifies the div2 entries that are valid */ +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 8, 0xB, CLK_ENABLE_REG_16BIT + | CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCR, 4, 0xA, CLK_ENABLE_REG_16BIT + | CLK_ENABLE_ON_INIT), +}; + +enum { MSTP72, + MSTP60, + MSTP47, MSTP46, MSTP45, MSTP44, MSTP43, MSTP42, MSTP41, MSTP40, + MSTP35, MSTP32, MSTP30, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + [MSTP72] = SH_CLK_MSTP8(&peripheral0_clk, STBCR7, 2, 0), /* CMT */ + [MSTP60] = SH_CLK_MSTP8(&peripheral1_clk, STBCR6, 0, 0), /* USB */ + [MSTP47] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 7, 0), /* SCIF0 */ + [MSTP46] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 6, 0), /* SCIF1 */ + [MSTP45] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 5, 0), /* SCIF2 */ + [MSTP44] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 4, 0), /* SCIF3 */ + [MSTP43] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 3, 0), /* SCIF4 */ + [MSTP42] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 2, 0), /* SCIF5 */ + [MSTP41] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 1, 0), /* SCIF6 */ + [MSTP40] = SH_CLK_MSTP8(&peripheral1_clk, STBCR4, 0, 0), /* SCIF7 */ + [MSTP35] = SH_CLK_MSTP8(&peripheral0_clk, STBCR3, 5, 0), /* MTU2 */ + [MSTP32] = SH_CLK_MSTP8(&peripheral1_clk, STBCR3, 2, 0), /* ADC */ + [MSTP30] = SH_CLK_MSTP8(&r_clk, STBCR3, 0, 0), /* RTC */ +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + CLKDEV_CON_ID("peripheral_clk", &peripheral1_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + + /* MSTP clocks */ + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP47]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP46]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP45]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP44]), + CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP43]), + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP42]), + CLKDEV_ICK_ID("fck", 
"sh-sci.6", &mstp_clks[MSTP41]), + CLKDEV_ICK_ID("fck", "sh-sci.7", &mstp_clks[MSTP40]), + CLKDEV_ICK_ID("fck", "sh-cmt-16.0", &mstp_clks[MSTP72]), + CLKDEV_CON_ID("usb0", &mstp_clks[MSTP60]), + CLKDEV_ICK_ID("fck", "sh-mtu2", &mstp_clks[MSTP35]), + CLKDEV_CON_ID("adc0", &mstp_clks[MSTP32]), + CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP30]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S new file mode 100644 index 000000000..9f11fc8b5 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/entry.S @@ -0,0 +1,247 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh2a/entry.S + * + * The SH-2A exception entry + * + * Copyright (C) 2008 Yoshinori Sato + * Based on arch/sh/kernel/cpu/sh2/entry.S + */ + +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <cpu/mmu_context.h> +#include <asm/unistd.h> +#include <asm/errno.h> +#include <asm/page.h> + +/* Offsets to the stack */ +OFF_R0 = 0 /* Return value. New ABI also arg4 */ +OFF_R1 = 4 /* New ABI: arg5 */ +OFF_R2 = 8 /* New ABI: arg6 */ +OFF_R3 = 12 /* New ABI: syscall_nr */ +OFF_R4 = 16 /* New ABI: arg0 */ +OFF_R5 = 20 /* New ABI: arg1 */ +OFF_R6 = 24 /* New ABI: arg2 */ +OFF_R7 = 28 /* New ABI: arg3 */ +OFF_SP = (15*4) +OFF_PC = (16*4) +OFF_SR = (16*4+2*4) +OFF_TRA = (16*4+6*4) + +#include <asm/entry-macros.S> + +ENTRY(exception_handler) + ! stack + ! r0 <- point sp + ! r1 + ! pc + ! sr + ! r0 = temporary + ! r1 = vector (pseudo EXPEVT / INTEVT / TRA) + mov.l r2,@-sp + cli + mov.l $cpu_mode,r2 + bld.b #6,@(0,r2) !previus SR.MD + bst.b #6,@(4*4,r15) !set cpu mode to SR.MD + bt 1f + ! switch to kernel mode + bset.b #6,@(0,r2) !set SR.MD + mov.l $current_thread_info,r2 + mov.l @r2,r2 + mov #(THREAD_SIZE >> 8),r0 + shll8 r0 + add r2,r0 ! r0 = kernel stack tail + mov r15,r2 ! r2 = user stack top + mov r0,r15 ! switch kernel stack + mov.l r1,@-r15 ! TRA + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + mov.l @(4*4,r2),r0 + mov.l r0,@-r15 ! original SR + sts.l pr,@-r15 + mov.l @(3*4,r2),r0 + mov.l r0,@-r15 ! original PC + mov r2,r0 + add #(3+2)*4,r0 ! rewind r0 - r3 + exception frame + lds r0,pr ! pr = original SP + movmu.l r3,@-r15 ! save regs + mov r2,r8 ! r8 = previus stack top + mov r1,r9 ! r9 = interrupt vector + ! restore previous stack + mov.l @r8+,r2 + mov.l @r8+,r0 + mov.l @r8+,r1 + bra 2f + movml.l r2,@-r15 +1: + ! in kernel exception + mov r15,r2 + add #-((OFF_TRA + 4) - OFF_PC) + 5*4,r15 + movmu.l r3,@-r15 + mov r2,r8 ! r8 = previous stack top + mov r1,r9 ! r9 = interrupt vector + ! restore exception frame & regs + mov.l @r8+,r2 ! old R2 + mov.l @r8+,r0 ! old R0 + mov.l @r8+,r1 ! old R1 + mov.l @r8+,r10 ! old PC + mov.l @r8+,r11 ! old SR + movml.l r2,@-r15 + mov.l r10,@(OFF_PC,r15) + mov.l r11,@(OFF_SR,r15) + mov.l r8,@(OFF_SP,r15) ! save old sp + mov r15,r8 + add #OFF_TRA + 4,r8 + mov.l r9,@-r8 + sts.l macl,@-r8 + sts.l mach,@-r8 + stc.l gbr,@-r8 + add #-4,r8 + sts.l pr,@-r8 +2: + ! dispatch exception / interrupt + mov #64,r8 + cmp/hs r8,r9 + bt interrupt_entry ! vec >= 64 is interrupt + mov #31,r8 + cmp/hs r8,r9 + bt trap_entry ! 
64 > vec >= 31 is trap + + mov.l 4f,r8 + mov r9,r4 + shll2 r9 + add r9,r8 + mov.l @r8,r8 ! exception handler address + tst r8,r8 + bf 3f + mov.l 8f,r8 ! unhandled exception +3: + mov.l 5f,r10 + jmp @r8 + lds r10,pr + +interrupt_entry: + mov r9,r4 + mov r15,r5 + mov.l 7f,r8 + mov.l 6f,r9 + jmp @r8 + lds r9,pr + + .align 2 +4: .long exception_handling_table +5: .long ret_from_exception +6: .long ret_from_irq +7: .long do_IRQ +8: .long exception_error + +trap_entry: + mov #0x30,r8 + cmp/ge r8,r9 ! vector 0x1f-0x2f is systemcall + bt 1f + mov #0x1f,r9 ! convert to unified SH2/3/4 trap number +1: + shll2 r9 ! TRA + bra system_call ! jump common systemcall entry + mov r9,r8 + +#if defined(CONFIG_SH_STANDARD_BIOS) + /* Unwind the stack and jmp to the debug entry */ +ENTRY(sh_bios_handler) + mov r15,r0 + add #(22-4)*4-4,r0 + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + mov r15,r0 + mov.l @(OFF_SP,r0),r1 + mov.l @(OFF_SR,r2),r3 + mov.l r3,@-r1 + mov.l @(OFF_SP,r2),r3 + mov.l r3,@-r1 + mov r15,r0 + add #(22-4)*4-8,r0 + mov.l 1f,r2 + mov.l @r2,r2 + stc sr,r3 + mov.l r2,@r0 + mov.l r3,@(4,r0) + mov.l r1,@(8,r0) + movml.l @r15+,r14 + add #8,r15 + lds.l @r15+, pr + mov.l @r15+,r15 + rte + nop + .align 2 +1: .long gdb_vbr_vector +#endif /* CONFIG_SH_STANDARD_BIOS */ + +ENTRY(address_error_trap_handler) + mov r15,r4 ! regs + mov.l @(OFF_PC,r15),r6 ! pc + mov.l 1f,r0 + jmp @r0 + mov #0,r5 ! writeaccess is unknown + + .align 2 +1: .long do_address_error + +restore_all: + stc sr,r0 + or #0xf0,r0 + ldc r0,sr ! all interrupt block (same BL = 1) + ! restore special register + ! overlap exception frame + mov r15,r0 + add #17*4,r0 + lds.l @r0+,pr + add #4,r0 + ldc.l @r0+,gbr + lds.l @r0+,mach + lds.l @r0+,macl + mov r15,r0 + mov.l $cpu_mode,r2 + bld.b #6,@(OFF_SR,r15) + bst.b #6,@(0,r2) ! save CPU mode + mov.l @(OFF_SR,r0),r1 + shll2 r1 + shlr2 r1 ! clear MD bit + mov.l @(OFF_SP,r0),r2 + add #-8,r2 + mov.l r2,@(OFF_SP,r0) ! point exception frame top + mov.l r1,@(4,r2) ! set sr + mov.l @(OFF_PC,r0),r1 + mov.l r1,@r2 ! set pc + get_current_thread_info r0, r1 + mov.l $current_thread_info,r1 + mov.l r0,@r1 + movml.l @r15+,r14 + mov.l @r15,r15 + rte + nop + + .align 2 +$current_thread_info: + .long __current_thread_info +$cpu_mode: + .long __cpu_mode + +! common exception handler +#include "../../entry-common.S" + + .data +! cpu operation mode +! bit30 = MD (compatible SH3/4) +__cpu_mode: + .long 0x40000000 + + .section .bss +__current_thread_info: + .long 0 + +ENTRY(exception_handling_table) + .space 4*32 diff --git a/arch/sh/kernel/cpu/sh2a/ex.S b/arch/sh/kernel/cpu/sh2a/ex.S new file mode 100644 index 000000000..ed9199628 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/ex.S @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh2a/ex.S + * + * The SH-2A exception vector table + * + * Copyright (C) 2008 Yoshinori Sato + */ + +#include <linux/linkage.h> + +! +! convert Exception Vector to Exception Number +! + +! exception no 0 to 255 +exception_entry0: +no = 0 + .rept 256 + mov.l r1,@-sp + bra exception_trampoline0 + mov #no,r1 +no = no + 1 + .endr +exception_trampoline0: + mov.l r0,@-sp + mov.l 1f,r0 + extu.b r1,r1 + jmp @r0 + extu.w r1,r1 + + .align 2 +1: .long exception_handler + +! 
exception no 256 to 511 +exception_entry1: +no = 0 + .rept 256 + mov.l r1,@-sp + bra exception_trampoline1 + mov #no,r1 +no = no + 1 + .endr +exception_trampoline1: + mov.l r0,@-sp + extu.b r1,r1 + movi20 #0x100,r0 + add r0,r1 + mov.l 1f,r0 + jmp @r0 + extu.w r1,r1 + + .align 2 +1: .long exception_handler + + ! +! Exception Vector Base +! + .align 2 +ENTRY(vbr_base) +vector = 0 + .rept 256 + .long exception_entry0 + vector * 6 +vector = vector + 1 + .endr +vector = 0 + .rept 256 + .long exception_entry1 + vector * 6 +vector = vector + 1 + .endr diff --git a/arch/sh/kernel/cpu/sh2a/fpu.c b/arch/sh/kernel/cpu/sh2a/fpu.c new file mode 100644 index 000000000..0bcff11a4 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/fpu.c @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Save/restore floating point context for signal handlers. + * + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * + * FIXME! These routines can be optimized in big endian case. + */ +#include <linux/sched/signal.h> +#include <linux/signal.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/fpu.h> +#include <asm/traps.h> + +/* The PR (precision) bit in the FP Status Register must be clear when + * an frchg instruction is executed, otherwise the instruction is undefined. + * Executing frchg with PR set causes a trap on some SH4 implementations. + */ + +#define FPSCR_RCHG 0x00000000 + + +/* + * Save FPU registers onto task structure. + */ +void save_fpu(struct task_struct *tsk) +{ + unsigned long dummy; + + enable_fpu(); + asm volatile("sts.l fpul, @-%0\n\t" + "sts.l fpscr, @-%0\n\t" + "fmov.s fr15, @-%0\n\t" + "fmov.s fr14, @-%0\n\t" + "fmov.s fr13, @-%0\n\t" + "fmov.s fr12, @-%0\n\t" + "fmov.s fr11, @-%0\n\t" + "fmov.s fr10, @-%0\n\t" + "fmov.s fr9, @-%0\n\t" + "fmov.s fr8, @-%0\n\t" + "fmov.s fr7, @-%0\n\t" + "fmov.s fr6, @-%0\n\t" + "fmov.s fr5, @-%0\n\t" + "fmov.s fr4, @-%0\n\t" + "fmov.s fr3, @-%0\n\t" + "fmov.s fr2, @-%0\n\t" + "fmov.s fr1, @-%0\n\t" + "fmov.s fr0, @-%0\n\t" + "lds %3, fpscr\n\t" + : "=r" (dummy) + : "0" ((char *)(&tsk->thread.xstate->hardfpu.status)), + "r" (FPSCR_RCHG), + "r" (FPSCR_INIT) + : "memory"); + + disable_fpu(); +} + +void restore_fpu(struct task_struct *tsk) +{ + unsigned long dummy; + + enable_fpu(); + asm volatile("fmov.s @%0+, fr0\n\t" + "fmov.s @%0+, fr1\n\t" + "fmov.s @%0+, fr2\n\t" + "fmov.s @%0+, fr3\n\t" + "fmov.s @%0+, fr4\n\t" + "fmov.s @%0+, fr5\n\t" + "fmov.s @%0+, fr6\n\t" + "fmov.s @%0+, fr7\n\t" + "fmov.s @%0+, fr8\n\t" + "fmov.s @%0+, fr9\n\t" + "fmov.s @%0+, fr10\n\t" + "fmov.s @%0+, fr11\n\t" + "fmov.s @%0+, fr12\n\t" + "fmov.s @%0+, fr13\n\t" + "fmov.s @%0+, fr14\n\t" + "fmov.s @%0+, fr15\n\t" + "lds.l @%0+, fpscr\n\t" + "lds.l @%0+, fpul\n\t" + : "=r" (dummy) + : "0" (tsk->thread.xstate), "r" (FPSCR_RCHG) + : "memory"); + disable_fpu(); +} + +/* + * Emulate arithmetic ops on denormalized number for some FPU insns. 
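Some background for the helpers that follow: these FPUs flag an FPU error when an operand is a denormal (an IEEE-754 value whose exponent field is all zeroes), and the kernel then finishes the multiply or add in software using the denormal_* routines below. A tiny standalone illustration of such a value, assuming IEEE-754 single-precision floats (not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Exponent bits all zero, mantissa 1: the smallest positive denormal. */
	unsigned int bits = 0x00000001;
	float f;

	memcpy(&f, &bits, sizeof(f));
	printf("%g\n", (double)f);	/* roughly 1.4e-45, i.e. 2^-149 */

	return 0;
}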
+ */ + +/* denormalized float * float */ +static int denormal_mulf(int hx, int hy) +{ + unsigned int ix, iy; + unsigned long long m, n; + int exp, w; + + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + if (iy < 0x00800000 || ix == 0) + return ((hx ^ hy) & 0x80000000); + + exp = (iy & 0x7f800000) >> 23; + ix &= 0x007fffff; + iy = (iy & 0x007fffff) | 0x00800000; + m = (unsigned long long)ix * iy; + n = m; + w = -1; + while (n) { n >>= 1; w++; } + + /* FIXME: use guard bits */ + exp += w - 126 - 46; + if (exp > 0) + ix = ((int) (m >> (w - 23)) & 0x007fffff) | (exp << 23); + else if (exp + 22 >= 0) + ix = (int) (m >> (w - 22 - exp)) & 0x007fffff; + else + ix = 0; + + ix |= (hx ^ hy) & 0x80000000; + return ix; +} + +/* denormalized double * double */ +static void mult64(unsigned long long x, unsigned long long y, + unsigned long long *highp, unsigned long long *lowp) +{ + unsigned long long sub0, sub1, sub2, sub3; + unsigned long long high, low; + + sub0 = (x >> 32) * (unsigned long) (y >> 32); + sub1 = (x & 0xffffffffLL) * (unsigned long) (y >> 32); + sub2 = (x >> 32) * (unsigned long) (y & 0xffffffffLL); + sub3 = (x & 0xffffffffLL) * (unsigned long) (y & 0xffffffffLL); + low = sub3; + high = 0LL; + sub3 += (sub1 << 32); + if (low > sub3) + high++; + low = sub3; + sub3 += (sub2 << 32); + if (low > sub3) + high++; + low = sub3; + high += (sub1 >> 32) + (sub2 >> 32); + high += sub0; + *lowp = low; + *highp = high; +} + +static inline long long rshift64(unsigned long long mh, + unsigned long long ml, int n) +{ + if (n >= 64) + return mh >> (n - 64); + return (mh << (64 - n)) | (ml >> n); +} + +static long long denormal_muld(long long hx, long long hy) +{ + unsigned long long ix, iy; + unsigned long long mh, ml, nh, nl; + int exp, w; + + ix = hx & 0x7fffffffffffffffLL; + iy = hy & 0x7fffffffffffffffLL; + if (iy < 0x0010000000000000LL || ix == 0) + return ((hx ^ hy) & 0x8000000000000000LL); + + exp = (iy & 0x7ff0000000000000LL) >> 52; + ix &= 0x000fffffffffffffLL; + iy = (iy & 0x000fffffffffffffLL) | 0x0010000000000000LL; + mult64(ix, iy, &mh, &ml); + nh = mh; + nl = ml; + w = -1; + if (nh) { + while (nh) { nh >>= 1; w++;} + w += 64; + } else + while (nl) { nl >>= 1; w++;} + + /* FIXME: use guard bits */ + exp += w - 1022 - 52 * 2; + if (exp > 0) + ix = (rshift64(mh, ml, w - 52) & 0x000fffffffffffffLL) + | ((long long)exp << 52); + else if (exp + 51 >= 0) + ix = rshift64(mh, ml, w - 51 - exp) & 0x000fffffffffffffLL; + else + ix = 0; + + ix |= (hx ^ hy) & 0x8000000000000000LL; + return ix; +} + +/* ix - iy where iy: denormal and ix, iy >= 0 */ +static int denormal_subf1(unsigned int ix, unsigned int iy) +{ + int frac; + int exp; + + if (ix < 0x00800000) + return ix - iy; + + exp = (ix & 0x7f800000) >> 23; + if (exp - 1 > 31) + return ix; + iy >>= exp - 1; + if (iy == 0) + return ix; + + frac = (ix & 0x007fffff) | 0x00800000; + frac -= iy; + while (frac < 0x00800000) { + if (--exp == 0) + return frac; + frac <<= 1; + } + + return (exp << 23) | (frac & 0x007fffff); +} + +/* ix + iy where iy: denormal and ix, iy >= 0 */ +static int denormal_addf1(unsigned int ix, unsigned int iy) +{ + int frac; + int exp; + + if (ix < 0x00800000) + return ix + iy; + + exp = (ix & 0x7f800000) >> 23; + if (exp - 1 > 31) + return ix; + iy >>= exp - 1; + if (iy == 0) + return ix; + + frac = (ix & 0x007fffff) | 0x00800000; + frac += iy; + if (frac >= 0x01000000) { + frac >>= 1; + ++exp; + } + + return (exp << 23) | (frac & 0x007fffff); +} + +static int denormal_addf(int hx, int hy) +{ + unsigned int ix, iy; + int sign; + + 
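The addition helpers are written for sign-magnitude operands: denormal_addf() below (and its double-precision counterpart) splits off the sign bits, and when the signs differ it subtracts magnitudes with denormal_subf1(), flipping the result's sign if the subtraction goes negative, while matching signs go through denormal_addf1(); in both cases the helper expects the denormal operand as its second argument, which is why the caller swaps the operands when needed.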
if ((hx ^ hy) & 0x80000000) { + sign = hx & 0x80000000; + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + if (iy < 0x00800000) { + ix = denormal_subf1(ix, iy); + if ((int) ix < 0) { + ix = -ix; + sign ^= 0x80000000; + } + } else { + ix = denormal_subf1(iy, ix); + sign ^= 0x80000000; + } + } else { + sign = hx & 0x80000000; + ix = hx & 0x7fffffff; + iy = hy & 0x7fffffff; + if (iy < 0x00800000) + ix = denormal_addf1(ix, iy); + else + ix = denormal_addf1(iy, ix); + } + + return sign | ix; +} + +/* ix - iy where iy: denormal and ix, iy >= 0 */ +static long long denormal_subd1(unsigned long long ix, unsigned long long iy) +{ + long long frac; + int exp; + + if (ix < 0x0010000000000000LL) + return ix - iy; + + exp = (ix & 0x7ff0000000000000LL) >> 52; + if (exp - 1 > 63) + return ix; + iy >>= exp - 1; + if (iy == 0) + return ix; + + frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL; + frac -= iy; + while (frac < 0x0010000000000000LL) { + if (--exp == 0) + return frac; + frac <<= 1; + } + + return ((long long)exp << 52) | (frac & 0x000fffffffffffffLL); +} + +/* ix + iy where iy: denormal and ix, iy >= 0 */ +static long long denormal_addd1(unsigned long long ix, unsigned long long iy) +{ + long long frac; + long long exp; + + if (ix < 0x0010000000000000LL) + return ix + iy; + + exp = (ix & 0x7ff0000000000000LL) >> 52; + if (exp - 1 > 63) + return ix; + iy >>= exp - 1; + if (iy == 0) + return ix; + + frac = (ix & 0x000fffffffffffffLL) | 0x0010000000000000LL; + frac += iy; + if (frac >= 0x0020000000000000LL) { + frac >>= 1; + ++exp; + } + + return (exp << 52) | (frac & 0x000fffffffffffffLL); +} + +static long long denormal_addd(long long hx, long long hy) +{ + unsigned long long ix, iy; + long long sign; + + if ((hx ^ hy) & 0x8000000000000000LL) { + sign = hx & 0x8000000000000000LL; + ix = hx & 0x7fffffffffffffffLL; + iy = hy & 0x7fffffffffffffffLL; + if (iy < 0x0010000000000000LL) { + ix = denormal_subd1(ix, iy); + if ((int) ix < 0) { + ix = -ix; + sign ^= 0x8000000000000000LL; + } + } else { + ix = denormal_subd1(iy, ix); + sign ^= 0x8000000000000000LL; + } + } else { + sign = hx & 0x8000000000000000LL; + ix = hx & 0x7fffffffffffffffLL; + iy = hy & 0x7fffffffffffffffLL; + if (iy < 0x0010000000000000LL) + ix = denormal_addd1(ix, iy); + else + ix = denormal_addd1(iy, ix); + } + + return sign | ix; +} + +/** + * denormal_to_double - Given denormalized float number, + * store double float + * + * @fpu: Pointer to sh_fpu_hard structure + * @n: Index to FP register + */ +static void +denormal_to_double (struct sh_fpu_hard_struct *fpu, int n) +{ + unsigned long du, dl; + unsigned long x = fpu->fpul; + int exp = 1023 - 126; + + if (x != 0 && (x & 0x7f800000) == 0) { + du = (x & 0x80000000); + while ((x & 0x00800000) == 0) { + x <<= 1; + exp--; + } + x &= 0x007fffff; + du |= (exp << 20) | (x >> 3); + dl = x << 29; + + fpu->fp_regs[n] = du; + fpu->fp_regs[n+1] = dl; + } +} + +/** + * ieee_fpe_handler - Handle denormalized number exception + * + * @regs: Pointer to register structure + * + * Returns 1 when it's handled (should not cause exception). 
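One thing worth spelling out about ieee_fpe_handler() below: the FPU error is reported against the instruction at regs->pc, which may be a branch whose delay slot contains the actual floating-point instruction. The handler therefore decodes bra/bsr, bt/s, bf/s, jmp/jsr, braf/bsrf and rts first; in those cases the offending instruction (finsn) is fetched from pc + 2 and nextpc is set to the branch target, otherwise finsn is the instruction at pc itself and nextpc is simply pc + 2. The (finsn & 0xf00f) comparisons then identify the operation, e.g. 0xf002 for fmul and 0xf000/0xf001 for fadd/fsub, and on success the emulated result is written back and regs->pc is advanced to nextpc.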
+ */ +static int +ieee_fpe_handler (struct pt_regs *regs) +{ + unsigned short insn = *(unsigned short *) regs->pc; + unsigned short finsn; + unsigned long nextpc; + int nib[4] = { + (insn >> 12) & 0xf, + (insn >> 8) & 0xf, + (insn >> 4) & 0xf, + insn & 0xf}; + + if (nib[0] == 0xb || + (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) /* bsr & jsr */ + regs->pr = regs->pc + 4; + if (nib[0] == 0xa || nib[0] == 0xb) { /* bra & bsr */ + nextpc = regs->pc + 4 + ((short) ((insn & 0xfff) << 4) >> 3); + finsn = *(unsigned short *) (regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xd) { /* bt/s */ + if (regs->sr & 1) + nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); + else + nextpc = regs->pc + 4; + finsn = *(unsigned short *) (regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xf) { /* bf/s */ + if (regs->sr & 1) + nextpc = regs->pc + 4; + else + nextpc = regs->pc + 4 + ((char) (insn & 0xff) << 1); + finsn = *(unsigned short *) (regs->pc + 2); + } else if (nib[0] == 0x4 && nib[3] == 0xb && + (nib[2] == 0x0 || nib[2] == 0x2)) { /* jmp & jsr */ + nextpc = regs->regs[nib[1]]; + finsn = *(unsigned short *) (regs->pc + 2); + } else if (nib[0] == 0x0 && nib[3] == 0x3 && + (nib[2] == 0x0 || nib[2] == 0x2)) { /* braf & bsrf */ + nextpc = regs->pc + 4 + regs->regs[nib[1]]; + finsn = *(unsigned short *) (regs->pc + 2); + } else if (insn == 0x000b) { /* rts */ + nextpc = regs->pr; + finsn = *(unsigned short *) (regs->pc + 2); + } else { + nextpc = regs->pc + 2; + finsn = insn; + } + +#define FPSCR_FPU_ERROR (1 << 17) + + if ((finsn & 0xf1ff) == 0xf0ad) { /* fcnvsd */ + struct task_struct *tsk = current; + + if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_FPU_ERROR)) { + /* FPU error */ + denormal_to_double (&tsk->thread.xstate->hardfpu, + (finsn >> 8) & 0xf); + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00f) == 0xf002) { /* fmul */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & (1 << 19); + + if ((fpscr & FPSCR_FPU_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal */ + llx = ((long long) hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n+1]; + lly = ((long long) hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m+1]; + if ((hx & 0x7fffffff) >= 0x00100000) + llx = denormal_muld(lly, llx); + else + llx = denormal_muld(llx, lly); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_FPU_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal */ + if ((hx & 0x7fffffff) >= 0x00800000) + hx = denormal_mulf(hy, hx); + else + hx = denormal_mulf(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00e) == 0xf000) { /* fadd, fsub */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & (1 << 19); + + if ((fpscr & 
FPSCR_FPU_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal */ + llx = ((long long) hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n+1]; + lly = ((long long) hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m+1]; + if ((finsn & 0xf00f) == 0xf000) + llx = denormal_addd(llx, lly); + else + llx = denormal_addd(llx, lly ^ (1LL << 63)); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n+1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_FPU_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal */ + if ((finsn & 0xf00f) == 0xf000) + hx = denormal_addf(hx, hy); + else + hx = denormal_addf(hx, hy ^ 0x80000000); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } + + return 0; +} + +BUILD_TRAP_HANDLER(fpu_error) +{ + struct task_struct *tsk = current; + TRAP_HANDLER_DECL; + + __unlazy_fpu(tsk, regs); + if (ieee_fpe_handler(regs)) { + tsk->thread.xstate->hardfpu.fpscr &= + ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); + grab_fpu(regs); + restore_fpu(tsk); + task_thread_info(tsk)->status |= TS_USEDFPU; + return; + } + + force_sig(SIGFPE); +} diff --git a/arch/sh/kernel/cpu/sh2a/opcode_helper.c b/arch/sh/kernel/cpu/sh2a/opcode_helper.c new file mode 100644 index 000000000..c509081d9 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/opcode_helper.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/opcode_helper.c + * + * Helper for the SH-2A 32-bit opcodes. + * + * Copyright (C) 2007 Paul Mundt + */ +#include <linux/kernel.h> + +/* + * Instructions on SH are generally fixed at 16-bits, however, SH-2A + * introduces some 32-bit instructions. Since there are no real + * constraints on their use (and they can be mixed and matched), we need + * to check the instruction encoding to work out if it's a true 32-bit + * instruction or not. + * + * Presently, 32-bit opcodes have only slight variations in what the + * actual encoding looks like in the first-half of the instruction, which + * makes it fairly straightforward to differentiate from the 16-bit ones. + * + * First 16-bits of encoding Used by + * + * 0011nnnnmmmm0001 mov.b, mov.w, mov.l, fmov.d, + * fmov.s, movu.b, movu.w + * + * 0011nnnn0iii1001 bclr.b, bld.b, bset.b, bst.b, band.b, + * bandnot.b, bldnot.b, bor.b, bornot.b, + * bxor.b + * + * 0000nnnniiii0000 movi20 + * 0000nnnniiii0001 movi20s + */ +unsigned int instruction_size(unsigned int insn) +{ + /* Look for the common cases */ + switch ((insn & 0xf00f)) { + case 0x0000: /* movi20 */ + case 0x0001: /* movi20s */ + case 0x3001: /* 32-bit mov/fmov/movu variants */ + return 4; + } + + /* And the special cases.. 
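A quick decode example against the table above: movi20 #imm20,Rn has a first word of the form 0000nnnniiii0000, so for R4 with the top nibble of the immediate zero that word is 0x0400; 0x0400 & 0xf00f == 0x0000 hits the first case and instruction_size() reports 4 bytes (the remaining immediate bits live in the second 16-bit word). An ordinary 16-bit mov #imm,Rn (1110nnnniiiiiiii, e.g. 0xe47f) matches neither this switch nor the 0xf08f one just below and falls through to the final return 2.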
*/ + switch ((insn & 0xf08f)) { + case 0x3009: /* 32-bit b*.b bit operations */ + return 4; + } + + return 2; +} diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c new file mode 100644 index 000000000..a6777e6fc --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7203.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7203 Pinmux + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7203_pfc_resources[] = { + [0] = { + .start = 0xfffe3800, + .end = 0xfffe3a9f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7203", sh7203_pfc_resources, + ARRAY_SIZE(sh7203_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c new file mode 100644 index 000000000..7a103e16c --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7264.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7264 Pinmux + * + * Copyright (C) 2012 Renesas Electronics Europe Ltd + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7264_pfc_resources[] = { + [0] = { + .start = 0xfffe3800, + .end = 0xfffe393f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7264", sh7264_pfc_resources, + ARRAY_SIZE(sh7264_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c new file mode 100644 index 000000000..4da432ef1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7269 Pinmux + * + * Copyright (C) 2012 Renesas Electronics Europe Ltd + * Copyright (C) 2012 Phil Edworthy + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <cpu/pfc.h> + +static struct resource sh7269_pfc_resources[] = { + [0] = { + .start = 0xfffe3800, + .end = 0xfffe391f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7269", sh7269_pfc_resources, + ARRAY_SIZE(sh7269_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c new file mode 100644 index 000000000..c66a3bc88 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/probe.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh2a/probe.c + * + * CPU Subtype Probing for SH-2A. + * + * Copyright (C) 2004 - 2007 Paul Mundt + */ +#include <linux/init.h> +#include <asm/processor.h> +#include <asm/cache.h> + +void cpu_probe(void) +{ + boot_cpu_data.family = CPU_FAMILY_SH2A; + + /* All SH-2A CPUs have support for 16 and 32-bit opcodes.. 
*/ + boot_cpu_data.flags |= CPU_HAS_OP32; + +#if defined(CONFIG_CPU_SUBTYPE_SH7201) + boot_cpu_data.type = CPU_SH7201; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7203) + boot_cpu_data.type = CPU_SH7203; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7263) + boot_cpu_data.type = CPU_SH7263; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7264) + boot_cpu_data.type = CPU_SH7264; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7269) + boot_cpu_data.type = CPU_SH7269; + boot_cpu_data.flags |= CPU_HAS_FPU; +#elif defined(CONFIG_CPU_SUBTYPE_SH7206) + boot_cpu_data.type = CPU_SH7206; + boot_cpu_data.flags |= CPU_HAS_DSP; +#elif defined(CONFIG_CPU_SUBTYPE_MXG) + boot_cpu_data.type = CPU_MXG; + boot_cpu_data.flags |= CPU_HAS_DSP; +#endif + + boot_cpu_data.dcache.ways = 4; + boot_cpu_data.dcache.way_incr = (1 << 11); + boot_cpu_data.dcache.sets = 128; + boot_cpu_data.dcache.entry_shift = 4; + boot_cpu_data.dcache.linesz = L1_CACHE_BYTES; + boot_cpu_data.dcache.flags = 0; + + /* + * The icache is the same as the dcache as far as this setup is + * concerned. The only real difference in hardware is that the icache + * lacks the U bit that the dcache has, none of this has any bearing + * on the cache info. + */ + boot_cpu_data.icache = boot_cpu_data.dcache; +} diff --git a/arch/sh/kernel/cpu/sh2a/setup-mxg.c b/arch/sh/kernel/cpu/sh2a/setup-mxg.c new file mode 100644 index 000000000..cefa07924 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-mxg.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Renesas MX-G (R8A03022BG) Setup + * + * Copyright (C) 2008, 2009 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + IRQ8, IRQ9, IRQ10, IRQ11, IRQ12, IRQ13, IRQ14, IRQ15, + + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + SINT8, SINT7, SINT6, SINT5, SINT4, SINT3, SINT2, SINT1, + + SCIF0, SCIF1, + + MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3, MTU2_GROUP4, MTU2_GROUP5, + MTU2_TGI3B, MTU2_TGI3C, + + /* interrupt groups */ + PINT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + INTC_IRQ(IRQ8, 72), INTC_IRQ(IRQ9, 73), + INTC_IRQ(IRQ10, 74), INTC_IRQ(IRQ11, 75), + INTC_IRQ(IRQ12, 76), INTC_IRQ(IRQ13, 77), + INTC_IRQ(IRQ14, 78), INTC_IRQ(IRQ15, 79), + + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + + INTC_IRQ(SINT8, 94), INTC_IRQ(SINT7, 95), + INTC_IRQ(SINT6, 96), INTC_IRQ(SINT5, 97), + INTC_IRQ(SINT4, 98), INTC_IRQ(SINT3, 99), + INTC_IRQ(SINT2, 100), INTC_IRQ(SINT1, 101), + + INTC_IRQ(SCIF0, 220), INTC_IRQ(SCIF0, 221), + INTC_IRQ(SCIF0, 222), INTC_IRQ(SCIF0, 223), + INTC_IRQ(SCIF1, 224), INTC_IRQ(SCIF1, 225), + INTC_IRQ(SCIF1, 226), INTC_IRQ(SCIF1, 227), + + INTC_IRQ(MTU2_GROUP1, 228), INTC_IRQ(MTU2_GROUP1, 229), + INTC_IRQ(MTU2_GROUP1, 230), INTC_IRQ(MTU2_GROUP1, 231), + INTC_IRQ(MTU2_GROUP1, 232), INTC_IRQ(MTU2_GROUP1, 233), + + INTC_IRQ(MTU2_GROUP2, 234), INTC_IRQ(MTU2_GROUP2, 235), + INTC_IRQ(MTU2_GROUP2, 236), INTC_IRQ(MTU2_GROUP2, 237), + 
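The dcache parameters filled in by cpu_probe() above work out to 8 KiB: way_incr = 1 << 11 together with 128 sets and entry_shift = 4 implies 16-byte entries, so 4 ways x 128 sets x 16 bytes = 8 KiB, and the icache is set up with the same geometry, the only hardware difference being the U bit mentioned in the comment.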
INTC_IRQ(MTU2_GROUP2, 238), INTC_IRQ(MTU2_GROUP2, 239), + + INTC_IRQ(MTU2_GROUP3, 240), INTC_IRQ(MTU2_GROUP3, 241), + INTC_IRQ(MTU2_GROUP3, 242), INTC_IRQ(MTU2_GROUP3, 243), + + INTC_IRQ(MTU2_TGI3B, 244), + INTC_IRQ(MTU2_TGI3C, 245), + + INTC_IRQ(MTU2_GROUP4, 246), INTC_IRQ(MTU2_GROUP4, 247), + INTC_IRQ(MTU2_GROUP4, 248), INTC_IRQ(MTU2_GROUP4, 249), + INTC_IRQ(MTU2_GROUP4, 250), INTC_IRQ(MTU2_GROUP4, 251), + + INTC_IRQ(MTU2_GROUP5, 252), INTC_IRQ(MTU2_GROUP5, 253), + INTC_IRQ(MTU2_GROUP5, 254), INTC_IRQ(MTU2_GROUP5, 255), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffd9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffd941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffd941c, 0, 16, 4, /* IPR03 */ { IRQ8, IRQ9, IRQ10, IRQ11 } }, + { 0xfffd941e, 0, 16, 4, /* IPR04 */ { IRQ12, IRQ13, IRQ14, IRQ15 } }, + { 0xfffd9420, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, + { 0xfffd9800, 0, 16, 4, /* IPR06 */ { } }, + { 0xfffd9802, 0, 16, 4, /* IPR07 */ { } }, + { 0xfffd9804, 0, 16, 4, /* IPR08 */ { } }, + { 0xfffd9806, 0, 16, 4, /* IPR09 */ { } }, + { 0xfffd9808, 0, 16, 4, /* IPR10 */ { } }, + { 0xfffd980a, 0, 16, 4, /* IPR11 */ { } }, + { 0xfffd980c, 0, 16, 4, /* IPR12 */ { } }, + { 0xfffd980e, 0, 16, 4, /* IPR13 */ { } }, + { 0xfffd9810, 0, 16, 4, /* IPR14 */ { 0, 0, 0, SCIF0 } }, + { 0xfffd9812, 0, 16, 4, /* IPR15 */ + { SCIF1, MTU2_GROUP1, MTU2_GROUP2, MTU2_GROUP3 } }, + { 0xfffd9814, 0, 16, 4, /* IPR16 */ + { MTU2_TGI3B, MTU2_TGI3C, MTU2_GROUP4, MTU2_GROUP5 } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffd9408, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "mxg", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xff801000, 0x400), + DEFINE_RES_IRQ_NAMED(228, "tgi0a"), + DEFINE_RES_IRQ_NAMED(234, "tgi1a"), + DEFINE_RES_IRQ_NAMED(240, "tgi2a"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xff804000, 0x100), + DEFINE_RES_IRQ(220), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct platform_device *mxg_devices[] __initdata = { + &scif0_device, + &mtu2_device, +}; + +static int __init mxg_devices_setup(void) +{ + return platform_add_devices(mxg_devices, + ARRAY_SIZE(mxg_devices)); +} +arch_initcall(mxg_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *mxg_early_devices[] __initdata = { + &scif0_device, + &mtu2_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(mxg_early_devices, + ARRAY_SIZE(mxg_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7201.c b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c new file mode 100644 index 000000000..28f1bebf3 --- /dev/null +++ 
b/arch/sh/kernel/cpu/sh2a/setup-sh7201.c @@ -0,0 +1,418 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7201 setup + * + * Copyright (C) 2008 Peter Griffin pgriffin@mpc-data.co.uk + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + + ADC_ADI, + + MTU20_ABCD, MTU20_VEF, MTU21_AB, MTU21_VU, MTU22_AB, MTU22_VU, + MTU23_ABCD, MTU24_ABCD, MTU25_UVW, MTU2_TCI3V, MTU2_TCI4V, + + RTC, WDT, + + IIC30, IIC31, IIC32, + + DMAC0_DMINT0, DMAC1_DMINT1, + DMAC2_DMINT2, DMAC3_DMINT3, + + SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, + + DMAC0_DMINTA, DMAC4_DMINT4, DMAC5_DMINT5, DMAC6_DMINT6, + DMAC7_DMINT7, + + RCAN0, RCAN1, + + SSI0_SSII, SSI1_SSII, + + TMR0, TMR1, + + /* interrupt groups */ + PINT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + + INTC_IRQ(ADC_ADI, 92), + + INTC_IRQ(MTU20_ABCD, 108), INTC_IRQ(MTU20_ABCD, 109), + INTC_IRQ(MTU20_ABCD, 110), INTC_IRQ(MTU20_ABCD, 111), + + INTC_IRQ(MTU20_VEF, 112), INTC_IRQ(MTU20_VEF, 113), + INTC_IRQ(MTU20_VEF, 114), + + INTC_IRQ(MTU21_AB, 116), INTC_IRQ(MTU21_AB, 117), + INTC_IRQ(MTU21_VU, 120), INTC_IRQ(MTU21_VU, 121), + + INTC_IRQ(MTU22_AB, 124), INTC_IRQ(MTU22_AB, 125), + INTC_IRQ(MTU22_VU, 128), INTC_IRQ(MTU22_VU, 129), + + INTC_IRQ(MTU23_ABCD, 132), INTC_IRQ(MTU23_ABCD, 133), + INTC_IRQ(MTU23_ABCD, 134), INTC_IRQ(MTU23_ABCD, 135), + + INTC_IRQ(MTU2_TCI3V, 136), + + INTC_IRQ(MTU24_ABCD, 140), INTC_IRQ(MTU24_ABCD, 141), + INTC_IRQ(MTU24_ABCD, 142), INTC_IRQ(MTU24_ABCD, 143), + + INTC_IRQ(MTU2_TCI4V, 144), + + INTC_IRQ(MTU25_UVW, 148), INTC_IRQ(MTU25_UVW, 149), + INTC_IRQ(MTU25_UVW, 150), + + INTC_IRQ(RTC, 152), INTC_IRQ(RTC, 153), + INTC_IRQ(RTC, 154), + + INTC_IRQ(WDT, 156), + + INTC_IRQ(IIC30, 157), INTC_IRQ(IIC30, 158), + INTC_IRQ(IIC30, 159), INTC_IRQ(IIC30, 160), + INTC_IRQ(IIC30, 161), + + INTC_IRQ(IIC31, 164), INTC_IRQ(IIC31, 165), + INTC_IRQ(IIC31, 166), INTC_IRQ(IIC31, 167), + INTC_IRQ(IIC31, 168), + + INTC_IRQ(IIC32, 170), INTC_IRQ(IIC32, 171), + INTC_IRQ(IIC32, 172), INTC_IRQ(IIC32, 173), + INTC_IRQ(IIC32, 174), + + INTC_IRQ(DMAC0_DMINT0, 176), INTC_IRQ(DMAC1_DMINT1, 177), + INTC_IRQ(DMAC2_DMINT2, 178), INTC_IRQ(DMAC3_DMINT3, 179), + + INTC_IRQ(SCIF0, 180), INTC_IRQ(SCIF0, 181), + INTC_IRQ(SCIF0, 182), INTC_IRQ(SCIF0, 183), + INTC_IRQ(SCIF1, 184), INTC_IRQ(SCIF1, 185), + INTC_IRQ(SCIF1, 186), INTC_IRQ(SCIF1, 187), + INTC_IRQ(SCIF2, 188), INTC_IRQ(SCIF2, 189), + INTC_IRQ(SCIF2, 190), INTC_IRQ(SCIF2, 191), + INTC_IRQ(SCIF3, 192), INTC_IRQ(SCIF3, 193), + INTC_IRQ(SCIF3, 194), INTC_IRQ(SCIF3, 195), + INTC_IRQ(SCIF4, 196), INTC_IRQ(SCIF4, 197), + INTC_IRQ(SCIF4, 198), INTC_IRQ(SCIF4, 199), + INTC_IRQ(SCIF5, 200), INTC_IRQ(SCIF5, 201), + INTC_IRQ(SCIF5, 202), INTC_IRQ(SCIF5, 203), + INTC_IRQ(SCIF6, 204), INTC_IRQ(SCIF6, 205), + INTC_IRQ(SCIF6, 206), INTC_IRQ(SCIF6, 207), + INTC_IRQ(SCIF7, 208), INTC_IRQ(SCIF7, 209), + INTC_IRQ(SCIF7, 210), 
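Note the pattern in the SCIF entries above: each channel owns four consecutive vectors (SCIF0 = 180-183 through SCIF7 = 208-211), all folded onto a single SCIFn source, so a whole channel shares one 4-bit priority field in IPR11-IPR13 and is requested by the platform devices further down through a single DEFINE_RES_IRQ() on the first vector of its block.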
INTC_IRQ(SCIF7, 211), + + INTC_IRQ(DMAC0_DMINTA, 212), INTC_IRQ(DMAC4_DMINT4, 216), + INTC_IRQ(DMAC5_DMINT5, 217), INTC_IRQ(DMAC6_DMINT6, 218), + INTC_IRQ(DMAC7_DMINT7, 219), + + INTC_IRQ(RCAN0, 228), INTC_IRQ(RCAN0, 229), + INTC_IRQ(RCAN0, 230), + INTC_IRQ(RCAN0, 231), INTC_IRQ(RCAN0, 232), + + INTC_IRQ(RCAN1, 234), INTC_IRQ(RCAN1, 235), + INTC_IRQ(RCAN1, 236), + INTC_IRQ(RCAN1, 237), INTC_IRQ(RCAN1, 238), + + INTC_IRQ(SSI0_SSII, 244), INTC_IRQ(SSI1_SSII, 245), + + INTC_IRQ(TMR0, 246), INTC_IRQ(TMR0, 247), + INTC_IRQ(TMR0, 248), + + INTC_IRQ(TMR1, 252), INTC_IRQ(TMR1, 253), + INTC_IRQ(TMR1, 254), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe9418, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe941a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe9420, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI, 0 } }, + { 0xfffe9800, 0, 16, 4, /* IPR06 */ { 0, MTU20_ABCD, MTU20_VEF, MTU21_AB } }, + { 0xfffe9802, 0, 16, 4, /* IPR07 */ { MTU21_VU, MTU22_AB, MTU22_VU, MTU23_ABCD } }, + { 0xfffe9804, 0, 16, 4, /* IPR08 */ { MTU2_TCI3V, MTU24_ABCD, MTU2_TCI4V, MTU25_UVW } }, + + { 0xfffe9806, 0, 16, 4, /* IPR09 */ { RTC, WDT, IIC30, 0 } }, + { 0xfffe9808, 0, 16, 4, /* IPR10 */ { IIC31, IIC32, DMAC0_DMINT0, DMAC1_DMINT1 } }, + { 0xfffe980a, 0, 16, 4, /* IPR11 */ { DMAC2_DMINT2, DMAC3_DMINT3, SCIF0, SCIF1 } }, + { 0xfffe980c, 0, 16, 4, /* IPR12 */ { SCIF2, SCIF3, SCIF4, SCIF5 } }, + { 0xfffe980e, 0, 16, 4, /* IPR13 */ { SCIF6, SCIF7, DMAC0_DMINTA, DMAC4_DMINT4 } }, + { 0xfffe9810, 0, 16, 4, /* IPR14 */ { DMAC5_DMINT5, DMAC6_DMINT6, DMAC7_DMINT7, 0 } }, + { 0xfffe9812, 0, 16, 4, /* IPR15 */ { 0, RCAN0, RCAN1, 0 } }, + { 0xfffe9814, 0, 16, 4, /* IPR16 */ { SSI0_SSII, SSI1_SSII, TMR0, TMR1 } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe9408, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7201", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfffe8000, 0x100), + DEFINE_RES_IRQ(180), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfffe8800, 0x100), + DEFINE_RES_IRQ(184), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfffe9000, 0x100), + DEFINE_RES_IRQ(188), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data 
= { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfffe9800, 0x100), + DEFINE_RES_IRQ(192), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xfffea000, 0x100), + DEFINE_RES_IRQ(196), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xfffea800, 0x100), + DEFINE_RES_IRQ(200), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct plat_sci_port scif6_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif6_resources[] = { + DEFINE_RES_MEM(0xfffeb000, 0x100), + DEFINE_RES_IRQ(204), +}; + +static struct platform_device scif6_device = { + .name = "sh-sci", + .id = 6, + .resource = scif6_resources, + .num_resources = ARRAY_SIZE(scif6_resources), + .dev = { + .platform_data = &scif6_platform_data, + }, +}; + +static struct plat_sci_port scif7_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif7_resources[] = { + DEFINE_RES_MEM(0xfffeb800, 0x100), + DEFINE_RES_IRQ(208), +}; + +static struct platform_device scif7_device = { + .name = "sh-sci", + .id = 7, + .resource = scif7_resources, + .num_resources = ARRAY_SIZE(scif7_resources), + .dev = { + .platform_data = &scif7_platform_data, + }, +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffff0800, + .end = 0xffff2000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = 152, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xfffe4000, 0x400), + DEFINE_RES_IRQ_NAMED(108, "tgi0a"), + DEFINE_RES_IRQ_NAMED(116, "tgi1a"), + DEFINE_RES_IRQ_NAMED(124, "tgi1b"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct platform_device *sh7201_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &rtc_device, + &mtu2_device, +}; + +static int __init sh7201_devices_setup(void) +{ + return platform_add_devices(sh7201_devices, + ARRAY_SIZE(sh7201_devices)); +} +arch_initcall(sh7201_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7201_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + 
&scif5_device, + &scif6_device, + &scif7_device, + &mtu2_device, +}; + +#define STBCR3 0xfffe0408 + +void __init plat_early_device_setup(void) +{ + /* enable MTU2 clock */ + __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3); + + sh_early_platform_add_devices(sh7201_early_devices, + ARRAY_SIZE(sh7201_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7203.c b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c new file mode 100644 index 000000000..4839f3aae --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7203.c @@ -0,0 +1,355 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7203 and SH7263 Setup + * + * Copyright (C) 2007 - 2009 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, + USB, LCDC, CMT0, CMT1, BSC, WDT, + + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU4_ABCD, MTU2_TCI3V, MTU2_TCI4V, + + ADC_ADI, + + IIC30, IIC31, IIC32, IIC33, + SCIF0, SCIF1, SCIF2, SCIF3, + + SSU0, SSU1, + + SSI0_SSII, SSI1_SSII, SSI2_SSII, SSI3_SSII, + + /* ROM-DEC, SDHI, SRC, and IEB are SH7263 specific */ + ROMDEC, FLCTL, SDHI, RTC, RCAN0, RCAN1, + SRC, IEBI, + + /* interrupt groups */ + PINT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), + INTC_IRQ(USB, 140), INTC_IRQ(LCDC, 141), + INTC_IRQ(CMT0, 142), INTC_IRQ(CMT1, 143), + INTC_IRQ(BSC, 144), INTC_IRQ(WDT, 145), + INTC_IRQ(MTU0_ABCD, 146), INTC_IRQ(MTU0_ABCD, 147), + INTC_IRQ(MTU0_ABCD, 148), INTC_IRQ(MTU0_ABCD, 149), + INTC_IRQ(MTU0_VEF, 150), + INTC_IRQ(MTU0_VEF, 151), INTC_IRQ(MTU0_VEF, 152), + INTC_IRQ(MTU1_AB, 153), INTC_IRQ(MTU1_AB, 154), + INTC_IRQ(MTU1_VU, 155), INTC_IRQ(MTU1_VU, 156), + INTC_IRQ(MTU2_AB, 157), INTC_IRQ(MTU2_AB, 158), + INTC_IRQ(MTU2_VU, 159), INTC_IRQ(MTU2_VU, 160), + INTC_IRQ(MTU3_ABCD, 161), INTC_IRQ(MTU3_ABCD, 162), + INTC_IRQ(MTU3_ABCD, 163), INTC_IRQ(MTU3_ABCD, 164), + INTC_IRQ(MTU2_TCI3V, 165), + INTC_IRQ(MTU4_ABCD, 166), INTC_IRQ(MTU4_ABCD, 167), + INTC_IRQ(MTU4_ABCD, 168), INTC_IRQ(MTU4_ABCD, 169), + INTC_IRQ(MTU2_TCI4V, 170), + INTC_IRQ(ADC_ADI, 171), + INTC_IRQ(IIC30, 172), INTC_IRQ(IIC30, 173), + INTC_IRQ(IIC30, 174), INTC_IRQ(IIC30, 175), + INTC_IRQ(IIC30, 176), + INTC_IRQ(IIC31, 177), INTC_IRQ(IIC31, 178), + INTC_IRQ(IIC31, 179), INTC_IRQ(IIC31, 180), + INTC_IRQ(IIC31, 181), + INTC_IRQ(IIC32, 182), INTC_IRQ(IIC32, 183), + INTC_IRQ(IIC32, 184), INTC_IRQ(IIC32, 185), + INTC_IRQ(IIC32, 186), + INTC_IRQ(IIC33, 187), INTC_IRQ(IIC33, 188), + INTC_IRQ(IIC33, 189), INTC_IRQ(IIC33, 190), + INTC_IRQ(IIC33, 
191), + INTC_IRQ(SCIF0, 192), INTC_IRQ(SCIF0, 193), + INTC_IRQ(SCIF0, 194), INTC_IRQ(SCIF0, 195), + INTC_IRQ(SCIF1, 196), INTC_IRQ(SCIF1, 197), + INTC_IRQ(SCIF1, 198), INTC_IRQ(SCIF1, 199), + INTC_IRQ(SCIF2, 200), INTC_IRQ(SCIF2, 201), + INTC_IRQ(SCIF2, 202), INTC_IRQ(SCIF2, 203), + INTC_IRQ(SCIF3, 204), INTC_IRQ(SCIF3, 205), + INTC_IRQ(SCIF3, 206), INTC_IRQ(SCIF3, 207), + INTC_IRQ(SSU0, 208), INTC_IRQ(SSU0, 209), + INTC_IRQ(SSU0, 210), + INTC_IRQ(SSU1, 211), INTC_IRQ(SSU1, 212), + INTC_IRQ(SSU1, 213), + INTC_IRQ(SSI0_SSII, 214), INTC_IRQ(SSI1_SSII, 215), + INTC_IRQ(SSI2_SSII, 216), INTC_IRQ(SSI3_SSII, 217), + INTC_IRQ(FLCTL, 224), INTC_IRQ(FLCTL, 225), + INTC_IRQ(FLCTL, 226), INTC_IRQ(FLCTL, 227), + INTC_IRQ(RTC, 231), INTC_IRQ(RTC, 232), + INTC_IRQ(RTC, 233), + INTC_IRQ(RCAN0, 234), INTC_IRQ(RCAN0, 235), + INTC_IRQ(RCAN0, 236), INTC_IRQ(RCAN0, 237), + INTC_IRQ(RCAN0, 238), + INTC_IRQ(RCAN1, 239), INTC_IRQ(RCAN1, 240), + INTC_IRQ(RCAN1, 241), INTC_IRQ(RCAN1, 242), + INTC_IRQ(RCAN1, 243), + + /* SH7263-specific trash */ +#ifdef CONFIG_CPU_SUBTYPE_SH7263 + INTC_IRQ(ROMDEC, 218), INTC_IRQ(ROMDEC, 219), + INTC_IRQ(ROMDEC, 220), INTC_IRQ(ROMDEC, 221), + INTC_IRQ(ROMDEC, 222), INTC_IRQ(ROMDEC, 223), + + INTC_IRQ(SDHI, 228), INTC_IRQ(SDHI, 229), + INTC_IRQ(SDHI, 230), + + INTC_IRQ(SRC, 244), INTC_IRQ(SRC, 245), + INTC_IRQ(SRC, 246), + + INTC_IRQ(IEBI, 247), +#endif +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, + { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, + { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, + { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { USB, LCDC, CMT0, CMT1 } }, + { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } }, + { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU1_AB, MTU1_VU, MTU2_AB, + MTU2_VU } }, + { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU3_ABCD, MTU2_TCI3V, MTU4_ABCD, + MTU2_TCI4V } }, + { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { ADC_ADI, IIC30, IIC31, IIC32 } }, + { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { IIC33, SCIF0, SCIF1, SCIF2 } }, + { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF3, SSU0, SSU1, SSI0_SSII } }, +#ifdef CONFIG_CPU_SUBTYPE_SH7203 + { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII, + SSI3_SSII, 0 } }, + { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, 0, RTC, RCAN0 } }, + { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, 0, 0, 0 } }, +#else + { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSI1_SSII, SSI2_SSII, + SSI3_SSII, ROMDEC } }, + { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { FLCTL, SDHI, RTC, RCAN0 } }, + { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { RCAN1, SRC, IEBI, 0 } }, +#endif +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe0808, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7203", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfffe8000, 0x100), + DEFINE_RES_IRQ(192), +}; + +static struct platform_device scif0_device = { + 
.name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfffe8800, 0x100), + DEFINE_RES_IRQ(196), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfffe9000, 0x100), + DEFINE_RES_IRQ(200), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfffe9800, 0x100), + DEFINE_RES_IRQ(204), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 3, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0xfffec000, 0x10), + DEFINE_RES_IRQ(142), + DEFINE_RES_IRQ(143), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-16", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xfffe4000, 0x400), + DEFINE_RES_IRQ_NAMED(146, "tgi0a"), + DEFINE_RES_IRQ_NAMED(153, "tgi1a"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffff2000, + .end = 0xffff2000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = 231, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct platform_device *sh7203_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &mtu2_device, + &rtc_device, +}; + +static int __init sh7203_devices_setup(void) +{ + return platform_add_devices(sh7203_devices, + ARRAY_SIZE(sh7203_devices)); +} +arch_initcall(sh7203_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7203_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &mtu2_device, +}; + +#define STBCR3 0xfffe0408 +#define STBCR4 0xfffe040c + +void __init plat_early_device_setup(void) +{ + /* enable CMT clock */ + __raw_writeb(__raw_readb(STBCR4) & ~0x04, 
STBCR4); + + /* enable MTU2 clock */ + __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3); + + sh_early_platform_add_devices(sh7203_early_devices, + ARRAY_SIZE(sh7203_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7206.c b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c new file mode 100644 index 000000000..68add5af4 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7206.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7206 Setup + * + * Copyright (C) 2006 Yoshinori Sato + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + ADC_ADI0, ADC_ADI1, + + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, + + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU4_ABCD, MTU5, POE2_12, MTU3S_ABCD, MTU4S_ABCD, MTU5S, + IIC3, + + CMT0, CMT1, BSC, WDT, + + MTU2_TCI3V, MTU2_TCI4V, MTU2S_TCI3V, MTU2S_TCI4V, + + POE2_OEI3, + + SCIF0, SCIF1, SCIF2, SCIF3, + + /* interrupt groups */ + PINT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + INTC_IRQ(ADC_ADI0, 92), INTC_IRQ(ADC_ADI1, 96), + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), + INTC_IRQ(CMT0, 140), INTC_IRQ(CMT1, 144), + INTC_IRQ(BSC, 148), INTC_IRQ(WDT, 152), + INTC_IRQ(MTU0_ABCD, 156), INTC_IRQ(MTU0_ABCD, 157), + INTC_IRQ(MTU0_ABCD, 158), INTC_IRQ(MTU0_ABCD, 159), + INTC_IRQ(MTU0_VEF, 160), INTC_IRQ(MTU0_VEF, 161), + INTC_IRQ(MTU0_VEF, 162), + INTC_IRQ(MTU1_AB, 164), INTC_IRQ(MTU1_AB, 165), + INTC_IRQ(MTU1_VU, 168), INTC_IRQ(MTU1_VU, 169), + INTC_IRQ(MTU2_AB, 172), INTC_IRQ(MTU2_AB, 173), + INTC_IRQ(MTU2_VU, 176), INTC_IRQ(MTU2_VU, 177), + INTC_IRQ(MTU3_ABCD, 180), INTC_IRQ(MTU3_ABCD, 181), + INTC_IRQ(MTU3_ABCD, 182), INTC_IRQ(MTU3_ABCD, 183), + INTC_IRQ(MTU2_TCI3V, 184), + INTC_IRQ(MTU4_ABCD, 188), INTC_IRQ(MTU4_ABCD, 189), + INTC_IRQ(MTU4_ABCD, 190), INTC_IRQ(MTU4_ABCD, 191), + INTC_IRQ(MTU2_TCI4V, 192), + INTC_IRQ(MTU5, 196), INTC_IRQ(MTU5, 197), + INTC_IRQ(MTU5, 198), + INTC_IRQ(POE2_12, 200), INTC_IRQ(POE2_12, 201), + INTC_IRQ(MTU3S_ABCD, 204), INTC_IRQ(MTU3S_ABCD, 205), + INTC_IRQ(MTU3S_ABCD, 206), INTC_IRQ(MTU3S_ABCD, 207), + INTC_IRQ(MTU2S_TCI3V, 208), + INTC_IRQ(MTU4S_ABCD, 212), INTC_IRQ(MTU4S_ABCD, 213), + INTC_IRQ(MTU4S_ABCD, 214), INTC_IRQ(MTU4S_ABCD, 215), + INTC_IRQ(MTU2S_TCI4V, 216), + INTC_IRQ(MTU5S, 220), INTC_IRQ(MTU5S, 221), + INTC_IRQ(MTU5S, 222), + INTC_IRQ(POE2_OEI3, 224), + INTC_IRQ(IIC3, 228), INTC_IRQ(IIC3, 229), + INTC_IRQ(IIC3, 230), INTC_IRQ(IIC3, 231), + INTC_IRQ(IIC3, 232), + INTC_IRQ(SCIF0, 240), INTC_IRQ(SCIF0, 241), + INTC_IRQ(SCIF0, 242), INTC_IRQ(SCIF0, 
243), + INTC_IRQ(SCIF1, 244), INTC_IRQ(SCIF1, 245), + INTC_IRQ(SCIF1, 246), INTC_IRQ(SCIF1, 247), + INTC_IRQ(SCIF2, 248), INTC_IRQ(SCIF2, 249), + INTC_IRQ(SCIF2, 250), INTC_IRQ(SCIF2, 251), + INTC_IRQ(SCIF3, 252), INTC_IRQ(SCIF3, 253), + INTC_IRQ(SCIF3, 254), INTC_IRQ(SCIF3, 255), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, ADC_ADI0, ADC_ADI1 } }, + { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, + { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, + { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { CMT0, CMT1, BSC, WDT } }, + { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { MTU0_ABCD, MTU0_VEF, + MTU1_AB, MTU1_VU } }, + { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU2_TCI3V } }, + { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { MTU4_ABCD, MTU2_TCI4V, + MTU5, POE2_12 } }, + { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU3S_ABCD, MTU2S_TCI3V, + MTU4S_ABCD, MTU2S_TCI4V } }, + { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU5S, POE2_OEI3, IIC3, 0 } }, + { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { SCIF0, SCIF1, SCIF2, SCIF3 } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe0808, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7206", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfffe8000, 0x100), + DEFINE_RES_IRQ(240), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfffe8800, 0x100), + DEFINE_RES_IRQ(244), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfffe9000, 0x100), + DEFINE_RES_IRQ(248), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfffe9800, 0x100), + DEFINE_RES_IRQ(252), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 3, +}; + +static struct resource 
cmt_resources[] = { + DEFINE_RES_MEM(0xfffec000, 0x10), + DEFINE_RES_IRQ(140), + DEFINE_RES_IRQ(144), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-16", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xfffe4000, 0x400), + DEFINE_RES_IRQ_NAMED(156, "tgi0a"), + DEFINE_RES_IRQ_NAMED(164, "tgi1a"), + DEFINE_RES_IRQ_NAMED(180, "tgi2a"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2s", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct platform_device *sh7206_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &mtu2_device, +}; + +static int __init sh7206_devices_setup(void) +{ + return platform_add_devices(sh7206_devices, + ARRAY_SIZE(sh7206_devices)); +} +arch_initcall(sh7206_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7206_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &mtu2_device, +}; + +#define STBCR3 0xfffe0408 +#define STBCR4 0xfffe040c + +void __init plat_early_device_setup(void) +{ + /* enable CMT clock */ + __raw_writeb(__raw_readb(STBCR4) & ~0x04, STBCR4); + + /* enable MTU2 clock */ + __raw_writeb(__raw_readb(STBCR3) & ~0x20, STBCR3); + + sh_early_platform_add_devices(sh7206_early_devices, + ARRAY_SIZE(sh7206_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7264.c b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c new file mode 100644 index 000000000..8a1cb613d --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7264.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7264 Setup + * + * Copyright (C) 2012 Renesas Electronics Europe Ltd + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/usb/r8a66597.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, + DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15, + USB, VDC3, CMT0, CMT1, BSC, WDT, + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V, + PWMT1, PWMT2, ADC_ADI, + SSIF0, SSII1, SSII2, SSII3, + RSPDIF, + IIC30, IIC31, IIC32, IIC33, + SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, + SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, + SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, + SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, + SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI, + SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI, + SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI, + SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI, + SIO_FIFO, RSPIC0, RSPIC1, + RCAN0, RCAN1, IEBC, CD_ROMD, + NFMC, SDHI, RTC, + SRCC0, SRCC1, DCOMU, OFFI, IFEI, + + /* interrupt groups */ + PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + + INTC_IRQ(PINT0, 
80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), + INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141), + INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145), + INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149), + INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153), + INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157), + INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161), + INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165), + INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169), + + INTC_IRQ(USB, 170), + INTC_IRQ(VDC3, 171), INTC_IRQ(VDC3, 172), + INTC_IRQ(VDC3, 173), INTC_IRQ(VDC3, 174), + INTC_IRQ(CMT0, 175), INTC_IRQ(CMT1, 176), + INTC_IRQ(BSC, 177), INTC_IRQ(WDT, 178), + + INTC_IRQ(MTU0_ABCD, 179), INTC_IRQ(MTU0_ABCD, 180), + INTC_IRQ(MTU0_ABCD, 181), INTC_IRQ(MTU0_ABCD, 182), + INTC_IRQ(MTU0_VEF, 183), + INTC_IRQ(MTU0_VEF, 184), INTC_IRQ(MTU0_VEF, 185), + INTC_IRQ(MTU1_AB, 186), INTC_IRQ(MTU1_AB, 187), + INTC_IRQ(MTU1_VU, 188), INTC_IRQ(MTU1_VU, 189), + INTC_IRQ(MTU2_AB, 190), INTC_IRQ(MTU2_AB, 191), + INTC_IRQ(MTU2_VU, 192), INTC_IRQ(MTU2_VU, 193), + INTC_IRQ(MTU3_ABCD, 194), INTC_IRQ(MTU3_ABCD, 195), + INTC_IRQ(MTU3_ABCD, 196), INTC_IRQ(MTU3_ABCD, 197), + INTC_IRQ(MTU3_TCI3V, 198), + INTC_IRQ(MTU4_ABCD, 199), INTC_IRQ(MTU4_ABCD, 200), + INTC_IRQ(MTU4_ABCD, 201), INTC_IRQ(MTU4_ABCD, 202), + INTC_IRQ(MTU4_TCI4V, 203), + + INTC_IRQ(PWMT1, 204), INTC_IRQ(PWMT2, 205), + + INTC_IRQ(ADC_ADI, 206), + + INTC_IRQ(SSIF0, 207), INTC_IRQ(SSIF0, 208), + INTC_IRQ(SSIF0, 209), + INTC_IRQ(SSII1, 210), INTC_IRQ(SSII1, 211), + INTC_IRQ(SSII2, 212), INTC_IRQ(SSII2, 213), + INTC_IRQ(SSII3, 214), INTC_IRQ(SSII3, 215), + + INTC_IRQ(RSPDIF, 216), + + INTC_IRQ(IIC30, 217), INTC_IRQ(IIC30, 218), + INTC_IRQ(IIC30, 219), INTC_IRQ(IIC30, 220), + INTC_IRQ(IIC30, 221), + INTC_IRQ(IIC31, 222), INTC_IRQ(IIC31, 223), + INTC_IRQ(IIC31, 224), INTC_IRQ(IIC31, 225), + INTC_IRQ(IIC31, 226), + INTC_IRQ(IIC32, 227), INTC_IRQ(IIC32, 228), + INTC_IRQ(IIC32, 229), INTC_IRQ(IIC32, 230), + INTC_IRQ(IIC32, 231), + + INTC_IRQ(SCIF0_BRI, 232), INTC_IRQ(SCIF0_ERI, 233), + INTC_IRQ(SCIF0_RXI, 234), INTC_IRQ(SCIF0_TXI, 235), + INTC_IRQ(SCIF1_BRI, 236), INTC_IRQ(SCIF1_ERI, 237), + INTC_IRQ(SCIF1_RXI, 238), INTC_IRQ(SCIF1_TXI, 239), + INTC_IRQ(SCIF2_BRI, 240), INTC_IRQ(SCIF2_ERI, 241), + INTC_IRQ(SCIF2_RXI, 242), INTC_IRQ(SCIF2_TXI, 243), + INTC_IRQ(SCIF3_BRI, 244), INTC_IRQ(SCIF3_ERI, 245), + INTC_IRQ(SCIF3_RXI, 246), INTC_IRQ(SCIF3_TXI, 247), + INTC_IRQ(SCIF4_BRI, 248), INTC_IRQ(SCIF4_ERI, 249), + INTC_IRQ(SCIF4_RXI, 250), INTC_IRQ(SCIF4_TXI, 251), + INTC_IRQ(SCIF5_BRI, 252), INTC_IRQ(SCIF5_ERI, 253), + INTC_IRQ(SCIF5_RXI, 254), INTC_IRQ(SCIF5_TXI, 255), + INTC_IRQ(SCIF6_BRI, 256), INTC_IRQ(SCIF6_ERI, 257), + INTC_IRQ(SCIF6_RXI, 258), INTC_IRQ(SCIF6_TXI, 259), + INTC_IRQ(SCIF7_BRI, 260), INTC_IRQ(SCIF7_ERI, 261), + INTC_IRQ(SCIF7_RXI, 262), INTC_IRQ(SCIF7_TXI, 263), + + INTC_IRQ(SIO_FIFO, 264), + + INTC_IRQ(RSPIC0, 265), INTC_IRQ(RSPIC0, 266), + INTC_IRQ(RSPIC0, 267), + INTC_IRQ(RSPIC1, 268), INTC_IRQ(RSPIC1, 269), + INTC_IRQ(RSPIC1, 270), + + INTC_IRQ(RCAN0, 271), INTC_IRQ(RCAN0, 272), + INTC_IRQ(RCAN0, 273), 
INTC_IRQ(RCAN0, 274), + INTC_IRQ(RCAN0, 275), + INTC_IRQ(RCAN1, 276), INTC_IRQ(RCAN1, 277), + INTC_IRQ(RCAN1, 278), INTC_IRQ(RCAN1, 279), + INTC_IRQ(RCAN1, 280), + + INTC_IRQ(IEBC, 281), + + INTC_IRQ(CD_ROMD, 282), INTC_IRQ(CD_ROMD, 283), + INTC_IRQ(CD_ROMD, 284), INTC_IRQ(CD_ROMD, 285), + INTC_IRQ(CD_ROMD, 286), INTC_IRQ(CD_ROMD, 287), + + INTC_IRQ(NFMC, 288), INTC_IRQ(NFMC, 289), + INTC_IRQ(NFMC, 290), INTC_IRQ(NFMC, 291), + + INTC_IRQ(SDHI, 292), INTC_IRQ(SDHI, 293), + INTC_IRQ(SDHI, 294), + + INTC_IRQ(RTC, 296), INTC_IRQ(RTC, 297), + INTC_IRQ(RTC, 298), + + INTC_IRQ(SRCC0, 299), INTC_IRQ(SRCC0, 300), + INTC_IRQ(SRCC0, 301), INTC_IRQ(SRCC0, 302), + INTC_IRQ(SRCC0, 303), + INTC_IRQ(SRCC1, 304), INTC_IRQ(SRCC1, 305), + INTC_IRQ(SRCC1, 306), INTC_IRQ(SRCC1, 307), + INTC_IRQ(SRCC1, 308), + + INTC_IRQ(DCOMU, 310), INTC_IRQ(DCOMU, 311), + INTC_IRQ(DCOMU, 312), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), + INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), + INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), + INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), + INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), + INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI), + INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI), + INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI), + INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, + { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, + { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, + { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9, + DMAC10, DMAC11 } }, + { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13, + DMAC14, DMAC15 } }, + { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC3, CMT0, CMT1 } }, + { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } }, + { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU1_AB, MTU1_VU, + MTU2_AB, MTU2_VU } }, + { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU3_ABCD, MTU3_TCI3V, + MTU4_ABCD, MTU4_TCI4V } }, + { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { PWMT1, PWMT2, ADC_ADI, 0 } }, + { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSIF0, SSII1, SSII2, SSII3 } }, + { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { RSPDIF, IIC30, IIC31, IIC32 } }, + { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { SCIF0, SCIF1, SCIF2, SCIF3 } }, + { 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SCIF4, SCIF5, SCIF6, SCIF7 } }, + { 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { SIO_FIFO, 0, RSPIC0, RSPIC1, } }, + { 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { RCAN0, RCAN1, IEBC, CD_ROMD } }, + { 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { NFMC, SDHI, RTC, 0 } }, + { 0xfffe0c20, 0, 16, 4, /* IPR22 */ { SRCC0, SRCC1, 0, DCOMU } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe0808, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7264", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + 
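+	/*
+	 * The IRQ resources below are listed as ERI, RXI, TXI, BRI (vectors
+	 * 233, 234, 235, 232 from the INTC vector table above), presumably
+	 * the order the sh-sci driver expects; the other SCIF channels on
+	 * this part follow the same pattern.
+	 */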
DEFINE_RES_MEM(0xfffe8000, 0x100), + DEFINE_RES_IRQ(233), + DEFINE_RES_IRQ(234), + DEFINE_RES_IRQ(235), + DEFINE_RES_IRQ(232), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfffe8800, 0x100), + DEFINE_RES_IRQ(237), + DEFINE_RES_IRQ(238), + DEFINE_RES_IRQ(239), + DEFINE_RES_IRQ(236), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfffe9000, 0x100), + DEFINE_RES_IRQ(241), + DEFINE_RES_IRQ(242), + DEFINE_RES_IRQ(243), + DEFINE_RES_IRQ(240), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfffe9800, 0x100), + DEFINE_RES_IRQ(245), + DEFINE_RES_IRQ(246), + DEFINE_RES_IRQ(247), + DEFINE_RES_IRQ(244), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xfffea000, 0x100), + DEFINE_RES_IRQ(249), + DEFINE_RES_IRQ(250), + DEFINE_RES_IRQ(251), + DEFINE_RES_IRQ(248), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xfffea800, 0x100), + DEFINE_RES_IRQ(253), + DEFINE_RES_IRQ(254), + DEFINE_RES_IRQ(255), + DEFINE_RES_IRQ(252), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct plat_sci_port scif6_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif6_resources[] = { + DEFINE_RES_MEM(0xfffeb000, 0x100), + DEFINE_RES_IRQ(257), + DEFINE_RES_IRQ(258), + DEFINE_RES_IRQ(259), + DEFINE_RES_IRQ(256), +}; + +static struct platform_device scif6_device = { + .name = "sh-sci", + .id = 6, + .resource = scif6_resources, + 
.num_resources = ARRAY_SIZE(scif6_resources), + .dev = { + .platform_data = &scif6_platform_data, + }, +}; + +static struct plat_sci_port scif7_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif7_resources[] = { + DEFINE_RES_MEM(0xfffeb800, 0x100), + DEFINE_RES_IRQ(261), + DEFINE_RES_IRQ(262), + DEFINE_RES_IRQ(263), + DEFINE_RES_IRQ(260), +}; + +static struct platform_device scif7_device = { + .name = "sh-sci", + .id = 7, + .resource = scif7_resources, + .num_resources = ARRAY_SIZE(scif7_resources), + .dev = { + .platform_data = &scif7_platform_data, + }, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 3, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0xfffec000, 0x10), + DEFINE_RES_IRQ(175), + DEFINE_RES_IRQ(176), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-16", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xfffe4000, 0x400), + DEFINE_RES_IRQ_NAMED(179, "tgi0a"), + DEFINE_RES_IRQ_NAMED(186, "tgi1a"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xfffe6000, + .end = 0xfffe6000 + 0x30 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = 296, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +/* USB Host */ +static void usb_port_power(int port, int power) +{ + __raw_writew(0x200 , 0xffffc0c2) ; /* Initialise UACS25 */ +} + +static struct r8a66597_platdata r8a66597_data = { + .on_chip = 1, + .endian = 1, + .port_power = usb_port_power, +}; + +static struct resource r8a66597_usb_host_resources[] = { + [0] = { + .start = 0xffffc000, + .end = 0xffffc0e4, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 170, + .end = 170, + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, + }, +}; + +static struct platform_device r8a66597_usb_host_device = { + .name = "r8a66597_hcd", + .id = 0, + .dev = { + .dma_mask = NULL, /* not use dma */ + .coherent_dma_mask = 0xffffffff, + .platform_data = &r8a66597_data, + }, + .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources), + .resource = r8a66597_usb_host_resources, +}; + +static struct platform_device *sh7264_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &cmt_device, + &mtu2_device, + &rtc_device, + &r8a66597_usb_host_device, +}; + +static int __init sh7264_devices_setup(void) +{ + return platform_add_devices(sh7264_devices, + ARRAY_SIZE(sh7264_devices)); +} +arch_initcall(sh7264_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7264_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &cmt_device, + &mtu2_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7264_early_devices, + ARRAY_SIZE(sh7264_early_devices)); 
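+	/*
+	 * Unlike the SH7203/SH7206 setups earlier in this series, no STBCR
+	 * module-stop bits are cleared here before the early devices are
+	 * registered; the CMT/MTU2 clocks are presumably already running on
+	 * this part.
+	 */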
+} diff --git a/arch/sh/kernel/cpu/sh2a/setup-sh7269.c b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c new file mode 100644 index 000000000..8b1ef3028 --- /dev/null +++ b/arch/sh/kernel/cpu/sh2a/setup-sh7269.c @@ -0,0 +1,568 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7269 Setup + * + * Copyright (C) 2012 Renesas Electronics Europe Ltd + * Copyright (C) 2012 Phil Edworthy + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/usb/r8a66597.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, + + DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, + DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15, + USB, VDC4, CMT0, CMT1, BSC, WDT, + MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V, + PWMT1, PWMT2, ADC_ADI, + SSIF0, SSII1, SSII2, SSII3, SSII4, SSII5, + RSPDIF, + IIC30, IIC31, IIC32, IIC33, + SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, + SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, + SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, + SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, + SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI, + SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI, + SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI, + SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI, + RCAN0, RCAN1, RCAN2, + RSPIC0, RSPIC1, + IEBC, CD_ROMD, + NFMC, + SDHI0, SDHI1, + RTC, + SRCC0, SRCC1, SRCC2, + + /* interrupt groups */ + PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), + INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), + INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), + INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), + + INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), + INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), + INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), + INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), + + INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), + INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), + INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), + INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), + INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), + INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), + INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), + INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), + INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141), + INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145), + INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149), + INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153), + INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157), + INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161), + INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165), + INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169), + + INTC_IRQ(USB, 170), + + INTC_IRQ(VDC4, 171), INTC_IRQ(VDC4, 172), + INTC_IRQ(VDC4, 173), INTC_IRQ(VDC4, 174), + INTC_IRQ(VDC4, 175), INTC_IRQ(VDC4, 176), + INTC_IRQ(VDC4, 177), INTC_IRQ(VDC4, 177), + + INTC_IRQ(CMT0, 188), INTC_IRQ(CMT1, 189), + + INTC_IRQ(BSC, 190), INTC_IRQ(WDT, 191), + + INTC_IRQ(MTU0_ABCD, 192), INTC_IRQ(MTU0_ABCD, 193), + INTC_IRQ(MTU0_ABCD, 194), INTC_IRQ(MTU0_ABCD, 195), + INTC_IRQ(MTU0_VEF, 196), INTC_IRQ(MTU0_VEF, 197), + INTC_IRQ(MTU0_VEF, 198), + INTC_IRQ(MTU1_AB, 199), INTC_IRQ(MTU1_AB, 200), + INTC_IRQ(MTU1_VU, 201), INTC_IRQ(MTU1_VU, 202), + INTC_IRQ(MTU2_AB, 203), INTC_IRQ(MTU2_AB, 204), + INTC_IRQ(MTU2_VU, 205), INTC_IRQ(MTU2_VU, 206), 
+ INTC_IRQ(MTU3_ABCD, 207), INTC_IRQ(MTU3_ABCD, 208), + INTC_IRQ(MTU3_ABCD, 209), INTC_IRQ(MTU3_ABCD, 210), + INTC_IRQ(MTU3_TCI3V, 211), + INTC_IRQ(MTU4_ABCD, 212), INTC_IRQ(MTU4_ABCD, 213), + INTC_IRQ(MTU4_ABCD, 214), INTC_IRQ(MTU4_ABCD, 215), + INTC_IRQ(MTU4_TCI4V, 216), + + INTC_IRQ(PWMT1, 217), INTC_IRQ(PWMT2, 218), + + INTC_IRQ(ADC_ADI, 223), + + INTC_IRQ(SSIF0, 224), INTC_IRQ(SSIF0, 225), + INTC_IRQ(SSIF0, 226), + INTC_IRQ(SSII1, 227), INTC_IRQ(SSII1, 228), + INTC_IRQ(SSII2, 229), INTC_IRQ(SSII2, 230), + INTC_IRQ(SSII3, 231), INTC_IRQ(SSII3, 232), + INTC_IRQ(SSII4, 233), INTC_IRQ(SSII4, 234), + INTC_IRQ(SSII5, 235), INTC_IRQ(SSII5, 236), + + INTC_IRQ(RSPDIF, 237), + + INTC_IRQ(IIC30, 238), INTC_IRQ(IIC30, 239), + INTC_IRQ(IIC30, 240), INTC_IRQ(IIC30, 241), + INTC_IRQ(IIC30, 242), + INTC_IRQ(IIC31, 243), INTC_IRQ(IIC31, 244), + INTC_IRQ(IIC31, 245), INTC_IRQ(IIC31, 246), + INTC_IRQ(IIC31, 247), + INTC_IRQ(IIC32, 248), INTC_IRQ(IIC32, 249), + INTC_IRQ(IIC32, 250), INTC_IRQ(IIC32, 251), + INTC_IRQ(IIC32, 252), + INTC_IRQ(IIC33, 253), INTC_IRQ(IIC33, 254), + INTC_IRQ(IIC33, 255), INTC_IRQ(IIC33, 256), + INTC_IRQ(IIC33, 257), + + INTC_IRQ(SCIF0_BRI, 258), INTC_IRQ(SCIF0_ERI, 259), + INTC_IRQ(SCIF0_RXI, 260), INTC_IRQ(SCIF0_TXI, 261), + INTC_IRQ(SCIF1_BRI, 262), INTC_IRQ(SCIF1_ERI, 263), + INTC_IRQ(SCIF1_RXI, 264), INTC_IRQ(SCIF1_TXI, 265), + INTC_IRQ(SCIF2_BRI, 266), INTC_IRQ(SCIF2_ERI, 267), + INTC_IRQ(SCIF2_RXI, 268), INTC_IRQ(SCIF2_TXI, 269), + INTC_IRQ(SCIF3_BRI, 270), INTC_IRQ(SCIF3_ERI, 271), + INTC_IRQ(SCIF3_RXI, 272), INTC_IRQ(SCIF3_TXI, 273), + INTC_IRQ(SCIF4_BRI, 274), INTC_IRQ(SCIF4_ERI, 275), + INTC_IRQ(SCIF4_RXI, 276), INTC_IRQ(SCIF4_TXI, 277), + INTC_IRQ(SCIF5_BRI, 278), INTC_IRQ(SCIF5_ERI, 279), + INTC_IRQ(SCIF5_RXI, 280), INTC_IRQ(SCIF5_TXI, 281), + INTC_IRQ(SCIF6_BRI, 282), INTC_IRQ(SCIF6_ERI, 283), + INTC_IRQ(SCIF6_RXI, 284), INTC_IRQ(SCIF6_TXI, 285), + INTC_IRQ(SCIF7_BRI, 286), INTC_IRQ(SCIF7_ERI, 287), + INTC_IRQ(SCIF7_RXI, 288), INTC_IRQ(SCIF7_TXI, 289), + + INTC_IRQ(RCAN0, 291), INTC_IRQ(RCAN0, 292), + INTC_IRQ(RCAN0, 293), INTC_IRQ(RCAN0, 294), + INTC_IRQ(RCAN0, 295), + INTC_IRQ(RCAN1, 296), INTC_IRQ(RCAN1, 297), + INTC_IRQ(RCAN1, 298), INTC_IRQ(RCAN1, 299), + INTC_IRQ(RCAN1, 300), + INTC_IRQ(RCAN2, 301), INTC_IRQ(RCAN2, 302), + INTC_IRQ(RCAN2, 303), INTC_IRQ(RCAN2, 304), + INTC_IRQ(RCAN2, 305), + + INTC_IRQ(RSPIC0, 306), INTC_IRQ(RSPIC0, 307), + INTC_IRQ(RSPIC0, 308), + INTC_IRQ(RSPIC1, 309), INTC_IRQ(RSPIC1, 310), + INTC_IRQ(RSPIC1, 311), + + INTC_IRQ(IEBC, 318), + + INTC_IRQ(CD_ROMD, 319), INTC_IRQ(CD_ROMD, 320), + INTC_IRQ(CD_ROMD, 321), INTC_IRQ(CD_ROMD, 322), + INTC_IRQ(CD_ROMD, 323), INTC_IRQ(CD_ROMD, 324), + + INTC_IRQ(NFMC, 325), INTC_IRQ(NFMC, 326), + INTC_IRQ(NFMC, 327), INTC_IRQ(NFMC, 328), + + INTC_IRQ(SDHI0, 332), INTC_IRQ(SDHI0, 333), + INTC_IRQ(SDHI0, 334), + INTC_IRQ(SDHI1, 335), INTC_IRQ(SDHI1, 336), + INTC_IRQ(SDHI1, 337), + + INTC_IRQ(RTC, 338), INTC_IRQ(RTC, 339), + INTC_IRQ(RTC, 340), + + INTC_IRQ(SRCC0, 341), INTC_IRQ(SRCC0, 342), + INTC_IRQ(SRCC0, 343), INTC_IRQ(SRCC0, 344), + INTC_IRQ(SRCC0, 345), + INTC_IRQ(SRCC1, 346), INTC_IRQ(SRCC1, 347), + INTC_IRQ(SRCC1, 348), INTC_IRQ(SRCC1, 349), + INTC_IRQ(SRCC1, 350), + INTC_IRQ(SRCC2, 351), INTC_IRQ(SRCC2, 352), + INTC_IRQ(SRCC2, 353), INTC_IRQ(SRCC2, 354), + INTC_IRQ(SRCC2, 355), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, + PINT4, PINT5, PINT6, PINT7), + INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), + INTC_GROUP(SCIF1, 
SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), + INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), + INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), + INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI), + INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI), + INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI), + INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, + { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, + { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, + { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9, + DMAC10, DMAC11 } }, + { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13, + DMAC14, DMAC15 } }, + { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC4, VDC4, VDC4 } }, + { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { 0, 0, 0, 0 } }, + { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { CMT0, CMT1, BSC, WDT } }, + { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU0_ABCD, MTU0_VEF, + MTU1_AB, MTU1_VU } }, + { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { MTU2_AB, MTU2_VU, + MTU3_ABCD, MTU3_TCI3V } }, + { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { MTU4_ABCD, MTU4_TCI4V, + PWMT1, PWMT2 } }, + { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { 0, 0, 0, 0 } }, + { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { ADC_ADI, SSIF0, SSII1, SSII2 } }, + { 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SSII3, SSII4, SSII5, RSPDIF} }, + { 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { IIC30, IIC31, IIC32, IIC33 } }, + { 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { SCIF0, SCIF1, SCIF2, SCIF3 } }, + { 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { SCIF4, SCIF5, SCIF6, SCIF7 } }, + { 0xfffe0c20, 0, 16, 4, /* IPR22 */ { 0, RCAN0, RCAN1, RCAN2 } }, + { 0xfffe0c22, 0, 16, 4, /* IPR23 */ { RSPIC0, RSPIC1, 0, 0 } }, + { 0xfffe0c24, 0, 16, 4, /* IPR24 */ { IEBC, CD_ROMD, NFMC, 0 } }, + { 0xfffe0c26, 0, 16, 4, /* IPR25 */ { SDHI0, SDHI1, RTC, 0 } }, + { 0xfffe0c28, 0, 16, 4, /* IPR26 */ { SRCC0, SRCC1, SRCC2, 0 } }, +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfffe0808, 0, 16, /* PINTER */ + { 0, 0, 0, 0, 0, 0, 0, 0, + PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7269", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xe8007000, 0x100), + DEFINE_RES_IRQ(259), + DEFINE_RES_IRQ(260), + DEFINE_RES_IRQ(261), + DEFINE_RES_IRQ(258), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xe8007800, 0x100), + DEFINE_RES_IRQ(263), + DEFINE_RES_IRQ(264), + DEFINE_RES_IRQ(265), + DEFINE_RES_IRQ(262), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + 
.platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xe8008000, 0x100), + DEFINE_RES_IRQ(267), + DEFINE_RES_IRQ(268), + DEFINE_RES_IRQ(269), + DEFINE_RES_IRQ(266), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xe8008800, 0x100), + DEFINE_RES_IRQ(271), + DEFINE_RES_IRQ(272), + DEFINE_RES_IRQ(273), + DEFINE_RES_IRQ(270), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xe8009000, 0x100), + DEFINE_RES_IRQ(275), + DEFINE_RES_IRQ(276), + DEFINE_RES_IRQ(277), + DEFINE_RES_IRQ(274), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xe8009800, 0x100), + DEFINE_RES_IRQ(279), + DEFINE_RES_IRQ(280), + DEFINE_RES_IRQ(281), + DEFINE_RES_IRQ(278), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct plat_sci_port scif6_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif6_resources[] = { + DEFINE_RES_MEM(0xe800a000, 0x100), + DEFINE_RES_IRQ(283), + DEFINE_RES_IRQ(284), + DEFINE_RES_IRQ(285), + DEFINE_RES_IRQ(282), +}; + +static struct platform_device scif6_device = { + .name = "sh-sci", + .id = 6, + .resource = scif6_resources, + .num_resources = ARRAY_SIZE(scif6_resources), + .dev = { + .platform_data = &scif6_platform_data, + }, +}; + +static struct plat_sci_port scif7_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif7_resources[] = { + DEFINE_RES_MEM(0xe800a800, 0x100), + DEFINE_RES_IRQ(287), + DEFINE_RES_IRQ(288), + DEFINE_RES_IRQ(289), + DEFINE_RES_IRQ(286), +}; + +static struct platform_device scif7_device = { + .name = "sh-sci", + .id = 7, + .resource = scif7_resources, + .num_resources = ARRAY_SIZE(scif7_resources), + .dev = { + .platform_data = &scif7_platform_data, + }, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 3, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0xfffec000, 0x10), + DEFINE_RES_IRQ(188), + 
DEFINE_RES_IRQ(189), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-16", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct resource mtu2_resources[] = { + DEFINE_RES_MEM(0xfffe4000, 0x400), + DEFINE_RES_IRQ_NAMED(192, "tgi0a"), + DEFINE_RES_IRQ_NAMED(203, "tgi1a"), +}; + +static struct platform_device mtu2_device = { + .name = "sh-mtu2", + .id = -1, + .resource = mtu2_resources, + .num_resources = ARRAY_SIZE(mtu2_resources), +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xfffe6000, + .end = 0xfffe6000 + 0x30 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = 338, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +/* USB Host */ +static struct r8a66597_platdata r8a66597_data = { + .on_chip = 1, + .endian = 1, +}; + +static struct resource r8a66597_usb_host_resources[] = { + [0] = { + .start = 0xe8010000, + .end = 0xe80100e4, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = 170, + .end = 170, + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, + }, +}; + +static struct platform_device r8a66597_usb_host_device = { + .name = "r8a66597_hcd", + .id = 0, + .dev = { + .dma_mask = NULL, /* not use dma */ + .coherent_dma_mask = 0xffffffff, + .platform_data = &r8a66597_data, + }, + .num_resources = ARRAY_SIZE(r8a66597_usb_host_resources), + .resource = r8a66597_usb_host_resources, +}; + +static struct platform_device *sh7269_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &cmt_device, + &mtu2_device, + &rtc_device, + &r8a66597_usb_host_device, +}; + +static int __init sh7269_devices_setup(void) +{ + return platform_add_devices(sh7269_devices, + ARRAY_SIZE(sh7269_devices)); +} +arch_initcall(sh7269_devices_setup); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +static struct platform_device *sh7269_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &cmt_device, + &mtu2_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7269_early_devices, + ARRAY_SIZE(sh7269_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh3/Makefile b/arch/sh/kernel/cpu/sh3/Makefile new file mode 100644 index 000000000..1dcb43d93 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/Makefile @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-3 backends. 
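+#
+# Each CPU subtype contributes its setup/serial objects directly to obj-y,
+# while the clock-$(CONFIG_...) and pinmux-$(CONFIG_...) variables below
+# collect the per-subtype clock and pinmux objects; these are folded back
+# into the object list at the end of this Makefile.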
+# + +obj-y := ex.o probe.o entry.o setup-sh3.o + +obj-$(CONFIG_HIBERNATION) += swsusp.o + +# CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7705) += setup-sh7705.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7706) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7707) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7708) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7709) += setup-sh770x.o serial-sh770x.o +obj-$(CONFIG_CPU_SUBTYPE_SH7710) += setup-sh7710.o serial-sh7710.o +obj-$(CONFIG_CPU_SUBTYPE_SH7712) += setup-sh7710.o serial-sh7710.o +obj-$(CONFIG_CPU_SUBTYPE_SH7720) += setup-sh7720.o serial-sh7720.o +obj-$(CONFIG_CPU_SUBTYPE_SH7721) += setup-sh7720.o serial-sh7720.o + +# Primary on-chip clocks (common) +clock-$(CONFIG_CPU_SH3) := clock-sh3.o +clock-$(CONFIG_CPU_SUBTYPE_SH7705) := clock-sh7705.o +clock-$(CONFIG_CPU_SUBTYPE_SH7706) := clock-sh7706.o +clock-$(CONFIG_CPU_SUBTYPE_SH7709) := clock-sh7709.o +clock-$(CONFIG_CPU_SUBTYPE_SH7710) := clock-sh7710.o +clock-$(CONFIG_CPU_SUBTYPE_SH7720) := clock-sh7710.o +clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o + +# Pinmux setup +pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o + +obj-y += $(clock-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) diff --git a/arch/sh/kernel/cpu/sh3/clock-sh3.c b/arch/sh/kernel/cpu/sh3/clock-sh3.c new file mode 100644 index 000000000..d7765728c --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh3.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh3.c + * + * Generic SH-3 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + * + * FRQCR parsing hacked out of arch/sh/kernel/time.c + * + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. 
Brown <mrbrown@linux-sh.org> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 }; +static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 }; +static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + clk->rate *= pfc_divisors[idx]; +} + +static struct sh_clk_ops sh3_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh3_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4); + + return clk->parent->rate / stc_multipliers[idx]; +} + +static struct sh_clk_ops sh3_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); + + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh3_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh3_clk_ops[] = { + &sh3_master_clk_ops, + &sh3_module_clk_ops, + &sh3_bus_clk_ops, + &sh3_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh3_clk_ops)) + *ops = sh3_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7705.c b/arch/sh/kernel/cpu/sh3/clock-sh7705.c new file mode 100644 index 000000000..4947114af --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh7705.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh7705.c + * + * SH7705 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + * + * FRQCR parsing hacked out of arch/sh/kernel/time.c + * + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +/* + * SH7705 uses the same divisors as the generic SH-3 case, it's just the + * FRQCR layout that is a bit different.. 
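+ *
+ * As a rough worked example of the decoding done below (using an FRQCR
+ * value of 0x0121 purely for illustration): the peripheral index is
+ * FRQCR & 0x0003 = 1, the bus index is (FRQCR & 0x0300) >> 8 = 1, and the
+ * CPU index is (FRQCR & 0x0030) >> 4 = 2, selecting pfc_divisors[1],
+ * stc_multipliers[1] and ifc_divisors[2] respectively.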
+ */ +static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 }; +static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 }; +static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0003]; +} + +static struct sh_clk_ops sh7705_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = __raw_readw(FRQCR) & 0x0003; + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7705_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0300) >> 8; + return clk->parent->rate / stc_multipliers[idx]; +} + +static struct sh_clk_ops sh7705_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0030) >> 4; + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7705_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7705_clk_ops[] = { + &sh7705_master_clk_ops, + &sh7705_module_clk_ops, + &sh7705_bus_clk_ops, + &sh7705_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7705_clk_ops)) + *ops = sh7705_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7706.c b/arch/sh/kernel/cpu/sh3/clock-sh7706.c new file mode 100644 index 000000000..17855022c --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh7706.c @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh7706.c + * + * SH7706 support for the clock framework + * + * Copyright (C) 2006 Takashi YOSHII + * + * Based on arch/sh/kernel/cpu/sh3/clock-sh7709.c + * Copyright (C) 2005 Andriy Skulysh + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int stc_multipliers[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; +static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 }; +static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + clk->rate *= pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7706_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7706_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4); + + return clk->parent->rate / stc_multipliers[idx]; +} + +static struct sh_clk_ops sh7706_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); + + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7706_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7706_clk_ops[] = { + &sh7706_master_clk_ops, + &sh7706_module_clk_ops, + &sh7706_bus_clk_ops, + &sh7706_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + 
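+	/*
+	 * idx selects a clock by position: 0 = master, 1 = module, 2 = bus,
+	 * 3 = cpu, matching the ordering of sh7706_clk_ops[] above.
+	 */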
if (idx < ARRAY_SIZE(sh7706_clk_ops)) + *ops = sh7706_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7709.c b/arch/sh/kernel/cpu/sh3/clock-sh7709.c new file mode 100644 index 000000000..54701bbf7 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh7709.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh7709.c + * + * SH7709 support for the clock framework + * + * Copyright (C) 2005 Andriy Skulysh + * + * Based on arch/sh/kernel/cpu/sh3/clock-sh7705.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int stc_multipliers[] = { 1, 2, 4, 8, 3, 6, 1, 1 }; +static int ifc_divisors[] = { 1, 2, 4, 1, 3, 1, 1, 1 }; +static int pfc_divisors[] = { 1, 2, 4, 1, 3, 6, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + clk->rate *= pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7709_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x2000) >> 11) | (frqcr & 0x0003); + + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7709_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = (frqcr & 0x0080) ? + ((frqcr & 0x8000) >> 13) | ((frqcr & 0x0030) >> 4) : 1; + + return clk->parent->rate * stc_multipliers[idx]; +} + +static struct sh_clk_ops sh7709_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = ((frqcr & 0x4000) >> 12) | ((frqcr & 0x000c) >> 2); + + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7709_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7709_clk_ops[] = { + &sh7709_master_clk_ops, + &sh7709_module_clk_ops, + &sh7709_bus_clk_ops, + &sh7709_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7709_clk_ops)) + *ops = sh7709_clk_ops[idx]; +} diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7710.c b/arch/sh/kernel/cpu/sh3/clock-sh7710.c new file mode 100644 index 000000000..e60d0bc19 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh7710.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh7710.c + * + * SH7710 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + * + * FRQCR parsing hacked out of arch/sh/kernel/time.c + * + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. 
Brown <mrbrown@linux-sh.org> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int md_table[] = { 1, 2, 3, 4, 6, 8, 12 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= md_table[__raw_readw(FRQCR) & 0x0007]; +} + +static struct sh_clk_ops sh7710_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0007); + return clk->parent->rate / md_table[idx]; +} + +static struct sh_clk_ops sh7710_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0700) >> 8; + return clk->parent->rate / md_table[idx]; +} + +static struct sh_clk_ops sh7710_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0070) >> 4; + return clk->parent->rate / md_table[idx]; +} + +static struct sh_clk_ops sh7710_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7710_clk_ops[] = { + &sh7710_master_clk_ops, + &sh7710_module_clk_ops, + &sh7710_bus_clk_ops, + &sh7710_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7710_clk_ops)) + *ops = sh7710_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh3/clock-sh7712.c b/arch/sh/kernel/cpu/sh3/clock-sh7712.c new file mode 100644 index 000000000..5af553f38 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/clock-sh7712.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/clock-sh7712.c + * + * SH7712 support for the clock framework + * + * Copyright (C) 2007 Andrew Murray <amurray@mpc-data.co.uk> + * + * Based on arch/sh/kernel/cpu/sh3/clock-sh3.c + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int multipliers[] = { 1, 2, 3 }; +static int divisors[] = { 1, 2, 3, 4, 6 }; + +static void master_clk_init(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = (frqcr & 0x0300) >> 8; + + clk->rate *= multipliers[idx]; +} + +static struct sh_clk_ops sh7712_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = frqcr & 0x0007; + + return clk->parent->rate / divisors[idx]; +} + +static struct sh_clk_ops sh7712_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int frqcr = __raw_readw(FRQCR); + int idx = (frqcr & 0x0030) >> 4; + + return clk->parent->rate / divisors[idx]; +} + +static struct sh_clk_ops sh7712_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7712_clk_ops[] = { + &sh7712_master_clk_ops, + &sh7712_module_clk_ops, + &sh7712_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7712_clk_ops)) + *ops = sh7712_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S new file mode 100644 index 000000000..b1f5b3c58 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -0,0 +1,509 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh3/entry.S + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2003 - 2012 Paul Mundt + */ +#include <linux/sys.h> 
+#include <linux/errno.h> +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/thread_info.h> +#include <asm/unistd.h> +#include <cpu/mmu_context.h> +#include <asm/page.h> +#include <asm/cache.h> + +! NOTE: +! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address +! to be jumped is too far, but it causes illegal slot exception. + +/* + * entry.S contains the system-call and fault low-level handling routines. + * This also contains the timer-interrupt handler, as well as all interrupts + * and faults that can result in a task-switch. + * + * NOTE: This code handles signal-recognition, which happens every time + * after a timer-interrupt and after each system call. + * + * NOTE: This code uses a convention that instructions in the delay slot + * of a transfer-control instruction are indented by an extra space, thus: + * + * jmp @k0 ! control-transfer instruction + * ldc k1, ssr ! delay slot + * + * Stack layout in 'ret_from_syscall': + * ptrace needs to have all regs on the stack. + * if the order here is changed, it needs to be + * updated in ptrace.c and ptrace.h + * + * r0 + * ... + * r15 = stack pointer + * spc + * pr + * ssr + * gbr + * mach + * macl + * syscall # + * + */ +/* Offsets to the stack */ +OFF_R0 = 0 /* Return value. New ABI also arg4 */ +OFF_R1 = 4 /* New ABI: arg5 */ +OFF_R2 = 8 /* New ABI: arg6 */ +OFF_R3 = 12 /* New ABI: syscall_nr */ +OFF_R4 = 16 /* New ABI: arg0 */ +OFF_R5 = 20 /* New ABI: arg1 */ +OFF_R6 = 24 /* New ABI: arg2 */ +OFF_R7 = 28 /* New ABI: arg3 */ +OFF_SP = (15*4) +OFF_PC = (16*4) +OFF_SR = (16*4+8) +OFF_TRA = (16*4+6*4) + +#define k0 r0 +#define k1 r1 +#define k2 r2 +#define k3 r3 +#define k4 r4 + +#define g_imask r6 /* r6_bank1 */ +#define k_g_imask r6_bank /* r6_bank1 */ +#define current r7 /* r7_bank1 */ + +#include <asm/entry-macros.S> + +/* + * Kernel mode register usage: + * k0 scratch + * k1 scratch + * k2 scratch (Exception code) + * k3 scratch (Return address) + * k4 scratch + * k5 reserved + * k6 Global Interrupt Mask (0--15 << 4) + * k7 CURRENT_THREAD_INFO (pointer to current thread info) + */ + +! +! TLB Miss / Initial Page write exception handling +! _and_ +! TLB hits, but the access violate the protection. +! It can be valid access, such as stack grow and/or C-O-W. +! +! +! Find the pmd/pte entry and loadtlb +! If it's not found, cause address error (SEGV) +! +! Although this could be written in assembly language (and it'd be faster), +! this first version depends *much* on C implementation. +! + +#if defined(CONFIG_MMU) + .align 2 +ENTRY(tlb_miss_load) + bra call_handle_tlbmiss + mov #0, r5 + + .align 2 +ENTRY(tlb_miss_store) + bra call_handle_tlbmiss + mov #FAULT_CODE_WRITE, r5 + + .align 2 +ENTRY(initial_page_write) + bra call_handle_tlbmiss + mov #FAULT_CODE_INITIAL, r5 + + .align 2 +ENTRY(tlb_protection_violation_load) + bra call_do_page_fault + mov #FAULT_CODE_PROT, r5 + + .align 2 +ENTRY(tlb_protection_violation_store) + bra call_do_page_fault + mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5 + +call_handle_tlbmiss: + mov.l 1f, r0 + mov r5, r8 + mov.l @r0, r6 + mov.l 2f, r0 + sts pr, r10 + jsr @r0 + mov r15, r4 + ! + tst r0, r0 + bf/s 0f + lds r10, pr + rts + nop +0: + mov r8, r5 +call_do_page_fault: + mov.l 1f, r0 + mov.l @r0, r6 + + mov.l 3f, r0 + mov.l 4f, r1 + mov r15, r4 + jmp @r0 + lds r1, pr + + .align 2 +1: .long MMU_TEA +2: .long handle_tlbmiss +3: .long do_page_fault +4: .long ret_from_exception + + .align 2 +ENTRY(address_error_load) + bra call_dae + mov #0,r5 ! 
writeaccess = 0 + + .align 2 +ENTRY(address_error_store) + bra call_dae + mov #1,r5 ! writeaccess = 1 + + .align 2 +call_dae: + mov.l 1f, r0 + mov.l @r0, r6 ! address + mov.l 2f, r0 + jmp @r0 + mov r15, r4 ! regs + + .align 2 +1: .long MMU_TEA +2: .long do_address_error +#endif /* CONFIG_MMU */ + +#if defined(CONFIG_SH_STANDARD_BIOS) + /* Unwind the stack and jmp to the debug entry */ +ENTRY(sh_bios_handler) + mov.l 1f, r8 + bsr restore_regs + nop + + lds k2, pr ! restore pr + mov k4, r15 + ! + mov.l 2f, k0 + mov.l @k0, k0 + jmp @k0 + ldc k3, ssr + .align 2 +1: .long 0x300000f0 +2: .long gdb_vbr_vector +#endif /* CONFIG_SH_STANDARD_BIOS */ + +! restore_regs() +! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack +! - switch bank +! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack +! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra +! k2 returns original pr +! k3 returns original sr +! k4 returns original stack pointer +! r8 passes SR bitmask, overwritten with restored data on return +! r9 trashed +! BL=0 on entry, on exit BL=1 (depending on r8). + +ENTRY(restore_regs) + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + ! + stc sr, r9 + or r8, r9 + ldc r9, sr + ! + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + mov.l @r15+, k4 ! original stack pointer + ldc.l @r15+, spc + mov.l @r15+, k2 ! original PR + mov.l @r15+, k3 ! original SR + ldc.l @r15+, gbr + lds.l @r15+, mach + lds.l @r15+, macl + rts + add #4, r15 ! Skip syscall number + +restore_all: + mov.l 7f, r8 + bsr restore_regs + nop + + lds k2, pr ! restore pr + ! + ! Calculate new SR value + mov k3, k2 ! original SR value + mov #0xfffffff0, k1 + extu.b k1, k1 + not k1, k1 + and k1, k2 ! Mask original SR value + ! + mov k3, k0 ! Calculate IMASK-bits + shlr2 k0 + and #0x3c, k0 + cmp/eq #0x3c, k0 + bt/s 6f + shll2 k0 + mov g_imask, k0 + ! +6: or k0, k2 ! Set the IMASK-bits + ldc k2, ssr + ! + mov k4, r15 + rte + nop + + .align 2 +5: .long 0x00001000 ! DSP +7: .long 0x30000000 + +! common exception handler +#include "../../entry-common.S" + +! Exception Vector Base +! +! Should be aligned page boundary. +! + .balign 4096,0,4096 +ENTRY(vbr_base) + .long 0 +! +! 0x100: General exception vector +! + .balign 256,0,256 +general_exception: + bra handle_exception + sts pr, k3 ! save original pr value in k3 + +! prepare_stack() +! - roll back gRB +! - switch to kernel stack +! k0 returns original sp (after roll back) +! k1 trashed +! k2 trashed + +prepare_stack: +#ifdef CONFIG_GUSA + ! Check for roll back gRB (User and Kernel) + mov r15, k0 + shll k0 + bf/s 1f + shll k0 + bf/s 1f + stc spc, k1 + stc r0_bank, k0 + cmp/hs k0, k1 ! test k1 (saved PC) >= k0 (saved r0) + bt/s 2f + stc r1_bank, k1 + + add #-2, k0 + add r15, k0 + ldc k0, spc ! PC = saved r0 + r15 - 2 +2: mov k1, r15 ! SP = r1 +1: +#endif + ! Switch to kernel stack if needed + stc ssr, k0 ! Is it from kernel space? + shll k0 ! Check MD bit (bit30) by shifting it into... + shll k0 ! ...the T bit + bt/s 1f ! It's a kernel to kernel transition. + mov r15, k0 ! save original stack to k0 + /* User space to kernel */ + mov #(THREAD_SIZE >> 10), k1 + shll8 k1 ! k1 := THREAD_SIZE + shll2 k1 + add current, k1 + mov k1, r15 ! change to kernel stack + ! +1: + rts + nop + +! +! 0x400: Instruction and Data TLB miss exception vector +! + .balign 1024,0,1024 +tlb_miss: + sts pr, k3 ! 
save original pr value in k3 + +handle_exception: + mova exception_data, k0 + + ! Setup stack and save DSP context (k0 contains original r15 on return) + bsr prepare_stack + PREF(k0) + + ! Save registers / Switch to bank 0 + mov.l 5f, k2 ! vector register address + mov.l 1f, k4 ! SR bits to clear in k4 + bsr save_regs ! needs original pr value in k3 + mov.l @k2, k2 ! read out vector and keep in k2 + +handle_exception_special: + setup_frame_reg + + ! Setup return address and jump to exception handler + mov.l 7f, r9 ! fetch return address + stc r2_bank, r0 ! k2 (vector) + mov.l 6f, r10 + shlr2 r0 + shlr r0 + mov.l @(r0, r10), r10 + jmp @r10 + lds r9, pr ! put return address in pr + + .align L1_CACHE_SHIFT + +! save_regs() +! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack +! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack +! - switch bank +! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack +! k0 contains original stack pointer* +! k1 trashed +! k3 passes original pr* +! k4 passes SR bitmask +! BL=1 on entry, on exit BL=0. + +ENTRY(save_regs) + mov #-1, r1 + mov.l k1, @-r15 ! set TRA (default: -1) + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + stc.l ssr, @-r15 + mov.l k3, @-r15 ! original pr in k3 + stc.l spc, @-r15 + + mov.l k0, @-r15 ! original stack pointer in k0 + mov.l r14, @-r15 + mov.l r13, @-r15 + mov.l r12, @-r15 + mov.l r11, @-r15 + mov.l r10, @-r15 + mov.l r9, @-r15 + mov.l r8, @-r15 + + mov.l 0f, k3 ! SR bits to set in k3 + + ! fall-through + +! save_low_regs() +! - modify SR for bank switch +! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack +! k3 passes bits to set in SR +! k4 passes bits to clear in SR + +ENTRY(save_low_regs) + stc sr, r8 + or k3, r8 + and k4, r8 + ldc r8, sr + + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + rts + mov.l r0, @-r15 + +! +! 0x600: Interrupt / NMI vector +! + .balign 512,0,512 +ENTRY(handle_interrupt) + sts pr, k3 ! save original pr value in k3 + mova exception_data, k0 + + ! Setup stack and save DSP context (k0 contains original r15 on return) + bsr prepare_stack + PREF(k0) + + ! Save registers / Switch to bank 0 + mov.l 1f, k4 ! SR bits to clear in k4 + bsr save_regs ! needs original pr value in k3 + mov #-1, k2 ! default vector kept in k2 + + setup_frame_reg + + stc sr, r0 ! get status register + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bf 9f + TRACE_IRQS_OFF +9: + + ! Setup return address and jump to do_IRQ + mov.l 4f, r9 ! fetch return address + lds r9, pr ! put return address in pr + mov.l 2f, r4 + mov.l 3f, r9 + mov.l @r4, r4 ! pass INTEVT vector as arg0 + + shlr2 r4 + shlr r4 + mov r4, r0 ! save vector->jmp table offset for later + + shlr2 r4 ! vector to IRQ# conversion + + mov #0x10, r5 + cmp/hs r5, r4 ! is it a valid IRQ? + bt 10f + + /* + * We got here as a result of taking the INTEVT path for something + * that isn't a valid hard IRQ, therefore we bypass the do_IRQ() + * path and special case the event dispatch instead. This is the + * expected path for the NMI (and any other brilliantly implemented + * exception), which effectively wants regular exception dispatch + * but is unfortunately reported through INTEVT rather than + * EXPEVT. Grr. + */ + mov.l 6f, r9 + mov.l @(r0, r9), r9 + jmp @r9 + mov r15, r8 ! trap handlers take saved regs in r8 + +10: + jmp @r9 ! Off to do_IRQ() we go. + mov r15, r5 ! 
pass saved registers as arg1 + +ENTRY(exception_none) + rts + nop + + .align L1_CACHE_SHIFT +exception_data: +0: .long 0x000080f0 ! FD=1, IMASK=15 +1: .long 0xcfffffff ! RB=0, BL=0 +2: .long INTEVT +3: .long do_IRQ +4: .long ret_from_irq +5: .long EXPEVT +6: .long exception_handling_table +7: .long ret_from_exception diff --git a/arch/sh/kernel/cpu/sh3/ex.S b/arch/sh/kernel/cpu/sh3/ex.S new file mode 100644 index 000000000..ee2113f42 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/ex.S @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh3/ex.S + * + * The SH-3 and SH-4 exception vector table. + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2003 - 2008 Paul Mundt + */ +#include <linux/linkage.h> + +#if !defined(CONFIG_MMU) +#define tlb_miss_load exception_error +#define tlb_miss_store exception_error +#define initial_page_write exception_error +#define tlb_protection_violation_load exception_error +#define tlb_protection_violation_store exception_error +#define address_error_load exception_error +#define address_error_store exception_error +#endif + +#if !defined(CONFIG_SH_FPU) +#define fpu_error_trap_handler exception_error +#endif + +#if !defined(CONFIG_KGDB) +#define kgdb_handle_exception exception_error +#endif + + .align 2 + .data + +ENTRY(exception_handling_table) + .long exception_error /* 000 */ + .long exception_error + .long tlb_miss_load /* 040 */ + .long tlb_miss_store + .long initial_page_write + .long tlb_protection_violation_load + .long tlb_protection_violation_store + .long address_error_load + .long address_error_store /* 100 */ + .long fpu_error_trap_handler /* 120 */ + .long exception_error /* 140 */ + .long system_call ! Unconditional Trap /* 160 */ + .long exception_error ! reserved_instruction (filled by trap_init) /* 180 */ + .long exception_error ! illegal_slot_instruction (filled by trap_init) /*1A0*/ + .long nmi_trap_handler /* 1C0 */ ! Allow trap to debugger + .long breakpoint_trap_handler /* 1E0 */ + + /* + * Pad the remainder of the table out, exceptions residing in far + * away offsets can be manually inserted in to their appropriate + * location via set_exception_table_{evt,vec}(). + */ + .balign 4096,0,4096 diff --git a/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c new file mode 100644 index 000000000..34015e608 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/pinmux-sh7720.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7720 Pinmux + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7720_pfc_resources[] = { + [0] = { + .start = 0xa4050100, + .end = 0xa405016f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7720", sh7720_pfc_resources, + ARRAY_SIZE(sh7720_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c new file mode 100644 index 000000000..5e7ad591a --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/probe.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh3/probe.c + * + * CPU Subtype Probing for SH-3. 
+ * + * Copyright (C) 1999, 2000 Niibe Yutaka + * Copyright (C) 2002 Paul Mundt + */ + +#include <linux/init.h> +#include <asm/processor.h> +#include <asm/cache.h> +#include <asm/io.h> + +void cpu_probe(void) +{ + unsigned long addr0, addr1, data0, data1, data2, data3; + + jump_to_uncached(); + /* + * Check if the entry shadows or not. + * When shadowed, it's 128-entry system. + * Otherwise, it's 256-entry system. + */ + addr0 = CACHE_OC_ADDRESS_ARRAY + (3 << 12); + addr1 = CACHE_OC_ADDRESS_ARRAY + (1 << 12); + + /* First, write back & invalidate */ + data0 = __raw_readl(addr0); + __raw_writel(data0&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr0); + data1 = __raw_readl(addr1); + __raw_writel(data1&~(SH_CACHE_VALID|SH_CACHE_UPDATED), addr1); + + /* Next, check if there's shadow or not */ + data0 = __raw_readl(addr0); + data0 ^= SH_CACHE_VALID; + __raw_writel(data0, addr0); + data1 = __raw_readl(addr1); + data2 = data1 ^ SH_CACHE_VALID; + __raw_writel(data2, addr1); + data3 = __raw_readl(addr0); + + /* Lastly, invaliate them. */ + __raw_writel(data0&~SH_CACHE_VALID, addr0); + __raw_writel(data2&~SH_CACHE_VALID, addr1); + + back_to_cached(); + + boot_cpu_data.dcache.ways = 4; + boot_cpu_data.dcache.entry_shift = 4; + boot_cpu_data.dcache.linesz = L1_CACHE_BYTES; + boot_cpu_data.dcache.flags = 0; + + /* + * 7709A/7729 has 16K cache (256-entry), while 7702 has only + * 2K(direct) 7702 is not supported (yet) + */ + if (data0 == data1 && data2 == data3) { /* Shadow */ + boot_cpu_data.dcache.way_incr = (1 << 11); + boot_cpu_data.dcache.entry_mask = 0x7f0; + boot_cpu_data.dcache.sets = 128; + boot_cpu_data.type = CPU_SH7708; + + boot_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC; + } else { /* 7709A or 7729 */ + boot_cpu_data.dcache.way_incr = (1 << 12); + boot_cpu_data.dcache.entry_mask = 0xff0; + boot_cpu_data.dcache.sets = 256; + boot_cpu_data.type = CPU_SH7729; + +#if defined(CONFIG_CPU_SUBTYPE_SH7706) + boot_cpu_data.type = CPU_SH7706; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7710) + boot_cpu_data.type = CPU_SH7710; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7712) + boot_cpu_data.type = CPU_SH7712; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7720) + boot_cpu_data.type = CPU_SH7720; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7721) + boot_cpu_data.type = CPU_SH7721; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7705) + boot_cpu_data.type = CPU_SH7705; + +#if defined(CONFIG_SH7705_CACHE_32KB) + boot_cpu_data.dcache.way_incr = (1 << 13); + boot_cpu_data.dcache.entry_mask = 0x1ff0; + boot_cpu_data.dcache.sets = 512; + __raw_writel(CCR_CACHE_32KB, CCR3_REG); +#else + __raw_writel(CCR_CACHE_16KB, CCR3_REG); +#endif +#endif + } + + /* + * SH-3 doesn't have separate caches + */ + boot_cpu_data.dcache.flags |= SH_CACHE_COMBINED; + boot_cpu_data.icache = boot_cpu_data.dcache; + + boot_cpu_data.family = CPU_FAMILY_SH3; +} diff --git a/arch/sh/kernel/cpu/sh3/serial-sh770x.c b/arch/sh/kernel/cpu/sh3/serial-sh770x.c new file mode 100644 index 000000000..dec027f23 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh770x.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/serial_sci.h> +#include <linux/serial_core.h> +#include <linux/io.h> +#include <cpu/serial.h> + +#define SCPCR 0xA4000116 +#define SCPDR 0xA4000136 + +static void sh770x_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + /* We need to set SCPCR to enable RTS/CTS */ + data = __raw_readw(SCPCR); + /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ + __raw_writew(data & 0x0fcf, SCPCR); + + if (!(cflag & 
CRTSCTS)) { + /* We need to set SCPCR to enable RTS/CTS */ + data = __raw_readw(SCPCR); + /* Clear out SCP7MD1,0, SCP4MD1,0, + Set SCP6MD1,0 = {01} (output) */ + __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); + + data = __raw_readb(SCPDR); + /* Set /RTS2 (bit6) = 0 */ + __raw_writeb(data & 0xbf, SCPDR); + } +} + +struct plat_sci_port_ops sh770x_sci_port_ops = { + .init_pins = sh770x_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7710.c b/arch/sh/kernel/cpu/sh3/serial-sh7710.c new file mode 100644 index 000000000..ee04052e5 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh7710.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/serial_sci.h> +#include <linux/serial_core.h> +#include <linux/io.h> +#include <cpu/serial.h> + +#define PACR 0xa4050100 +#define PBCR 0xa4050102 + +static void sh7710_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + if (port->mapbase == 0xA4400000) { + __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); + __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); + } else if (port->mapbase == 0xA4410000) + __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); +} + +struct plat_sci_port_ops sh7710_sci_port_ops = { + .init_pins = sh7710_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c new file mode 100644 index 000000000..75aaea49d --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/serial_sci.h> +#include <linux/serial_core.h> +#include <linux/io.h> +#include <cpu/serial.h> +#include <cpu/gpio.h> + +static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + if (cflag & CRTSCTS) { + /* enable RTS/CTS */ + if (port->mapbase == 0xa4430000) { /* SCIF0 */ + /* Clear PTCR bit 9-2; enable all scif pins but sck */ + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xfc03), PORT_PTCR); + } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ + /* Clear PVCR bit 9-2 */ + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xfc03), PORT_PVCR); + } + } else { + if (port->mapbase == 0xa4430000) { /* SCIF0 */ + /* Clear PTCR bit 5-2; enable only tx and rx */ + data = __raw_readw(PORT_PTCR); + __raw_writew((data & 0xffc3), PORT_PTCR); + } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ + /* Clear PVCR bit 5-2 */ + data = __raw_readw(PORT_PVCR); + __raw_writew((data & 0xffc3), PORT_PVCR); + } + } +} + +struct plat_sci_port_ops sh7720_sci_port_ops = { + .init_pins = sh7720_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh3/setup-sh3.c b/arch/sh/kernel/cpu/sh3/setup-sh3.c new file mode 100644 index 000000000..cf2a3f09f --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/setup-sh3.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Shared SH3 Setup code + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +/* All SH3 devices are equipped with IRQ0->5 (except sh7708) */ + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, +}; + +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), +}; + +static struct intc_vect vectors_irq45[] __initdata = { + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, 
IRQ0 } }, + { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4000004, 0, 8, /* IRR0 */ + { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa4000010, 16, 2, { 0, 0, IRQ5, IRQ4, IRQ3, IRQ2, IRQ1, IRQ0 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh3-irq0123", + vectors_irq0123, NULL, NULL, + prio_registers, sense_registers, ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq45, "sh3-irq45", + vectors_irq45, NULL, NULL, + prio_registers, sense_registers, ack_registers); + +#define INTC_ICR1 0xa4000010UL +#define INTC_ICR1_IRQLVL (1<<14) + +void __init plat_irq_setup_pins(int mode) +{ + if (mode == IRQ_MODE_IRQ) { + __raw_writew(__raw_readw(INTC_ICR1) & ~INTC_ICR1_IRQLVL, INTC_ICR1); + register_intc_controller(&intc_desc_irq0123); + return; + } + BUG(); +} + +void __init plat_irq_setup_sh3(void) +{ + register_intc_controller(&intc_desc_irq45); +} diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7705.c b/arch/sh/kernel/cpu/sh3/setup-sh7705.c new file mode 100644 index 000000000..0544134b3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/setup-sh7705.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7705 Setup + * + * Copyright (C) 2006 - 2009 Paul Mundt + * Copyright (C) 2007 Nobuhiro Iwamatsu + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <asm/rtc.h> +#include <cpu/serial.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, + PINT07, PINT815, + + DMAC, SCIF0, SCIF2, ADC_ADI, USB, + + TPU0, TPU1, TPU2, TPU3, + TMU0, TMU1, TMU2, + + RTC, WDT, REF_RCMI, +}; + +static struct intc_vect vectors[] __initdata = { + /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720), + INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820), + INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8e0), + INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920), + INTC_VECT(SCIF2, 0x960), + INTC_VECT(ADC_ADI, 0x980), + INTC_VECT(USB, 0xa20), INTC_VECT(USB, 0xa40), + INTC_VECT(TPU0, 0xc00), INTC_VECT(TPU1, 0xc20), + INTC_VECT(TPU2, 0xc80), INTC_VECT(TPU3, 0xca0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF_RCMI, 0x580), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, 0, 0 } }, + { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, + { 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, IRQ5, IRQ4 } }, + { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, SCIF0, SCIF2, ADC_ADI } }, + { 0xa4080000, 0, 16, 4, /* IPRF */ { 0, 0, USB } }, + { 0xa4080002, 0, 16, 4, /* IPRG */ { TPU0, TPU1 } }, + { 0xa4080004, 0, 16, 4, /* IPRH */ { TPU2, TPU3 } }, + +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7705", vectors, NULL, + NULL, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_CKE1, + .type = PORT_SCIF, + .ops = &sh770x_sci_port_ops, + .regtype = 
SCIx_SH7705_SCIF_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xa4410000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .type = PORT_SCIF, + .ops = &sh770x_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xa4400000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x880)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xfffffec0, + .end = 0xfffffec0 + 0x1e, + .flags = IORESOURCE_IO, + }, + [1] = { + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct sh_rtc_platform_info rtc_info = { + .capabilities = RTC_CAP_4_DIGIT_YEAR, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, + .dev = { + .platform_data = &rtc_info, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xfffffe90, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu-sh3", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh7705_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, + &rtc_device, +}; + +static int __init sh7705_devices_setup(void) +{ + return platform_add_devices(sh7705_devices, + ARRAY_SIZE(sh7705_devices)); +} +arch_initcall(sh7705_devices_setup); + +static struct platform_device *sh7705_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7705_early_devices, + ARRAY_SIZE(sh7705_early_devices)); +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + plat_irq_setup_sh3(); +} diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c new file mode 100644 index 000000000..4947f5774 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH3 Setup code for SH7706, SH7707, SH7708, SH7709 + * + * Copyright (C) 2007 Magnus Damm + * Copyright (C) 2009 Paul Mundt + * + * Based on setup-sh7709.c + * + * Copyright (C) 2006 Paul Mundt + */ +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <cpu/serial.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, + PINT07, PINT815, + DMAC, SCIF0, SCIF2, SCI, ADC_ADI, + LCDC, PCC0, PCC1, + TMU0, TMU1, TMU2, + RTC, WDT, REF, +}; + +static struct intc_vect vectors[] __initdata = { + 
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCI, 0x4e0), INTC_VECT(SCI, 0x500), + INTC_VECT(SCI, 0x520), INTC_VECT(SCI, 0x540), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), + INTC_VECT(REF, 0x5a0), +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(DMAC, 0x800), INTC_VECT(DMAC, 0x820), + INTC_VECT(DMAC, 0x840), INTC_VECT(DMAC, 0x860), + INTC_VECT(ADC_ADI, 0x980), + INTC_VECT(SCIF2, 0x900), INTC_VECT(SCIF2, 0x920), + INTC_VECT(SCIF2, 0x940), INTC_VECT(SCIF2, 0x960), +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + INTC_VECT(PINT07, 0x700), INTC_VECT(PINT815, 0x720), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0), +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) + INTC_VECT(LCDC, 0x9a0), + INTC_VECT(PCC0, 0x9c0), INTC_VECT(PCC1, 0x9e0), +#endif +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, SCI, 0 } }, +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, + { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } }, + { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC, 0, SCIF2, ADC_ADI } }, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + { 0xa4000018, 0, 16, 4, /* IPRD */ { PINT07, PINT815, } }, + { 0xa400001a, 0, 16, 4, /* IPRE */ { 0, SCIF0 } }, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) + { 0xa400001c, 0, 16, 4, /* IPRF */ { 0, LCDC, PCC0, PCC1, } }, +#endif +}; + +static DECLARE_INTC_DESC(intc_desc, "sh770x", vectors, NULL, + NULL, prio_registers, NULL); + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xfffffec0, + .end = 0xfffffec0 + 0x1e, + .flags = IORESOURCE_IO, + }, + [1] = { + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct plat_sci_port scif0_platform_data = { + .type = PORT_SCI, + .ops = &sh770x_sci_port_ops, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfffffe80, 0x10), + DEFINE_RES_IRQ(evt2irq(0x4e0)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) +static struct plat_sci_port scif1_platform_data = { + .type = PORT_SCIF, + .ops = &sh770x_sci_port_ops, + .regtype = SCIx_SH3_SCIF_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xa4000150, 0x10), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + 
defined(CONFIG_CPU_SUBTYPE_SH7709) +static struct plat_sci_port scif2_platform_data = { + .type = PORT_IRDA, + .ops = &sh770x_sci_port_ops, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xa4000140, 0x10), + DEFINE_RES_IRQ(evt2irq(0x880)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; +#endif + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xfffffe90, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu-sh3", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh770x_devices[] __initdata = { + &scif0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + &scif1_device, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + &scif2_device, +#endif + &tmu0_device, + &rtc_device, +}; + +static int __init sh770x_devices_setup(void) +{ + return platform_add_devices(sh770x_devices, + ARRAY_SIZE(sh770x_devices)); +} +arch_initcall(sh770x_devices_setup); + +static struct platform_device *sh770x_early_devices[] __initdata = { + &scif0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + &scif1_device, +#endif +#if defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + &scif2_device, +#endif + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh770x_early_devices, + ARRAY_SIZE(sh770x_early_devices)); +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +#if defined(CONFIG_CPU_SUBTYPE_SH7706) || \ + defined(CONFIG_CPU_SUBTYPE_SH7707) || \ + defined(CONFIG_CPU_SUBTYPE_SH7709) + plat_irq_setup_sh3(); +#endif +} diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7710.c b/arch/sh/kernel/cpu/sh3/setup-sh7710.c new file mode 100644 index 000000000..381910761 --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/setup-sh7710.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH3 Setup code for SH7710, SH7712 + * + * Copyright (C) 2006 - 2009 Paul Mundt + * Copyright (C) 2007 Nobuhiro Iwamatsu + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <asm/rtc.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, + DMAC1, SCIF0, SCIF1, DMAC2, IPSEC, + EDMAC0, EDMAC1, EDMAC2, + SIOF0, SIOF1, + + TMU0, TMU1, TMU2, + RTC, WDT, REF, +}; + +static struct intc_vect vectors[] __initdata = { + /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(DMAC1, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC1, 0x840), INTC_VECT(DMAC1, 0x860), + INTC_VECT(SCIF0, 0x880), INTC_VECT(SCIF0, 0x8a0), + INTC_VECT(SCIF0, 0x8c0), INTC_VECT(SCIF0, 0x8e0), + INTC_VECT(SCIF1, 0x900), INTC_VECT(SCIF1, 0x920), + INTC_VECT(SCIF1, 0x940), INTC_VECT(SCIF1, 0x960), + INTC_VECT(DMAC2, 
0xb80), INTC_VECT(DMAC2, 0xba0), +#ifdef CONFIG_CPU_SUBTYPE_SH7710 + INTC_VECT(IPSEC, 0xbe0), +#endif + INTC_VECT(EDMAC0, 0xc00), INTC_VECT(EDMAC1, 0xc20), + INTC_VECT(EDMAC2, 0xc40), + INTC_VECT(SIOF0, 0xe00), INTC_VECT(SIOF0, 0xe20), + INTC_VECT(SIOF0, 0xe40), INTC_VECT(SIOF0, 0xe60), + INTC_VECT(SIOF1, 0xe80), INTC_VECT(SIOF1, 0xea0), + INTC_VECT(SIOF1, 0xec0), INTC_VECT(SIOF1, 0xee0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfffffee2, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xfffffee4, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } }, + { 0xa4000016, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, + { 0xa4000018, 0, 16, 4, /* IPRD */ { 0, 0, IRQ5, IRQ4 } }, + { 0xa400001a, 0, 16, 4, /* IPRE */ { DMAC1, SCIF0, SCIF1 } }, + { 0xa4080000, 0, 16, 4, /* IPRF */ { IPSEC, DMAC2 } }, + { 0xa4080002, 0, 16, 4, /* IPRG */ { EDMAC0, EDMAC1, EDMAC2 } }, + { 0xa4080004, 0, 16, 4, /* IPRH */ { 0, 0, 0, SIOF0 } }, + { 0xa4080006, 0, 16, 4, /* IPRI */ { 0, 0, SIOF1 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7710", vectors, NULL, + NULL, prio_registers, NULL); + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xa413fec0, + .end = 0xa413fec0 + 0x1e, + .flags = IORESOURCE_IO, + }, + [1] = { + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct sh_rtc_platform_info rtc_info = { + .capabilities = RTC_CAP_4_DIGIT_YEAR, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, + .dev = { + .platform_data = &rtc_info, + }, +}; + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xa4400000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x880)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xa4410000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xa412fe90, 0x28), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu-sh3", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh7710_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, + &rtc_device, +}; + +static int __init sh7710_devices_setup(void) +{ + return platform_add_devices(sh7710_devices, + ARRAY_SIZE(sh7710_devices)); +} +arch_initcall(sh7710_devices_setup); + +static 
struct platform_device *sh7710_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7710_early_devices, + ARRAY_SIZE(sh7710_early_devices)); +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + plat_irq_setup_sh3(); +} diff --git a/arch/sh/kernel/cpu/sh3/setup-sh7720.c b/arch/sh/kernel/cpu/sh3/setup-sh7720.c new file mode 100644 index 000000000..425d067da --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/setup-sh7720.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Setup code for SH7720, SH7721. + * + * Copyright (C) 2007 Markus Brunner, Mark Jonas + * Copyright (C) 2009 Paul Mundt + * + * Based on arch/sh/kernel/cpu/sh4/setup-sh7750.c: + * + * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2006 Jamie Lenehan + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/io.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/usb/ohci_pdriver.h> +#include <asm/rtc.h> +#include <asm/platform_early.h> +#include <cpu/serial.h> + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xa413fec0, + .end = 0xa413fec0 + 0x28 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct sh_rtc_platform_info rtc_info = { + .capabilities = RTC_CAP_4_DIGIT_YEAR, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, + .dev = { + .platform_data = &rtc_info, + }, +}; + +static struct plat_sci_port scif0_platform_data = { + .type = PORT_SCIF, + .ops = &sh7720_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xa4430000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .type = PORT_SCIF, + .ops = &sh7720_sci_port_ops, + .regtype = SCIx_SH7705_SCIF_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xa4438000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc20)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct resource usb_ohci_resources[] = { + [0] = { + .start = 0xA4428000, + .end = 0xA44280FF, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xa60), + .end = evt2irq(0xa60), + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 usb_ohci_dma_mask = 0xffffffffUL; + +static struct usb_ohci_pdata usb_ohci_pdata; + +static struct platform_device usb_ohci_device = { + .name = "ohci-platform", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_dma_mask, + .coherent_dma_mask = 0xffffffff, + .platform_data = &usb_ohci_pdata, + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + +static struct resource usbf_resources[] = { + [0] = { + .name = "sh_udc", + .start = 0xA4420000, + .end = 0xA44200FF, + .flags = IORESOURCE_MEM, + }, + [1] = { + .name = "sh_udc", + 
.start = evt2irq(0xa20), + .end = evt2irq(0xa20), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbf_device = { + .name = "sh_udc", + .id = -1, + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + }, + .num_resources = ARRAY_SIZE(usbf_resources), + .resource = usbf_resources, +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x1f, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x60), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xa412fe90, 0x28), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu-sh3", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh7720_devices[] __initdata = { + &scif0_device, + &scif1_device, + &cmt_device, + &tmu0_device, + &rtc_device, + &usb_ohci_device, + &usbf_device, +}; + +static int __init sh7720_devices_setup(void) +{ + return platform_add_devices(sh7720_devices, + ARRAY_SIZE(sh7720_devices)); +} +arch_initcall(sh7720_devices_setup); + +static struct platform_device *sh7720_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &cmt_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7720_early_devices, + ARRAY_SIZE(sh7720_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + TMU0, TMU1, TMU2, RTC, + WDT, REF_RCMI, SIM, + IRQ0, IRQ1, IRQ2, IRQ3, + USBF_SPD, TMU_SUNI, IRQ5, IRQ4, + DMAC1, LCDC, SSL, + ADC, DMAC2, USBFI, CMT, + SCIF0, SCIF1, + PINT07, PINT815, TPU, IIC, + SIOF0, SIOF1, MMC, PCC, + USBHI, AFEIF, + H_UDI, +}; + +static struct intc_vect vectors[] __initdata = { + /* IRQ0->5 are handled in setup-sh3.c */ + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(RTC, 0x480), + INTC_VECT(RTC, 0x4a0), INTC_VECT(RTC, 0x4c0), + INTC_VECT(SIM, 0x4e0), INTC_VECT(SIM, 0x500), + INTC_VECT(SIM, 0x520), INTC_VECT(SIM, 0x540), + INTC_VECT(WDT, 0x560), INTC_VECT(REF_RCMI, 0x580), + /* H_UDI cannot be masked */ INTC_VECT(TMU_SUNI, 0x6c0), + INTC_VECT(USBF_SPD, 0x6e0), INTC_VECT(DMAC1, 0x800), + INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC1, 0x840), + INTC_VECT(DMAC1, 0x860), INTC_VECT(LCDC, 0x900), +#if defined(CONFIG_CPU_SUBTYPE_SH7720) + INTC_VECT(SSL, 0x980), +#endif + INTC_VECT(USBFI, 0xa20), INTC_VECT(USBFI, 0xa40), + INTC_VECT(USBHI, 0xa60), + INTC_VECT(DMAC2, 0xb80), INTC_VECT(DMAC2, 0xba0), + INTC_VECT(ADC, 0xbe0), INTC_VECT(SCIF0, 0xc00), + INTC_VECT(SCIF1, 0xc20), INTC_VECT(PINT07, 0xc80), + INTC_VECT(PINT815, 0xca0), INTC_VECT(SIOF0, 0xd00), + INTC_VECT(SIOF1, 0xd20), INTC_VECT(TPU, 0xd80), + INTC_VECT(TPU, 0xda0), INTC_VECT(TPU, 0xdc0), + INTC_VECT(TPU, 0xde0), INTC_VECT(IIC, 0xe00), + INTC_VECT(MMC, 0xe80), INTC_VECT(MMC, 0xea0), + INTC_VECT(MMC, 0xec0), INTC_VECT(MMC, 0xee0), + INTC_VECT(CMT, 0xf00), INTC_VECT(PCC, 0xf60), + INTC_VECT(AFEIF, 0xfe0), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xA414FEE2UL, 0, 16, 4, 
/* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xA414FEE4UL, 0, 16, 4, /* IPRB */ { WDT, REF_RCMI, SIM, 0 } }, + { 0xA4140016UL, 0, 16, 4, /* IPRC */ { IRQ3, IRQ2, IRQ1, IRQ0 } }, + { 0xA4140018UL, 0, 16, 4, /* IPRD */ { USBF_SPD, TMU_SUNI, IRQ5, IRQ4 } }, + { 0xA414001AUL, 0, 16, 4, /* IPRE */ { DMAC1, 0, LCDC, SSL } }, + { 0xA4080000UL, 0, 16, 4, /* IPRF */ { ADC, DMAC2, USBFI, CMT } }, + { 0xA4080002UL, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, 0, 0 } }, + { 0xA4080004UL, 0, 16, 4, /* IPRH */ { PINT07, PINT815, TPU, IIC } }, + { 0xA4080006UL, 0, 16, 4, /* IPRI */ { SIOF0, SIOF1, MMC, PCC } }, + { 0xA4080008UL, 0, 16, 4, /* IPRJ */ { 0, USBHI, 0, AFEIF } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7720", vectors, NULL, + NULL, prio_registers, NULL); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + plat_irq_setup_sh3(); +} diff --git a/arch/sh/kernel/cpu/sh3/swsusp.S b/arch/sh/kernel/cpu/sh3/swsusp.S new file mode 100644 index 000000000..dc111c4cc --- /dev/null +++ b/arch/sh/kernel/cpu/sh3/swsusp.S @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh3/swsusp.S + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/sys.h> +#include <linux/errno.h> +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/page.h> + +#define k0 r0 +#define k1 r1 +#define k2 r2 +#define k3 r3 +#define k4 r4 + +! swsusp_arch_resume() +! - copy restore_pblist pages +! - restore registers from swsusp_arch_regs_cpu0 + +ENTRY(swsusp_arch_resume) + mov.l 1f, r15 + mov.l 2f, r4 + mov.l @r4, r4 + +swsusp_copy_loop: + mov r4, r0 + cmp/eq #0, r0 + bt swsusp_restore_regs + + mov.l @(PBE_ADDRESS, r4), r2 + mov.l @(PBE_ORIG_ADDRESS, r4), r5 + + mov #(PAGE_SIZE >> 10), r3 + shll8 r3 + shlr2 r3 /* PAGE_SIZE / 16 */ +swsusp_copy_page: + dt r3 + mov.l @r2+,r1 /* 16n+0 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+4 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+8 */ + mov.l r1,@r5 + add #4,r5 + mov.l @r2+,r1 /* 16n+12 */ + mov.l r1,@r5 + bf/s swsusp_copy_page + add #4,r5 + + bra swsusp_copy_loop + mov.l @(PBE_NEXT, r4), r4 + +swsusp_restore_regs: + ! BL=0: R7->R0 is bank0 + mov.l 3f, r8 + mov.l 4f, r5 + jsr @r5 + nop + + ! BL=1: R7->R0 is bank1 + lds k2, pr + ldc k3, ssr + + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + rte + nop + ! BL=0: R7->R0 is bank0 + + .align 2 +1: .long swsusp_arch_regs_cpu0 +2: .long restore_pblist +3: .long 0x20000000 ! RB=1 +4: .long restore_regs + +! swsusp_arch_suspend() +! - prepare pc for resume, return from function without swsusp_save on resume +! - save registers in swsusp_arch_regs_cpu0 +! - call swsusp_save write suspend image + +ENTRY(swsusp_arch_suspend) + sts pr, r0 ! save pr in r0 + mov r15, r2 ! save sp in r2 + mov r8, r5 ! save r8 in r5 + stc sr, r1 + ldc r1, ssr ! save sr in ssr + mov.l 1f, r1 + ldc r1, spc ! setup pc value for resuming + mov.l 5f, r15 ! use swsusp_arch_regs_cpu0 as stack + mov.l 6f, r3 + add r3, r15 ! save from top of structure + + ! BL=0: R7->R0 is bank0 + mov.l 2f, r3 ! get new SR value for bank1 + mov #0, r4 + mov.l 7f, r1 + jsr @r1 ! switch to bank1 and save bank1 r7->r0 + not r4, r4 + + ! BL=1: R7->R0 is bank1 + stc r2_bank, k0 ! fetch old sp from r2_bank0 + mov.l 3f, k4 ! SR bits to clear in k4 + mov.l 8f, k1 + jsr @k1 ! switch to bank0 and save all regs + stc r0_bank, k3 ! fetch old pr from r0_bank0 + + ! BL=0: R7->R0 is bank0 + mov r2, r15 ! 
restore old sp + mov r5, r8 ! restore old r8 + stc ssr, r1 + ldc r1, sr ! restore old sr + lds r0, pr ! restore old pr + mov.l 4f, r0 + jmp @r0 + nop + +swsusp_call_save: + mov r2, r15 ! restore old sp + mov r5, r8 ! restore old r8 + lds r0, pr ! restore old pr + rts + mov #0, r0 + + .align 2 +1: .long swsusp_call_save +2: .long 0x20000000 ! RB=1 +3: .long 0xdfffffff ! RB=0 +4: .long swsusp_save +5: .long swsusp_arch_regs_cpu0 +6: .long SWSUSP_ARCH_REGS_SIZE +7: .long save_low_regs +8: .long save_regs diff --git a/arch/sh/kernel/cpu/sh4/Makefile b/arch/sh/kernel/cpu/sh4/Makefile new file mode 100644 index 000000000..00c16331e --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/Makefile @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-4 backends. +# + +obj-y := probe.o common.o +common-y += $(addprefix ../sh3/, entry.o ex.o) + +obj-$(CONFIG_HIBERNATION) += $(addprefix ../sh3/, swsusp.o) +obj-$(CONFIG_SH_FPU) += fpu.o softfloat.o +obj-$(CONFIG_SH_STORE_QUEUES) += sq.o + +# Perf events +perf-$(CONFIG_CPU_SUBTYPE_SH7750) := perf_event.o +perf-$(CONFIG_CPU_SUBTYPE_SH7750S) := perf_event.o +perf-$(CONFIG_CPU_SUBTYPE_SH7091) := perf_event.o + +# CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7750) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7750R) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7750S) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7091) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7751) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7751R) += setup-sh7750.o +obj-$(CONFIG_CPU_SUBTYPE_SH7760) += setup-sh7760.o +obj-$(CONFIG_CPU_SUBTYPE_SH4_202) += setup-sh4-202.o + +# Primary on-chip clocks (common) +ifndef CONFIG_CPU_SH4A +clock-$(CONFIG_CPU_SH4) := clock-sh4.o +endif + +# Additional clocks by subtype +clock-$(CONFIG_CPU_SUBTYPE_SH4_202) += clock-sh4-202.o + +obj-y += $(clock-y) +obj-$(CONFIG_PERF_EVENTS) += $(perf-y) diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c new file mode 100644 index 000000000..c1cdef763 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/clock-sh4-202.c + * + * Additional SH4-202 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> + +#define CPG2_FRQCR3 0xfe0a0018 + +static int frqcr3_divisors[] = { 1, 2, 3, 4, 6, 8, 16 }; +static int frqcr3_values[] = { 0, 1, 2, 3, 4, 5, 6 }; + +static unsigned long emi_clk_recalc(struct clk *clk) +{ + int idx = __raw_readl(CPG2_FRQCR3) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; +} + +static inline int frqcr3_lookup(struct clk *clk, unsigned long rate) +{ + int divisor = clk->parent->rate / rate; + int i; + + for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) + if (frqcr3_divisors[i] == divisor) + return frqcr3_values[i]; + + /* Safe fallback */ + return 5; +} + +static struct sh_clk_ops sh4202_emi_clk_ops = { + .recalc = emi_clk_recalc, +}; + +static struct clk sh4202_emi_clk = { + .flags = CLK_ENABLE_ON_INIT, + .ops = &sh4202_emi_clk_ops, +}; + +static unsigned long femi_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readl(CPG2_FRQCR3) >> 3) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; +} + +static struct sh_clk_ops sh4202_femi_clk_ops = { + .recalc = femi_clk_recalc, +}; + +static struct clk sh4202_femi_clk = { + .flags = 
CLK_ENABLE_ON_INIT, + .ops = &sh4202_femi_clk_ops, +}; + +static void shoc_clk_init(struct clk *clk) +{ + int i; + + /* + * For some reason, the shoc_clk seems to be set to some really + * insane value at boot (values outside of the allowable frequency + * range for instance). We deal with this by scaling it back down + * to something sensible just in case. + * + * Start scaling from the high end down until we find something + * that passes rate verification.. + */ + for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) { + int divisor = frqcr3_divisors[i]; + + if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0) + break; + } + + WARN_ON(i == ARRAY_SIZE(frqcr3_divisors)); /* Undefined clock */ +} + +static unsigned long shoc_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readl(CPG2_FRQCR3) >> 6) & 0x0007; + return clk->parent->rate / frqcr3_divisors[idx]; +} + +static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate) +{ + struct clk *bclk = clk_get(NULL, "bus_clk"); + unsigned long bclk_rate = clk_get_rate(bclk); + + clk_put(bclk); + + if (rate > bclk_rate) + return 1; + if (rate > 66000000) + return 1; + + return 0; +} + +static int shoc_clk_set_rate(struct clk *clk, unsigned long rate) +{ + unsigned long frqcr3; + unsigned int tmp; + + /* Make sure we have something sensible to switch to */ + if (shoc_clk_verify_rate(clk, rate) != 0) + return -EINVAL; + + tmp = frqcr3_lookup(clk, rate); + + frqcr3 = __raw_readl(CPG2_FRQCR3); + frqcr3 &= ~(0x0007 << 6); + frqcr3 |= tmp << 6; + __raw_writel(frqcr3, CPG2_FRQCR3); + + clk->rate = clk->parent->rate / frqcr3_divisors[tmp]; + + return 0; +} + +static struct sh_clk_ops sh4202_shoc_clk_ops = { + .init = shoc_clk_init, + .recalc = shoc_clk_recalc, + .set_rate = shoc_clk_set_rate, +}; + +static struct clk sh4202_shoc_clk = { + .flags = CLK_ENABLE_ON_INIT, + .ops = &sh4202_shoc_clk_ops, +}; + +static struct clk *sh4202_onchip_clocks[] = { + &sh4202_emi_clk, + &sh4202_femi_clk, + &sh4202_shoc_clk, +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("emi_clk", &sh4202_emi_clk), + CLKDEV_CON_ID("femi_clk", &sh4202_femi_clk), + CLKDEV_CON_ID("shoc_clk", &sh4202_shoc_clk), +}; + +int __init arch_clk_init(void) +{ + struct clk *clk; + int i, ret = 0; + + cpg_clk_init(); + + clk = clk_get(NULL, "master_clk"); + for (i = 0; i < ARRAY_SIZE(sh4202_onchip_clocks); i++) { + struct clk *clkp = sh4202_onchip_clocks[i]; + + clkp->parent = clk; + ret |= clk_register(clkp); + } + + clk_put(clk); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4.c b/arch/sh/kernel/cpu/sh4/clock-sh4.c new file mode 100644 index 000000000..ee3c5537a --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/clock-sh4.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/clock-sh4.c + * + * Generic SH-4 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + * + * FRQCR parsing hacked out of arch/sh/kernel/time.c + * + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2002, 2003, 2004 Paul Mundt + * Copyright (C) 2002 M. R. 
Brown <mrbrown@linux-sh.org> + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int ifc_divisors[] = { 1, 2, 3, 4, 6, 8, 1, 1 }; +#define bfc_divisors ifc_divisors /* Same */ +static int pfc_divisors[] = { 2, 3, 4, 6, 8, 2, 2, 2 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pfc_divisors[__raw_readw(FRQCR) & 0x0007]; +} + +static struct sh_clk_ops sh4_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) & 0x0007); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh4_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) >> 3) & 0x0007; + return clk->parent->rate / bfc_divisors[idx]; +} + +static struct sh_clk_ops sh4_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readw(FRQCR) >> 6) & 0x0007; + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh4_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh4_clk_ops[] = { + &sh4_master_clk_ops, + &sh4_module_clk_ops, + &sh4_bus_clk_ops, + &sh4_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh4_clk_ops)) + *ops = sh4_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh4/fpu.c b/arch/sh/kernel/cpu/sh4/fpu.c new file mode 100644 index 000000000..03ffd8cdf --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/fpu.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Save/restore floating point context for signal handlers. + * + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * Copyright (C) 2006 ST Microelectronics Ltd. (denorm support) + * + * FIXME! These routines have not been tested for big endian case. + */ +#include <linux/sched/signal.h> +#include <linux/io.h> +#include <cpu/fpu.h> +#include <asm/processor.h> +#include <asm/fpu.h> +#include <asm/traps.h> + +/* The PR (precision) bit in the FP Status Register must be clear when + * an frchg instruction is executed, otherwise the instruction is undefined. + * Executing frchg with PR set causes a trap on some SH4 implementations. + */ + +#define FPSCR_RCHG 0x00000000 +extern unsigned long long float64_div(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_div(unsigned long int a, unsigned long int b); +extern unsigned long long float64_mul(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_mul(unsigned long int a, unsigned long int b); +extern unsigned long long float64_add(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_add(unsigned long int a, unsigned long int b); +extern unsigned long long float64_sub(unsigned long long a, + unsigned long long b); +extern unsigned long int float32_sub(unsigned long int a, unsigned long int b); +extern unsigned long int float64_to_float32(unsigned long long a); +static unsigned int fpu_exception_flags; + +/* + * Save FPU registers onto task structure. 
+ */ +void save_fpu(struct task_struct *tsk) +{ + unsigned long dummy; + + enable_fpu(); + asm volatile ("sts.l fpul, @-%0\n\t" + "sts.l fpscr, @-%0\n\t" + "lds %2, fpscr\n\t" + "frchg\n\t" + "fmov.s fr15, @-%0\n\t" + "fmov.s fr14, @-%0\n\t" + "fmov.s fr13, @-%0\n\t" + "fmov.s fr12, @-%0\n\t" + "fmov.s fr11, @-%0\n\t" + "fmov.s fr10, @-%0\n\t" + "fmov.s fr9, @-%0\n\t" + "fmov.s fr8, @-%0\n\t" + "fmov.s fr7, @-%0\n\t" + "fmov.s fr6, @-%0\n\t" + "fmov.s fr5, @-%0\n\t" + "fmov.s fr4, @-%0\n\t" + "fmov.s fr3, @-%0\n\t" + "fmov.s fr2, @-%0\n\t" + "fmov.s fr1, @-%0\n\t" + "fmov.s fr0, @-%0\n\t" + "frchg\n\t" + "fmov.s fr15, @-%0\n\t" + "fmov.s fr14, @-%0\n\t" + "fmov.s fr13, @-%0\n\t" + "fmov.s fr12, @-%0\n\t" + "fmov.s fr11, @-%0\n\t" + "fmov.s fr10, @-%0\n\t" + "fmov.s fr9, @-%0\n\t" + "fmov.s fr8, @-%0\n\t" + "fmov.s fr7, @-%0\n\t" + "fmov.s fr6, @-%0\n\t" + "fmov.s fr5, @-%0\n\t" + "fmov.s fr4, @-%0\n\t" + "fmov.s fr3, @-%0\n\t" + "fmov.s fr2, @-%0\n\t" + "fmov.s fr1, @-%0\n\t" + "fmov.s fr0, @-%0\n\t" + "lds %3, fpscr\n\t":"=r" (dummy) + :"0"((char *)(&tsk->thread.xstate->hardfpu.status)), + "r"(FPSCR_RCHG), "r"(FPSCR_INIT) + :"memory"); + + disable_fpu(); +} + +void restore_fpu(struct task_struct *tsk) +{ + unsigned long dummy; + + enable_fpu(); + asm volatile ("lds %2, fpscr\n\t" + "fmov.s @%0+, fr0\n\t" + "fmov.s @%0+, fr1\n\t" + "fmov.s @%0+, fr2\n\t" + "fmov.s @%0+, fr3\n\t" + "fmov.s @%0+, fr4\n\t" + "fmov.s @%0+, fr5\n\t" + "fmov.s @%0+, fr6\n\t" + "fmov.s @%0+, fr7\n\t" + "fmov.s @%0+, fr8\n\t" + "fmov.s @%0+, fr9\n\t" + "fmov.s @%0+, fr10\n\t" + "fmov.s @%0+, fr11\n\t" + "fmov.s @%0+, fr12\n\t" + "fmov.s @%0+, fr13\n\t" + "fmov.s @%0+, fr14\n\t" + "fmov.s @%0+, fr15\n\t" + "frchg\n\t" + "fmov.s @%0+, fr0\n\t" + "fmov.s @%0+, fr1\n\t" + "fmov.s @%0+, fr2\n\t" + "fmov.s @%0+, fr3\n\t" + "fmov.s @%0+, fr4\n\t" + "fmov.s @%0+, fr5\n\t" + "fmov.s @%0+, fr6\n\t" + "fmov.s @%0+, fr7\n\t" + "fmov.s @%0+, fr8\n\t" + "fmov.s @%0+, fr9\n\t" + "fmov.s @%0+, fr10\n\t" + "fmov.s @%0+, fr11\n\t" + "fmov.s @%0+, fr12\n\t" + "fmov.s @%0+, fr13\n\t" + "fmov.s @%0+, fr14\n\t" + "fmov.s @%0+, fr15\n\t" + "frchg\n\t" + "lds.l @%0+, fpscr\n\t" + "lds.l @%0+, fpul\n\t" + :"=r" (dummy) + :"0" (tsk->thread.xstate), "r" (FPSCR_RCHG) + :"memory"); + disable_fpu(); +} + +/** + * denormal_to_double - Given denormalized float number, + * store double float + * + * @fpu: Pointer to sh_fpu_hard structure + * @n: Index to FP register + */ +static void denormal_to_double(struct sh_fpu_hard_struct *fpu, int n) +{ + unsigned long du, dl; + unsigned long x = fpu->fpul; + int exp = 1023 - 126; + + if (x != 0 && (x & 0x7f800000) == 0) { + du = (x & 0x80000000); + while ((x & 0x00800000) == 0) { + x <<= 1; + exp--; + } + x &= 0x007fffff; + du |= (exp << 20) | (x >> 3); + dl = x << 29; + + fpu->fp_regs[n] = du; + fpu->fp_regs[n + 1] = dl; + } +} + +/** + * ieee_fpe_handler - Handle denormalized number exception + * + * @regs: Pointer to register structure + * + * Returns 1 when it's handled (should not cause exception). 
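+ * Returns 0 when the faulting instruction is not one of the emulated
+ * denormal cases; the fpu_error trap handler then falls through and
+ * raises SIGFPE.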
+ */ +static int ieee_fpe_handler(struct pt_regs *regs) +{ + unsigned short insn = *(unsigned short *)regs->pc; + unsigned short finsn; + unsigned long nextpc; + int nib[4] = { + (insn >> 12) & 0xf, + (insn >> 8) & 0xf, + (insn >> 4) & 0xf, + insn & 0xf + }; + + if (nib[0] == 0xb || (nib[0] == 0x4 && nib[2] == 0x0 && nib[3] == 0xb)) + regs->pr = regs->pc + 4; /* bsr & jsr */ + + if (nib[0] == 0xa || nib[0] == 0xb) { + /* bra & bsr */ + nextpc = regs->pc + 4 + ((short)((insn & 0xfff) << 4) >> 3); + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xd) { + /* bt/s */ + if (regs->sr & 1) + nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1); + else + nextpc = regs->pc + 4; + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x8 && nib[1] == 0xf) { + /* bf/s */ + if (regs->sr & 1) + nextpc = regs->pc + 4; + else + nextpc = regs->pc + 4 + ((char)(insn & 0xff) << 1); + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x4 && nib[3] == 0xb && + (nib[2] == 0x0 || nib[2] == 0x2)) { + /* jmp & jsr */ + nextpc = regs->regs[nib[1]]; + finsn = *(unsigned short *)(regs->pc + 2); + } else if (nib[0] == 0x0 && nib[3] == 0x3 && + (nib[2] == 0x0 || nib[2] == 0x2)) { + /* braf & bsrf */ + nextpc = regs->pc + 4 + regs->regs[nib[1]]; + finsn = *(unsigned short *)(regs->pc + 2); + } else if (insn == 0x000b) { + /* rts */ + nextpc = regs->pr; + finsn = *(unsigned short *)(regs->pc + 2); + } else { + nextpc = regs->pc + instruction_size(insn); + finsn = insn; + } + + if ((finsn & 0xf1ff) == 0xf0ad) { + /* fcnvsd */ + struct task_struct *tsk = current; + + if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR)) + /* FPU error */ + denormal_to_double(&tsk->thread.xstate->hardfpu, + (finsn >> 8) & 0xf); + else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00f) == 0xf002) { + /* fmul */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if ((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + llx = float64_mul(llx, lly); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + hx = float32_mul(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf00e) == 0xf000) { + /* fadd, fsub */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if ((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* 
FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + if ((finsn & 0xf00f) == 0xf000) + llx = float64_add(llx, lly); + else + llx = float64_sub(llx, lly); + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + if ((finsn & 0xf00f) == 0xf000) + hx = float32_add(hx, hy); + else + hx = float32_sub(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf003) == 0xf003) { + /* fdiv */ + struct task_struct *tsk = current; + int fpscr; + int n, m, prec; + unsigned int hx, hy; + + n = (finsn >> 8) & 0xf; + m = (finsn >> 4) & 0xf; + hx = tsk->thread.xstate->hardfpu.fp_regs[n]; + hy = tsk->thread.xstate->hardfpu.fp_regs[m]; + fpscr = tsk->thread.xstate->hardfpu.fpscr; + prec = fpscr & FPSCR_DBL_PRECISION; + + if ((fpscr & FPSCR_CAUSE_ERROR) + && (prec && ((hx & 0x7fffffff) < 0x00100000 + || (hy & 0x7fffffff) < 0x00100000))) { + long long llx, lly; + + /* FPU error because of denormal (doubles) */ + llx = ((long long)hx << 32) + | tsk->thread.xstate->hardfpu.fp_regs[n + 1]; + lly = ((long long)hy << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + + llx = float64_div(llx, lly); + + tsk->thread.xstate->hardfpu.fp_regs[n] = llx >> 32; + tsk->thread.xstate->hardfpu.fp_regs[n + 1] = llx & 0xffffffff; + } else if ((fpscr & FPSCR_CAUSE_ERROR) + && (!prec && ((hx & 0x7fffffff) < 0x00800000 + || (hy & 0x7fffffff) < 0x00800000))) { + /* FPU error because of denormal (floats) */ + hx = float32_div(hx, hy); + tsk->thread.xstate->hardfpu.fp_regs[n] = hx; + } else + return 0; + + regs->pc = nextpc; + return 1; + } else if ((finsn & 0xf0bd) == 0xf0bd) { + /* fcnvds - double to single precision convert */ + struct task_struct *tsk = current; + int m; + unsigned int hx; + + m = (finsn >> 8) & 0x7; + hx = tsk->thread.xstate->hardfpu.fp_regs[m]; + + if ((tsk->thread.xstate->hardfpu.fpscr & FPSCR_CAUSE_ERROR) + && ((hx & 0x7fffffff) < 0x00100000)) { + /* subnormal double to float conversion */ + long long llx; + + llx = ((long long)tsk->thread.xstate->hardfpu.fp_regs[m] << 32) + | tsk->thread.xstate->hardfpu.fp_regs[m + 1]; + + tsk->thread.xstate->hardfpu.fpul = float64_to_float32(llx); + } else + return 0; + + regs->pc = nextpc; + return 1; + } + + return 0; +} + +void float_raise(unsigned int flags) +{ + fpu_exception_flags |= flags; +} + +int float_rounding_mode(void) +{ + struct task_struct *tsk = current; + int roundingMode = FPSCR_ROUNDING_MODE(tsk->thread.xstate->hardfpu.fpscr); + return roundingMode; +} + +BUILD_TRAP_HANDLER(fpu_error) +{ + struct task_struct *tsk = current; + TRAP_HANDLER_DECL; + + __unlazy_fpu(tsk, regs); + fpu_exception_flags = 0; + if (ieee_fpe_handler(regs)) { + tsk->thread.xstate->hardfpu.fpscr &= + ~(FPSCR_CAUSE_MASK | FPSCR_FLAG_MASK); + tsk->thread.xstate->hardfpu.fpscr |= fpu_exception_flags; + /* Set the FPSCR flag as well as cause bits - simply + * replicate the cause */ + tsk->thread.xstate->hardfpu.fpscr |= (fpu_exception_flags >> 10); + grab_fpu(regs); + restore_fpu(tsk); + task_thread_info(tsk)->status |= TS_USEDFPU; + if ((((tsk->thread.xstate->hardfpu.fpscr & FPSCR_ENABLE_MASK) >> 7) & + (fpu_exception_flags >> 
2)) == 0) { + return; + } + } + + force_sig(SIGFPE); +} diff --git a/arch/sh/kernel/cpu/sh4/perf_event.c b/arch/sh/kernel/cpu/sh4/perf_event.c new file mode 100644 index 000000000..db5847bb7 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/perf_event.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SH7750-style performance counters + * + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/perf_event.h> +#include <asm/processor.h> + +#define PM_CR_BASE 0xff000084 /* 16-bit */ +#define PM_CTR_BASE 0xff100004 /* 32-bit */ + +#define PMCR(n) (PM_CR_BASE + ((n) * 0x04)) +#define PMCTRH(n) (PM_CTR_BASE + 0x00 + ((n) * 0x08)) +#define PMCTRL(n) (PM_CTR_BASE + 0x04 + ((n) * 0x08)) + +#define PMCR_PMM_MASK 0x0000003f + +#define PMCR_CLKF 0x00000100 +#define PMCR_PMCLR 0x00002000 +#define PMCR_PMST 0x00004000 +#define PMCR_PMEN 0x00008000 + +static struct sh_pmu sh7750_pmu; + +/* + * There are a number of events supported by each counter (33 in total). + * Since we have 2 counters, each counter will take the event code as it + * corresponds to the PMCR PMM setting. Each counter can be configured + * independently. + * + * Event Code Description + * ---------- ----------- + * + * 0x01 Operand read access + * 0x02 Operand write access + * 0x03 UTLB miss + * 0x04 Operand cache read miss + * 0x05 Operand cache write miss + * 0x06 Instruction fetch (w/ cache) + * 0x07 Instruction TLB miss + * 0x08 Instruction cache miss + * 0x09 All operand accesses + * 0x0a All instruction accesses + * 0x0b OC RAM operand access + * 0x0d On-chip I/O space access + * 0x0e Operand access (r/w) + * 0x0f Operand cache miss (r/w) + * 0x10 Branch instruction + * 0x11 Branch taken + * 0x12 BSR/BSRF/JSR + * 0x13 Instruction execution + * 0x14 Instruction execution in parallel + * 0x15 FPU Instruction execution + * 0x16 Interrupt + * 0x17 NMI + * 0x18 trapa instruction execution + * 0x19 UBCA match + * 0x1a UBCB match + * 0x21 Instruction cache fill + * 0x22 Operand cache fill + * 0x23 Elapsed time + * 0x24 Pipeline freeze by I-cache miss + * 0x25 Pipeline freeze by D-cache miss + * 0x27 Pipeline freeze by branch instruction + * 0x28 Pipeline freeze by CPU register + * 0x29 Pipeline freeze by FPU + */ + +static const int sh7750_general_events[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x0023, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x000a, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0006, /* I-cache */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x0008, /* I-cache */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0010, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x + +static const int sh7750_cache_events + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0001, + [ C(RESULT_MISS) ] = 0x0004, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0002, + [ C(RESULT_MISS) ] = 0x0005, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0006, + [ C(RESULT_MISS) ] = 0x0008, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, 
+ [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0003, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x0007, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static int sh7750_event_map(int event) +{ + return sh7750_general_events[event]; +} + +static u64 sh7750_pmu_read(int idx) +{ + return (u64)((u64)(__raw_readl(PMCTRH(idx)) & 0xffff) << 32) | + __raw_readl(PMCTRL(idx)); +} + +static void sh7750_pmu_disable(struct hw_perf_event *hwc, int idx) +{ + unsigned int tmp; + + tmp = __raw_readw(PMCR(idx)); + tmp &= ~(PMCR_PMM_MASK | PMCR_PMEN); + __raw_writew(tmp, PMCR(idx)); +} + +static void sh7750_pmu_enable(struct hw_perf_event *hwc, int idx) +{ + __raw_writew(__raw_readw(PMCR(idx)) | PMCR_PMCLR, PMCR(idx)); + __raw_writew(hwc->config | PMCR_PMEN | PMCR_PMST, PMCR(idx)); +} + +static void sh7750_pmu_disable_all(void) +{ + int i; + + for (i = 0; i < sh7750_pmu.num_events; i++) + __raw_writew(__raw_readw(PMCR(i)) & ~PMCR_PMEN, PMCR(i)); +} + +static void sh7750_pmu_enable_all(void) +{ + int i; + + for (i = 0; i < sh7750_pmu.num_events; i++) + __raw_writew(__raw_readw(PMCR(i)) | PMCR_PMEN, PMCR(i)); +} + +static struct sh_pmu sh7750_pmu = { + .name = "sh7750", + .num_events = 2, + .event_map = sh7750_event_map, + .max_events = ARRAY_SIZE(sh7750_general_events), + .raw_event_mask = PMCR_PMM_MASK, + .cache_events = &sh7750_cache_events, + .read = sh7750_pmu_read, + .disable = sh7750_pmu_disable, + .enable = sh7750_pmu_enable, + .disable_all = sh7750_pmu_disable_all, + .enable_all = sh7750_pmu_enable_all, +}; + +static int __init sh7750_pmu_init(void) +{ + /* + * Make sure this CPU actually has perf counters. + */ + if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { + pr_notice("HW perf events unsupported, software events only.\n"); + return -ENODEV; + } + + return register_sh_pmu(&sh7750_pmu); +} +early_initcall(sh7750_pmu_init); diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c new file mode 100644 index 000000000..ef4dd6295 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/probe.c @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/probe.c + * + * CPU Subtype Probing for SH-4. 
+ * + * Copyright (C) 2001 - 2007 Paul Mundt + * Copyright (C) 2003 Richard Curnow + */ +#include <linux/init.h> +#include <linux/io.h> +#include <asm/processor.h> +#include <asm/cache.h> + +void cpu_probe(void) +{ + unsigned long pvr, prr, cvr; + unsigned long size; + + static unsigned long sizes[16] = { + [1] = (1 << 12), + [2] = (1 << 13), + [4] = (1 << 14), + [8] = (1 << 15), + [9] = (1 << 16) + }; + + pvr = (__raw_readl(CCN_PVR) >> 8) & 0xffffff; + prr = (__raw_readl(CCN_PRR) >> 4) & 0xff; + cvr = (__raw_readl(CCN_CVR)); + + /* + * Setup some sane SH-4 defaults for the icache + */ + boot_cpu_data.icache.way_incr = (1 << 13); + boot_cpu_data.icache.entry_shift = 5; + boot_cpu_data.icache.sets = 256; + boot_cpu_data.icache.ways = 1; + boot_cpu_data.icache.linesz = L1_CACHE_BYTES; + + /* + * And again for the dcache .. + */ + boot_cpu_data.dcache.way_incr = (1 << 14); + boot_cpu_data.dcache.entry_shift = 5; + boot_cpu_data.dcache.sets = 512; + boot_cpu_data.dcache.ways = 1; + boot_cpu_data.dcache.linesz = L1_CACHE_BYTES; + + /* We don't know the chip cut */ + boot_cpu_data.cut_major = boot_cpu_data.cut_minor = -1; + + /* + * Setup some generic flags we can probe on SH-4A parts + */ + if (((pvr >> 16) & 0xff) == 0x10) { + boot_cpu_data.family = CPU_FAMILY_SH4A; + + if ((cvr & 0x10000000) == 0) { + boot_cpu_data.flags |= CPU_HAS_DSP; + boot_cpu_data.family = CPU_FAMILY_SH4AL_DSP; + } + + boot_cpu_data.flags |= CPU_HAS_LLSC | CPU_HAS_PERF_COUNTER; + boot_cpu_data.cut_major = pvr & 0x7f; + + boot_cpu_data.icache.ways = 4; + boot_cpu_data.dcache.ways = 4; + } else { + /* And some SH-4 defaults.. */ + boot_cpu_data.flags |= CPU_HAS_PTEA | CPU_HAS_FPU; + boot_cpu_data.family = CPU_FAMILY_SH4; + } + + /* FPU detection works for almost everyone */ + if ((cvr & 0x20000000)) + boot_cpu_data.flags |= CPU_HAS_FPU; + + /* Mask off the upper chip ID */ + pvr &= 0xffff; + + /* + * Probe the underlying processor version/revision and + * adjust cpu_data setup accordingly. 
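+ * (Only the low 16 bits of the PVR are compared here; where several
+ * parts share a PVR value, the PRR is used to tell them apart.)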
+ */ + switch (pvr) { + case 0x205: + boot_cpu_data.type = CPU_SH7750; + boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | + CPU_HAS_PERF_COUNTER; + break; + case 0x206: + boot_cpu_data.type = CPU_SH7750S; + boot_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | + CPU_HAS_PERF_COUNTER; + break; + case 0x1100: + boot_cpu_data.type = CPU_SH7751; + break; + case 0x2001: + case 0x2004: + boot_cpu_data.type = CPU_SH7770; + break; + case 0x2006: + case 0x200A: + if (prr == 0x61) + boot_cpu_data.type = CPU_SH7781; + else if (prr == 0xa1) + boot_cpu_data.type = CPU_SH7763; + else + boot_cpu_data.type = CPU_SH7780; + + break; + case 0x3000: + case 0x3003: + case 0x3009: + boot_cpu_data.type = CPU_SH7343; + break; + case 0x3004: + case 0x3007: + boot_cpu_data.type = CPU_SH7785; + break; + case 0x4004: + case 0x4005: + boot_cpu_data.type = CPU_SH7786; + boot_cpu_data.flags |= CPU_HAS_PTEAEX | CPU_HAS_L2_CACHE; + break; + case 0x3008: + switch (prr) { + case 0x50: + case 0x51: + boot_cpu_data.type = CPU_SH7723; + boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + break; + case 0x70: + boot_cpu_data.type = CPU_SH7366; + break; + case 0xa0: + case 0xa1: + boot_cpu_data.type = CPU_SH7722; + break; + } + break; + case 0x300b: + switch (prr) { + case 0x20: + boot_cpu_data.type = CPU_SH7724; + boot_cpu_data.flags |= CPU_HAS_L2_CACHE; + break; + case 0x10: + case 0x11: + boot_cpu_data.type = CPU_SH7757; + break; + case 0xd0: + case 0x40: /* yon-ten-go */ + boot_cpu_data.type = CPU_SH7372; + break; + case 0xE0: /* 0x4E0 */ + boot_cpu_data.type = CPU_SH7734; /* SH7733/SH7734 */ + break; + + } + break; + case 0x4000: /* 1st cut */ + case 0x4001: /* 2nd cut */ + boot_cpu_data.type = CPU_SHX3; + break; + case 0x700: + boot_cpu_data.type = CPU_SH4_501; + boot_cpu_data.flags &= ~CPU_HAS_FPU; + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; + break; + case 0x600: + boot_cpu_data.type = CPU_SH4_202; + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; + break; + case 0x500 ... 0x501: + switch (prr) { + case 0x10: + boot_cpu_data.type = CPU_SH7750R; + break; + case 0x11: + boot_cpu_data.type = CPU_SH7751R; + break; + case 0x50 ... 0x5f: + boot_cpu_data.type = CPU_SH7760; + break; + } + + boot_cpu_data.icache.ways = 2; + boot_cpu_data.dcache.ways = 2; + + break; + } + + /* + * On anything that's not a direct-mapped cache, look to the CVR + * for I/D-cache specifics. + */ + if (boot_cpu_data.icache.ways > 1) { + size = sizes[(cvr >> 20) & 0xf]; + boot_cpu_data.icache.way_incr = (size >> 1); + boot_cpu_data.icache.sets = (size >> 6); + + } + + /* And the rest of the D-cache */ + if (boot_cpu_data.dcache.ways > 1) { + size = sizes[(cvr >> 16) & 0xf]; + boot_cpu_data.dcache.way_incr = (size >> 1); + boot_cpu_data.dcache.sets = (size >> 6); + } + + /* + * SH-4A's have an optional PIPT L2. + */ + if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { + /* + * Verify that it really has something hooked up, this + * is the safety net for CPUs that have optional L2 + * support yet do not implement it. + */ + if ((cvr & 0xf) == 0) + boot_cpu_data.flags &= ~CPU_HAS_L2_CACHE; + else { + /* + * Silicon and specifications have clearly never + * met.. + */ + cvr ^= 0xf; + + /* + * Size calculation is much more sensible + * than it is for the L1. + * + * Sizes are 128KB, 256KB, 512KB, and 1MB. 
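+ * The 4-bit field (inverted above) simply scales by 128KB,
+ * i.e. size = field << 17.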
+ */ + size = (cvr & 0xf) << 17; + + boot_cpu_data.scache.way_incr = (1 << 16); + boot_cpu_data.scache.entry_shift = 5; + boot_cpu_data.scache.ways = 4; + boot_cpu_data.scache.linesz = L1_CACHE_BYTES; + + boot_cpu_data.scache.entry_mask = + (boot_cpu_data.scache.way_incr - + boot_cpu_data.scache.linesz); + + boot_cpu_data.scache.sets = size / + (boot_cpu_data.scache.linesz * + boot_cpu_data.scache.ways); + + boot_cpu_data.scache.way_size = + (boot_cpu_data.scache.sets * + boot_cpu_data.scache.linesz); + } + } +} diff --git a/arch/sh/kernel/cpu/sh4/setup-sh4-202.c b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c new file mode 100644 index 000000000..e6737f3d0 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh4-202.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH4-202 Setup + * + * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe80000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), + DEFINE_RES_IRQ(evt2irq(0x720)), + DEFINE_RES_IRQ(evt2irq(0x760)), + DEFINE_RES_IRQ(evt2irq(0x740)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh4202_devices[] __initdata = { + &scif0_device, + &tmu0_device, +}; + +static int __init sh4202_devices_setup(void) +{ + return platform_add_devices(sh4202_devices, + ARRAY_SIZE(sh4202_devices)); +} +arch_initcall(sh4202_devices_setup); + +static struct platform_device *sh4202_early_devices[] __initdata = { + &scif0_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh4202_early_devices, + ARRAY_SIZE(sh4202_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */ + HUDI, TMU0, TMU1, TMU2, RTC, SCIF, WDT, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720), + INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760), + INTC_VECT(WDT, 0x560), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, 0, 0, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { 0, 0, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, +}; + +static 
DECLARE_INTC_DESC(intc_desc, "sh4-202", vectors, NULL, + NULL, prio_registers, NULL); + +static struct intc_vect vectors_irlm[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irlm, "sh4-202_irlm", vectors_irlm, NULL, + NULL, prio_registers, NULL); + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1<<7) + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irlm); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c new file mode 100644 index 000000000..19c8f1d69 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7091/SH7750/SH7750S/SH7750R/SH7751/SH7751R Setup + * + * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2006 Jamie Lenehan + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/io.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/serial_sci.h> +#include <generated/machtypes.h> +#include <asm/platform_early.h> + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffc80000, + .end = 0xffc80000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct plat_sci_port sci_platform_data = { + .type = PORT_SCI, +}; + +static struct resource sci_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x20), + DEFINE_RES_IRQ(evt2irq(0x4e0)), +}; + +static struct platform_device sci_device = { + .name = "sh-sci", + .id = 0, + .resource = sci_resources, + .num_resources = ARRAY_SIZE(sci_resources), + .dev = { + .platform_data = &sci_platform_data, + }, +}; + +static struct plat_sci_port scif_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif_resources[] = { + DEFINE_RES_MEM(0xffe80000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif_device = { + .name = "sh-sci", + .id = 1, + .resource = scif_resources, + .num_resources = ARRAY_SIZE(scif_resources), + .dev = { + .platform_data = &scif_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 3, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xfe100000, 0x20), + 
DEFINE_RES_IRQ(evt2irq(0xb00)), + DEFINE_RES_IRQ(evt2irq(0xb80)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +#endif + +static struct platform_device *sh7750_devices[] __initdata = { + &rtc_device, + &tmu0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + &tmu1_device, +#endif +}; + +static int __init sh7750_devices_setup(void) +{ + if (mach_is_rts7751r2d()) { + platform_device_register(&scif_device); + } else { + platform_device_register(&sci_device); + platform_device_register(&scif_device); + } + + return platform_add_devices(sh7750_devices, + ARRAY_SIZE(sh7750_devices)); +} +arch_initcall(sh7750_devices_setup); + +static struct platform_device *sh7750_early_devices[] __initdata = { + &tmu0_device, +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) + &tmu1_device, +#endif +}; + +void __init plat_early_device_setup(void) +{ + struct platform_device *dev[1]; + + if (mach_is_rts7751r2d()) { + scif_platform_data.scscr |= SCSCR_CKE1; + dev[0] = &scif_device; + sh_early_platform_add_devices(dev, 1); + } else { + dev[0] = &sci_device; + sh_early_platform_add_devices(dev, 1); + dev[0] = &scif_device; + sh_early_platform_add_devices(dev, 1); + } + + sh_early_platform_add_devices(sh7750_early_devices, + ARRAY_SIZE(sh7750_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, /* only IRLM mode supported */ + HUDI, GPIOI, DMAC, + PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, + TMU3, TMU4, TMU0, TMU1, TMU2, RTC, SCI1, SCIF, WDT, REF, + + /* interrupt groups */ + PCIC1, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(SCI1, 0x4e0), INTC_VECT(SCI1, 0x500), + INTC_VECT(SCI1, 0x520), INTC_VECT(SCI1, 0x540), + INTC_VECT(SCIF, 0x700), INTC_VECT(SCIF, 0x720), + INTC_VECT(SCIF, 0x740), INTC_VECT(SCIF, 0x760), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0), +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, + TMU4, TMU3, + PCIC1, PCIC0_PCISERR } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7750", vectors, NULL, + NULL, prio_registers, NULL); + +/* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ + defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7091) +static struct intc_vect vectors_dma4[] __initdata = { + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x6c0), +}; + +static DECLARE_INTC_DESC(intc_desc_dma4, "sh7750_dma4", + vectors_dma4, NULL, + NULL, 
prio_registers, NULL); +#endif + +/* SH7750R and SH7751R both have 8-channel DMA controllers */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_dma8[] __initdata = { + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0), + INTC_VECT(DMAC, 0x6c0), +}; + +static DECLARE_INTC_DESC(intc_desc_dma8, "sh7750_dma8", + vectors_dma8, NULL, + NULL, prio_registers, NULL); +#endif + +/* SH7750R, SH7751 and SH7751R all have two extra timer channels */ +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751) || \ + defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_tmu34[] __initdata = { + INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, TMU4, TMU3, + PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, + PCIC1_PCIDMA3, PCIC0_PCISERR } }, +}; + +static DECLARE_INTC_DESC(intc_desc_tmu34, "sh7750_tmu34", + vectors_tmu34, NULL, + mask_registers, prio_registers, NULL); +#endif + +/* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */ +static struct intc_vect vectors_irlm[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irlm, "sh7750_irlm", vectors_irlm, NULL, + NULL, prio_registers, NULL); + +/* SH7751 and SH7751R both have PCI */ +#if defined(CONFIG_CPU_SUBTYPE_SH7751) || defined(CONFIG_CPU_SUBTYPE_SH7751R) +static struct intc_vect vectors_pci[] __initdata = { + INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), + INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), + INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), + INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), +}; + +static struct intc_group groups_pci[] __initdata = { + INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), +}; + +static DECLARE_INTC_DESC(intc_desc_pci, "sh7750_pci", vectors_pci, groups_pci, + mask_registers, prio_registers, NULL); +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || \ + defined(CONFIG_CPU_SUBTYPE_SH7750S) || \ + defined(CONFIG_CPU_SUBTYPE_SH7091) +void __init plat_irq_setup(void) +{ + /* + * same vectors for SH7750, SH7750S and SH7091 except for IRLM, + * see below.. 
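+ * (The IRLM descriptor is only registered from plat_irq_setup_pins(),
+ * which BUGs on SH7750/SH7091 where the IRL interrupts cannot be
+ * masked.)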
+ */ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma4); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7750R) +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma8); + register_intc_controller(&intc_desc_tmu34); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7751) +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma4); + register_intc_controller(&intc_desc_tmu34); + register_intc_controller(&intc_desc_pci); +} +#endif + +#if defined(CONFIG_CPU_SUBTYPE_SH7751R) +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); + register_intc_controller(&intc_desc_dma8); + register_intc_controller(&intc_desc_tmu34); + register_intc_controller(&intc_desc_pci); +} +#endif + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1<<7) + +void __init plat_irq_setup_pins(int mode) +{ +#if defined(CONFIG_CPU_SUBTYPE_SH7750) || defined(CONFIG_CPU_SUBTYPE_SH7091) + BUG(); /* impossible to mask interrupts on SH7750 and SH7091 */ + return; +#endif + + switch (mode) { + case IRQ_MODE_IRQ: /* individual interrupt mode for IRL3-0 */ + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irlm); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7760.c b/arch/sh/kernel/cpu/sh4/setup-sh7760.c new file mode 100644 index 000000000..14212f5d8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/setup-sh7760.c @@ -0,0 +1,297 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7760 Setup + * + * Copyright (C) 2006 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/serial_sci.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0, IRL1, IRL2, IRL3, + HUDI, GPIOI, DMAC, + IRQ4, IRQ5, IRQ6, IRQ7, + HCAN20, HCAN21, + SSI0, SSI1, + HAC0, HAC1, + I2C0, I2C1, + USB, LCDC, + DMABRG0, DMABRG1, DMABRG2, + SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, + SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, + SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, + SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI, + HSPI, + MMCIF0, MMCIF1, MMCIF2, MMCIF3, + MFI, ADC, CMT, + TMU0, TMU1, TMU2, + WDT, REF, + + /* interrupt groups */ + DMABRG, SCIF0, SCIF1, SCIF2, SIM, MMCIF, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(DMAC, 0x7c0), INTC_VECT(DMAC, 0x7e0), + INTC_VECT(DMAC, 0x6c0), + INTC_VECT(IRQ4, 0x800), INTC_VECT(IRQ5, 0x820), + INTC_VECT(IRQ6, 0x840), INTC_VECT(IRQ6, 0x860), + INTC_VECT(HCAN20, 0x900), INTC_VECT(HCAN21, 0x920), + INTC_VECT(SSI0, 0x940), INTC_VECT(SSI1, 0x960), + INTC_VECT(HAC0, 0x980), INTC_VECT(HAC1, 0x9a0), + INTC_VECT(I2C0, 0x9c0), INTC_VECT(I2C1, 0x9e0), + INTC_VECT(USB, 0xa00), INTC_VECT(LCDC, 0xa20), + INTC_VECT(DMABRG0, 0xa80), INTC_VECT(DMABRG1, 0xaa0), + INTC_VECT(DMABRG2, 0xac0), + INTC_VECT(SCIF0_ERI, 0x880), INTC_VECT(SCIF0_RXI, 0x8a0), + INTC_VECT(SCIF0_BRI, 0x8c0), INTC_VECT(SCIF0_TXI, 0x8e0), + INTC_VECT(SCIF1_ERI, 0xb00), INTC_VECT(SCIF1_RXI, 0xb20), + INTC_VECT(SCIF1_BRI, 0xb40), INTC_VECT(SCIF1_TXI, 0xb60), + INTC_VECT(SCIF2_ERI, 0xb80), INTC_VECT(SCIF2_RXI, 0xba0), + 
INTC_VECT(SCIF2_BRI, 0xbc0), INTC_VECT(SCIF2_TXI, 0xbe0), + INTC_VECT(SIM_ERI, 0xc00), INTC_VECT(SIM_RXI, 0xc20), + INTC_VECT(SIM_TXI, 0xc40), INTC_VECT(SIM_TEI, 0xc60), + INTC_VECT(HSPI, 0xc80), + INTC_VECT(MMCIF0, 0xd00), INTC_VECT(MMCIF1, 0xd20), + INTC_VECT(MMCIF2, 0xd40), INTC_VECT(MMCIF3, 0xd60), + INTC_VECT(MFI, 0xe80), /* 0xf80 according to data sheet */ + INTC_VECT(ADC, 0xf80), INTC_VECT(CMT, 0xfa0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2, 0x460), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF, 0x580), INTC_VECT(REF, 0x5a0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMABRG, DMABRG0, DMABRG1, DMABRG2), + INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), + INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), + INTC_GROUP(SCIF2, SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI), + INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI), + INTC_GROUP(MMCIF, MMCIF0, MMCIF1, MMCIF2, MMCIF3), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { IRQ4, IRQ5, IRQ6, IRQ7, 0, 0, HCAN20, HCAN21, + SSI0, SSI1, HAC0, HAC1, I2C0, I2C1, USB, LCDC, + 0, DMABRG0, DMABRG1, DMABRG2, + SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, + SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, + SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, } }, + { 0xfe080044, 0xfe080064, 32, /* INTMSK04 / INTMSKCLR04 */ + { 0, 0, 0, 0, 0, 0, 0, 0, + SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI, + HSPI, MMCIF0, MMCIF1, MMCIF2, + MMCIF3, 0, 0, 0, 0, 0, 0, 0, + 0, MFI, 0, 0, 0, 0, ADC, CMT, } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, 0, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, 0, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfe080004, 0, 32, 4, /* INTPRI04 */ { HCAN20, HCAN21, SSI0, SSI1, + HAC0, HAC1, I2C0, I2C1 } }, + { 0xfe080008, 0, 32, 4, /* INTPRI08 */ { USB, LCDC, DMABRG, SCIF0, + SCIF1, SCIF2, SIM, HSPI } }, + { 0xfe08000c, 0, 32, 4, /* INTPRI0C */ { 0, 0, MMCIF, 0, + MFI, 0, ADC, CMT } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7760", vectors, groups, + mask_registers, prio_registers, NULL); + +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), +}; + +static DECLARE_INTC_DESC(intc_desc_irq, "sh7760-irq", vectors_irq, groups, + mask_registers, prio_registers, NULL); + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xfe600000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x880)), + DEFINE_RES_IRQ(evt2irq(0x8a0)), + DEFINE_RES_IRQ(evt2irq(0x8e0)), + DEFINE_RES_IRQ(evt2irq(0x8c0)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .type = PORT_SCIF, + .scscr = SCSCR_REIE, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xfe610000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb00)), + 
DEFINE_RES_IRQ(evt2irq(0xb20)), + DEFINE_RES_IRQ(evt2irq(0xb60)), + DEFINE_RES_IRQ(evt2irq(0xb40)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfe620000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb80)), + DEFINE_RES_IRQ(evt2irq(0xba0)), + DEFINE_RES_IRQ(evt2irq(0xbe0)), + DEFINE_RES_IRQ(evt2irq(0xbc0)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + /* + * This is actually a SIM card module serial port, based on an SCI with + * additional registers. The sh-sci driver doesn't support the SIM port + * type, declare it as a SCI. Don't declare the additional registers in + * the memory resource or the driver will compute an incorrect regshift + * value. + */ + .type = PORT_SCI, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfe480000, 0x10), + DEFINE_RES_IRQ(evt2irq(0xc00)), + DEFINE_RES_IRQ(evt2irq(0xc20)), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + + +static struct platform_device *sh7760_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &tmu0_device, +}; + +static int __init sh7760_devices_setup(void) +{ + return platform_add_devices(sh7760_devices, + ARRAY_SIZE(sh7760_devices)); +} +arch_initcall(sh7760_devices_setup); + +static struct platform_device *sh7760_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7760_early_devices, + ARRAY_SIZE(sh7760_early_devices)); +} + +#define INTC_ICR 0xffd00000UL +#define INTC_ICR_IRLM (1 << 7) + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: + __raw_writew(__raw_readw(INTC_ICR) | INTC_ICR_IRLM, INTC_ICR); + register_intc_controller(&intc_desc_irq); + break; + default: + BUG(); + } +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} diff --git a/arch/sh/kernel/cpu/sh4/softfloat.c b/arch/sh/kernel/cpu/sh4/softfloat.c new file mode 100644 index 000000000..42edf2e54 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/softfloat.c @@ -0,0 +1,930 @@ +/* + * Floating point emulation support for subnormalised numbers on SH4 + * architecture This file is derived from the SoftFloat 
IEC/IEEE + * Floating-point Arithmetic Package, Release 2 the original license of + * which is reproduced below. + * + * ======================================================================== + * + * This C source file is part of the SoftFloat IEC/IEEE Floating-point + * Arithmetic Package, Release 2. + * + * Written by John R. Hauser. This work was made possible in part by the + * International Computer Science Institute, located at Suite 600, 1947 Center + * Street, Berkeley, California 94704. Funding was partially provided by the + * National Science Foundation under grant MIP-9311980. The original version + * of this code was written as part of a project to build a fixed-point vector + * processor in collaboration with the University of California at Berkeley, + * overseen by Profs. Nelson Morgan and John Wawrzynek. More information + * is available through the web page `http://HTTP.CS.Berkeley.EDU/~jhauser/ + * arithmetic/softfloat.html'. + * + * THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort + * has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT + * TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO + * PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY + * AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE. + * + * Derivative works are acceptable, even for commercial purposes, so long as + * (1) they include prominent notice that the work is derivative, and (2) they + * include prominent notice akin to these three paragraphs for those parts of + * this code that are retained. + * + * ======================================================================== + * + * SH4 modifications by Ismail Dhaoui <ismail.dhaoui@st.com> + * and Kamel Khelifi <kamel.khelifi@st.com> + */ +#include <linux/kernel.h> +#include <cpu/fpu.h> +#include <asm/div64.h> + +#define LIT64( a ) a##LL + +typedef char flag; +typedef unsigned char uint8; +typedef signed char int8; +typedef int uint16; +typedef int int16; +typedef unsigned int uint32; +typedef signed int int32; + +typedef unsigned long long int bits64; +typedef signed long long int sbits64; + +typedef unsigned char bits8; +typedef signed char sbits8; +typedef unsigned short int bits16; +typedef signed short int sbits16; +typedef unsigned int bits32; +typedef signed int sbits32; + +typedef unsigned long long int uint64; +typedef signed long long int int64; + +typedef unsigned long int float32; +typedef unsigned long long float64; + +extern void float_raise(unsigned int flags); /* in fpu.c */ +extern int float_rounding_mode(void); /* in fpu.c */ + +bits64 extractFloat64Frac(float64 a); +flag extractFloat64Sign(float64 a); +int16 extractFloat64Exp(float64 a); +int16 extractFloat32Exp(float32 a); +flag extractFloat32Sign(float32 a); +bits32 extractFloat32Frac(float32 a); +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig); +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr); +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig); +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr); +float64 float64_sub(float64 a, float64 b); +float32 float32_sub(float32 a, float32 b); +float32 float32_add(float32 a, float32 b); +float64 float64_add(float64 a, float64 b); +float64 float64_div(float64 a, float64 b); +float32 float32_div(float32 a, float32 b); +float32 float32_mul(float32 a, float32 b); +float64 float64_mul(float64 a, float64 b); +float32 float64_to_float32(float64 a); +void add128(bits64 a0, bits64 a1, bits64 b0, 
bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr); +void sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr); +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr); + +static int8 countLeadingZeros32(bits32 a); +static int8 countLeadingZeros64(bits64 a); +static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, + bits64 zSig); +static float64 subFloat64Sigs(float64 a, float64 b, flag zSign); +static float64 addFloat64Sigs(float64 a, float64 b, flag zSign); +static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig); +static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, + bits32 zSig); +static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig); +static float32 subFloat32Sigs(float32 a, float32 b, flag zSign); +static float32 addFloat32Sigs(float32 a, float32 b, flag zSign); +static void normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, + bits64 * zSigPtr); +static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b); +static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr, + bits32 * zSigPtr); + +bits64 extractFloat64Frac(float64 a) +{ + return a & LIT64(0x000FFFFFFFFFFFFF); +} + +flag extractFloat64Sign(float64 a) +{ + return a >> 63; +} + +int16 extractFloat64Exp(float64 a) +{ + return (a >> 52) & 0x7FF; +} + +int16 extractFloat32Exp(float32 a) +{ + return (a >> 23) & 0xFF; +} + +flag extractFloat32Sign(float32 a) +{ + return a >> 31; +} + +bits32 extractFloat32Frac(float32 a) +{ + return a & 0x007FFFFF; +} + +float64 packFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + return (((bits64) zSign) << 63) + (((bits64) zExp) << 52) + zSig; +} + +void shift64RightJamming(bits64 a, int16 count, bits64 * zPtr) +{ + bits64 z; + + if (count == 0) { + z = a; + } else if (count < 64) { + z = (a >> count) | ((a << ((-count) & 63)) != 0); + } else { + z = (a != 0); + } + *zPtr = z; +} + +static int8 countLeadingZeros32(bits32 a) +{ + static const int8 countLeadingZerosHigh[] = { + 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int8 shiftCount; + + shiftCount = 0; + if (a < 0x10000) { + shiftCount += 16; + a <<= 16; + } + if (a < 0x1000000) { + shiftCount += 8; + a <<= 8; + } + shiftCount += countLeadingZerosHigh[a >> 24]; + return shiftCount; + +} + +static int8 countLeadingZeros64(bits64 a) +{ + int8 shiftCount; + + shiftCount = 0; + if (a < ((bits64) 1) << 32) { + shiftCount += 32; + } else { + a >>= 32; + } + shiftCount += countLeadingZeros32(a); + return shiftCount; + +} + +static float64 normalizeRoundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64(zSig) - 1; + return roundAndPackFloat64(zSign, zExp - shiftCount, + zSig << shiftCount); + +} + +static float64 
subFloat64Sigs(float64 a, float64 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + expDiff = aExp - bExp; + aSig <<= 10; + bSig <<= 10; + if (0 < expDiff) + goto aExpBigger; + if (expDiff < 0) + goto bExpBigger; + if (aExp == 0) { + aExp = 1; + bExp = 1; + } + if (bSig < aSig) + goto aBigger; + if (aSig < bSig) + goto bBigger; + return packFloat64(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0); + bExpBigger: + if (bExp == 0x7FF) { + return packFloat64(zSign ^ 1, 0x7FF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= LIT64(0x4000000000000000); + } + shift64RightJamming(aSig, -expDiff, &aSig); + bSig |= LIT64(0x4000000000000000); + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if (aExp == 0x7FF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= LIT64(0x4000000000000000); + } + shift64RightJamming(bSig, expDiff, &bSig); + aSig |= LIT64(0x4000000000000000); + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat64(zSign, zExp, zSig); + +} +static float64 addFloat64Sigs(float64 a, float64 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + expDiff = aExp - bExp; + aSig <<= 9; + bSig <<= 9; + if (0 < expDiff) { + if (aExp == 0x7FF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= LIT64(0x2000000000000000); + } + shift64RightJamming(bSig, expDiff, &bSig); + zExp = aExp; + } else if (expDiff < 0) { + if (bExp == 0x7FF) { + return packFloat64(zSign, 0x7FF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= LIT64(0x2000000000000000); + } + shift64RightJamming(aSig, -expDiff, &aSig); + zExp = bExp; + } else { + if (aExp == 0x7FF) { + return a; + } + if (aExp == 0) + return packFloat64(zSign, 0, (aSig + bSig) >> 9); + zSig = LIT64(0x4000000000000000) + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= LIT64(0x2000000000000000); + zSig = (aSig + bSig) << 1; + --zExp; + if ((sbits64) zSig < 0) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat64(zSign, zExp, zSig); + +} + +float32 packFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + return (((bits32) zSign) << 31) + (((bits32) zExp) << 23) + zSig; +} + +void shift32RightJamming(bits32 a, int16 count, bits32 * zPtr) +{ + bits32 z; + if (count == 0) { + z = a; + } else if (count < 32) { + z = (a >> count) | ((a << ((-count) & 31)) != 0); + } else { + z = (a != 0); + } + *zPtr = z; +} + +static float32 roundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + flag roundNearestEven; + int8 roundIncrement, roundBits; + flag isTiny; + + /* SH4 has only 2 rounding modes - round to nearest and round to zero */ + roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST); + roundIncrement = 0x40; + if (!roundNearestEven) { + roundIncrement = 0; + } + roundBits = zSig & 0x7F; + if (0xFD <= (bits16) zExp) { + if ((0xFD < zExp) + || ((zExp == 0xFD) + && ((sbits32) (zSig + roundIncrement) < 0)) + ) { + float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT); + return packFloat32(zSign, 0xFF, + 0) - (roundIncrement == 0); + } + if (zExp < 0) { + isTiny = (zExp < -1) + || (zSig + roundIncrement < 
0x80000000); + shift32RightJamming(zSig, -zExp, &zSig); + zExp = 0; + roundBits = zSig & 0x7F; + if (isTiny && roundBits) + float_raise(FPSCR_CAUSE_UNDERFLOW); + } + } + if (roundBits) + float_raise(FPSCR_CAUSE_INEXACT); + zSig = (zSig + roundIncrement) >> 7; + zSig &= ~(((roundBits ^ 0x40) == 0) & roundNearestEven); + if (zSig == 0) + zExp = 0; + return packFloat32(zSign, zExp, zSig); + +} + +static float32 normalizeRoundAndPackFloat32(flag zSign, int16 zExp, bits32 zSig) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32(zSig) - 1; + return roundAndPackFloat32(zSign, zExp - shiftCount, + zSig << shiftCount); +} + +static float64 roundAndPackFloat64(flag zSign, int16 zExp, bits64 zSig) +{ + flag roundNearestEven; + int16 roundIncrement, roundBits; + flag isTiny; + + /* SH4 has only 2 rounding modes - round to nearest and round to zero */ + roundNearestEven = (float_rounding_mode() == FPSCR_RM_NEAREST); + roundIncrement = 0x200; + if (!roundNearestEven) { + roundIncrement = 0; + } + roundBits = zSig & 0x3FF; + if (0x7FD <= (bits16) zExp) { + if ((0x7FD < zExp) + || ((zExp == 0x7FD) + && ((sbits64) (zSig + roundIncrement) < 0)) + ) { + float_raise(FPSCR_CAUSE_OVERFLOW | FPSCR_CAUSE_INEXACT); + return packFloat64(zSign, 0x7FF, + 0) - (roundIncrement == 0); + } + if (zExp < 0) { + isTiny = (zExp < -1) + || (zSig + roundIncrement < + LIT64(0x8000000000000000)); + shift64RightJamming(zSig, -zExp, &zSig); + zExp = 0; + roundBits = zSig & 0x3FF; + if (isTiny && roundBits) + float_raise(FPSCR_CAUSE_UNDERFLOW); + } + } + if (roundBits) + float_raise(FPSCR_CAUSE_INEXACT); + zSig = (zSig + roundIncrement) >> 10; + zSig &= ~(((roundBits ^ 0x200) == 0) & roundNearestEven); + if (zSig == 0) + zExp = 0; + return packFloat64(zSign, zExp, zSig); + +} + +static float32 subFloat32Sigs(float32 a, float32 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + expDiff = aExp - bExp; + aSig <<= 7; + bSig <<= 7; + if (0 < expDiff) + goto aExpBigger; + if (expDiff < 0) + goto bExpBigger; + if (aExp == 0) { + aExp = 1; + bExp = 1; + } + if (bSig < aSig) + goto aBigger; + if (aSig < bSig) + goto bBigger; + return packFloat32(float_rounding_mode() == FPSCR_RM_ZERO, 0, 0); + bExpBigger: + if (bExp == 0xFF) { + return packFloat32(zSign ^ 1, 0xFF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= 0x40000000; + } + shift32RightJamming(aSig, -expDiff, &aSig); + bSig |= 0x40000000; + bBigger: + zSig = bSig - aSig; + zExp = bExp; + zSign ^= 1; + goto normalizeRoundAndPack; + aExpBigger: + if (aExp == 0xFF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= 0x40000000; + } + shift32RightJamming(bSig, expDiff, &bSig); + aSig |= 0x40000000; + aBigger: + zSig = aSig - bSig; + zExp = aExp; + normalizeRoundAndPack: + --zExp; + return normalizeRoundAndPackFloat32(zSign, zExp, zSig); + +} + +static float32 addFloat32Sigs(float32 a, float32 b, flag zSign) +{ + int16 aExp, bExp, zExp; + bits32 aSig, bSig, zSig; + int16 expDiff; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + expDiff = aExp - bExp; + aSig <<= 6; + bSig <<= 6; + if (0 < expDiff) { + if (aExp == 0xFF) { + return a; + } + if (bExp == 0) { + --expDiff; + } else { + bSig |= 0x20000000; + } + shift32RightJamming(bSig, expDiff, &bSig); + zExp = aExp; + } else if (expDiff < 0) { + if (bExp == 
0xFF) { + return packFloat32(zSign, 0xFF, 0); + } + if (aExp == 0) { + ++expDiff; + } else { + aSig |= 0x20000000; + } + shift32RightJamming(aSig, -expDiff, &aSig); + zExp = bExp; + } else { + if (aExp == 0xFF) { + return a; + } + if (aExp == 0) + return packFloat32(zSign, 0, (aSig + bSig) >> 6); + zSig = 0x40000000 + aSig + bSig; + zExp = aExp; + goto roundAndPack; + } + aSig |= 0x20000000; + zSig = (aSig + bSig) << 1; + --zExp; + if ((sbits32) zSig < 0) { + zSig = aSig + bSig; + ++zExp; + } + roundAndPack: + return roundAndPackFloat32(zSign, zExp, zSig); + +} + +float64 float64_sub(float64 a, float64 b) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign(a); + bSign = extractFloat64Sign(b); + if (aSign == bSign) { + return subFloat64Sigs(a, b, aSign); + } else { + return addFloat64Sigs(a, b, aSign); + } + +} + +float32 float32_sub(float32 a, float32 b) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign(a); + bSign = extractFloat32Sign(b); + if (aSign == bSign) { + return subFloat32Sigs(a, b, aSign); + } else { + return addFloat32Sigs(a, b, aSign); + } + +} + +float32 float32_add(float32 a, float32 b) +{ + flag aSign, bSign; + + aSign = extractFloat32Sign(a); + bSign = extractFloat32Sign(b); + if (aSign == bSign) { + return addFloat32Sigs(a, b, aSign); + } else { + return subFloat32Sigs(a, b, aSign); + } + +} + +float64 float64_add(float64 a, float64 b) +{ + flag aSign, bSign; + + aSign = extractFloat64Sign(a); + bSign = extractFloat64Sign(b); + if (aSign == bSign) { + return addFloat64Sigs(a, b, aSign); + } else { + return subFloat64Sigs(a, b, aSign); + } +} + +static void +normalizeFloat64Subnormal(bits64 aSig, int16 * zExpPtr, bits64 * zSigPtr) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros64(aSig) - 11; + *zSigPtr = aSig << shiftCount; + *zExpPtr = 1 - shiftCount; +} + +void add128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr) +{ + bits64 z1; + + z1 = a1 + b1; + *z1Ptr = z1; + *z0Ptr = a0 + b0 + (z1 < a1); +} + +void +sub128(bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 * z0Ptr, + bits64 * z1Ptr) +{ + *z1Ptr = a1 - b1; + *z0Ptr = a0 - b0 - (a1 < b1); +} + +static bits64 estimateDiv128To64(bits64 a0, bits64 a1, bits64 b) +{ + bits64 b0, b1; + bits64 rem0, rem1, term0, term1; + bits64 z, tmp; + if (b <= a0) + return LIT64(0xFFFFFFFFFFFFFFFF); + b0 = b >> 32; + tmp = a0; + do_div(tmp, b0); + + z = (b0 << 32 <= a0) ? LIT64(0xFFFFFFFF00000000) : tmp << 32; + mul64To128(b, z, &term0, &term1); + sub128(a0, a1, term0, term1, &rem0, &rem1); + while (((sbits64) rem0) < 0) { + z -= LIT64(0x100000000); + b1 = b << 32; + add128(rem0, rem1, b0, b1, &rem0, &rem1); + } + rem0 = (rem0 << 32) | (rem1 >> 32); + tmp = rem0; + do_div(tmp, b0); + z |= (b0 << 32 <= rem0) ? 
0xFFFFFFFF : tmp; + return z; +} + +void mul64To128(bits64 a, bits64 b, bits64 * z0Ptr, bits64 * z1Ptr) +{ + bits32 aHigh, aLow, bHigh, bLow; + bits64 z0, zMiddleA, zMiddleB, z1; + + aLow = a; + aHigh = a >> 32; + bLow = b; + bHigh = b >> 32; + z1 = ((bits64) aLow) * bLow; + zMiddleA = ((bits64) aLow) * bHigh; + zMiddleB = ((bits64) aHigh) * bLow; + z0 = ((bits64) aHigh) * bHigh; + zMiddleA += zMiddleB; + z0 += (((bits64) (zMiddleA < zMiddleB)) << 32) + (zMiddleA >> 32); + zMiddleA <<= 32; + z1 += zMiddleA; + z0 += (z1 < zMiddleA); + *z1Ptr = z1; + *z0Ptr = z0; + +} + +static void normalizeFloat32Subnormal(bits32 aSig, int16 * zExpPtr, + bits32 * zSigPtr) +{ + int8 shiftCount; + + shiftCount = countLeadingZeros32(aSig) - 8; + *zSigPtr = aSig << shiftCount; + *zExpPtr = 1 - shiftCount; + +} + +float64 float64_div(float64 a, float64 b) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits64 aSig, bSig, zSig; + bits64 rem0, rem1; + bits64 term0, term1; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + bSign = extractFloat64Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0x7FF) { + if (bExp == 0x7FF) { + } + return packFloat64(zSign, 0x7FF, 0); + } + if (bExp == 0x7FF) { + return packFloat64(zSign, 0, 0); + } + if (bExp == 0) { + if (bSig == 0) { + if ((aExp | aSig) == 0) { + float_raise(FPSCR_CAUSE_INVALID); + } + return packFloat64(zSign, 0x7FF, 0); + } + normalizeFloat64Subnormal(bSig, &bExp, &bSig); + } + if (aExp == 0) { + if (aSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(aSig, &aExp, &aSig); + } + zExp = aExp - bExp + 0x3FD; + aSig = (aSig | LIT64(0x0010000000000000)) << 10; + bSig = (bSig | LIT64(0x0010000000000000)) << 11; + if (bSig <= (aSig + aSig)) { + aSig >>= 1; + ++zExp; + } + zSig = estimateDiv128To64(aSig, 0, bSig); + if ((zSig & 0x1FF) <= 2) { + mul64To128(bSig, zSig, &term0, &term1); + sub128(aSig, 0, term0, term1, &rem0, &rem1); + while ((sbits64) rem0 < 0) { + --zSig; + add128(rem0, rem1, 0, bSig, &rem0, &rem1); + } + zSig |= (rem1 != 0); + } + return roundAndPackFloat64(zSign, zExp, zSig); + +} + +float32 float32_div(float32 a, float32 b) +{ + flag aSign, bSign, zSign; + int16 aExp, bExp, zExp; + bits32 aSig, bSig; + uint64_t zSig; + + aSig = extractFloat32Frac(a); + aExp = extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + bSign = extractFloat32Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0xFF) { + if (bExp == 0xFF) { + } + return packFloat32(zSign, 0xFF, 0); + } + if (bExp == 0xFF) { + return packFloat32(zSign, 0, 0); + } + if (bExp == 0) { + if (bSig == 0) { + return packFloat32(zSign, 0xFF, 0); + } + normalizeFloat32Subnormal(bSig, &bExp, &bSig); + } + if (aExp == 0) { + if (aSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(aSig, &aExp, &aSig); + } + zExp = aExp - bExp + 0x7D; + aSig = (aSig | 0x00800000) << 7; + bSig = (bSig | 0x00800000) << 8; + if (bSig <= (aSig + aSig)) { + aSig >>= 1; + ++zExp; + } + zSig = (((bits64) aSig) << 32); + do_div(zSig, bSig); + + if ((zSig & 0x3F) == 0) { + zSig |= (((bits64) bSig) * zSig != ((bits64) aSig) << 32); + } + return roundAndPackFloat32(zSign, zExp, (bits32)zSig); + +} + +float32 float32_mul(float32 a, float32 b) +{ + char aSign, bSign, zSign; + int aExp, bExp, zExp; + unsigned int aSig, bSig; + unsigned long long zSig64; + unsigned int zSig; + + aSig = extractFloat32Frac(a); + aExp 
= extractFloat32Exp(a); + aSign = extractFloat32Sign(a); + bSig = extractFloat32Frac(b); + bExp = extractFloat32Exp(b); + bSign = extractFloat32Sign(b); + zSign = aSign ^ bSign; + if (aExp == 0) { + if (aSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + if (bSig == 0) + return packFloat32(zSign, 0, 0); + normalizeFloat32Subnormal(bSig, &bExp, &bSig); + } + if ((bExp == 0xff && bSig == 0) || (aExp == 0xff && aSig == 0)) + return roundAndPackFloat32(zSign, 0xff, 0); + + zExp = aExp + bExp - 0x7F; + aSig = (aSig | 0x00800000) << 7; + bSig = (bSig | 0x00800000) << 8; + shift64RightJamming(((unsigned long long)aSig) * bSig, 32, &zSig64); + zSig = zSig64; + if (0 <= (signed int)(zSig << 1)) { + zSig <<= 1; + --zExp; + } + return roundAndPackFloat32(zSign, zExp, zSig); + +} + +float64 float64_mul(float64 a, float64 b) +{ + char aSign, bSign, zSign; + int aExp, bExp, zExp; + unsigned long long int aSig, bSig, zSig0, zSig1; + + aSig = extractFloat64Frac(a); + aExp = extractFloat64Exp(a); + aSign = extractFloat64Sign(a); + bSig = extractFloat64Frac(b); + bExp = extractFloat64Exp(b); + bSign = extractFloat64Sign(b); + zSign = aSign ^ bSign; + + if (aExp == 0) { + if (aSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(aSig, &aExp, &aSig); + } + if (bExp == 0) { + if (bSig == 0) + return packFloat64(zSign, 0, 0); + normalizeFloat64Subnormal(bSig, &bExp, &bSig); + } + if ((aExp == 0x7ff && aSig == 0) || (bExp == 0x7ff && bSig == 0)) + return roundAndPackFloat64(zSign, 0x7ff, 0); + + zExp = aExp + bExp - 0x3FF; + aSig = (aSig | 0x0010000000000000LL) << 10; + bSig = (bSig | 0x0010000000000000LL) << 11; + mul64To128(aSig, bSig, &zSig0, &zSig1); + zSig0 |= (zSig1 != 0); + if (0 <= (signed long long int)(zSig0 << 1)) { + zSig0 <<= 1; + --zExp; + } + return roundAndPackFloat64(zSign, zExp, zSig0); +} + +/* + * ------------------------------------------------------------------------------- + * Returns the result of converting the double-precision floating-point value + * `a' to the single-precision floating-point format. The conversion is + * performed according to the IEC/IEEE Standard for Binary Floating-point + * Arithmetic. + * ------------------------------------------------------------------------------- + * */ +float32 float64_to_float32(float64 a) +{ + flag aSign; + int16 aExp; + bits64 aSig; + bits32 zSig; + + aSig = extractFloat64Frac( a ); + aExp = extractFloat64Exp( a ); + aSign = extractFloat64Sign( a ); + + shift64RightJamming( aSig, 22, &aSig ); + zSig = aSig; + if ( aExp || zSig ) { + zSig |= 0x40000000; + aExp -= 0x381; + } + return roundAndPackFloat32(aSign, aExp, zSig); +} diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c new file mode 100644 index 000000000..8ddfe9989 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4/sq.c @@ -0,0 +1,414 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/sq.c + * + * General management API for SH-4 integrated Store Queues + * + * Copyright (C) 2001 - 2006 Paul Mundt + * Copyright (C) 2001, 2002 M. R. 
Brown + */ +#include <linux/init.h> +#include <linux/cpu.h> +#include <linux/bitmap.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/io.h> +#include <linux/prefetch.h> +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <cpu/sq.h> + +struct sq_mapping; + +struct sq_mapping { + const char *name; + + unsigned long sq_addr; + unsigned long addr; + unsigned int size; + + struct sq_mapping *next; +}; + +static struct sq_mapping *sq_mapping_list; +static DEFINE_SPINLOCK(sq_mapping_lock); +static struct kmem_cache *sq_cache; +static unsigned long *sq_bitmap; + +#define store_queue_barrier() \ +do { \ + (void)__raw_readl(P4SEG_STORE_QUE); \ + __raw_writel(0, P4SEG_STORE_QUE + 0); \ + __raw_writel(0, P4SEG_STORE_QUE + 8); \ +} while (0); + +/** + * sq_flush_range - Flush (prefetch) a specific SQ range + * @start: the store queue address to start flushing from + * @len: the length to flush + * + * Flushes the store queue cache from @start to @start + @len in a + * linear fashion. + */ +void sq_flush_range(unsigned long start, unsigned int len) +{ + unsigned long *sq = (unsigned long *)start; + + /* Flush the queues */ + for (len >>= 5; len--; sq += 8) + prefetchw(sq); + + /* Wait for completion */ + store_queue_barrier(); +} +EXPORT_SYMBOL(sq_flush_range); + +static inline void sq_mapping_list_add(struct sq_mapping *map) +{ + struct sq_mapping **p, *tmp; + + spin_lock_irq(&sq_mapping_lock); + + p = &sq_mapping_list; + while ((tmp = *p) != NULL) + p = &tmp->next; + + map->next = tmp; + *p = map; + + spin_unlock_irq(&sq_mapping_lock); +} + +static inline void sq_mapping_list_del(struct sq_mapping *map) +{ + struct sq_mapping **p, *tmp; + + spin_lock_irq(&sq_mapping_lock); + + for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next) + if (tmp == map) { + *p = tmp->next; + break; + } + + spin_unlock_irq(&sq_mapping_lock); +} + +static int __sq_remap(struct sq_mapping *map, pgprot_t prot) +{ +#if defined(CONFIG_MMU) + struct vm_struct *vma; + + vma = __get_vm_area_caller(map->size, VM_ALLOC, map->sq_addr, + SQ_ADDRMAX, __builtin_return_address(0)); + if (!vma) + return -ENOMEM; + + vma->phys_addr = map->addr; + + if (ioremap_page_range((unsigned long)vma->addr, + (unsigned long)vma->addr + map->size, + vma->phys_addr, prot)) { + vunmap(vma->addr); + return -EAGAIN; + } +#else + /* + * Without an MMU (or with it turned off), this is much more + * straightforward, as we can just load up each queue's QACR with + * the physical address appropriately masked. + */ + __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); + __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); +#endif + + return 0; +} + +/** + * sq_remap - Map a physical address through the Store Queues + * @phys: Physical address of mapping. + * @size: Length of mapping. + * @name: User invoking mapping. + * @prot: Protection bits. + * + * Remaps the physical address @phys through the next available store queue + * address of @size length. @name is logged at boot time as well as through + * the sysfs interface. + */ +unsigned long sq_remap(unsigned long phys, unsigned int size, + const char *name, pgprot_t prot) +{ + struct sq_mapping *map; + unsigned long end; + unsigned int psz; + int ret, page; + + /* Don't allow wraparound or zero size */ + end = phys + size - 1; + if (unlikely(!size || end < phys)) + return -EINVAL; + /* Don't allow anyone to remap normal memory.. 
*/ + if (unlikely(phys < virt_to_phys(high_memory))) + return -EINVAL; + + phys &= PAGE_MASK; + size = PAGE_ALIGN(end + 1) - phys; + + map = kmem_cache_alloc(sq_cache, GFP_KERNEL); + if (unlikely(!map)) + return -ENOMEM; + + map->addr = phys; + map->size = size; + map->name = name; + + page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT, + get_order(map->size)); + if (unlikely(page < 0)) { + ret = -ENOSPC; + goto out; + } + + map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); + + ret = __sq_remap(map, prot); + if (unlikely(ret != 0)) + goto out; + + psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n", + likely(map->name) ? map->name : "???", + psz, psz == 1 ? " " : "s", + map->sq_addr, map->addr); + + sq_mapping_list_add(map); + + return map->sq_addr; + +out: + kmem_cache_free(sq_cache, map); + return ret; +} +EXPORT_SYMBOL(sq_remap); + +/** + * sq_unmap - Unmap a Store Queue allocation + * @vaddr: Pre-allocated Store Queue mapping. + * + * Unmaps the store queue allocation @map that was previously created by + * sq_remap(). Also frees up the pte that was previously inserted into + * the kernel page table and discards the UTLB translation. + */ +void sq_unmap(unsigned long vaddr) +{ + struct sq_mapping **p, *map; + int page; + + for (p = &sq_mapping_list; (map = *p); p = &map->next) + if (map->sq_addr == vaddr) + break; + + if (unlikely(!map)) { + printk("%s: bad store queue address 0x%08lx\n", + __func__, vaddr); + return; + } + + page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT; + bitmap_release_region(sq_bitmap, page, get_order(map->size)); + +#ifdef CONFIG_MMU + { + /* + * Tear down the VMA in the MMU case. + */ + struct vm_struct *vma; + + vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); + if (!vma) { + printk(KERN_ERR "%s: bad address 0x%08lx\n", + __func__, map->sq_addr); + return; + } + } +#endif + + sq_mapping_list_del(map); + + kmem_cache_free(sq_cache, map); +} +EXPORT_SYMBOL(sq_unmap); + +/* + * Needlessly complex sysfs interface. Unfortunately it doesn't seem like + * there is any other easy way to add things on a per-cpu basis without + * putting the directory entries somewhere stupid and having to create + * links in sysfs by hand back in to the per-cpu directories. + * + * Some day we may want to have an additional abstraction per store + * queue, but considering the kobject hell we already have to deal with, + * it's simply not worth the trouble. 
+ */ +static struct kobject *sq_kobject[NR_CPUS]; + +struct sq_sysfs_attr { + struct attribute attr; + ssize_t (*show)(char *buf); + ssize_t (*store)(const char *buf, size_t count); +}; + +#define to_sq_sysfs_attr(a) container_of(a, struct sq_sysfs_attr, attr) + +static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); + + if (likely(sattr->show)) + return sattr->show(buf); + + return -EIO; +} + +static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr); + + if (likely(sattr->store)) + return sattr->store(buf, count); + + return -EIO; +} + +static ssize_t mapping_show(char *buf) +{ + struct sq_mapping **list, *entry; + char *p = buf; + + for (list = &sq_mapping_list; (entry = *list); list = &entry->next) + p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n", + entry->sq_addr, entry->sq_addr + entry->size, + entry->addr, entry->name); + + return p - buf; +} + +static ssize_t mapping_store(const char *buf, size_t count) +{ + unsigned long base = 0, len = 0; + + sscanf(buf, "%lx %lx", &base, &len); + if (!base) + return -EIO; + + if (likely(len)) { + int ret = sq_remap(base, len, "Userspace", PAGE_SHARED); + if (ret < 0) + return ret; + } else + sq_unmap(base); + + return count; +} + +static struct sq_sysfs_attr mapping_attr = + __ATTR(mapping, 0644, mapping_show, mapping_store); + +static struct attribute *sq_sysfs_attrs[] = { + &mapping_attr.attr, + NULL, +}; +ATTRIBUTE_GROUPS(sq_sysfs); + +static const struct sysfs_ops sq_sysfs_ops = { + .show = sq_sysfs_show, + .store = sq_sysfs_store, +}; + +static struct kobj_type ktype_percpu_entry = { + .sysfs_ops = &sq_sysfs_ops, + .default_groups = sq_sysfs_groups, +}; + +static int sq_dev_add(struct device *dev, struct subsys_interface *sif) +{ + unsigned int cpu = dev->id; + struct kobject *kobj; + int error; + + sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL); + if (unlikely(!sq_kobject[cpu])) + return -ENOMEM; + + kobj = sq_kobject[cpu]; + error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj, + "%s", "sq"); + if (!error) + kobject_uevent(kobj, KOBJ_ADD); + return error; +} + +static void sq_dev_remove(struct device *dev, struct subsys_interface *sif) +{ + unsigned int cpu = dev->id; + struct kobject *kobj = sq_kobject[cpu]; + + kobject_put(kobj); +} + +static struct subsys_interface sq_interface = { + .name = "sq", + .subsys = &cpu_subsys, + .add_dev = sq_dev_add, + .remove_dev = sq_dev_remove, +}; + +static int __init sq_api_init(void) +{ + unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT; + unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG; + int ret = -ENOMEM; + + printk(KERN_NOTICE "sq: Registering store queue API.\n"); + + sq_cache = kmem_cache_create("store_queue_cache", + sizeof(struct sq_mapping), 0, 0, NULL); + if (unlikely(!sq_cache)) + return ret; + + sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL); + if (unlikely(!sq_bitmap)) + goto out; + + ret = subsys_interface_register(&sq_interface); + if (unlikely(ret != 0)) + goto out; + + return 0; + +out: + kfree(sq_bitmap); + kmem_cache_destroy(sq_cache); + + return ret; +} + +static void __exit sq_api_exit(void) +{ + subsys_interface_unregister(&sq_interface); + kfree(sq_bitmap); + kmem_cache_destroy(sq_cache); +} + +module_init(sq_api_init); +module_exit(sq_api_exit); + +MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. 
Brown <mrbrown@0xd6.org>"); +MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues"); +MODULE_LICENSE("GPL"); diff --git a/arch/sh/kernel/cpu/sh4a/Makefile b/arch/sh/kernel/cpu/sh4a/Makefile new file mode 100644 index 000000000..baf73a8fc --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-4 backends. +# + +# CPU subtype setup +obj-$(CONFIG_CPU_SUBTYPE_SH7757) += setup-sh7757.o +obj-$(CONFIG_CPU_SUBTYPE_SH7763) += setup-sh7763.o +obj-$(CONFIG_CPU_SUBTYPE_SH7770) += setup-sh7770.o +obj-$(CONFIG_CPU_SUBTYPE_SH7780) += setup-sh7780.o +obj-$(CONFIG_CPU_SUBTYPE_SH7785) += setup-sh7785.o +obj-$(CONFIG_CPU_SUBTYPE_SH7786) += setup-sh7786.o intc-shx3.o +obj-$(CONFIG_CPU_SUBTYPE_SH7343) += setup-sh7343.o +obj-$(CONFIG_CPU_SUBTYPE_SH7722) += setup-sh7722.o serial-sh7722.o +obj-$(CONFIG_CPU_SUBTYPE_SH7723) += setup-sh7723.o +obj-$(CONFIG_CPU_SUBTYPE_SH7724) += setup-sh7724.o +obj-$(CONFIG_CPU_SUBTYPE_SH7734) += setup-sh7734.o +obj-$(CONFIG_CPU_SUBTYPE_SH7366) += setup-sh7366.o +obj-$(CONFIG_CPU_SUBTYPE_SHX3) += setup-shx3.o intc-shx3.o + +# SMP setup +smp-$(CONFIG_CPU_SHX3) := smp-shx3.o + +# Primary on-chip clocks (common) +clock-$(CONFIG_CPU_SUBTYPE_SH7757) := clock-sh7757.o +clock-$(CONFIG_CPU_SUBTYPE_SH7763) := clock-sh7763.o +clock-$(CONFIG_CPU_SUBTYPE_SH7770) := clock-sh7770.o +clock-$(CONFIG_CPU_SUBTYPE_SH7780) := clock-sh7780.o +clock-$(CONFIG_CPU_SUBTYPE_SH7785) := clock-sh7785.o +clock-$(CONFIG_CPU_SUBTYPE_SH7786) := clock-sh7786.o +clock-$(CONFIG_CPU_SUBTYPE_SH7343) := clock-sh7343.o +clock-$(CONFIG_CPU_SUBTYPE_SH7722) := clock-sh7722.o +clock-$(CONFIG_CPU_SUBTYPE_SH7723) := clock-sh7723.o +clock-$(CONFIG_CPU_SUBTYPE_SH7724) := clock-sh7724.o +clock-$(CONFIG_CPU_SUBTYPE_SH7734) := clock-sh7734.o +clock-$(CONFIG_CPU_SUBTYPE_SH7366) := clock-sh7366.o +clock-$(CONFIG_CPU_SUBTYPE_SHX3) := clock-shx3.o + +# Pinmux setup +pinmux-$(CONFIG_CPU_SUBTYPE_SH7722) := pinmux-sh7722.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7723) := pinmux-sh7723.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7724) := pinmux-sh7724.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7734) := pinmux-sh7734.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7757) := pinmux-sh7757.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7785) := pinmux-sh7785.o +pinmux-$(CONFIG_CPU_SUBTYPE_SH7786) := pinmux-sh7786.o +pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o + +obj-y += $(clock-y) +obj-$(CONFIG_SMP) += $(smp-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) +obj-$(CONFIG_PERF_EVENTS) += perf_event.o +obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7343.c b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c new file mode 100644 index 000000000..32cb5d1fd --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7343.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7343.c + * + * SH7343 clock framework support + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> + +/* SH7343 registers */ +#define FRQCR 0xa4150000 +#define VCLKCR 0xa4150004 +#define SCLKACR 0xa4150008 +#define SCLKBCR 0xa415000c +#define PLLCR 0xa4150024 +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 +#define DLLFRQ 0xa4150050 + +/* Fixed 32 KHz root clock for RTC and Power Management purposes */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with 
clk_set_rate() + * from the platform code. + */ +struct clk extal_clk = { + .rate = 33333333, +}; + +/* The dll block multiplies the 32khz r_clk, may be used instead of extal */ +static unsigned long dll_recalc(struct clk *clk) +{ + unsigned long mult; + + if (__raw_readl(PLLCR) & 0x1000) + mult = __raw_readl(DLLFRQ); + else + mult = 0; + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops dll_clk_ops = { + .recalc = dll_recalc, +}; + +static struct clk dll_clk = { + .ops = &dll_clk_ops, + .parent = &r_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long mult = 1; + + if (__raw_readl(PLLCR) & 0x4000) + mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &dll_clk, + &pll_clk, +}; + +static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), + .multipliers = multipliers, + .nr_multipliers = ARRAY_SIZE(multipliers), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, + DIV4_SIUA, DIV4_SIUB, DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), + [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), + [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), +}; + +enum { DIV6_V, DIV6_NR }; + +struct clk div6_clks[DIV6_NR] = { + [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), +}; + +#define MSTP(_parent, _reg, _bit, _flags) \ + SH_CLK_MSTP32(_parent, _reg, _bit, _flags) + +enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026, + MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016, + MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010, + MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001, + MSTP109, MSTP108, MSTP100, + MSTP225, MSTP224, MSTP218, MSTP217, MSTP216, + MSTP214, MSTP213, MSTP212, MSTP211, MSTP208, + MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), + [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), + [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), + [MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), + [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), + [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0), + [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0), + [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0), + [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0), + [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0), + [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0), + [MSTP015] = MSTP(&div4_clks[DIV4_P], 
MSTPCR0, 15, 0), + [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0), + [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0), + [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0), + [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0), + [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0), + [MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0), + [MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0), + [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0), + [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0), + + [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + [MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0), + + [MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0), + [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0), + [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0), + [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0), + [MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0), + [MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0), + [MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0), + [MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0), + [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0), + [MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0), + [MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT), + [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0), + [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0), + [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0), + [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), + [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), + [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("dll_clk", &dll_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]), + CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]), + + /* DIV6 clocks */ + CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), + + /* MSTP32 clocks */ + CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]), + CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]), + CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]), + CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]), + CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]), + CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]), + CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]), + CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]), + CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]), + CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]), + CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]), + CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]), + CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]), + CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]), + CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]), + CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]), + + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP007]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP006]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP005]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP004]), + + CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]), + CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]), + CLKDEV_CON_ID("siof1", 
&mstp_clks[MSTP001]), + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]), + CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]), + CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]), + CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]), + CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]), + CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]), + CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]), + CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]), + CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]), + CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]), + CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]), + CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]), + CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]), + CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]), + CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]), + CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]), + CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]), + CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]), + CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + /* autodetect extal or dll configuration */ + if (__raw_readl(PLLCR) & 0x1000) + pll_clk.parent = &dll_clk; + else + pll_clk.parent = &extal_clk; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_div6_register(div6_clks, DIV6_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7366.c b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c new file mode 100644 index 000000000..aa3444b41 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7366.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7366.c + * + * SH7366 clock framework support + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> + +/* SH7366 registers */ +#define FRQCR 0xa4150000 +#define VCLKCR 0xa4150004 +#define SCLKACR 0xa4150008 +#define SCLKBCR 0xa415000c +#define PLLCR 0xa4150024 +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 +#define DLLFRQ 0xa4150050 + +/* Fixed 32 KHz root clock for RTC and Power Management purposes */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. 
+ */ +struct clk extal_clk = { + .rate = 33333333, +}; + +/* The dll block multiplies the 32khz r_clk, may be used instead of extal */ +static unsigned long dll_recalc(struct clk *clk) +{ + unsigned long mult; + + if (__raw_readl(PLLCR) & 0x1000) + mult = __raw_readl(DLLFRQ); + else + mult = 0; + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops dll_clk_ops = { + .recalc = dll_recalc, +}; + +static struct clk dll_clk = { + .ops = &dll_clk_ops, + .parent = &r_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long mult = 1; + unsigned long div = 1; + + if (__raw_readl(PLLCR) & 0x4000) + mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); + else + div = 2; + + return (clk->parent->rate * mult) / div; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &dll_clk, + &pll_clk, +}; + +static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), + .multipliers = multipliers, + .nr_multipliers = ARRAY_SIZE(multipliers), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, + DIV4_SIUA, DIV4_SIUB, DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), + [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), + [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), + [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), +}; + +enum { DIV6_V, DIV6_NR }; + +struct clk div6_clks[DIV6_NR] = { + [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), +}; + +#define MSTP(_parent, _reg, _bit, _flags) \ + SH_CLK_MSTP32(_parent, _reg, _bit, _flags) + +enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026, + MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016, + MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010, + MSTP007, MSTP006, MSTP005, MSTP002, MSTP001, + MSTP109, MSTP100, + MSTP227, MSTP226, MSTP224, MSTP223, MSTP222, MSTP218, MSTP217, + MSTP211, MSTP207, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* See page 52 of Datasheet V0.40: Overview -> Block Diagram */ + [MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), + [MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), + [MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), + [MSTP028] = MSTP(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT), + [MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), + [MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0), + [MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0), + [MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0), + [MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0), + [MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0), + [MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0), + [MSTP015] 
= MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + [MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0), + [MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0), + [MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0), + [MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + [MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0), + [MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0), + [MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0), + [MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0), + + [MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + + [MSTP227] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 27, 0), + [MSTP226] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 26, 0), + [MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0), + [MSTP223] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 23, 0), + [MSTP222] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 22, 0), + [MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0), + [MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0), + [MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0), + [MSTP207] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 7, CLK_ENABLE_ON_INIT), + [MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0), + [MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0), + [MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0), + [MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT), + [MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT), + [MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("dll_clk", &dll_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]), + CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]), + + /* DIV6 clocks */ + CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), + + /* MSTP32 clocks */ + CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]), + CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]), + CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]), + CLKDEV_CON_ID("rsmem0", &mstp_clks[MSTP028]), + CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]), + CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]), + CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]), + CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]), + CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]), + CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]), + CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]), + CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]), + CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[MSTP014]), + CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]), + CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]), + CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]), + + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP007]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP006]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP005]), + + CLKDEV_CON_ID("msiof0", &mstp_clks[MSTP002]), + CLKDEV_CON_ID("sbr0", &mstp_clks[MSTP001]), + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]), + CLKDEV_CON_ID("icb0", &mstp_clks[MSTP227]), + CLKDEV_CON_ID("meram0", &mstp_clks[MSTP226]), + CLKDEV_CON_ID("dacy1", &mstp_clks[MSTP224]), + CLKDEV_CON_ID("dacy0", &mstp_clks[MSTP223]), + CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP222]), + CLKDEV_CON_ID("sdhi0", 
&mstp_clks[MSTP218]), + CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]), + CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]), + CLKDEV_CON_ID("veu1", &mstp_clks[MSTP207]), + CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]), + CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]), + CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]), + CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]), + CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]), + CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + /* autodetect extal or dll configuration */ + if (__raw_readl(PLLCR) & 0x1000) + pll_clk.parent = &dll_clk; + else + pll_clk.parent = &extal_clk; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_div6_register(div6_clks, DIV6_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7722.c b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c new file mode 100644 index 000000000..38b057703 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7722.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7722.c + * + * SH7722 clock framework support + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <linux/sh_clk.h> +#include <asm/clock.h> +#include <cpu/sh7722.h> + +/* SH7722 registers */ +#define FRQCR 0xa4150000 +#define VCLKCR 0xa4150004 +#define SCLKACR 0xa4150008 +#define SCLKBCR 0xa415000c +#define IRDACLKCR 0xa4150018 +#define PLLCR 0xa4150024 +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 +#define DLLFRQ 0xa4150050 + +/* Fixed 32 KHz root clock for RTC and Power Management purposes */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. 
+ */ +struct clk extal_clk = { + .rate = 33333333, +}; + +/* The dll block multiplies the 32khz r_clk, may be used instead of extal */ +static unsigned long dll_recalc(struct clk *clk) +{ + unsigned long mult; + + if (__raw_readl(PLLCR) & 0x1000) + mult = __raw_readl(DLLFRQ); + else + mult = 0; + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops dll_clk_ops = { + .recalc = dll_recalc, +}; + +static struct clk dll_clk = { + .ops = &dll_clk_ops, + .parent = &r_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long mult = 1; + unsigned long div = 1; + + if (__raw_readl(PLLCR) & 0x4000) + mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); + else + div = 2; + + return (clk->parent->rate * mult) / div; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &dll_clk, + &pll_clk, +}; + +static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), + .multipliers = multipliers, + .nr_multipliers = ARRAY_SIZE(multipliers), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR }; + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 20, 0x1fef, CLK_ENABLE_ON_INIT), + [DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0), +}; + +enum { DIV4_IRDA, DIV4_ENABLE_NR }; + +struct clk div4_enable_clks[DIV4_ENABLE_NR] = { + [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x1fff, 0), +}; + +enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; + +struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { + [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0), + [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0), +}; + +enum { DIV6_V, DIV6_NR }; + +struct clk div6_clks[DIV6_NR] = { + [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), +}; + +static struct clk mstp_clks[HWBLK_NR] = { + [HWBLK_URAM] = SH_CLK_MSTP32(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT), + [HWBLK_XYMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT), + [HWBLK_TMU] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0), + [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0), + [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 6, 0), + [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0), + + [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0), + + [HWBLK_SDHI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 18, 0), + [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0), + [HWBLK_USBF] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 11, 0), + [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0), + [HWBLK_SIU] = 
SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0), + [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0), + [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0), + [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0), + [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0), + [HWBLK_VEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0), + [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0), + [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("dll_clk", &dll_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]), + CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]), + CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]), + + /* DIV6 clocks */ + CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), + + /* MSTP clocks */ + CLKDEV_CON_ID("uram0", &mstp_clks[HWBLK_URAM]), + CLKDEV_CON_ID("xymem0", &mstp_clks[HWBLK_XYMEM]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU]), + + CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), + CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), + CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]), + + CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]), + CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), + CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), + + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]), + CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), + CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI]), + CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]), + CLKDEV_CON_ID("usbf0", &mstp_clks[HWBLK_USBF]), + CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), + CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]), + CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]), + CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]), + CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]), + CLKDEV_DEV_ID("renesas-ceu.0", &mstp_clks[HWBLK_CEU]), + CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU]), + CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), + CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + /* autodetect extal or dll configuration */ + if (__raw_readl(PLLCR) & 0x1000) + pll_clk.parent = &dll_clk; + else + pll_clk.parent = &extal_clk; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_div4_enable_register(div4_enable_clks, + DIV4_ENABLE_NR, &div4_table); + + if (!ret) + ret = sh_clk_div4_reparent_register(div4_reparent_clks, + DIV4_REPARENT_NR, &div4_table); + + if (!ret) + ret = sh_clk_div6_register(div6_clks, DIV6_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7723.c b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c new file mode 100644 index 000000000..9dc3a987d --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7723.c @@ 
-0,0 +1,301 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7723.c + * + * SH7723 clock framework support + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/sh_clk.h> +#include <asm/clock.h> +#include <cpu/sh7723.h> + +/* SH7723 registers */ +#define FRQCR 0xa4150000 +#define VCLKCR 0xa4150004 +#define SCLKACR 0xa4150008 +#define SCLKBCR 0xa415000c +#define IRDACLKCR 0xa4150018 +#define PLLCR 0xa4150024 +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 +#define DLLFRQ 0xa4150050 + +/* Fixed 32 KHz root clock for RTC and Power Management purposes */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +struct clk extal_clk = { + .rate = 33333333, +}; + +/* The dll multiplies the 32khz r_clk, may be used instead of extal */ +static unsigned long dll_recalc(struct clk *clk) +{ + unsigned long mult; + + if (__raw_readl(PLLCR) & 0x1000) + mult = __raw_readl(DLLFRQ); + else + mult = 0; + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops dll_clk_ops = { + .recalc = dll_recalc, +}; + +static struct clk dll_clk = { + .ops = &dll_clk_ops, + .parent = &r_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long mult = 1; + unsigned long div = 1; + + if (__raw_readl(PLLCR) & 0x4000) + mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1); + else + div = 2; + + return (clk->parent->rate * mult) / div; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .flags = CLK_ENABLE_ON_INIT, +}; + +struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &dll_clk, + &pll_clk, +}; + +static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), + .multipliers = multipliers, + .nr_multipliers = ARRAY_SIZE(multipliers), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P, DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCR, 20, 0x0dbf, CLK_ENABLE_ON_INIT), + [DIV4_U] = DIV4(FRQCR, 16, 0x0dbf, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(FRQCR, 12, 0x0dbf, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCR, 8, 0x0dbf, CLK_ENABLE_ON_INIT), + [DIV4_B3] = DIV4(FRQCR, 4, 0x0db4, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCR, 0, 0x0dbf, 0), +}; + +enum { DIV4_IRDA, DIV4_ENABLE_NR }; + +struct clk div4_enable_clks[DIV4_ENABLE_NR] = { + [DIV4_IRDA] = DIV4(IRDACLKCR, 0, 0x0dbf, 0), +}; + +enum { DIV4_SIUA, DIV4_SIUB, DIV4_REPARENT_NR }; + +struct clk div4_reparent_clks[DIV4_REPARENT_NR] = { + [DIV4_SIUA] = DIV4(SCLKACR, 0, 0x0dbf, 0), + [DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x0dbf, 0), +}; +enum { DIV6_V, DIV6_NR }; + +struct clk div6_clks[DIV6_NR] = { + [DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0), +}; + +static struct clk mstp_clks[] = { + /* See page 60 of Datasheet V1.0: Overview -> Block Diagram */ + [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, 
CLK_ENABLE_ON_INIT), + [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), + [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), + [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 28, CLK_ENABLE_ON_INIT), + [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT), + [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT), + [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 22, CLK_ENABLE_ON_INIT), + [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0), + [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT), + [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0), + [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0), + [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0), + [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0), + [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0), + [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0), + [HWBLK_FLCTL] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0), + [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0), + [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0), + [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0), + [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0), + [HWBLK_MERAM] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 0, 0), + + [HWBLK_IIC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 8, 0), + + [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 28, 0), + [HWBLK_ADC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 27, 0), + [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0), + [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0), + [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0), + [HWBLK_ICB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, CLK_ENABLE_ON_INIT), + [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0), + [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0), + [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR2, 14, 0), + [HWBLK_USB] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 11, 0), + [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 10, 0), + [HWBLK_SIU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 8, 0), + [HWBLK_VEU2H1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0), + [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0), + [HWBLK_BEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0), + [HWBLK_CEU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0), + [HWBLK_VEU2H0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0), + [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0), + [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("dll_clk", &dll_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("umem_clk", 
&div4_clks[DIV4_U]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("irda_clk", &div4_enable_clks[DIV4_IRDA]), + CLKDEV_CON_ID("siua_clk", &div4_reparent_clks[DIV4_SIUA]), + CLKDEV_CON_ID("siub_clk", &div4_reparent_clks[DIV4_SIUB]), + + /* DIV6 clocks */ + CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), + + /* MSTP clocks */ + CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]), + CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]), + CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]), + CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]), + CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]), + CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]), + CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]), + CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]), + CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]), + CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]), + CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]), + CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), + CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), + CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]), + CLKDEV_CON_ID("flctl0", &mstp_clks[HWBLK_FLCTL]), + CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]), + CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]), + CLKDEV_DEV_ID("sh_mobile_meram.0", &mstp_clks[HWBLK_MERAM]), + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC]), + CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), + CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), + CLKDEV_CON_ID("adc0", &mstp_clks[HWBLK_ADC]), + CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), + CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), + CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), + CLKDEV_CON_ID("icb0", &mstp_clks[HWBLK_ICB]), + CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]), + CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]), + CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]), + CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB]), + CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), + CLKDEV_DEV_ID("siu-pcm-audio", &mstp_clks[HWBLK_SIU]), + CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU2H1]), + CLKDEV_DEV_ID("sh-vou.0", &mstp_clks[HWBLK_VOU]), + CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU]), + CLKDEV_DEV_ID("ceu.0", &mstp_clks[HWBLK_CEU]), + CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU2H0]), + CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]), + + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[HWBLK_SCIF0]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[HWBLK_SCIF1]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[HWBLK_SCIF2]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[HWBLK_SCIF3]), + CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[HWBLK_SCIF4]), + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[HWBLK_SCIF5]), + + CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + /* autodetect extal or dll configuration */ + if (__raw_readl(PLLCR) & 0x1000) + pll_clk.parent = &dll_clk; + else + pll_clk.parent = &extal_clk; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret |= clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_div4_enable_register(div4_enable_clks, + 
DIV4_ENABLE_NR, &div4_table); + + if (!ret) + ret = sh_clk_div4_reparent_register(div4_reparent_clks, + DIV4_REPARENT_NR, &div4_table); + + if (!ret) + ret = sh_clk_div6_register(div6_clks, DIV6_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c new file mode 100644 index 000000000..2a1f0d847 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7724.c + * + * SH7724 clock framework support + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/sh_clk.h> +#include <asm/clock.h> +#include <cpu/sh7724.h> + +/* SH7724 registers */ +#define FRQCRA 0xa4150000 +#define FRQCRB 0xa4150004 +#define VCLKCR 0xa4150048 +#define FCLKACR 0xa4150008 +#define FCLKBCR 0xa415000c +#define IRDACLKCR 0xa4150018 +#define PLLCR 0xa4150024 +#define MSTPCR0 0xa4150030 +#define MSTPCR1 0xa4150034 +#define MSTPCR2 0xa4150038 +#define SPUCLKCR 0xa415003c +#define FLLFRQ 0xa4150050 +#define LSTATS 0xa4150060 + +/* Fixed 32 KHz root clock for RTC and Power Management purposes */ +static struct clk r_clk = { + .rate = 32768, +}; + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 33333333, +}; + +/* The fll multiplies the 32khz r_clk, may be used instead of extal */ +static unsigned long fll_recalc(struct clk *clk) +{ + unsigned long mult = 0; + unsigned long div = 1; + + if (__raw_readl(PLLCR) & 0x1000) + mult = __raw_readl(FLLFRQ) & 0x3ff; + + if (__raw_readl(FLLFRQ) & 0x4000) + div = 2; + + return (clk->parent->rate * mult) / div; +} + +static struct sh_clk_ops fll_clk_ops = { + .recalc = fll_recalc, +}; + +static struct clk fll_clk = { + .ops = &fll_clk_ops, + .parent = &r_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + unsigned long mult = 1; + + if (__raw_readl(PLLCR) & 0x4000) + mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2; + + return clk->parent->rate * mult; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .flags = CLK_ENABLE_ON_INIT, +}; + +/* A fixed divide-by-3 block use by the div6 clocks */ +static unsigned long div3_recalc(struct clk *clk) +{ + return clk->parent->rate / 3; +} + +static struct sh_clk_ops div3_clk_ops = { + .recalc = div3_recalc, +}; + +static struct clk div3_clk = { + .ops = &div3_clk_ops, + .parent = &pll_clk, +}; + +/* External input clock (pin name: FSIMCKA/FSIMCKB/DV_CLKI ) */ +struct clk sh7724_fsimcka_clk = { +}; + +struct clk sh7724_fsimckb_clk = { +}; + +struct clk sh7724_dv_clki = { +}; + +static struct clk *main_clks[] = { + &r_clk, + &extal_clk, + &fll_clk, + &pll_clk, + &div3_clk, + &sh7724_fsimcka_clk, + &sh7724_fsimckb_clk, + &sh7724_dv_clki, +}; + +static void div4_kick(struct clk *clk) +{ + unsigned long value; + + /* set KICK bit in FRQCRA to update hardware setting */ + value = __raw_readl(FRQCRA); + value |= (1 << 31); + __raw_writel(value, FRQCRA); +} + +static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), +}; + +static struct 
clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, + .kick = div4_kick, +}; + +enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0), + [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT), +}; + +enum { DIV6_V, DIV6_I, DIV6_S, DIV6_FA, DIV6_FB, DIV6_NR }; + +/* Indices are important - they are the actual src selecting values */ +static struct clk *common_parent[] = { + [0] = &div3_clk, + [1] = NULL, +}; + +static struct clk *vclkcr_parent[8] = { + [0] = &div3_clk, + [2] = &sh7724_dv_clki, + [4] = &extal_clk, +}; + +static struct clk *fclkacr_parent[] = { + [0] = &div3_clk, + [1] = NULL, + [2] = &sh7724_fsimcka_clk, + [3] = NULL, +}; + +static struct clk *fclkbcr_parent[] = { + [0] = &div3_clk, + [1] = NULL, + [2] = &sh7724_fsimckb_clk, + [3] = NULL, +}; + +static struct clk div6_clks[DIV6_NR] = { + [DIV6_V] = SH_CLK_DIV6_EXT(VCLKCR, 0, + vclkcr_parent, ARRAY_SIZE(vclkcr_parent), 12, 3), + [DIV6_I] = SH_CLK_DIV6_EXT(IRDACLKCR, 0, + common_parent, ARRAY_SIZE(common_parent), 6, 1), + [DIV6_S] = SH_CLK_DIV6_EXT(SPUCLKCR, CLK_ENABLE_ON_INIT, + common_parent, ARRAY_SIZE(common_parent), 6, 1), + [DIV6_FA] = SH_CLK_DIV6_EXT(FCLKACR, 0, + fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2), + [DIV6_FB] = SH_CLK_DIV6_EXT(FCLKBCR, 0, + fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2), +}; + +static struct clk mstp_clks[HWBLK_NR] = { + [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), + [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), + [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), + [HWBLK_RSMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 28, CLK_ENABLE_ON_INIT), + [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT), + [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 26, CLK_ENABLE_ON_INIT), + [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT), + [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, CLK_ENABLE_ON_INIT), + [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0), + [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT), + [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0), + [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0), + [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0), + [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0), + [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0), + [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0), + [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0), + [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0), + [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0), + [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0), 
+ + [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 12, 0), + [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 11, 0), + [HWBLK_IIC0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + [HWBLK_IIC1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0), + + [HWBLK_MMC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 29, 0), + [HWBLK_ETHER] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 28, 0), + [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 26, 0), + [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0), + [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0), + [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0), + [HWBLK_USB1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, 0), + [HWBLK_USB0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 20, 0), + [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 19, 0), + [HWBLK_SDHI0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0), + [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0), + [HWBLK_VEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 15, 0), + [HWBLK_CEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 13, 0), + [HWBLK_BEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 12, 0), + [HWBLK_2DDMAC] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 10, 0), + [HWBLK_SPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0), + [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0), + [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0), + [HWBLK_BEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0), + [HWBLK_CEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0), + [HWBLK_VEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0), + [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0), + [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("rclk", &r_clk), + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("fll_clk", &fll_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + CLKDEV_CON_ID("div3_clk", &div3_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]), + + /* DIV6 clocks */ + CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), + CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]), + CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]), + CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]), + CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]), + + /* MSTP clocks */ + CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]), + CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]), + CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]), + CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]), + CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]), + CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]), + CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]), + CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]), + CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]), + CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]), + CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]), + CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]), + + CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), + CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), + CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]), + + CLKDEV_DEV_ID("sh-sci.0", 
&mstp_clks[HWBLK_SCIF0]), + CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), + CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), + CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]), + CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]), + CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]), + + CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]), + CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]), + CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]), + CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), + CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]), + CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]), + CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]), + CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]), + CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), + CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), + CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), + CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), + CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]), + CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]), + CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]), + CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]), + CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), + CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]), + CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]), + CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]), + CLKDEV_DEV_ID("renesas-ceu.1", &mstp_clks[HWBLK_CEU1]), + CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]), + CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]), + CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]), + CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]), + CLKDEV_DEV_ID("sh-vou", &mstp_clks[HWBLK_VOU]), + CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]), + CLKDEV_DEV_ID("renesas-ceu.0", &mstp_clks[HWBLK_CEU0]), + CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]), + CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), + CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]), +}; + +int __init arch_clk_init(void) +{ + int k, ret = 0; + + /* autodetect extal or fll configuration */ + if (__raw_readl(PLLCR) & 0x1000) + pll_clk.parent = &fll_clk; + else + pll_clk.parent = &extal_clk; + + for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) + ret = clk_register(main_clks[k]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); + + if (!ret) + ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7734.c b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c new file mode 100644 index 000000000..c81ee60ed --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7734.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7734.c + * + * Clock framework for SH7734 + * + * Copyright (C) 2011, 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> + * Copyright (C) 2011, 2012 Renesas Solutions Corp. 
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <linux/delay.h> +#include <asm/clock.h> +#include <asm/freq.h> + +static struct clk extal_clk = { + .rate = 33333333, +}; + +#define MODEMR (0xFFCC0020) +#define MODEMR_MASK (0x6) +#define MODEMR_533MHZ (0x2) + +static unsigned long pll_recalc(struct clk *clk) +{ + int mode = 12; + u32 r = __raw_readl(MODEMR); + + if ((r & MODEMR_MASK) & MODEMR_533MHZ) + mode = 16; + + return clk->parent->rate * mode; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk *main_clks[] = { + &extal_clk, + &pll_clk, +}; + +static int multipliers[] = { 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; +static int divisors[] = { 1, 3, 2, 3, 4, 6, 8, 9, 12, 16, 18, 24 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = divisors, + .nr_divisors = ARRAY_SIZE(divisors), + .multipliers = multipliers, + .nr_multipliers = ARRAY_SIZE(multipliers), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_S, DIV4_B, DIV4_M, DIV4_S1, DIV4_P, DIV4_NR }; + +#define DIV4(_reg, _bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_I] = DIV4(FRQMR1, 28, 0x0003, CLK_ENABLE_ON_INIT), + [DIV4_S] = DIV4(FRQMR1, 20, 0x000C, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(FRQMR1, 16, 0x0140, CLK_ENABLE_ON_INIT), + [DIV4_M] = DIV4(FRQMR1, 12, 0x0004, CLK_ENABLE_ON_INIT), + [DIV4_S1] = DIV4(FRQMR1, 4, 0x0030, CLK_ENABLE_ON_INIT), + [DIV4_P] = DIV4(FRQMR1, 0, 0x0140, CLK_ENABLE_ON_INIT), +}; + +#define MSTPCR0 0xFFC80030 +#define MSTPCR1 0xFFC80034 +#define MSTPCR3 0xFFC8003C + +enum { + MSTP030, MSTP029, /* IIC */ + MSTP026, MSTP025, MSTP024, /* SCIF */ + MSTP023, + MSTP022, MSTP021, + MSTP019, /* HSCIF */ + MSTP016, MSTP015, MSTP014, /* TMU / TIMER */ + MSTP012, MSTP011, MSTP010, MSTP009, MSTP008, /* SSI */ + MSTP007, /* HSPI */ + MSTP115, /* ADMAC */ + MSTP114, /* GETHER */ + MSTP111, /* DMAC */ + MSTP109, /* VIDEOIN1 */ + MSTP108, /* VIDEOIN0 */ + MSTP107, /* RGPVBG */ + MSTP106, /* 2DG */ + MSTP103, /* VIEW */ + MSTP100, /* USB */ + MSTP331, /* MMC */ + MSTP330, /* MIMLB */ + MSTP323, /* SDHI0 */ + MSTP322, /* SDHI1 */ + MSTP321, /* SDHI2 */ + MSTP320, /* RQSPI */ + MSTP319, /* SRC0 */ + MSTP318, /* SRC1 */ + MSTP317, /* RSPI */ + MSTP316, /* RCAN0 */ + MSTP315, /* RCAN1 */ + MSTP314, /* FLTCL */ + MSTP313, /* ADC */ + MSTP312, /* MTU */ + MSTP304, /* IE-BUS */ + MSTP303, /* RTC */ + MSTP302, /* HIF */ + MSTP301, /* STIF0 */ + MSTP300, /* STIF1 */ + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* MSTPCR0 */ + [MSTP030] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 30, 0), + [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0), + [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), + [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), + [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), + [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0), + [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0), + [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), + [MSTP019] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0), + [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), + [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + 
[MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0), + [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0), + [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0), + [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [MSTP007] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), + + /* MSTPCR1 */ + [MSTP115] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 15, 0), + [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0), + [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0), + [MSTP109] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), + [MSTP108] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0), + [MSTP107] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 7, 0), + [MSTP106] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 6, 0), + [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0), + [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 0, 0), + + /* MSTPCR3 */ + [MSTP331] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 31, 0), + [MSTP330] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 30, 0), + [MSTP323] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 23, 0), + [MSTP322] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 22, 0), + [MSTP321] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 21, 0), + [MSTP320] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 20, 0), + [MSTP319] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 19, 0), + [MSTP318] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 18, 0), + [MSTP317] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 17, 0), + [MSTP316] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 16, 0), + [MSTP315] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 15, 0), + [MSTP314] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 14, 0), + [MSTP313] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 13, 0), + [MSTP312] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 12, 0), + [MSTP304] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 4, 0), + [MSTP303] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 3, 0), + [MSTP302] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 2, 0), + [MSTP301] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 1, 0), + [MSTP300] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR3, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* clocks */ + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_S]), + CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_M]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("shyway_clk1", &div4_clks[DIV4_S1]), + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + + /* MSTP32 clocks */ + CLKDEV_DEV_ID("i2c-sh7734.0", &mstp_clks[MSTP030]), + CLKDEV_DEV_ID("i2c-sh7734.1", &mstp_clks[MSTP029]), + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP026]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP024]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP023]), + CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP022]), + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP021]), + CLKDEV_CON_ID("hscif", &mstp_clks[MSTP019]), + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP016]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP015]), + CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP014]), + CLKDEV_CON_ID("ssi0", &mstp_clks[MSTP012]), + CLKDEV_CON_ID("ssi1", &mstp_clks[MSTP011]), + 
CLKDEV_CON_ID("ssi2", &mstp_clks[MSTP010]), + CLKDEV_CON_ID("ssi3", &mstp_clks[MSTP009]), + CLKDEV_CON_ID("sss", &mstp_clks[MSTP008]), + CLKDEV_CON_ID("hspi", &mstp_clks[MSTP007]), + CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP100]), + CLKDEV_CON_ID("videoin0", &mstp_clks[MSTP109]), + CLKDEV_CON_ID("videoin1", &mstp_clks[MSTP108]), + CLKDEV_CON_ID("rgpvg", &mstp_clks[MSTP107]), + CLKDEV_CON_ID("2dg", &mstp_clks[MSTP106]), + CLKDEV_CON_ID("view", &mstp_clks[MSTP103]), + + CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP331]), + CLKDEV_CON_ID("mimlb0", &mstp_clks[MSTP330]), + CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP323]), + CLKDEV_CON_ID("sdhi1", &mstp_clks[MSTP322]), + CLKDEV_CON_ID("sdhi2", &mstp_clks[MSTP321]), + CLKDEV_CON_ID("rqspi0", &mstp_clks[MSTP320]), + CLKDEV_CON_ID("src0", &mstp_clks[MSTP319]), + CLKDEV_CON_ID("src1", &mstp_clks[MSTP318]), + CLKDEV_CON_ID("rsp0", &mstp_clks[MSTP317]), + CLKDEV_CON_ID("rcan0", &mstp_clks[MSTP316]), + CLKDEV_CON_ID("rcan1", &mstp_clks[MSTP315]), + CLKDEV_CON_ID("fltcl0", &mstp_clks[MSTP314]), + CLKDEV_CON_ID("adc0", &mstp_clks[MSTP313]), + CLKDEV_CON_ID("mtu0", &mstp_clks[MSTP312]), + CLKDEV_CON_ID("iebus0", &mstp_clks[MSTP304]), + CLKDEV_DEV_ID("sh7734-gether.0", &mstp_clks[MSTP114]), + CLKDEV_CON_ID("rtc0", &mstp_clks[MSTP303]), + CLKDEV_CON_ID("hif0", &mstp_clks[MSTP302]), + CLKDEV_CON_ID("stif0", &mstp_clks[MSTP301]), + CLKDEV_CON_ID("stif1", &mstp_clks[MSTP300]), +}; + +int __init arch_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(main_clks); i++) + ret |= clk_register(main_clks[i]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), + &div4_table); + + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7757.c b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c new file mode 100644 index 000000000..9acb72210 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7757.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/clock-sh7757.c + * + * SH7757 support for the clock framework + * + * Copyright (C) 2009-2010 Renesas Solutions Corp. + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 48000000, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + int multiplier; + + multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16; + + return clk->parent->rate * multiplier; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk *clks[] = { + &extal_clk, + &pll_clk, +}; + +static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6, + 1, 1, 1, 16, 1, 24, 1, 1 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR }; + +#define DIV4(_bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + /* + * P clock is always enable, because some P clock modules is used + * by Host PC. 
+ */ + [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT), + [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT), +}; + +#define MSTPCR0 0xffc80030 +#define MSTPCR1 0xffc80034 +#define MSTPCR2 0xffc10028 + +enum { MSTP004, MSTP000, MSTP127, MSTP114, MSTP113, MSTP112, + MSTP111, MSTP110, MSTP103, MSTP102, MSTP220, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* MSTPCR0 */ + [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0), + [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0), + + /* MSTPCR1 */ + [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 27, 0), + [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0), + [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0), + [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0), + [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0), + [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0), + [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0), + [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0), + + /* MSTPCR2 */ + [MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + + /* MSTP32 clocks */ + CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP004]), + CLKDEV_CON_ID("riic0", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic1", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic2", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic3", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic4", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic5", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic6", &mstp_clks[MSTP000]), + CLKDEV_CON_ID("riic7", &mstp_clks[MSTP000]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP113]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP114]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP112]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP111]), + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP110]), + + CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP103]), + CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP102]), + CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]), + CLKDEV_DEV_ID("rspi.2", &mstp_clks[MSTP127]), +}; + +int __init arch_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(clks); i++) + ret |= clk_register(clks[i]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), + &div4_table); + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} + diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7763.c b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c new file mode 100644 index 000000000..aaff4b968 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7763.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7763.c + * + * SH7763 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + * Copyright (C) 2007 Yoshihiro Shimoda + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int bfc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 }; +static int p0fc_divisors[] = { 1, 1, 1, 8, 1, 1, 1, 1 }; +static int cfc_divisors[] 
= { 1, 1, 4, 1, 1, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= p0fc_divisors[(__raw_readl(FRQCR) >> 4) & 0x07]; +} + +static struct sh_clk_ops sh7763_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 4) & 0x07); + return clk->parent->rate / p0fc_divisors[idx]; +} + +static struct sh_clk_ops sh7763_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 16) & 0x07); + return clk->parent->rate / bfc_divisors[idx]; +} + +static struct sh_clk_ops sh7763_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static struct sh_clk_ops sh7763_cpu_clk_ops = { + .recalc = followparent_recalc, +}; + +static struct sh_clk_ops *sh7763_clk_ops[] = { + &sh7763_master_clk_ops, + &sh7763_module_clk_ops, + &sh7763_bus_clk_ops, + &sh7763_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7763_clk_ops)) + *ops = sh7763_clk_ops[idx]; +} + +static unsigned long shyway_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 20) & 0x07); + return clk->parent->rate / cfc_divisors[idx]; +} + +static struct sh_clk_ops sh7763_shyway_clk_ops = { + .recalc = shyway_clk_recalc, +}; + +static struct clk sh7763_shyway_clk = { + .flags = CLK_ENABLE_ON_INIT, + .ops = &sh7763_shyway_clk_ops, +}; + +/* + * Additional SH7763-specific on-chip clocks that aren't already part of the + * clock framework + */ +static struct clk *sh7763_onchip_clocks[] = { + &sh7763_shyway_clk, +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("shyway_clk", &sh7763_shyway_clk), +}; + +int __init arch_clk_init(void) +{ + struct clk *clk; + int i, ret = 0; + + cpg_clk_init(); + + clk = clk_get(NULL, "master_clk"); + for (i = 0; i < ARRAY_SIZE(sh7763_onchip_clocks); i++) { + struct clk *clkp = sh7763_onchip_clocks[i]; + + clkp->parent = clk; + ret |= clk_register(clkp); + } + + clk_put(clk); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7770.c b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c new file mode 100644 index 000000000..f356dfcd1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7770.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7770.c + * + * SH7770 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int ifc_divisors[] = { 1, 1, 1, 1, 1, 1, 1, 1 }; +static int bfc_divisors[] = { 1, 1, 1, 1, 1, 8,12, 1 }; +static int pfc_divisors[] = { 1, 8, 1,10,12,16, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pfc_divisors[(__raw_readl(FRQCR) >> 28) & 0x000f]; +} + +static struct sh_clk_ops sh7770_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 28) & 0x000f); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7770_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readl(FRQCR) & 0x000f); + return clk->parent->rate / bfc_divisors[idx]; +} + +static struct sh_clk_ops sh7770_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned 
long cpu_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 24) & 0x000f); + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7770_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7770_clk_ops[] = { + &sh7770_master_clk_ops, + &sh7770_module_clk_ops, + &sh7770_bus_clk_ops, + &sh7770_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7770_clk_ops)) + *ops = sh7770_clk_ops[idx]; +} + diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7780.c b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c new file mode 100644 index 000000000..fc0a3efb5 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7780.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7780.c + * + * SH7780 support for the clock framework + * + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <asm/io.h> + +static int ifc_divisors[] = { 2, 4 }; +static int bfc_divisors[] = { 1, 1, 1, 8, 12, 16, 24, 1 }; +static int pfc_divisors[] = { 1, 24, 24, 1 }; +static int cfc_divisors[] = { 1, 1, 4, 1, 6, 1, 1, 1 }; + +static void master_clk_init(struct clk *clk) +{ + clk->rate *= pfc_divisors[__raw_readl(FRQCR) & 0x0003]; +} + +static struct sh_clk_ops sh7780_master_clk_ops = { + .init = master_clk_init, +}; + +static unsigned long module_clk_recalc(struct clk *clk) +{ + int idx = (__raw_readl(FRQCR) & 0x0003); + return clk->parent->rate / pfc_divisors[idx]; +} + +static struct sh_clk_ops sh7780_module_clk_ops = { + .recalc = module_clk_recalc, +}; + +static unsigned long bus_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 16) & 0x0007); + return clk->parent->rate / bfc_divisors[idx]; +} + +static struct sh_clk_ops sh7780_bus_clk_ops = { + .recalc = bus_clk_recalc, +}; + +static unsigned long cpu_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 24) & 0x0001); + return clk->parent->rate / ifc_divisors[idx]; +} + +static struct sh_clk_ops sh7780_cpu_clk_ops = { + .recalc = cpu_clk_recalc, +}; + +static struct sh_clk_ops *sh7780_clk_ops[] = { + &sh7780_master_clk_ops, + &sh7780_module_clk_ops, + &sh7780_bus_clk_ops, + &sh7780_cpu_clk_ops, +}; + +void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx) +{ + if (idx < ARRAY_SIZE(sh7780_clk_ops)) + *ops = sh7780_clk_ops[idx]; +} + +static unsigned long shyway_clk_recalc(struct clk *clk) +{ + int idx = ((__raw_readl(FRQCR) >> 20) & 0x0007); + return clk->parent->rate / cfc_divisors[idx]; +} + +static struct sh_clk_ops sh7780_shyway_clk_ops = { + .recalc = shyway_clk_recalc, +}; + +static struct clk sh7780_shyway_clk = { + .flags = CLK_ENABLE_ON_INIT, + .ops = &sh7780_shyway_clk_ops, +}; + +/* + * Additional SH7780-specific on-chip clocks that aren't already part of the + * clock framework + */ +static struct clk *sh7780_onchip_clocks[] = { + &sh7780_shyway_clk, +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("shyway_clk", &sh7780_shyway_clk), +}; + +int __init arch_clk_init(void) +{ + struct clk *clk; + int i, ret = 0; + + cpg_clk_init(); + + clk = clk_get(NULL, "master_clk"); + for (i = 0; i < ARRAY_SIZE(sh7780_onchip_clocks); i++) { + struct clk *clkp = sh7780_onchip_clocks[i]; + + clkp->parent = clk; + ret |= clk_register(clkp); + } + + clk_put(clk); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + return 
ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7785.c b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c new file mode 100644 index 000000000..fca351378 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7785.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7785.c + * + * SH7785 support for the clock framework + * + * Copyright (C) 2007 - 2010 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/cpufreq.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> +#include <cpu/sh7785.h> + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 33333333, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + int multiplier; + + multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72; + + return clk->parent->rate * multiplier; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk *clks[] = { + &extal_clk, + &pll_clk, +}; + +static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, + 24, 32, 36, 48 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, + DIV4_DU, DIV4_P, DIV4_NR }; + +#define DIV4(_bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_P] = DIV4(0, 0x0f80, 0), + [DIV4_DU] = DIV4(4, 0x0ff0, 0), + [DIV4_GA] = DIV4(8, 0x0030, 0), + [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT), + [DIV4_U] = DIV4(24, 0x000c, CLK_ENABLE_ON_INIT), + [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT), +}; + +#define MSTPCR0 0xffc80030 +#define MSTPCR1 0xffc80034 + +enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, + MSTP021, MSTP020, MSTP017, MSTP016, + MSTP013, MSTP012, MSTP009, MSTP008, MSTP003, MSTP002, + MSTP119, MSTP117, MSTP105, MSTP104, MSTP100, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* MSTPCR0 */ + [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0), + [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0), + [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0), + [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), + [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), + [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), + [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), + [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0), + [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0), + [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), + [MSTP013] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 13, 0), + [MSTP012] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 12, 0), + [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0), + [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0), + + /* MSTPCR1 */ + [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0), + [MSTP117] 
= SH_CLK_MSTP32(NULL, MSTPCR1, 17, 0), + [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0), + [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0), + [MSTP100] = SH_CLK_MSTP32(NULL, MSTPCR1, 0, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]), + CLKDEV_CON_ID("ga_clk", &div4_clks[DIV4_GA]), + CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]), + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + + /* MSTP32 clocks */ + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP029]), + CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP028]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]), + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]), + + CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]), + CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]), + CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]), + CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]), + CLKDEV_CON_ID("mmcif_fck", &mstp_clks[MSTP013]), + CLKDEV_CON_ID("flctl_fck", &mstp_clks[MSTP012]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]), + + CLKDEV_CON_ID("siof_fck", &mstp_clks[MSTP003]), + CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]), + CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]), + CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP117]), + CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]), + CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]), + CLKDEV_CON_ID("gdta_fck", &mstp_clks[MSTP100]), +}; + +int __init arch_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(clks); i++) + ret |= clk_register(clks[i]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), + &div4_table); + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7786.c b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c new file mode 100644 index 000000000..f23862df3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7786.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/clock-sh7786.c + * + * SH7786 support for the clock framework + * + * Copyright (C) 2010 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 33333333, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + int multiplier; + + /* + * Clock modes 0, 1, and 2 use an x64 multiplier against PLL1, + * while modes 3, 4, and 5 use an x32. + */ + multiplier = (sh_mv.mv_mode_pins() & 0xf) < 3 ? 
64 : 32; + + return clk->parent->rate * multiplier; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk *clks[] = { + &extal_clk, + &pll_clk, +}; + +static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, + 24, 32, 36, 48 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_DU, DIV4_P, DIV4_NR }; + +#define DIV4(_bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_P] = DIV4(0, 0x0b40, 0), + [DIV4_DU] = DIV4(4, 0x0010, 0), + [DIV4_DDR] = DIV4(12, 0x0002, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(16, 0x0360, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(20, 0x0002, CLK_ENABLE_ON_INIT), + [DIV4_I] = DIV4(28, 0x0006, CLK_ENABLE_ON_INIT), +}; + +#define MSTPCR0 0xffc40030 +#define MSTPCR1 0xffc40034 + +enum { MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, + MSTP023, MSTP022, MSTP021, MSTP020, MSTP017, MSTP016, + MSTP015, MSTP014, MSTP011, MSTP010, MSTP009, MSTP008, + MSTP005, MSTP004, MSTP002, + MSTP112, MSTP110, MSTP109, MSTP108, + MSTP105, MSTP104, MSTP103, MSTP102, + MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* MSTPCR0 */ + [MSTP029] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 29, 0), + [MSTP028] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 28, 0), + [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0), + [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), + [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), + [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), + [MSTP023] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 23, 0), + [MSTP022] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, 0), + [MSTP021] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 21, 0), + [MSTP020] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 20, 0), + [MSTP017] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 17, 0), + [MSTP016] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 16, 0), + [MSTP015] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), + [MSTP014] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 14, 0), + [MSTP011] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 11, 0), + [MSTP010] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), + [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [MSTP005] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 5, 0), + [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0), + [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0), + + /* MSTPCR1 */ + [MSTP112] = SH_CLK_MSTP32(NULL, MSTPCR1, 12, 0), + [MSTP110] = SH_CLK_MSTP32(NULL, MSTPCR1, 10, 0), + [MSTP109] = SH_CLK_MSTP32(NULL, MSTPCR1, 9, 0), + [MSTP108] = SH_CLK_MSTP32(NULL, MSTPCR1, 8, 0), + [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0), + [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0), + [MSTP103] = SH_CLK_MSTP32(NULL, MSTPCR1, 3, 0), + [MSTP102] = SH_CLK_MSTP32(NULL, MSTPCR1, 2, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("du_clk", &div4_clks[DIV4_DU]), + 
CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + + /* MSTP32 clocks */ + CLKDEV_ICK_ID("fck", "sh-sci.5", &mstp_clks[MSTP029]), + CLKDEV_ICK_ID("fck", "sh-sci.4", &mstp_clks[MSTP028]), + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]), + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]), + + CLKDEV_CON_ID("ssi3_fck", &mstp_clks[MSTP023]), + CLKDEV_CON_ID("ssi2_fck", &mstp_clks[MSTP022]), + CLKDEV_CON_ID("ssi1_fck", &mstp_clks[MSTP021]), + CLKDEV_CON_ID("ssi0_fck", &mstp_clks[MSTP020]), + CLKDEV_CON_ID("hac1_fck", &mstp_clks[MSTP017]), + CLKDEV_CON_ID("hac0_fck", &mstp_clks[MSTP016]), + CLKDEV_CON_ID("i2c1_fck", &mstp_clks[MSTP015]), + CLKDEV_CON_ID("i2c0_fck", &mstp_clks[MSTP014]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]), + CLKDEV_ICK_ID("fck", "sh-tmu.2", &mstp_clks[MSTP010]), + CLKDEV_ICK_ID("fck", "sh-tmu.3", &mstp_clks[MSTP011]), + + CLKDEV_CON_ID("sdif1_fck", &mstp_clks[MSTP005]), + CLKDEV_CON_ID("sdif0_fck", &mstp_clks[MSTP004]), + CLKDEV_CON_ID("hspi_fck", &mstp_clks[MSTP002]), + CLKDEV_CON_ID("usb_fck", &mstp_clks[MSTP112]), + CLKDEV_CON_ID("pcie2_fck", &mstp_clks[MSTP110]), + CLKDEV_CON_ID("pcie1_fck", &mstp_clks[MSTP109]), + CLKDEV_CON_ID("pcie0_fck", &mstp_clks[MSTP108]), + CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]), + CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]), + CLKDEV_CON_ID("du_fck", &mstp_clks[MSTP103]), + CLKDEV_CON_ID("ether_fck", &mstp_clks[MSTP102]), +}; + +int __init arch_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(clks); i++) + ret |= clk_register(clks[i]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), + &div4_table); + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/clock-shx3.c b/arch/sh/kernel/cpu/sh4a/clock-shx3.c new file mode 100644 index 000000000..6c7b6ab6c --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/clock-shx3.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4/clock-shx3.c + * + * SH-X3 support for the clock framework + * + * Copyright (C) 2006-2007 Renesas Technology Corp. + * Copyright (C) 2006-2007 Renesas Solutions Corp. + * Copyright (C) 2006-2010 Paul Mundt + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/clkdev.h> +#include <asm/clock.h> +#include <asm/freq.h> + +/* + * Default rate for the root input clock, reset this with clk_set_rate() + * from the platform code. + */ +static struct clk extal_clk = { + .rate = 16666666, +}; + +static unsigned long pll_recalc(struct clk *clk) +{ + /* PLL1 has a fixed x72 multiplier. 
*/ + return clk->parent->rate * 72; +} + +static struct sh_clk_ops pll_clk_ops = { + .recalc = pll_recalc, +}; + +static struct clk pll_clk = { + .ops = &pll_clk_ops, + .parent = &extal_clk, + .flags = CLK_ENABLE_ON_INIT, +}; + +static struct clk *clks[] = { + &extal_clk, + &pll_clk, +}; + +static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, + 24, 32, 36, 48 }; + +static struct clk_div_mult_table div4_div_mult_table = { + .divisors = div2, + .nr_divisors = ARRAY_SIZE(div2), +}; + +static struct clk_div4_table div4_table = { + .div_mult_table = &div4_div_mult_table, +}; + +enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_SHA, DIV4_P, DIV4_NR }; + +#define DIV4(_bit, _mask, _flags) \ + SH_CLK_DIV4(&pll_clk, FRQMR1, _bit, _mask, _flags) + +struct clk div4_clks[DIV4_NR] = { + [DIV4_P] = DIV4(0, 0x0f80, 0), + [DIV4_SHA] = DIV4(4, 0x0ff0, 0), + [DIV4_DDR] = DIV4(12, 0x000c, CLK_ENABLE_ON_INIT), + [DIV4_B] = DIV4(16, 0x0fe0, CLK_ENABLE_ON_INIT), + [DIV4_SH] = DIV4(20, 0x000c, CLK_ENABLE_ON_INIT), + [DIV4_I] = DIV4(28, 0x000e, CLK_ENABLE_ON_INIT), +}; + +#define MSTPCR0 0xffc00030 +#define MSTPCR1 0xffc00034 + +enum { MSTP027, MSTP026, MSTP025, MSTP024, + MSTP009, MSTP008, MSTP003, MSTP002, + MSTP001, MSTP000, MSTP119, MSTP105, + MSTP104, MSTP_NR }; + +static struct clk mstp_clks[MSTP_NR] = { + /* MSTPCR0 */ + [MSTP027] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 27, 0), + [MSTP026] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 26, 0), + [MSTP025] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 25, 0), + [MSTP024] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 24, 0), + [MSTP009] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), + [MSTP008] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), + [MSTP003] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 3, 0), + [MSTP002] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 2, 0), + [MSTP001] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 1, 0), + [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0), + + /* MSTPCR1 */ + [MSTP119] = SH_CLK_MSTP32(NULL, MSTPCR1, 19, 0), + [MSTP105] = SH_CLK_MSTP32(NULL, MSTPCR1, 5, 0), + [MSTP104] = SH_CLK_MSTP32(NULL, MSTPCR1, 4, 0), +}; + +static struct clk_lookup lookups[] = { + /* main clocks */ + CLKDEV_CON_ID("extal", &extal_clk), + CLKDEV_CON_ID("pll_clk", &pll_clk), + + /* DIV4 clocks */ + CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), + CLKDEV_CON_ID("shywaya_clk", &div4_clks[DIV4_SHA]), + CLKDEV_CON_ID("ddr_clk", &div4_clks[DIV4_DDR]), + CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), + CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), + CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), + + /* MSTP32 clocks */ + CLKDEV_ICK_ID("fck", "sh-sci.3", &mstp_clks[MSTP027]), + CLKDEV_ICK_ID("fck", "sh-sci.2", &mstp_clks[MSTP026]), + CLKDEV_ICK_ID("fck", "sh-sci.1", &mstp_clks[MSTP025]), + CLKDEV_ICK_ID("fck", "sh-sci.0", &mstp_clks[MSTP024]), + + CLKDEV_CON_ID("h8ex_fck", &mstp_clks[MSTP003]), + CLKDEV_CON_ID("csm_fck", &mstp_clks[MSTP002]), + CLKDEV_CON_ID("fe1_fck", &mstp_clks[MSTP001]), + CLKDEV_CON_ID("fe0_fck", &mstp_clks[MSTP000]), + + CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[MSTP008]), + CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[MSTP009]), + + CLKDEV_CON_ID("hudi_fck", &mstp_clks[MSTP119]), + CLKDEV_CON_ID("dmac_11_6_fck", &mstp_clks[MSTP105]), + CLKDEV_CON_ID("dmac_5_0_fck", &mstp_clks[MSTP104]), +}; + +int __init arch_clk_init(void) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(clks); i++) + ret |= clk_register(clks[i]); + + clkdev_add_table(lookups, ARRAY_SIZE(lookups)); + + if (!ret) + ret = 
sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), + &div4_table); + if (!ret) + ret = sh_clk_mstp_register(mstp_clks, MSTP_NR); + + return ret; +} diff --git a/arch/sh/kernel/cpu/sh4a/intc-shx3.c b/arch/sh/kernel/cpu/sh4a/intc-shx3.c new file mode 100644 index 000000000..eea87d25e --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/intc-shx3.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Shared support for SH-X3 interrupt controllers. + * + * Copyright (C) 2009 - 2010 Paul Mundt + */ +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/init.h> + +#define INTACK 0xfe4100b8 +#define INTACKCLR 0xfe4100bc +#define INTC_USERIMASK 0xfe411000 + +#ifdef CONFIG_INTC_BALANCING +unsigned int irq_lookup(unsigned int irq) +{ + return __raw_readl(INTACK) & 1 ? irq : NO_IRQ_IGNORE; +} + +void irq_finish(unsigned int irq) +{ + __raw_writel(irq2evt(irq), INTACKCLR); +} +#endif + +static int __init shx3_irq_setup(void) +{ + return register_intc_userimask(INTC_USERIMASK); +} +arch_initcall(shx3_irq_setup); diff --git a/arch/sh/kernel/cpu/sh4a/perf_event.c b/arch/sh/kernel/cpu/sh4a/perf_event.c new file mode 100644 index 000000000..3beb8fed3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/perf_event.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SH-4A performance counters + * + * Copyright (C) 2009, 2010 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/perf_event.h> +#include <asm/processor.h> + +#define PPC_CCBR(idx) (0xff200800 + (sizeof(u32) * idx)) +#define PPC_PMCTR(idx) (0xfc100000 + (sizeof(u32) * idx)) + +#define CCBR_CIT_MASK (0x7ff << 6) +#define CCBR_DUC (1 << 3) +#define CCBR_CMDS (1 << 1) +#define CCBR_PPCE (1 << 0) + +#ifdef CONFIG_CPU_SHX3 +/* + * The PMCAT location for SH-X3 CPUs was quietly moved, while the CCBR + * and PMCTR locations remains tentatively constant. This change remains + * wholly undocumented, and was simply found through trial and error. + * + * Early cuts of SH-X3 still appear to use the SH-X/SH-X2 locations, and + * it's unclear when this ceased to be the case. For now we always use + * the new location (if future parts keep up with this trend then + * scanning for them at runtime also remains a viable option.) + * + * The gap in the register space also suggests that there are other + * undocumented counters, so this will need to be revisited at a later + * point in time. 
+ */ +#define PPC_PMCAT 0xfc100240 +#else +#define PPC_PMCAT 0xfc100080 +#endif + +#define PMCAT_OVF3 (1 << 27) +#define PMCAT_CNN3 (1 << 26) +#define PMCAT_CLR3 (1 << 25) +#define PMCAT_OVF2 (1 << 19) +#define PMCAT_CLR2 (1 << 17) +#define PMCAT_OVF1 (1 << 11) +#define PMCAT_CNN1 (1 << 10) +#define PMCAT_CLR1 (1 << 9) +#define PMCAT_OVF0 (1 << 3) +#define PMCAT_CLR0 (1 << 1) + +static struct sh_pmu sh4a_pmu; + +/* + * Supported raw event codes: + * + * Event Code Description + * ---------- ----------- + * + * 0x0000 number of elapsed cycles + * 0x0200 number of elapsed cycles in privileged mode + * 0x0280 number of elapsed cycles while SR.BL is asserted + * 0x0202 instruction execution + * 0x0203 instruction execution in parallel + * 0x0204 number of unconditional branches + * 0x0208 number of exceptions + * 0x0209 number of interrupts + * 0x0220 UTLB miss caused by instruction fetch + * 0x0222 UTLB miss caused by operand access + * 0x02a0 number of ITLB misses + * 0x0028 number of accesses to instruction memories + * 0x0029 number of accesses to instruction cache + * 0x002a instruction cache miss + * 0x022e number of access to instruction X/Y memory + * 0x0030 number of reads to operand memories + * 0x0038 number of writes to operand memories + * 0x0031 number of operand cache read accesses + * 0x0039 number of operand cache write accesses + * 0x0032 operand cache read miss + * 0x003a operand cache write miss + * 0x0236 number of reads to operand X/Y memory + * 0x023e number of writes to operand X/Y memory + * 0x0237 number of reads to operand U memory + * 0x023f number of writes to operand U memory + * 0x0337 number of U memory read buffer misses + * 0x02b4 number of wait cycles due to operand read access + * 0x02bc number of wait cycles due to operand write access + * 0x0033 number of wait cycles due to operand cache read miss + * 0x003b number of wait cycles due to operand cache write miss + */ + +/* + * Special reserved bits used by hardware emulators, read values will + * vary, but writes must always be 0. 
+ */ +#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0)) + +static const int sh4a_general_events[] = { + [PERF_COUNT_HW_CPU_CYCLES] = 0x0000, + [PERF_COUNT_HW_INSTRUCTIONS] = 0x0202, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029, /* I-cache */ + [PERF_COUNT_HW_CACHE_MISSES] = 0x002a, /* I-cache */ + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204, + [PERF_COUNT_HW_BRANCH_MISSES] = -1, + [PERF_COUNT_HW_BUS_CYCLES] = -1, +}; + +#define C(x) PERF_COUNT_HW_CACHE_##x + +static const int sh4a_cache_events + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = +{ + [ C(L1D) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0031, + [ C(RESULT_MISS) ] = 0x0032, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0039, + [ C(RESULT_MISS) ] = 0x003a, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(L1I) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0029, + [ C(RESULT_MISS) ] = 0x002a, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(LL) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0030, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0x0038, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(DTLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0x0222, + [ C(RESULT_MISS) ] = 0x0220, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0, + }, + }, + + [ C(ITLB) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = 0, + [ C(RESULT_MISS) ] = 0x02a0, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(BPU) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, + + [ C(NODE) ] = { + [ C(OP_READ) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = -1, + [ C(RESULT_MISS) ] = -1, + }, + }, +}; + +static int sh4a_event_map(int event) +{ + return sh4a_general_events[event]; +} + +static u64 sh4a_pmu_read(int idx) +{ + return __raw_readl(PPC_PMCTR(idx)); +} + +static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx) +{ + unsigned int tmp; + + tmp = __raw_readl(PPC_CCBR(idx)); + tmp &= ~(CCBR_CIT_MASK | CCBR_DUC); + __raw_writel(tmp, PPC_CCBR(idx)); +} + +static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx) +{ + unsigned int tmp; + + tmp = __raw_readl(PPC_PMCAT); + tmp &= ~PMCAT_EMU_CLR_MASK; + tmp |= idx ? 
PMCAT_CLR1 : PMCAT_CLR0; + __raw_writel(tmp, PPC_PMCAT); + + tmp = __raw_readl(PPC_CCBR(idx)); + tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE; + __raw_writel(tmp, PPC_CCBR(idx)); + + __raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx)); +} + +static void sh4a_pmu_disable_all(void) +{ + int i; + + for (i = 0; i < sh4a_pmu.num_events; i++) + __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i)); +} + +static void sh4a_pmu_enable_all(void) +{ + int i; + + for (i = 0; i < sh4a_pmu.num_events; i++) + __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i)); +} + +static struct sh_pmu sh4a_pmu = { + .name = "sh4a", + .num_events = 2, + .event_map = sh4a_event_map, + .max_events = ARRAY_SIZE(sh4a_general_events), + .raw_event_mask = 0x3ff, + .cache_events = &sh4a_cache_events, + .read = sh4a_pmu_read, + .disable = sh4a_pmu_disable, + .enable = sh4a_pmu_enable, + .disable_all = sh4a_pmu_disable_all, + .enable_all = sh4a_pmu_enable_all, +}; + +static int __init sh4a_pmu_init(void) +{ + /* + * Make sure this CPU actually has perf counters. + */ + if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { + pr_notice("HW perf events unsupported, software events only.\n"); + return -ENODEV; + } + + return register_sh_pmu(&sh4a_pmu); +} +early_initcall(sh4a_pmu_init); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c new file mode 100644 index 000000000..0dd5312f9 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7722.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7722_pfc_resources[] = { + [0] = { + .start = 0xa4050100, + .end = 0xa405018f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7722", sh7722_pfc_resources, + ARRAY_SIZE(sh7722_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c new file mode 100644 index 000000000..b67abc063 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7723.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7723 Pinmux + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7723_pfc_resources[] = { + [0] = { + .start = 0xa4050100, + .end = 0xa405016f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7723", sh7723_pfc_resources, + ARRAY_SIZE(sh7723_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c new file mode 100644 index 000000000..b43c32590 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7724.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7724 Pinmux + * + * Copyright (C) 2009 Renesas Solutions Corp. 
+ * + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * Based on SH7723 Pinmux + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7724_pfc_resources[] = { + [0] = { + .start = 0xa4050100, + .end = 0xa405016f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7724", sh7724_pfc_resources, + ARRAY_SIZE(sh7724_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c new file mode 100644 index 000000000..46256b196 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7734.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7734 processor support - PFC hardware block + * + * Copyright (C) 2012 Renesas Solutions Corp. + * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> + */ +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7734_pfc_resources[] = { + [0] = { /* PFC */ + .start = 0xFFFC0000, + .end = 0xFFFC011C, + .flags = IORESOURCE_MEM, + }, + [1] = { /* GPIO */ + .start = 0xFFC40000, + .end = 0xFFC4502B, + .flags = IORESOURCE_MEM, + } +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7734", sh7734_pfc_resources, + ARRAY_SIZE(sh7734_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c new file mode 100644 index 000000000..c92f304cb --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7757.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7757 (B0 step) Pinmux + * + * Copyright (C) 2009-2010 Renesas Solutions Corp. 
+ * + * Author : Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> + * + * Based on SH7723 Pinmux + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7757_pfc_resources[] = { + [0] = { + .start = 0xffec0000, + .end = 0xffec008f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7757", sh7757_pfc_resources, + ARRAY_SIZE(sh7757_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c new file mode 100644 index 000000000..f329de6e7 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7785.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7785 Pinmux + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7785_pfc_resources[] = { + [0] = { + .start = 0xffe70000, + .end = 0xffe7008f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7785", sh7785_pfc_resources, + ARRAY_SIZE(sh7785_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c new file mode 100644 index 000000000..47e8639f3 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-sh7786.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7786 Pinmux + * + * Copyright (C) 2008, 2009 Renesas Solutions Corp. + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * Based on SH7785 pinmux + * + * Copyright (C) 2008 Magnus Damm + */ + +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource sh7786_pfc_resources[] = { + [0] = { + .start = 0xffcc0000, + .end = 0xffcc008f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-sh7786", sh7786_pfc_resources, + ARRAY_SIZE(sh7786_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c new file mode 100644 index 000000000..6c02f6256 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/pinmux-shx3.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH-X3 prototype CPU pinmux + * + * Copyright (C) 2010 Paul Mundt + */ +#include <linux/bug.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/ioport.h> +#include <cpu/pfc.h> + +static struct resource shx3_pfc_resources[] = { + [0] = { + .start = 0xffc70000, + .end = 0xffc7001f, + .flags = IORESOURCE_MEM, + }, +}; + +static int __init plat_pinmux_setup(void) +{ + return sh_pfc_register("pfc-shx3", shx3_pfc_resources, + ARRAY_SIZE(shx3_pfc_resources)); +} +arch_initcall(plat_pinmux_setup); diff --git a/arch/sh/kernel/cpu/sh4a/serial-sh7722.c b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c new file mode 100644 index 000000000..6ecc8b6e1 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/serial-sh7722.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/serial_sci.h> +#include <linux/serial_core.h> +#include <linux/io.h> + +#define PSCR 0xA405011E + +static void sh7722_sci_init_pins(struct uart_port *port, unsigned int cflag) +{ + unsigned short data; + + if 
(port->mapbase == 0xffe00000) { + data = __raw_readw(PSCR); + data &= ~0x03cf; + if (!(cflag & CRTSCTS)) + data |= 0x0340; + + __raw_writew(data, PSCR); + } +} + +struct plat_sci_port_ops sh7722_sci_port_ops = { + .init_pins = sh7722_sci_init_pins, +}; diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7343.c b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c new file mode 100644 index 000000000..b6015188f --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7343.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7343 Setup + * + * Copyright (C) 2006 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/uio_driver.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <asm/clock.h> +#include <asm/platform_early.h> + +/* Serial */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc20)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe20000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_CKE1, + .type = PORT_SCIF, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xffe30000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc60)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct resource iic0_resources[] = { + [0] = { + .name = "IIC0", + .start = 0x04470000, + .end = 0x04470017, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xe00), + .end = evt2irq(0xe60), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic0_device = { + .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ + .num_resources = ARRAY_SIZE(iic0_resources), + .resource = iic0_resources, +}; + +static struct resource iic1_resources[] = { + [0] = { + .name = "IIC1", + .start = 0x04750000, + .end = 0x04750017, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x780), + .end = evt2irq(0x7e0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic1_device = { + .name = "i2c-sh_mobile", + .id = 1, /* "i2c1" clock */ + .num_resources = ARRAY_SIZE(iic1_resources), + .resource = iic1_resources, +}; + +static struct uio_info vpu_platform_data 
= { + .name = "VPU4", + .version = "0", + .irq = evt2irq(0x980), +}; + +static struct resource vpu_resources[] = { + [0] = { + .name = "VPU", + .start = 0xfe900000, + .end = 0xfe9022eb, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device vpu_device = { + .name = "uio_pdrv_genirq", + .id = 0, + .dev = { + .platform_data = &vpu_platform_data, + }, + .resource = vpu_resources, + .num_resources = ARRAY_SIZE(vpu_resources), +}; + +static struct uio_info veu_platform_data = { + .name = "VEU", + .version = "0", + .irq = evt2irq(0x8c0), +}; + +static struct resource veu_resources[] = { + [0] = { + .name = "VEU", + .start = 0xfe920000, + .end = 0xfe9200b7, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu_device = { + .name = "uio_pdrv_genirq", + .id = 1, + .dev = { + .platform_data = &veu_platform_data, + }, + .resource = veu_resources, + .num_resources = ARRAY_SIZE(veu_resources), +}; + +static struct uio_info jpu_platform_data = { + .name = "JPU", + .version = "0", + .irq = evt2irq(0x560), +}; + +static struct resource jpu_resources[] = { + [0] = { + .name = "JPU", + .start = 0xfea00000, + .end = 0xfea102d3, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device jpu_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &jpu_platform_data, + }, + .resource = jpu_resources, + .num_resources = ARRAY_SIZE(jpu_resources), +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x20, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x70), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh7343_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &tmu0_device, + &iic0_device, + &iic1_device, + &vpu_device, + &veu_device, + &jpu_device, +}; + +static int __init sh7343_devices_setup(void) +{ + platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20); + platform_resource_setup_memory(&veu_device, "veu", 2 << 20); + platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); + + return platform_add_devices(sh7343_devices, + ARRAY_SIZE(sh7343_devices)); +} +arch_initcall(sh7343_devices_setup); + +static struct platform_device *sh7343_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &cmt_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7343_early_devices, + ARRAY_SIZE(sh7343_early_devices)); +} + +enum { + UNUSED = 0, + ENABLED, + DISABLED, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + 
DMAC0, DMAC1, DMAC2, DMAC3, + VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, + MFI, VPU, TPU, Z3D4, USBI0, USBI1, + MMC_ERR, MMC_TRAN, MMC_FSTAT, MMC_FRDY, + DMAC4, DMAC5, DMAC_DADERR, + KEYSC, + SCIF, SCIF1, SCIF2, SCIF3, + SIOF0, SIOF1, SIO, + FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, + I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, + I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI, + SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, + IRDA, SDHI, CMT, TSIF, SIU, + TMU0, TMU1, TMU2, + JPU, LCDC, + + /* interrupt groups */ + + DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C0, I2C1, SIM, USB, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + INTC_VECT(I2C1_ALI, 0x780), INTC_VECT(I2C1_TACKI, 0x7a0), + INTC_VECT(I2C1_WAITI, 0x7c0), INTC_VECT(I2C1_DTEI, 0x7e0), + INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), + INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), + INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), + INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), + INTC_VECT(TPU, 0x9a0), INTC_VECT(Z3D4, 0x9e0), + INTC_VECT(USBI0, 0xa20), INTC_VECT(USBI1, 0xa40), + INTC_VECT(MMC_ERR, 0xb00), INTC_VECT(MMC_TRAN, 0xb20), + INTC_VECT(MMC_FSTAT, 0xb40), INTC_VECT(MMC_FRDY, 0xb60), + INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), + INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0), + INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIF1, 0xc20), + INTC_VECT(SCIF2, 0xc40), INTC_VECT(SCIF3, 0xc60), + INTC_VECT(SIOF0, 0xc80), INTC_VECT(SIOF1, 0xca0), + INTC_VECT(SIO, 0xd00), + INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), + INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), + INTC_VECT(I2C0_ALI, 0xe00), INTC_VECT(I2C0_TACKI, 0xe20), + INTC_VECT(I2C0_WAITI, 0xe40), INTC_VECT(I2C0_DTEI, 0xe60), + INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), + INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), + INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), + INTC_VECT(SIU, 0xf80), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), + INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), + INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), + INTC_GROUP(MMC, MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR), + INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), + INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, + FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), + INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), + INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), + INTC_GROUP(SIM, SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI), + INTC_GROUP(USB, USBI0, USBI1), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU, 0, 0, 0, MFI } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { KEYSC, DMAC_DADERR, DMAC5, DMAC4, SCIF3, SCIF2, SCIF1, SCIF } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0, 0, 0, SIO, Z3D4, 0, SIOF1, SIOF0 } }, + { 
0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, + FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT, 0, USBI1, USBI0 } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { MMC_FRDY, MMC_FSTAT, MMC_TRAN, MMC_ERR } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI, TPU, 0, 0, TSIF } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } }, + { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIF1, SCIF2, SCIF3 } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C0 } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, I2C1 } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { Z3D4, 0, SIU } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { 0, 0, TPU } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4140024, 0, 8, /* INTREQ00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_desc intc_desc __initdata = { + .name = "sh7343", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7366.c b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c new file mode 100644 index 000000000..6676beef0 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7366.c @@ -0,0 +1,388 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7366 Setup + * + * Copyright (C) 2008 Renesas Solutions + * + * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/uio_driver.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/usb/r8a66597.h> +#include <asm/clock.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct resource iic_resources[] = { + [0] = { + .name = "IIC", + .start = 0x04470000, + .end = 0x04470017, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xe00), + .end = evt2irq(0xe60), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device 
iic_device = { + .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ + .num_resources = ARRAY_SIZE(iic_resources), + .resource = iic_resources, +}; + +static struct r8a66597_platdata r8a66597_data = { + .on_chip = 1, +}; + +static struct resource usb_host_resources[] = { + [0] = { + .start = 0xa4d80000, + .end = 0xa4d800ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xa20), + .end = evt2irq(0xa20), + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, + }, +}; + +static struct platform_device usb_host_device = { + .name = "r8a66597_hcd", + .id = -1, + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + .platform_data = &r8a66597_data, + }, + .num_resources = ARRAY_SIZE(usb_host_resources), + .resource = usb_host_resources, +}; + +static struct uio_info vpu_platform_data = { + .name = "VPU5", + .version = "0", + .irq = evt2irq(0x980), +}; + +static struct resource vpu_resources[] = { + [0] = { + .name = "VPU", + .start = 0xfe900000, + .end = 0xfe902807, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device vpu_device = { + .name = "uio_pdrv_genirq", + .id = 0, + .dev = { + .platform_data = &vpu_platform_data, + }, + .resource = vpu_resources, + .num_resources = ARRAY_SIZE(vpu_resources), +}; + +static struct uio_info veu0_platform_data = { + .name = "VEU", + .version = "0", + .irq = evt2irq(0x8c0), +}; + +static struct resource veu0_resources[] = { + [0] = { + .name = "VEU(1)", + .start = 0xfe920000, + .end = 0xfe9200b7, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu0_device = { + .name = "uio_pdrv_genirq", + .id = 1, + .dev = { + .platform_data = &veu0_platform_data, + }, + .resource = veu0_resources, + .num_resources = ARRAY_SIZE(veu0_resources), +}; + +static struct uio_info veu1_platform_data = { + .name = "VEU", + .version = "0", + .irq = evt2irq(0x560), +}; + +static struct resource veu1_resources[] = { + [0] = { + .name = "VEU(2)", + .start = 0xfe924000, + .end = 0xfe9240b7, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu1_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &veu1_platform_data, + }, + .resource = veu1_resources, + .num_resources = ARRAY_SIZE(veu1_resources), +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x20, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x70), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct platform_device *sh7366_devices[] __initdata = { + &scif0_device, + &cmt_device, + &tmu0_device, + &iic_device, + &usb_host_device, + &vpu_device, + &veu0_device, + &veu1_device, +}; + +static int __init 
sh7366_devices_setup(void) +{ + platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); + platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); + platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); + + return platform_add_devices(sh7366_devices, + ARRAY_SIZE(sh7366_devices)); +} +arch_initcall(sh7366_devices_setup); + +static struct platform_device *sh7366_early_devices[] __initdata = { + &scif0_device, + &cmt_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7366_early_devices, + ARRAY_SIZE(sh7366_early_devices)); +} + +enum { + UNUSED=0, + ENABLED, + DISABLED, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + ICB, + DMAC0, DMAC1, DMAC2, DMAC3, + VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, + MFI, VPU, USB, + MMC_MMC1I, MMC_MMC2I, MMC_MMC3I, + DMAC4, DMAC5, DMAC_DADERR, + SCIF, SCIFA1, SCIFA2, + DENC, MSIOF, + FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, + I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, + SDHI, CMT, TSIF, SIU, + TMU0, TMU1, TMU2, + VEU2, LCDC, + + /* interrupt groups */ + + DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + INTC_VECT(ICB, 0x700), + INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), + INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), + INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), + INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20), + INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20), + INTC_VECT(MMC_MMC3I, 0xb40), + INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), + INTC_VECT(DMAC_DADERR, 0xbc0), + INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20), + INTC_VECT(SCIFA2, 0xc40), + INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80), + INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), + INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), + INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), + INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), + INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), + INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), + INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), + INTC_VECT(SIU, 0xf80), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), + INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), + INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), + INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I), + INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), + INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, + FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), + INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ + { } }, + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU, 0, 0, 0, MFI } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { 0, 0, 0, ICB } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { 
0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0, 0, 0, 0, 0, 0, 0, MSIOF } }, + { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, + FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT, 0, USB, } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { 0, 0, 0, 0, 0, 0, 0, TSIF } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } }, + { 0xa4080008, 0, 16, 4, /* IPRC */ { } }, + { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, + { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4140024, 0, 8, /* INTREQ00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_desc intc_desc __initdata = { + .name = "sh7366", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +void __init plat_mem_setup(void) +{ + /* TODO: Register Node 1 */ +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c new file mode 100644 index 000000000..0c6757ef6 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7722 Setup + * + * Copyright (C) 2006 - 2008 Paul Mundt + */ +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_dma.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/uio_driver.h> +#include <linux/usb/m66592.h> + +#include <asm/clock.h> +#include <asm/mmzone.h> +#include <asm/siu.h> +#include <asm/platform_early.h> + +#include <cpu/dma-register.h> +#include <cpu/sh7722.h> +#include <cpu/serial.h> + +static const struct sh_dmae_slave_config sh7722_dmae_slaves[] = { + { + .slave_id = SHDMA_SLAVE_SCIF0_TX, + .addr = 0xffe0000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x21, + }, { + .slave_id = SHDMA_SLAVE_SCIF0_RX, + .addr = 0xffe00014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x22, + }, { + .slave_id 
= SHDMA_SLAVE_SCIF1_TX, + .addr = 0xffe1000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x25, + }, { + .slave_id = SHDMA_SLAVE_SCIF1_RX, + .addr = 0xffe10014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x26, + }, { + .slave_id = SHDMA_SLAVE_SCIF2_TX, + .addr = 0xffe2000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x29, + }, { + .slave_id = SHDMA_SLAVE_SCIF2_RX, + .addr = 0xffe20014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2a, + }, { + .slave_id = SHDMA_SLAVE_SIUA_TX, + .addr = 0xa454c098, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xb1, + }, { + .slave_id = SHDMA_SLAVE_SIUA_RX, + .addr = 0xa454c090, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xb2, + }, { + .slave_id = SHDMA_SLAVE_SIUB_TX, + .addr = 0xa454c09c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xb5, + }, { + .slave_id = SHDMA_SLAVE_SIUB_RX, + .addr = 0xa454c094, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xb6, + }, { + .slave_id = SHDMA_SLAVE_SDHI0_TX, + .addr = 0x04ce0030, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, { + .slave_id = SHDMA_SLAVE_SDHI0_RX, + .addr = 0x04ce0030, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, +}; + +static const struct sh_dmae_channel sh7722_dmae_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma_platform_data = { + .slave = sh7722_dmae_slaves, + .slave_num = ARRAY_SIZE(sh7722_dmae_slaves), + .channel = sh7722_dmae_channels, + .channel_num = ARRAY_SIZE(sh7722_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct resource sh7722_dmae_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0xbc0), + .end = evt2irq(0xbc0), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = evt2irq(0x800), + .end = evt2irq(0x860), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = evt2irq(0xb80), + .end = evt2irq(0xba0), + .flags = IORESOURCE_IRQ, + }, +}; + +struct platform_device dma_device = { + .name = "sh-dma-engine", + .id = -1, + .resource = sh7722_dmae_resources, + .num_resources = ARRAY_SIZE(sh7722_dmae_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +/* Serial */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + 
DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc20)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .ops = &sh7722_sci_port_ops, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe20000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xa465fec0, + .end = 0xa465fec0 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = evt2irq(0x7a0), + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = evt2irq(0x7c0), + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = evt2irq(0x780), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct m66592_platdata usbf_platdata = { + .on_chip = 1, +}; + +static struct resource usbf_resources[] = { + [0] = { + .name = "USBF", + .start = 0x04480000, + .end = 0x044800FF, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xa20), + .end = evt2irq(0xa20), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbf_device = { + .name = "m66592_udc", + .id = 0, /* "usbf0" clock */ + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + .platform_data = &usbf_platdata, + }, + .num_resources = ARRAY_SIZE(usbf_resources), + .resource = usbf_resources, +}; + +static struct resource iic_resources[] = { + [0] = { + .name = "IIC", + .start = 0x04470000, + .end = 0x04470017, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xe00), + .end = evt2irq(0xe60), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic_device = { + .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ + .num_resources = ARRAY_SIZE(iic_resources), + .resource = iic_resources, +}; + +static struct uio_info vpu_platform_data = { + .name = "VPU4", + .version = "0", + .irq = evt2irq(0x980), +}; + +static struct resource vpu_resources[] = { + [0] = { + .name = "VPU", + .start = 0xfe900000, + .end = 0xfe9022eb, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device vpu_device = { + .name = "uio_pdrv_genirq", + .id = 0, + .dev = { + .platform_data = &vpu_platform_data, + }, + .resource = vpu_resources, + .num_resources = ARRAY_SIZE(vpu_resources), +}; + +static struct uio_info veu_platform_data = { + .name = "VEU", + .version = "0", + .irq = 
evt2irq(0x8c0), +}; + +static struct resource veu_resources[] = { + [0] = { + .name = "VEU", + .start = 0xfe920000, + .end = 0xfe9200b7, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu_device = { + .name = "uio_pdrv_genirq", + .id = 1, + .dev = { + .platform_data = &veu_platform_data, + }, + .resource = veu_resources, + .num_resources = ARRAY_SIZE(veu_resources), +}; + +static struct uio_info jpu_platform_data = { + .name = "JPU", + .version = "0", + .irq = evt2irq(0x560), +}; + +static struct resource jpu_resources[] = { + [0] = { + .name = "JPU", + .start = 0xfea00000, + .end = 0xfea102d3, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device jpu_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &jpu_platform_data, + }, + .resource = jpu_resources, + .num_resources = ARRAY_SIZE(jpu_resources), +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x20, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x70), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct siu_platform siu_platform_data = { + .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX, + .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX, + .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX, + .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX, +}; + +static struct resource siu_resources[] = { + [0] = { + .start = 0xa4540000, + .end = 0xa454c10f, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xf80), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device siu_device = { + .name = "siu-pcm-audio", + .id = -1, + .dev = { + .platform_data = &siu_platform_data, + }, + .resource = siu_resources, + .num_resources = ARRAY_SIZE(siu_resources), +}; + +static struct platform_device *sh7722_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &cmt_device, + &tmu0_device, + &rtc_device, + &usbf_device, + &iic_device, + &vpu_device, + &veu_device, + &jpu_device, + &siu_device, + &dma_device, +}; + +static int __init sh7722_devices_setup(void) +{ + platform_resource_setup_memory(&vpu_device, "vpu", 1 << 20); + platform_resource_setup_memory(&veu_device, "veu", 2 << 20); + platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); + + return platform_add_devices(sh7722_devices, + ARRAY_SIZE(sh7722_devices)); +} +arch_initcall(sh7722_devices_setup); + +static struct platform_device *sh7722_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &cmt_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7722_early_devices, + ARRAY_SIZE(sh7722_early_devices)); +} + +enum { + UNUSED=0, + ENABLED, + DISABLED, + + /* interrupt sources 
*/ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + HUDI, + SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI, + RTC_ATI, RTC_PRI, RTC_CUI, + DMAC0, DMAC1, DMAC2, DMAC3, + VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, + VPU, TPU, + USB_USBI0, USB_USBI1, + DMAC4, DMAC5, DMAC_DADERR, + KEYSC, + SCIF0, SCIF1, SCIF2, SIOF0, SIOF1, SIO, + FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, + I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, + CMT, TSIF, SIU, TWODG, + TMU0, TMU1, TMU2, + IRDA, JPU, LCDC, + + /* interrupt groups */ + SIM, RTC, DMAC0123, VIOVOU, USB, DMAC45, FLCTL, I2C, SDHI, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + INTC_VECT(SIM_ERI, 0x700), INTC_VECT(SIM_RXI, 0x720), + INTC_VECT(SIM_TXI, 0x740), INTC_VECT(SIM_TEI, 0x760), + INTC_VECT(RTC_ATI, 0x780), INTC_VECT(RTC_PRI, 0x7a0), + INTC_VECT(RTC_CUI, 0x7c0), + INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), + INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), + INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), + INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), + INTC_VECT(VPU, 0x980), INTC_VECT(TPU, 0x9a0), + INTC_VECT(USB_USBI0, 0xa20), INTC_VECT(USB_USBI1, 0xa40), + INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), + INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(KEYSC, 0xbe0), + INTC_VECT(SCIF0, 0xc00), INTC_VECT(SCIF1, 0xc20), + INTC_VECT(SCIF2, 0xc40), INTC_VECT(SIOF0, 0xc80), + INTC_VECT(SIOF1, 0xca0), INTC_VECT(SIO, 0xd00), + INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), + INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), + INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), + INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), + INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), + INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), + INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), + INTC_VECT(SIU, 0xf80), INTC_VECT(TWODG, 0xfa0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(IRDA, 0x480), + INTC_VECT(JPU, 0x560), INTC_VECT(LCDC, 0x580), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(SIM, SIM_ERI, SIM_RXI, SIM_TXI, SIM_TEI), + INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), + INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), + INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), + INTC_GROUP(USB, USB_USBI0, USB_USBI1), + INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), + INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, + FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), + INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ + { } }, + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU, } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { SIM_TEI, SIM_TXI, SIM_RXI, SIM_ERI, 0, 0, 0, IRDA } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0, TMU2, TMU1, TMU0, JPU, 0, 0, LCDC } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { KEYSC, DMAC_DADERR, DMAC5, DMAC4, 0, SCIF2, SCIF1, SCIF0 } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0, 0, 0, SIO, 0, 0, SIOF1, SIOF0 } }, + { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, + FLCTL_FLTREQ1I, 
FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, TWODG, SIU } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT, 0, USB_USBI1, USB_USBI0, } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { 0, RTC_CUI, RTC_PRI, RTC_ATI, 0, TPU, 0, TSIF } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, IRDA } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, SIM } }, + { 0xa4080008, 0, 16, 4, /* IPRC */ { } }, + { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, + { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, 0, VPU } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC45, USB, CMT } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF0, SCIF1, SCIF2 } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { SIOF0, SIOF1, FLCTL, I2C } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { SIO, 0, TSIF, RTC } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, 0, 0, SDHI } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { TWODG, 0, TPU } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4140024, 0, 8, /* INTREQ00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_desc intc_desc __initdata = { + .name = "sh7722", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +void __init plat_mem_setup(void) +{ + /* Register the URAM space as Node 1 */ + setup_bootmem_node(1, 0x055f0000, 0x05610000); +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7723.c b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c new file mode 100644 index 000000000..83ae1ad4a --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7723.c @@ -0,0 +1,644 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7723 Setup + * + * Copyright (C) 2008 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/mm.h> +#include <linux/serial_sci.h> +#include <linux/uio_driver.h> +#include <linux/usb/r8a66597.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> +#include <asm/clock.h> +#include <asm/mmzone.h> +#include <asm/platform_early.h> +#include <cpu/sh7723.h> + +/* Serial */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = 
SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc20)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe20000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xa4e30000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xa4e40000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xd00)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xa4e50000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xfa0)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct uio_info vpu_platform_data = { + .name = "VPU5", + .version = "0", + .irq = evt2irq(0x980), +}; + +static struct resource vpu_resources[] = { + [0] = { + .name = "VPU", + .start = 0xfe900000, + .end = 0xfe902807, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device vpu_device = { + .name = "uio_pdrv_genirq", + .id = 0, + .dev = { + .platform_data = &vpu_platform_data, + }, + .resource = vpu_resources, + .num_resources = ARRAY_SIZE(vpu_resources), +}; + +static struct uio_info veu0_platform_data = { + .name = "VEU2H", + .version = "0", + .irq = evt2irq(0x8c0), +}; + +static struct resource veu0_resources[] = { + [0] = { + .name = "VEU2H0", + .start = 0xfe920000, + .end = 0xfe92027b, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu0_device = { + .name = "uio_pdrv_genirq", + .id = 1, + .dev = { + .platform_data = &veu0_platform_data, + }, + .resource = veu0_resources, + .num_resources = ARRAY_SIZE(veu0_resources), +}; + +static struct uio_info veu1_platform_data = { + .name = "VEU2H", + .version = "0", + .irq = evt2irq(0x560), +}; + +static struct resource veu1_resources[] = { + [0] = { + .name = "VEU2H1", + .start = 0xfe924000, + .end = 0xfe92427b, + 
.flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu1_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &veu1_platform_data, + }, + .resource = veu1_resources, + .num_resources = ARRAY_SIZE(veu1_resources), +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x20, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x70), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffd90000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x920)), + DEFINE_RES_IRQ(evt2irq(0x940)), + DEFINE_RES_IRQ(evt2irq(0x960)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xa465fec0, + .end = 0xa465fec0 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = evt2irq(0xaa0), + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = evt2irq(0xac0), + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = evt2irq(0xa80), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct r8a66597_platdata r8a66597_data = { + .on_chip = 1, +}; + +static struct resource sh7723_usb_host_resources[] = { + [0] = { + .start = 0xa4d80000, + .end = 0xa4d800ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xa20), + .end = evt2irq(0xa20), + .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, + }, +}; + +static struct platform_device sh7723_usb_host_device = { + .name = "r8a66597_hcd", + .id = 0, + .dev = { + .dma_mask = NULL, /* not use dma */ + .coherent_dma_mask = 0xffffffff, + .platform_data = &r8a66597_data, + }, + .num_resources = ARRAY_SIZE(sh7723_usb_host_resources), + .resource = sh7723_usb_host_resources, +}; + +static struct resource iic_resources[] = { + [0] = { + .name = "IIC", + .start = 0x04470000, + .end = 0x04470017, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xe00), + .end = evt2irq(0xe60), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic_device = { + .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ + .num_resources = ARRAY_SIZE(iic_resources), + .resource = iic_resources, +}; + +static struct platform_device *sh7723_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &cmt_device, + 
&tmu0_device, + &tmu1_device, + &rtc_device, + &iic_device, + &sh7723_usb_host_device, + &vpu_device, + &veu0_device, + &veu1_device, +}; + +static int __init sh7723_devices_setup(void) +{ + platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); + platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); + platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); + + return platform_add_devices(sh7723_devices, + ARRAY_SIZE(sh7723_devices)); +} +arch_initcall(sh7723_devices_setup); + +static struct platform_device *sh7723_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &cmt_device, + &tmu0_device, + &tmu1_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7723_early_devices, + ARRAY_SIZE(sh7723_early_devices)); +} + +#define RAMCR_CACHE_L2FC 0x0002 +#define RAMCR_CACHE_L2E 0x0001 +#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) + +void l2_cache_init(void) +{ + /* Enable L2 cache */ + __raw_writel(L2_CACHE_ENABLE, RAMCR); +} + +enum { + UNUSED=0, + ENABLED, + DISABLED, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + HUDI, + DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3, + _2DG_TRI,_2DG_INI,_2DG_CEI, + DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3, + VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI, + SCIFA_SCIFA0, + VPU_VPUI, + TPU_TPUI, + ADC_ADI, + USB_USI0, + RTC_ATI,RTC_PRI,RTC_CUI, + DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR, + DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR, + KEYSC_KEYI, + SCIF_SCIF0,SCIF_SCIF1,SCIF_SCIF2, + MSIOF_MSIOFI0,MSIOF_MSIOFI1, + SCIFA_SCIFA1, + FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I, + I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI, + CMT_CMTI, + TSIF_TSIFI, + SIU_SIUI, + SCIFA_SCIFA2, + TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, + IRDA_IRDAI, + ATAPI_ATAPII, + VEU2H1_VEU2HI, + LCDC_LCDCI, + TMU1_TUNI0,TMU1_TUNI1,TMU1_TUNI2, + + /* interrupt groups */ + DMAC1A, DMAC0A, VIO, DMAC0B, FLCTL, I2C, _2DG, + SDHI1, RTC, DMAC1B, SDHI0, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + + INTC_VECT(DMAC1A_DEI0,0x700), + INTC_VECT(DMAC1A_DEI1,0x720), + INTC_VECT(DMAC1A_DEI2,0x740), + INTC_VECT(DMAC1A_DEI3,0x760), + + INTC_VECT(_2DG_TRI, 0x780), + INTC_VECT(_2DG_INI, 0x7A0), + INTC_VECT(_2DG_CEI, 0x7C0), + + INTC_VECT(DMAC0A_DEI0,0x800), + INTC_VECT(DMAC0A_DEI1,0x820), + INTC_VECT(DMAC0A_DEI2,0x840), + INTC_VECT(DMAC0A_DEI3,0x860), + + INTC_VECT(VIO_CEUI,0x880), + INTC_VECT(VIO_BEUI,0x8A0), + INTC_VECT(VIO_VEU2HI,0x8C0), + INTC_VECT(VIO_VOUI,0x8E0), + + INTC_VECT(SCIFA_SCIFA0,0x900), + INTC_VECT(VPU_VPUI,0x980), + INTC_VECT(TPU_TPUI,0x9A0), + INTC_VECT(ADC_ADI,0x9E0), + INTC_VECT(USB_USI0,0xA20), + + INTC_VECT(RTC_ATI,0xA80), + INTC_VECT(RTC_PRI,0xAA0), + INTC_VECT(RTC_CUI,0xAC0), + + INTC_VECT(DMAC1B_DEI4,0xB00), + INTC_VECT(DMAC1B_DEI5,0xB20), + INTC_VECT(DMAC1B_DADERR,0xB40), + + INTC_VECT(DMAC0B_DEI4,0xB80), + INTC_VECT(DMAC0B_DEI5,0xBA0), + INTC_VECT(DMAC0B_DADERR,0xBC0), + + INTC_VECT(KEYSC_KEYI,0xBE0), + INTC_VECT(SCIF_SCIF0,0xC00), + INTC_VECT(SCIF_SCIF1,0xC20), + INTC_VECT(SCIF_SCIF2,0xC40), + INTC_VECT(MSIOF_MSIOFI0,0xC80), + INTC_VECT(MSIOF_MSIOFI1,0xCA0), + INTC_VECT(SCIFA_SCIFA1,0xD00), + + INTC_VECT(FLCTL_FLSTEI,0xD80), + INTC_VECT(FLCTL_FLTENDI,0xDA0), + 
INTC_VECT(FLCTL_FLTREQ0I,0xDC0), + INTC_VECT(FLCTL_FLTREQ1I,0xDE0), + + INTC_VECT(I2C_ALI,0xE00), + INTC_VECT(I2C_TACKI,0xE20), + INTC_VECT(I2C_WAITI,0xE40), + INTC_VECT(I2C_DTEI,0xE60), + + INTC_VECT(SDHI0, 0xE80), + INTC_VECT(SDHI0, 0xEA0), + INTC_VECT(SDHI0, 0xEC0), + + INTC_VECT(CMT_CMTI,0xF00), + INTC_VECT(TSIF_TSIFI,0xF20), + INTC_VECT(SIU_SIUI,0xF80), + INTC_VECT(SCIFA_SCIFA2,0xFA0), + + INTC_VECT(TMU0_TUNI0,0x400), + INTC_VECT(TMU0_TUNI1,0x420), + INTC_VECT(TMU0_TUNI2,0x440), + + INTC_VECT(IRDA_IRDAI,0x480), + INTC_VECT(ATAPI_ATAPII,0x4A0), + + INTC_VECT(SDHI1, 0x4E0), + INTC_VECT(SDHI1, 0x500), + INTC_VECT(SDHI1, 0x520), + + INTC_VECT(VEU2H1_VEU2HI,0x560), + INTC_VECT(LCDC_LCDCI,0x580), + + INTC_VECT(TMU1_TUNI0,0x920), + INTC_VECT(TMU1_TUNI1,0x940), + INTC_VECT(TMU1_TUNI2,0x960), + +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMAC1A,DMAC1A_DEI0,DMAC1A_DEI1,DMAC1A_DEI2,DMAC1A_DEI3), + INTC_GROUP(DMAC0A,DMAC0A_DEI0,DMAC0A_DEI1,DMAC0A_DEI2,DMAC0A_DEI3), + INTC_GROUP(VIO, VIO_CEUI,VIO_BEUI,VIO_VEU2HI,VIO_VOUI), + INTC_GROUP(DMAC0B, DMAC0B_DEI4,DMAC0B_DEI5,DMAC0B_DADERR), + INTC_GROUP(FLCTL,FLCTL_FLSTEI,FLCTL_FLTENDI,FLCTL_FLTREQ0I,FLCTL_FLTREQ1I), + INTC_GROUP(I2C,I2C_ALI,I2C_TACKI,I2C_WAITI,I2C_DTEI), + INTC_GROUP(_2DG, _2DG_TRI,_2DG_INI,_2DG_CEI), + INTC_GROUP(RTC, RTC_ATI,RTC_PRI,RTC_CUI), + INTC_GROUP(DMAC1B, DMAC1B_DEI4,DMAC1B_DEI5,DMAC1B_DADERR), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ + { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, + 0, ENABLED, ENABLED, ENABLED } }, + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VIO_VOUI, VIO_VEU2HI,VIO_BEUI,VIO_CEUI,DMAC0A_DEI3,DMAC0A_DEI2,DMAC0A_DEI1,DMAC0A_DEI0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU_VPUI,0,0,0,SCIFA_SCIFA0 } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { DMAC1A_DEI3,DMAC1A_DEI2,DMAC1A_DEI1,DMAC1A_DEI0,0,0,0,IRDA_IRDAI } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0,TMU0_TUNI2,TMU0_TUNI1,TMU0_TUNI0,VEU2H1_VEU2HI,0,0,LCDC_LCDCI } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { KEYSC_KEYI,DMAC0B_DADERR,DMAC0B_DEI5,DMAC0B_DEI4,0,SCIF_SCIF2,SCIF_SCIF1,SCIF_SCIF0 } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0,0,0,SCIFA_SCIFA1,ADC_ADI,0,MSIOF_MSIOFI1,MSIOF_MSIOFI0 } }, + { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, + FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLTENDI, FLCTL_FLSTEI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { 0, ENABLED, ENABLED, ENABLED, + 0, 0, SCIFA_SCIFA2, SIU_SIUI } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT_CMTI, 0, 0, USB_USI0,0 } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { 0, DMAC1B_DADERR,DMAC1B_DEI5,DMAC1B_DEI4,0,RTC_ATI,RTC_PRI,RTC_CUI } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { 0,_2DG_CEI,_2DG_INI,_2DG_TRI,0,TPU_TPUI,0,TSIF_TSIFI } }, + { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */ + { 0,0,0,0,0,0,0,ATAPI_ATAPII } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, IRDA_IRDAI } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2H1_VEU2HI, LCDC_LCDCI, DMAC1A, 0} }, + { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, 0} }, + { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, + { 0xa4080010, 0, 
16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA_SCIFA0, VPU_VPUI } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC_KEYI, DMAC0B, USB_USI0, CMT_CMTI } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2,0 } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0,MSIOF_MSIOFI1, FLCTL, I2C } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA_SCIFA1,0,TSIF_TSIFI,_2DG } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { ADC_ADI,0,SIU_SIUI,SDHI1 } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC,DMAC1B,0,SDHI0 } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA_SCIFA2,0,TPU_TPUI,ATAPI_ATAPII } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4140024, 0, 8, /* INTREQ00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_desc intc_desc __initdata = { + .name = "sh7723", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c new file mode 100644 index 000000000..0d990ab1b --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c @@ -0,0 +1,1288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7724 Setup + * + * Copyright (C) 2009 Renesas Solutions Corp. + * + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * + * Based on SH7723 Setup + * Copyright (C) 2008 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/mm.h> +#include <linux/serial_sci.h> +#include <linux/uio_driver.h> +#include <linux/sh_dma.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> +#include <linux/notifier.h> + +#include <asm/suspend.h> +#include <asm/clock.h> +#include <asm/mmzone.h> +#include <asm/platform_early.h> + +#include <cpu/dma-register.h> +#include <cpu/sh7724.h> + +/* DMA */ +static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = { + { + .slave_id = SHDMA_SLAVE_SCIF0_TX, + .addr = 0xffe0000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x21, + }, { + .slave_id = SHDMA_SLAVE_SCIF0_RX, + .addr = 0xffe00014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x22, + }, { + .slave_id = SHDMA_SLAVE_SCIF1_TX, + .addr = 0xffe1000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x25, + }, { + .slave_id = SHDMA_SLAVE_SCIF1_RX, + .addr = 0xffe10014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x26, + }, { + .slave_id = SHDMA_SLAVE_SCIF2_TX, + .addr = 0xffe2000c, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x29, + }, { + .slave_id = SHDMA_SLAVE_SCIF2_RX, + .addr = 0xffe20014, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2a, + }, { + .slave_id = SHDMA_SLAVE_SCIF3_TX, + .addr = 0xa4e30020, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2d, + }, { + .slave_id = SHDMA_SLAVE_SCIF3_RX, + .addr = 0xa4e30024, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2e, + }, { + .slave_id 
= SHDMA_SLAVE_SCIF4_TX, + .addr = 0xa4e40020, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x31, + }, { + .slave_id = SHDMA_SLAVE_SCIF4_RX, + .addr = 0xa4e40024, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x32, + }, { + .slave_id = SHDMA_SLAVE_SCIF5_TX, + .addr = 0xa4e50020, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x35, + }, { + .slave_id = SHDMA_SLAVE_SCIF5_RX, + .addr = 0xa4e50024, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x36, + }, { + .slave_id = SHDMA_SLAVE_USB0D0_TX, + .addr = 0xA4D80100, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x73, + }, { + .slave_id = SHDMA_SLAVE_USB0D0_RX, + .addr = 0xA4D80100, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x73, + }, { + .slave_id = SHDMA_SLAVE_USB0D1_TX, + .addr = 0xA4D80120, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x77, + }, { + .slave_id = SHDMA_SLAVE_USB0D1_RX, + .addr = 0xA4D80120, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0x77, + }, { + .slave_id = SHDMA_SLAVE_USB1D0_TX, + .addr = 0xA4D90100, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xab, + }, { + .slave_id = SHDMA_SLAVE_USB1D0_RX, + .addr = 0xA4D90100, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xab, + }, { + .slave_id = SHDMA_SLAVE_USB1D1_TX, + .addr = 0xA4D90120, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xaf, + }, { + .slave_id = SHDMA_SLAVE_USB1D1_RX, + .addr = 0xA4D90120, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xaf, + }, { + .slave_id = SHDMA_SLAVE_SDHI0_TX, + .addr = 0x04ce0030, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, { + .slave_id = SHDMA_SLAVE_SDHI0_RX, + .addr = 0x04ce0030, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, { + .slave_id = SHDMA_SLAVE_SDHI1_TX, + .addr = 0x04cf0030, + .chcr = DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc9, + }, { + .slave_id = SHDMA_SLAVE_SDHI1_RX, + .addr = 0x04cf0030, + .chcr = DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xca, + }, +}; + +static const struct sh_dmae_channel sh7724_dmae_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma_platform_data = { + .slave = sh7724_dmae_slaves, + .slave_num = ARRAY_SIZE(sh7724_dmae_slaves), + .channel = sh7724_dmae_channels, + .channel_num = ARRAY_SIZE(sh7724_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +/* Resource order important! 
*/ +static struct resource sh7724_dmae0_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00808f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0xbc0), + .end = evt2irq(0xbc0), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = evt2irq(0x800), + .end = evt2irq(0x860), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = evt2irq(0xb80), + .end = evt2irq(0xba0), + .flags = IORESOURCE_IRQ, + }, +}; + +/* Resource order important! */ +static struct resource sh7724_dmae1_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfdc08020, + .end = 0xfdc0808f, + .flags = IORESOURCE_MEM, + }, + { + /* DMARSx */ + .start = 0xfdc09000, + .end = 0xfdc0900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0xb40), + .end = evt2irq(0xb40), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 0-3 */ + .start = evt2irq(0x700), + .end = evt2irq(0x760), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 4-5 */ + .start = evt2irq(0xb00), + .end = evt2irq(0xb20), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = sh7724_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7724_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), + .dev = { + .platform_data = &dma_platform_data, + }, +}; + +/* Serial */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc00)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc20)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe20000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xc40)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xa4e30000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource 
= scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xa4e40000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xd00)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .sampling_rate = 8, + .type = PORT_SCIFA, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xa4e50000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xfa0)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +/* RTC */ +static struct resource rtc_resources[] = { + [0] = { + .start = 0xa465fec0, + .end = 0xa465fec0 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Period IRQ */ + .start = evt2irq(0xaa0), + .flags = IORESOURCE_IRQ, + }, + [2] = { + /* Carry IRQ */ + .start = evt2irq(0xac0), + .flags = IORESOURCE_IRQ, + }, + [3] = { + /* Alarm IRQ */ + .start = evt2irq(0xa80), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +/* I2C0 */ +static struct resource iic0_resources[] = { + [0] = { + .name = "IIC0", + .start = 0x04470000, + .end = 0x04470018 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xe00), + .end = evt2irq(0xe60), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic0_device = { + .name = "i2c-sh_mobile", + .id = 0, /* "i2c0" clock */ + .num_resources = ARRAY_SIZE(iic0_resources), + .resource = iic0_resources, +}; + +/* I2C1 */ +static struct resource iic1_resources[] = { + [0] = { + .name = "IIC1", + .start = 0x04750000, + .end = 0x04750018 - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xd80), + .end = evt2irq(0xde0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device iic1_device = { + .name = "i2c-sh_mobile", + .id = 1, /* "i2c1" clock */ + .num_resources = ARRAY_SIZE(iic1_resources), + .resource = iic1_resources, +}; + +/* VPU */ +static struct uio_info vpu_platform_data = { + .name = "VPU5F", + .version = "0", + .irq = evt2irq(0x980), +}; + +static struct resource vpu_resources[] = { + [0] = { + .name = "VPU", + .start = 0xfe900000, + .end = 0xfe902807, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device vpu_device = { + .name = "uio_pdrv_genirq", + .id = 0, + .dev = { + .platform_data = &vpu_platform_data, + }, + .resource = vpu_resources, + .num_resources = ARRAY_SIZE(vpu_resources), +}; + +/* VEU0 */ +static struct uio_info veu0_platform_data = { + .name = "VEU3F0", + .version = "0", + .irq = evt2irq(0xc60), +}; + +static struct resource veu0_resources[] = { + [0] = { + .name = "VEU3F0", + .start = 0xfe920000, + .end = 0xfe9200cb, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu0_device = { + .name = "uio_pdrv_genirq", + .id = 1, + .dev = { + .platform_data = &veu0_platform_data, + }, + 
.resource = veu0_resources, + .num_resources = ARRAY_SIZE(veu0_resources), +}; + +/* VEU1 */ +static struct uio_info veu1_platform_data = { + .name = "VEU3F1", + .version = "0", + .irq = evt2irq(0x8c0), +}; + +static struct resource veu1_resources[] = { + [0] = { + .name = "VEU3F1", + .start = 0xfe924000, + .end = 0xfe9240cb, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device veu1_device = { + .name = "uio_pdrv_genirq", + .id = 2, + .dev = { + .platform_data = &veu1_platform_data, + }, + .resource = veu1_resources, + .num_resources = ARRAY_SIZE(veu1_resources), +}; + +/* BEU0 */ +static struct uio_info beu0_platform_data = { + .name = "BEU0", + .version = "0", + .irq = evt2irq(0x8A0), +}; + +static struct resource beu0_resources[] = { + [0] = { + .name = "BEU0", + .start = 0xfe930000, + .end = 0xfe933400, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device beu0_device = { + .name = "uio_pdrv_genirq", + .id = 6, + .dev = { + .platform_data = &beu0_platform_data, + }, + .resource = beu0_resources, + .num_resources = ARRAY_SIZE(beu0_resources), +}; + +/* BEU1 */ +static struct uio_info beu1_platform_data = { + .name = "BEU1", + .version = "0", + .irq = evt2irq(0xA00), +}; + +static struct resource beu1_resources[] = { + [0] = { + .name = "BEU1", + .start = 0xfe940000, + .end = 0xfe943400, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device beu1_device = { + .name = "uio_pdrv_genirq", + .id = 7, + .dev = { + .platform_data = &beu1_platform_data, + }, + .resource = beu1_resources, + .num_resources = ARRAY_SIZE(beu1_resources), +}; + +static struct sh_timer_config cmt_platform_data = { + .channels_mask = 0x20, +}; + +static struct resource cmt_resources[] = { + DEFINE_RES_MEM(0x044a0000, 0x70), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device cmt_device = { + .name = "sh-cmt-32", + .id = 0, + .dev = { + .platform_data = &cmt_platform_data, + }, + .resource = cmt_resources, + .num_resources = ARRAY_SIZE(cmt_resources), +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffd90000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x920)), + DEFINE_RES_IRQ(evt2irq(0x940)), + DEFINE_RES_IRQ(evt2irq(0x960)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +/* JPU */ +static struct uio_info jpu_platform_data = { + .name = "JPU", + .version = "0", + .irq = evt2irq(0x560), +}; + +static struct resource jpu_resources[] = { + [0] = { + .name = "JPU", + .start = 0xfe980000, + .end = 0xfe9902d3, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device jpu_device = 
{ + .name = "uio_pdrv_genirq", + .id = 3, + .dev = { + .platform_data = &jpu_platform_data, + }, + .resource = jpu_resources, + .num_resources = ARRAY_SIZE(jpu_resources), +}; + +/* SPU2DSP0 */ +static struct uio_info spu0_platform_data = { + .name = "SPU2DSP0", + .version = "0", + .irq = evt2irq(0xcc0), +}; + +static struct resource spu0_resources[] = { + [0] = { + .name = "SPU2DSP0", + .start = 0xFE200000, + .end = 0xFE2FFFFF, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device spu0_device = { + .name = "uio_pdrv_genirq", + .id = 4, + .dev = { + .platform_data = &spu0_platform_data, + }, + .resource = spu0_resources, + .num_resources = ARRAY_SIZE(spu0_resources), +}; + +/* SPU2DSP1 */ +static struct uio_info spu1_platform_data = { + .name = "SPU2DSP1", + .version = "0", + .irq = evt2irq(0xce0), +}; + +static struct resource spu1_resources[] = { + [0] = { + .name = "SPU2DSP1", + .start = 0xFE300000, + .end = 0xFE3FFFFF, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* place holder for contiguous memory */ + }, +}; + +static struct platform_device spu1_device = { + .name = "uio_pdrv_genirq", + .id = 5, + .dev = { + .platform_data = &spu1_platform_data, + }, + .resource = spu1_resources, + .num_resources = ARRAY_SIZE(spu1_resources), +}; + +static struct platform_device *sh7724_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &cmt_device, + &tmu0_device, + &tmu1_device, + &dma0_device, + &dma1_device, + &rtc_device, + &iic0_device, + &iic1_device, + &vpu_device, + &veu0_device, + &veu1_device, + &beu0_device, + &beu1_device, + &jpu_device, + &spu0_device, + &spu1_device, +}; + +static int __init sh7724_devices_setup(void) +{ + platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); + platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); + platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); + platform_resource_setup_memory(&jpu_device, "jpu", 2 << 20); + platform_resource_setup_memory(&spu0_device, "spu0", 2 << 20); + platform_resource_setup_memory(&spu1_device, "spu1", 2 << 20); + + return platform_add_devices(sh7724_devices, + ARRAY_SIZE(sh7724_devices)); +} +arch_initcall(sh7724_devices_setup); + +static struct platform_device *sh7724_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &cmt_device, + &tmu0_device, + &tmu1_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7724_early_devices, + ARRAY_SIZE(sh7724_early_devices)); +} + +#define RAMCR_CACHE_L2FC 0x0002 +#define RAMCR_CACHE_L2E 0x0001 +#define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC) + +void l2_cache_init(void) +{ + /* Enable L2 cache */ + __raw_writel(L2_CACHE_ENABLE, RAMCR); +} + +enum { + UNUSED = 0, + ENABLED, + DISABLED, + + /* interrupt sources */ + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + HUDI, + DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3, + _2DG_TRI, _2DG_INI, _2DG_CEI, + DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3, + VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU, + SCIFA3, + VPU, + TPU, + CEU1, + BEU1, + USB0, USB1, + ATAPI, + RTC_ATI, RTC_PRI, RTC_CUI, + DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR, + DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR, + KEYSC, + SCIF_SCIF0, SCIF_SCIF1, SCIF_SCIF2, + VEU0, + MSIOF_MSIOFI0, MSIOF_MSIOFI1, + SPU_SPUI0, SPU_SPUI1, + SCIFA4, + ICB, + ETHI, + I2C1_ALI, I2C1_TACKI, 
I2C1_WAITI, I2C1_DTEI, + I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI, + CMT, + TSIF, + FSI, + SCIFA5, + TMU0_TUNI0, TMU0_TUNI1, TMU0_TUNI2, + IRDA, + JPU, + _2DDMAC, + MMC_MMC2I, MMC_MMC3I, + LCDC, + TMU1_TUNI0, TMU1_TUNI1, TMU1_TUNI2, + + /* interrupt groups */ + DMAC1A, _2DG, DMAC0A, VIO, USB, RTC, + DMAC1B, DMAC0B, I2C0, I2C1, SDHI0, SDHI1, SPU, MMCIF, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), + INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), + INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), + INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), + + INTC_VECT(DMAC1A_DEI0, 0x700), + INTC_VECT(DMAC1A_DEI1, 0x720), + INTC_VECT(DMAC1A_DEI2, 0x740), + INTC_VECT(DMAC1A_DEI3, 0x760), + + INTC_VECT(_2DG_TRI, 0x780), + INTC_VECT(_2DG_INI, 0x7A0), + INTC_VECT(_2DG_CEI, 0x7C0), + + INTC_VECT(DMAC0A_DEI0, 0x800), + INTC_VECT(DMAC0A_DEI1, 0x820), + INTC_VECT(DMAC0A_DEI2, 0x840), + INTC_VECT(DMAC0A_DEI3, 0x860), + + INTC_VECT(VIO_CEU0, 0x880), + INTC_VECT(VIO_BEU0, 0x8A0), + INTC_VECT(VIO_VEU1, 0x8C0), + INTC_VECT(VIO_VOU, 0x8E0), + + INTC_VECT(SCIFA3, 0x900), + INTC_VECT(VPU, 0x980), + INTC_VECT(TPU, 0x9A0), + INTC_VECT(CEU1, 0x9E0), + INTC_VECT(BEU1, 0xA00), + INTC_VECT(USB0, 0xA20), + INTC_VECT(USB1, 0xA40), + INTC_VECT(ATAPI, 0xA60), + + INTC_VECT(RTC_ATI, 0xA80), + INTC_VECT(RTC_PRI, 0xAA0), + INTC_VECT(RTC_CUI, 0xAC0), + + INTC_VECT(DMAC1B_DEI4, 0xB00), + INTC_VECT(DMAC1B_DEI5, 0xB20), + INTC_VECT(DMAC1B_DADERR, 0xB40), + + INTC_VECT(DMAC0B_DEI4, 0xB80), + INTC_VECT(DMAC0B_DEI5, 0xBA0), + INTC_VECT(DMAC0B_DADERR, 0xBC0), + + INTC_VECT(KEYSC, 0xBE0), + INTC_VECT(SCIF_SCIF0, 0xC00), + INTC_VECT(SCIF_SCIF1, 0xC20), + INTC_VECT(SCIF_SCIF2, 0xC40), + INTC_VECT(VEU0, 0xC60), + INTC_VECT(MSIOF_MSIOFI0, 0xC80), + INTC_VECT(MSIOF_MSIOFI1, 0xCA0), + INTC_VECT(SPU_SPUI0, 0xCC0), + INTC_VECT(SPU_SPUI1, 0xCE0), + INTC_VECT(SCIFA4, 0xD00), + + INTC_VECT(ICB, 0xD20), + INTC_VECT(ETHI, 0xD60), + + INTC_VECT(I2C1_ALI, 0xD80), + INTC_VECT(I2C1_TACKI, 0xDA0), + INTC_VECT(I2C1_WAITI, 0xDC0), + INTC_VECT(I2C1_DTEI, 0xDE0), + + INTC_VECT(I2C0_ALI, 0xE00), + INTC_VECT(I2C0_TACKI, 0xE20), + INTC_VECT(I2C0_WAITI, 0xE40), + INTC_VECT(I2C0_DTEI, 0xE60), + + INTC_VECT(SDHI0, 0xE80), + INTC_VECT(SDHI0, 0xEA0), + INTC_VECT(SDHI0, 0xEC0), + INTC_VECT(SDHI0, 0xEE0), + + INTC_VECT(CMT, 0xF00), + INTC_VECT(TSIF, 0xF20), + INTC_VECT(FSI, 0xF80), + INTC_VECT(SCIFA5, 0xFA0), + + INTC_VECT(TMU0_TUNI0, 0x400), + INTC_VECT(TMU0_TUNI1, 0x420), + INTC_VECT(TMU0_TUNI2, 0x440), + + INTC_VECT(IRDA, 0x480), + + INTC_VECT(SDHI1, 0x4E0), + INTC_VECT(SDHI1, 0x500), + INTC_VECT(SDHI1, 0x520), + + INTC_VECT(JPU, 0x560), + INTC_VECT(_2DDMAC, 0x4A0), + + INTC_VECT(MMC_MMC2I, 0x5A0), + INTC_VECT(MMC_MMC3I, 0x5C0), + + INTC_VECT(LCDC, 0xF40), + + INTC_VECT(TMU1_TUNI0, 0x920), + INTC_VECT(TMU1_TUNI1, 0x940), + INTC_VECT(TMU1_TUNI2, 0x960), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(DMAC1A, DMAC1A_DEI0, DMAC1A_DEI1, DMAC1A_DEI2, DMAC1A_DEI3), + INTC_GROUP(_2DG, _2DG_TRI, _2DG_INI, _2DG_CEI), + INTC_GROUP(DMAC0A, DMAC0A_DEI0, DMAC0A_DEI1, DMAC0A_DEI2, DMAC0A_DEI3), + INTC_GROUP(VIO, VIO_CEU0, VIO_BEU0, VIO_VEU1, VIO_VOU), + INTC_GROUP(USB, USB0, USB1), + INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), + INTC_GROUP(DMAC1B, DMAC1B_DEI4, DMAC1B_DEI5, DMAC1B_DADERR), + INTC_GROUP(DMAC0B, DMAC0B_DEI4, DMAC0B_DEI5, DMAC0B_DADERR), + INTC_GROUP(I2C0, I2C0_ALI, I2C0_TACKI, I2C0_WAITI, I2C0_DTEI), + INTC_GROUP(I2C1, I2C1_ALI, I2C1_TACKI, I2C1_WAITI, I2C1_DTEI), + INTC_GROUP(SPU, 
SPU_SPUI0, SPU_SPUI1), + INTC_GROUP(MMCIF, MMC_MMC2I, MMC_MMC3I), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ + { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0, + 0, ENABLED, ENABLED, ENABLED } }, + { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ + { VIO_VOU, VIO_VEU1, VIO_BEU0, VIO_CEU0, + DMAC0A_DEI3, DMAC0A_DEI2, DMAC0A_DEI1, DMAC0A_DEI0 } }, + { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ + { 0, 0, 0, VPU, ATAPI, ETHI, 0, SCIFA3 } }, + { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ + { DMAC1A_DEI3, DMAC1A_DEI2, DMAC1A_DEI1, DMAC1A_DEI0, + SPU_SPUI1, SPU_SPUI0, BEU1, IRDA } }, + { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ + { 0, TMU0_TUNI2, TMU0_TUNI1, TMU0_TUNI0, + JPU, 0, 0, LCDC } }, + { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ + { KEYSC, DMAC0B_DADERR, DMAC0B_DEI5, DMAC0B_DEI4, + VEU0, SCIF_SCIF2, SCIF_SCIF1, SCIF_SCIF0 } }, + { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ + { 0, 0, ICB, SCIFA4, + CEU1, 0, MSIOF_MSIOFI1, MSIOF_MSIOFI0 } }, + { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ + { I2C0_DTEI, I2C0_WAITI, I2C0_TACKI, I2C0_ALI, + I2C1_DTEI, I2C1_WAITI, I2C1_TACKI, I2C1_ALI } }, + { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ + { DISABLED, ENABLED, ENABLED, ENABLED, + 0, 0, SCIFA5, FSI } }, + { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ + { 0, 0, 0, CMT, 0, USB1, USB0, 0 } }, + { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ + { 0, DMAC1B_DADERR, DMAC1B_DEI5, DMAC1B_DEI4, + 0, RTC_CUI, RTC_PRI, RTC_ATI } }, + { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ + { 0, _2DG_CEI, _2DG_INI, _2DG_TRI, + 0, TPU, 0, TSIF } }, + { 0xa40800b0, 0xa40800f0, 8, /* IMR12 / IMCR12 */ + { 0, 0, MMC_MMC3I, MMC_MMC2I, 0, 0, 0, _2DDMAC } }, + { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0_TUNI0, TMU0_TUNI1, + TMU0_TUNI2, IRDA } }, + { 0xa4080004, 0, 16, 4, /* IPRB */ { JPU, LCDC, DMAC1A, BEU1 } }, + { 0xa4080008, 0, 16, 4, /* IPRC */ { TMU1_TUNI0, TMU1_TUNI1, + TMU1_TUNI2, SPU } }, + { 0xa408000c, 0, 16, 4, /* IPRD */ { 0, MMCIF, 0, ATAPI } }, + { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0A, VIO, SCIFA3, VPU } }, + { 0xa4080014, 0, 16, 4, /* IPRF */ { KEYSC, DMAC0B, USB, CMT } }, + { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF_SCIF0, SCIF_SCIF1, + SCIF_SCIF2, VEU0 } }, + { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF_MSIOFI0, MSIOF_MSIOFI1, + I2C1, I2C0 } }, + { 0xa4080020, 0, 16, 4, /* IPRI */ { SCIFA4, ICB, TSIF, _2DG } }, + { 0xa4080024, 0, 16, 4, /* IPRJ */ { CEU1, ETHI, FSI, SDHI1 } }, + { 0xa4080028, 0, 16, 4, /* IPRK */ { RTC, DMAC1B, 0, SDHI0 } }, + { 0xa408002c, 0, 16, 4, /* IPRL */ { SCIFA5, 0, TPU, _2DDMAC } }, + { 0xa4140010, 0, 32, 4, /* INTPRI00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xa414001c, 16, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xa4140024, 0, 8, /* INTREQ00 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_desc intc_desc __initdata = { + .name = "sh7724", + .force_enable = ENABLED, + .force_disable = DISABLED, + .hw = INTC_HW_DESC(vectors, groups, mask_registers, + prio_registers, sense_registers, ack_registers), +}; + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} 
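+/* Register state saved across R-standby: the pre-sleep notifier below snapshots the BSC, INTC, RWDT and CPG registers into this struct, and the post-sleep notifier writes them back on resume. */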
+ +static struct { + /* BSC */ + unsigned long mmselr; + unsigned long cs0bcr; + unsigned long cs4bcr; + unsigned long cs5abcr; + unsigned long cs5bbcr; + unsigned long cs6abcr; + unsigned long cs6bbcr; + unsigned long cs4wcr; + unsigned long cs5awcr; + unsigned long cs5bwcr; + unsigned long cs6awcr; + unsigned long cs6bwcr; + /* INTC */ + unsigned short ipra; + unsigned short iprb; + unsigned short iprc; + unsigned short iprd; + unsigned short ipre; + unsigned short iprf; + unsigned short iprg; + unsigned short iprh; + unsigned short ipri; + unsigned short iprj; + unsigned short iprk; + unsigned short iprl; + unsigned char imr0; + unsigned char imr1; + unsigned char imr2; + unsigned char imr3; + unsigned char imr4; + unsigned char imr5; + unsigned char imr6; + unsigned char imr7; + unsigned char imr8; + unsigned char imr9; + unsigned char imr10; + unsigned char imr11; + unsigned char imr12; + /* RWDT */ + unsigned short rwtcnt; + unsigned short rwtcsr; + /* CPG */ + unsigned long irdaclk; + unsigned long spuclk; +} sh7724_rstandby_state; + +static int sh7724_pre_sleep_notifier_call(struct notifier_block *nb, + unsigned long flags, void *unused) +{ + if (!(flags & SUSP_SH_RSTANDBY)) + return NOTIFY_DONE; + + /* BCR */ + sh7724_rstandby_state.mmselr = __raw_readl(0xff800020); /* MMSELR */ + sh7724_rstandby_state.mmselr |= 0xa5a50000; + sh7724_rstandby_state.cs0bcr = __raw_readl(0xfec10004); /* CS0BCR */ + sh7724_rstandby_state.cs4bcr = __raw_readl(0xfec10010); /* CS4BCR */ + sh7724_rstandby_state.cs5abcr = __raw_readl(0xfec10014); /* CS5ABCR */ + sh7724_rstandby_state.cs5bbcr = __raw_readl(0xfec10018); /* CS5BBCR */ + sh7724_rstandby_state.cs6abcr = __raw_readl(0xfec1001c); /* CS6ABCR */ + sh7724_rstandby_state.cs6bbcr = __raw_readl(0xfec10020); /* CS6BBCR */ + sh7724_rstandby_state.cs4wcr = __raw_readl(0xfec10030); /* CS4WCR */ + sh7724_rstandby_state.cs5awcr = __raw_readl(0xfec10034); /* CS5AWCR */ + sh7724_rstandby_state.cs5bwcr = __raw_readl(0xfec10038); /* CS5BWCR */ + sh7724_rstandby_state.cs6awcr = __raw_readl(0xfec1003c); /* CS6AWCR */ + sh7724_rstandby_state.cs6bwcr = __raw_readl(0xfec10040); /* CS6BWCR */ + + /* INTC */ + sh7724_rstandby_state.ipra = __raw_readw(0xa4080000); /* IPRA */ + sh7724_rstandby_state.iprb = __raw_readw(0xa4080004); /* IPRB */ + sh7724_rstandby_state.iprc = __raw_readw(0xa4080008); /* IPRC */ + sh7724_rstandby_state.iprd = __raw_readw(0xa408000c); /* IPRD */ + sh7724_rstandby_state.ipre = __raw_readw(0xa4080010); /* IPRE */ + sh7724_rstandby_state.iprf = __raw_readw(0xa4080014); /* IPRF */ + sh7724_rstandby_state.iprg = __raw_readw(0xa4080018); /* IPRG */ + sh7724_rstandby_state.iprh = __raw_readw(0xa408001c); /* IPRH */ + sh7724_rstandby_state.ipri = __raw_readw(0xa4080020); /* IPRI */ + sh7724_rstandby_state.iprj = __raw_readw(0xa4080024); /* IPRJ */ + sh7724_rstandby_state.iprk = __raw_readw(0xa4080028); /* IPRK */ + sh7724_rstandby_state.iprl = __raw_readw(0xa408002c); /* IPRL */ + sh7724_rstandby_state.imr0 = __raw_readb(0xa4080080); /* IMR0 */ + sh7724_rstandby_state.imr1 = __raw_readb(0xa4080084); /* IMR1 */ + sh7724_rstandby_state.imr2 = __raw_readb(0xa4080088); /* IMR2 */ + sh7724_rstandby_state.imr3 = __raw_readb(0xa408008c); /* IMR3 */ + sh7724_rstandby_state.imr4 = __raw_readb(0xa4080090); /* IMR4 */ + sh7724_rstandby_state.imr5 = __raw_readb(0xa4080094); /* IMR5 */ + sh7724_rstandby_state.imr6 = __raw_readb(0xa4080098); /* IMR6 */ + sh7724_rstandby_state.imr7 = __raw_readb(0xa408009c); /* IMR7 */ + sh7724_rstandby_state.imr8 = 
__raw_readb(0xa40800a0); /* IMR8 */ + sh7724_rstandby_state.imr9 = __raw_readb(0xa40800a4); /* IMR9 */ + sh7724_rstandby_state.imr10 = __raw_readb(0xa40800a8); /* IMR10 */ + sh7724_rstandby_state.imr11 = __raw_readb(0xa40800ac); /* IMR11 */ + sh7724_rstandby_state.imr12 = __raw_readb(0xa40800b0); /* IMR12 */ + + /* RWDT */ + sh7724_rstandby_state.rwtcnt = __raw_readb(0xa4520000); /* RWTCNT */ + sh7724_rstandby_state.rwtcnt |= 0x5a00; + sh7724_rstandby_state.rwtcsr = __raw_readb(0xa4520004); /* RWTCSR */ + sh7724_rstandby_state.rwtcsr |= 0xa500; + __raw_writew(sh7724_rstandby_state.rwtcsr & 0x07, 0xa4520004); + + /* CPG */ + sh7724_rstandby_state.irdaclk = __raw_readl(0xa4150018); /* IRDACLKCR */ + sh7724_rstandby_state.spuclk = __raw_readl(0xa415003c); /* SPUCLKCR */ + + return NOTIFY_DONE; +} + +static int sh7724_post_sleep_notifier_call(struct notifier_block *nb, + unsigned long flags, void *unused) +{ + if (!(flags & SUSP_SH_RSTANDBY)) + return NOTIFY_DONE; + + /* BCR */ + __raw_writel(sh7724_rstandby_state.mmselr, 0xff800020); /* MMSELR */ + __raw_writel(sh7724_rstandby_state.cs0bcr, 0xfec10004); /* CS0BCR */ + __raw_writel(sh7724_rstandby_state.cs4bcr, 0xfec10010); /* CS4BCR */ + __raw_writel(sh7724_rstandby_state.cs5abcr, 0xfec10014); /* CS5ABCR */ + __raw_writel(sh7724_rstandby_state.cs5bbcr, 0xfec10018); /* CS5BBCR */ + __raw_writel(sh7724_rstandby_state.cs6abcr, 0xfec1001c); /* CS6ABCR */ + __raw_writel(sh7724_rstandby_state.cs6bbcr, 0xfec10020); /* CS6BBCR */ + __raw_writel(sh7724_rstandby_state.cs4wcr, 0xfec10030); /* CS4WCR */ + __raw_writel(sh7724_rstandby_state.cs5awcr, 0xfec10034); /* CS5AWCR */ + __raw_writel(sh7724_rstandby_state.cs5bwcr, 0xfec10038); /* CS5BWCR */ + __raw_writel(sh7724_rstandby_state.cs6awcr, 0xfec1003c); /* CS6AWCR */ + __raw_writel(sh7724_rstandby_state.cs6bwcr, 0xfec10040); /* CS6BWCR */ + + /* INTC */ + __raw_writew(sh7724_rstandby_state.ipra, 0xa4080000); /* IPRA */ + __raw_writew(sh7724_rstandby_state.iprb, 0xa4080004); /* IPRB */ + __raw_writew(sh7724_rstandby_state.iprc, 0xa4080008); /* IPRC */ + __raw_writew(sh7724_rstandby_state.iprd, 0xa408000c); /* IPRD */ + __raw_writew(sh7724_rstandby_state.ipre, 0xa4080010); /* IPRE */ + __raw_writew(sh7724_rstandby_state.iprf, 0xa4080014); /* IPRF */ + __raw_writew(sh7724_rstandby_state.iprg, 0xa4080018); /* IPRG */ + __raw_writew(sh7724_rstandby_state.iprh, 0xa408001c); /* IPRH */ + __raw_writew(sh7724_rstandby_state.ipri, 0xa4080020); /* IPRI */ + __raw_writew(sh7724_rstandby_state.iprj, 0xa4080024); /* IPRJ */ + __raw_writew(sh7724_rstandby_state.iprk, 0xa4080028); /* IPRK */ + __raw_writew(sh7724_rstandby_state.iprl, 0xa408002c); /* IPRL */ + __raw_writeb(sh7724_rstandby_state.imr0, 0xa4080080); /* IMR0 */ + __raw_writeb(sh7724_rstandby_state.imr1, 0xa4080084); /* IMR1 */ + __raw_writeb(sh7724_rstandby_state.imr2, 0xa4080088); /* IMR2 */ + __raw_writeb(sh7724_rstandby_state.imr3, 0xa408008c); /* IMR3 */ + __raw_writeb(sh7724_rstandby_state.imr4, 0xa4080090); /* IMR4 */ + __raw_writeb(sh7724_rstandby_state.imr5, 0xa4080094); /* IMR5 */ + __raw_writeb(sh7724_rstandby_state.imr6, 0xa4080098); /* IMR6 */ + __raw_writeb(sh7724_rstandby_state.imr7, 0xa408009c); /* IMR7 */ + __raw_writeb(sh7724_rstandby_state.imr8, 0xa40800a0); /* IMR8 */ + __raw_writeb(sh7724_rstandby_state.imr9, 0xa40800a4); /* IMR9 */ + __raw_writeb(sh7724_rstandby_state.imr10, 0xa40800a8); /* IMR10 */ + __raw_writeb(sh7724_rstandby_state.imr11, 0xa40800ac); /* IMR11 */ + __raw_writeb(sh7724_rstandby_state.imr12, 0xa40800b0); /* IMR12 */ + 
+ /* RWDT */ + __raw_writew(sh7724_rstandby_state.rwtcnt, 0xa4520000); /* RWTCNT */ + __raw_writew(sh7724_rstandby_state.rwtcsr, 0xa4520004); /* RWTCSR */ + + /* CPG */ + __raw_writel(sh7724_rstandby_state.irdaclk, 0xa4150018); /* IRDACLKCR */ + __raw_writel(sh7724_rstandby_state.spuclk, 0xa415003c); /* SPUCLKCR */ + + return NOTIFY_DONE; +} + +static struct notifier_block sh7724_pre_sleep_notifier = { + .notifier_call = sh7724_pre_sleep_notifier_call, + .priority = SH_MOBILE_PRE(SH_MOBILE_SLEEP_CPU), +}; + +static struct notifier_block sh7724_post_sleep_notifier = { + .notifier_call = sh7724_post_sleep_notifier_call, + .priority = SH_MOBILE_POST(SH_MOBILE_SLEEP_CPU), +}; + +static int __init sh7724_sleep_setup(void) +{ + atomic_notifier_chain_register(&sh_mobile_pre_sleep_notifier_list, + &sh7724_pre_sleep_notifier); + + atomic_notifier_chain_register(&sh_mobile_post_sleep_notifier_list, + &sh7724_post_sleep_notifier); + return 0; +} +arch_initcall(sh7724_sleep_setup); + diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7734.c b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c new file mode 100644 index 000000000..9911da794 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7734.c @@ -0,0 +1,621 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/setup-sh7734.c + * + * SH7734 Setup + * + * Copyright (C) 2011,2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com> + * Copyright (C) 2011,2012 Renesas Solutions Corp. + */ + +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/io.h> +#include <asm/clock.h> +#include <asm/irq.h> +#include <asm/platform_early.h> +#include <cpu/sh7734.h> + +/* SCIF */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe40000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x8c0)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe41000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x8e0)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe42000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x900)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xffe43000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x920)), +}; + +static struct 
platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xffe44000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x940)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_BRG_REGTYPE, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xffe43000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x960)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +/* RTC */ +static struct resource rtc_resources[] = { + [0] = { + .name = "rtc", + .start = 0xFFFC5000, + .end = 0xFFFC5000 + 0x26 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + .start = evt2irq(0xC00), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +/* I2C 0 */ +static struct resource i2c0_resources[] = { + [0] = { + .name = "IIC0", + .start = 0xFFC70000, + .end = 0xFFC7000A - 1, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x860), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device i2c0_device = { + .name = "i2c-sh7734", + .id = 0, + .num_resources = ARRAY_SIZE(i2c0_resources), + .resource = i2c0_resources, +}; + +/* TMU */ +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffd81000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x480)), + DEFINE_RES_IRQ(evt2irq(0x4a0)), + DEFINE_RES_IRQ(evt2irq(0x4c0)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct sh_timer_config tmu2_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu2_resources[] = { + DEFINE_RES_MEM(0xffd82000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x500)), + DEFINE_RES_IRQ(evt2irq(0x520)), + DEFINE_RES_IRQ(evt2irq(0x540)), +}; + +static struct platform_device tmu2_device = { + .name = "sh-tmu", + .id = 2, + .dev = { + .platform_data = &tmu2_platform_data, + }, + .resource = tmu2_resources, + .num_resources = ARRAY_SIZE(tmu2_resources), +}; + +static struct platform_device *sh7734_devices[] __initdata = { + &scif0_device, + 
&scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &tmu0_device, + &tmu1_device, + &tmu2_device, + &rtc_device, +}; + +static struct platform_device *sh7734_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &tmu0_device, + &tmu1_device, + &tmu2_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7734_early_devices, + ARRAY_SIZE(sh7734_early_devices)); +} + +#define GROUP 0 +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, + DU, + TMU00, TMU10, TMU20, TMU21, + TMU30, TMU40, TMU50, TMU51, + TMU60, TMU70, TMU80, + RESET_WDT, + USB, + HUDI, + SHDMAC, + SSI0, SSI1, SSI2, SSI3, + VIN0, + RGPVG, + _2DG, + MMC, + HSPI, + LBSCATA, + I2C0, + RCAN0, + MIMLB, + SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, + LBSCDMAC0, LBSCDMAC1, LBSCDMAC2, + RCAN1, + SDHI0, SDHI1, + IEBUS, + HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22, HPBDMAC23_25_27_28, + RTC, + VIN1, + LCDC, + SRC0, SRC1, + GETHER, + SDHI2, + GPIO0_3, GPIO4_5, + STIF0, STIF1, + ADMAC, + HIF, + FLCTL, + ADC, + MTU2, + RSPI, + QSPI, + HSCIF, + VEU3F_VE3, + + /* Group */ + /* Mask */ + STIF_M, + GPIO_M, + HPBDMAC_M, + LBSCDMAC_M, + RCAN_M, + SRC_M, + SCIF_M, + LCDC_M, + _2DG_M, + VIN_M, + TMU_3_M, + TMU_0_M, + + /* Priority */ + RCAN_P, + LBSCDMAC_P, + + /* Common */ + SDHI, + SSI, + SPI, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(DU, 0x3E0), + INTC_VECT(TMU00, 0x400), + INTC_VECT(TMU10, 0x420), + INTC_VECT(TMU20, 0x440), + INTC_VECT(TMU30, 0x480), + INTC_VECT(TMU40, 0x4A0), + INTC_VECT(TMU50, 0x4C0), + INTC_VECT(TMU51, 0x4E0), + INTC_VECT(TMU60, 0x500), + INTC_VECT(TMU70, 0x520), + INTC_VECT(TMU80, 0x540), + INTC_VECT(RESET_WDT, 0x560), + INTC_VECT(USB, 0x580), + INTC_VECT(HUDI, 0x600), + INTC_VECT(SHDMAC, 0x620), + INTC_VECT(SSI0, 0x6C0), + INTC_VECT(SSI1, 0x6E0), + INTC_VECT(SSI2, 0x700), + INTC_VECT(SSI3, 0x720), + INTC_VECT(VIN0, 0x740), + INTC_VECT(RGPVG, 0x760), + INTC_VECT(_2DG, 0x780), + INTC_VECT(MMC, 0x7A0), + INTC_VECT(HSPI, 0x7E0), + INTC_VECT(LBSCATA, 0x840), + INTC_VECT(I2C0, 0x860), + INTC_VECT(RCAN0, 0x880), + INTC_VECT(SCIF0, 0x8A0), + INTC_VECT(SCIF1, 0x8C0), + INTC_VECT(SCIF2, 0x900), + INTC_VECT(SCIF3, 0x920), + INTC_VECT(SCIF4, 0x940), + INTC_VECT(SCIF5, 0x960), + INTC_VECT(LBSCDMAC0, 0x9E0), + INTC_VECT(LBSCDMAC1, 0xA00), + INTC_VECT(LBSCDMAC2, 0xA20), + INTC_VECT(RCAN1, 0xA60), + INTC_VECT(SDHI0, 0xAE0), + INTC_VECT(SDHI1, 0xB00), + INTC_VECT(IEBUS, 0xB20), + INTC_VECT(HPBDMAC0_3, 0xB60), + INTC_VECT(HPBDMAC4_10, 0xB80), + INTC_VECT(HPBDMAC11_18, 0xBA0), + INTC_VECT(HPBDMAC19_22, 0xBC0), + INTC_VECT(HPBDMAC23_25_27_28, 0xBE0), + INTC_VECT(RTC, 0xC00), + INTC_VECT(VIN1, 0xC20), + INTC_VECT(LCDC, 0xC40), + INTC_VECT(SRC0, 0xC60), + INTC_VECT(SRC1, 0xC80), + INTC_VECT(GETHER, 0xCA0), + INTC_VECT(SDHI2, 0xCC0), + INTC_VECT(GPIO0_3, 0xCE0), + INTC_VECT(GPIO4_5, 0xD00), + INTC_VECT(STIF0, 0xD20), + INTC_VECT(STIF1, 0xD40), + INTC_VECT(ADMAC, 0xDA0), + INTC_VECT(HIF, 0xDC0), + INTC_VECT(FLCTL, 0xDE0), + INTC_VECT(ADC, 0xE00), + INTC_VECT(MTU2, 0xE20), + INTC_VECT(RSPI, 0xE40), + INTC_VECT(QSPI, 0xE60), + INTC_VECT(HSCIF, 0xFC0), + INTC_VECT(VEU3F_VE3, 0xF40), +}; + +static struct intc_group groups[] __initdata = { + /* Common */ + INTC_GROUP(SDHI, 
SDHI0, SDHI1, SDHI2), + INTC_GROUP(SPI, HSPI, RSPI, QSPI), + INTC_GROUP(SSI, SSI0, SSI1, SSI2, SSI3), + + /* Mask group */ + INTC_GROUP(STIF_M, STIF0, STIF1), /* 22 */ + INTC_GROUP(GPIO_M, GPIO0_3, GPIO4_5), /* 21 */ + INTC_GROUP(HPBDMAC_M, HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, + HPBDMAC19_22, HPBDMAC23_25_27_28), /* 19 */ + INTC_GROUP(LBSCDMAC_M, LBSCDMAC0, LBSCDMAC1, LBSCDMAC2), /* 18 */ + INTC_GROUP(RCAN_M, RCAN0, RCAN1, IEBUS), /* 17 */ + INTC_GROUP(SRC_M, SRC0, SRC1), /* 16 */ + INTC_GROUP(SCIF_M, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, + HSCIF), /* 14 */ + INTC_GROUP(LCDC_M, LCDC, MIMLB), /* 13 */ + INTC_GROUP(_2DG_M, _2DG, RGPVG), /* 12 */ + INTC_GROUP(VIN_M, VIN0, VIN1), /* 10 */ + INTC_GROUP(TMU_3_M, TMU30, TMU40, TMU50, TMU51, + TMU60, TMU60, TMU70, TMU80), /* 2 */ + INTC_GROUP(TMU_0_M, TMU00, TMU10, TMU20, TMU21), /* 1 */ + + /* Priority group*/ + INTC_GROUP(RCAN_P, RCAN0, RCAN1), /* INT2PRI5 */ + INTC_GROUP(LBSCDMAC_P, LBSCDMAC0, LBSCDMAC1), /* INT2PRI5 */ +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xFF804040, 0xFF804044, 32, /* INT2MSKRG / INT2MSKCR */ + { 0, + VEU3F_VE3, + SDHI, /* SDHI 0-2 */ + ADMAC, + FLCTL, + RESET_WDT, + HIF, + ADC, + MTU2, + STIF_M, /* STIF 0,1 */ + GPIO_M, /* GPIO 0-5*/ + GETHER, + HPBDMAC_M, /* HPBDMAC 0_3 - 23_25_27_28 */ + LBSCDMAC_M, /* LBSCDMAC 0 - 2 */ + RCAN_M, /* RCAN, IEBUS */ + SRC_M, /* SRC 0,1 */ + LBSCATA, + SCIF_M, /* SCIF 0-5, HSCIF */ + LCDC_M, /* LCDC, MIMLB */ + _2DG_M, /* 2DG, RGPVG */ + SPI, /* HSPI, RSPI, QSPI */ + VIN_M, /* VIN0, 1 */ + SSI, /* SSI 0-3 */ + USB, + SHDMAC, + HUDI, + MMC, + RTC, + I2C0, /* I2C */ /* I2C 0, 1*/ + TMU_3_M, /* TMU30 - TMU80 */ + TMU_0_M, /* TMU00 - TMU21 */ + DU } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xFF804000, 0, 32, 8, /* INT2PRI0 */ + { DU, TMU00, TMU10, TMU20 } }, + { 0xFF804004, 0, 32, 8, /* INT2PRI1 */ + { TMU30, TMU60, RTC, SDHI } }, + { 0xFF804008, 0, 32, 8, /* INT2PRI2 */ + { HUDI, SHDMAC, USB, SSI } }, + { 0xFF80400C, 0, 32, 8, /* INT2PRI3 */ + { VIN0, SPI, _2DG, LBSCATA } }, + { 0xFF804010, 0, 32, 8, /* INT2PRI4 */ + { SCIF0, SCIF3, HSCIF, LCDC } }, + { 0xFF804014, 0, 32, 8, /* INT2PRI5 */ + { RCAN_P, LBSCDMAC_P, LBSCDMAC2, MMC } }, + { 0xFF804018, 0, 32, 8, /* INT2PRI6 */ + { HPBDMAC0_3, HPBDMAC4_10, HPBDMAC11_18, HPBDMAC19_22 } }, + { 0xFF80401C, 0, 32, 8, /* INT2PRI7 */ + { HPBDMAC23_25_27_28, I2C0, SRC0, SRC1 } }, + { 0xFF804020, 0, 32, 8, /* INT2PRI8 */ + { 0 /* ADIF */, VIN1, RESET_WDT, HIF } }, + { 0xFF804024, 0, 32, 8, /* INT2PRI9 */ + { ADMAC, FLCTL, GPIO0_3, GPIO4_5 } }, + { 0xFF804028, 0, 32, 8, /* INT2PRI10 */ + { STIF0, STIF1, VEU3F_VE3, GETHER } }, + { 0xFF80402C, 0, 32, 8, /* INT2PRI11 */ + { MTU2, RGPVG, MIMLB, IEBUS } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7734", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ + +static struct intc_vect irq3210_vectors[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2C0), INTC_VECT(IRQ3, 0x300), +}; + +static struct intc_sense_reg irq3210_sense_registers[] __initdata = { + { 0xFF80201C, 32, 2, /* ICR1 */ + { IRQ0, IRQ1, IRQ2, IRQ3, } }, +}; + +static struct intc_mask_reg irq3210_ack_registers[] __initdata = { + { 0xFF802024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, } }, +}; + +static struct intc_mask_reg irq3210_mask_registers[] __initdata = { + { 0xFF802044, 0xFF802064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, } }, +}; + +static 
struct intc_prio_reg irq3210_prio_registers[] __initdata = { + { 0xFF802010, 0, 32, 4, /* INTPRI */ + { IRQ0, IRQ1, IRQ2, IRQ3, } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq3210, "sh7734-irq3210", + irq3210_vectors, NULL, + irq3210_mask_registers, irq3210_prio_registers, + irq3210_sense_registers, irq3210_ack_registers); + +/* External interrupt pins in IRL mode */ + +static struct intc_vect vectors_irl3210[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl3210, "sh7734-irl3210", + vectors_irl3210, NULL, mask_registers, NULL, NULL); + +#define INTC_ICR0 0xFF802000 +#define INTC_INTMSK0 0xFF802044 +#define INTC_INTMSK1 0xFF802048 +#define INTC_INTMSKCLR0 0xFF802064 +#define INTC_INTMSKCLR1 0xFF802068 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 */ + __raw_writel(0xF0000000, INTC_INTMSK0); + + /* disable IRL3-0 */ + __raw_writel(0x80000000, INTC_INTMSK1); + + /* select IRL mode for IRL3-0 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00800000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode (LVLMODE)" */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq3210); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xf0000000, INTC_INTMSKCLR0); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR0); + register_intc_controller(&intc_desc_irl3210); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7757.c b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c new file mode 100644 index 000000000..67e330b7e --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7757.c @@ -0,0 +1,1242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7757 Setup + * + * Copyright (C) 2009, 2011 Renesas Solutions Corp. 
+ * + * based on setup-sh7785.c : Copyright (C) 2007 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/sh_timer.h> +#include <linux/sh_dma.h> +#include <linux/sh_intc.h> +#include <linux/usb/ohci_pdriver.h> +#include <cpu/dma-register.h> +#include <cpu/sh7757.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xfe4b0000, 0x100), /* SCIF2 */ + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 0, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xfe4c0000, 0x100), /* SCIF3 */ + DEFINE_RES_IRQ(evt2irq(0xb80)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 1, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xfe4d0000, 0x100), /* SCIF4 */ + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 2, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 3, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xfe430000, 0x20), + DEFINE_RES_IRQ(evt2irq(0x580)), + DEFINE_RES_IRQ(evt2irq(0x5a0)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct resource spi0_resources[] = { + [0] = { + .start = 0xfe002000, + .end = 0xfe0020ff, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT, + }, + [1] = { + .start = evt2irq(0xcc0), + .flags = IORESOURCE_IRQ, + }, +}; + +/* DMA */ +static const struct sh_dmae_slave_config sh7757_dmae0_slaves[] = { + { + .slave_id = SHDMA_SLAVE_SDHI_TX, + .addr = 0x1fe50030, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc5, + }, + { + .slave_id = SHDMA_SLAVE_SDHI_RX, + .addr = 0x1fe50030, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc6, + }, + { + .slave_id = SHDMA_SLAVE_MMCIF_TX, + .addr = 0x1fcb0034, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xd3, + }, + { + .slave_id = SHDMA_SLAVE_MMCIF_RX, + .addr = 0x1fcb0034, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_32BIT), + .mid_rid = 0xd7, + }, +}; + +static const struct sh_dmae_slave_config sh7757_dmae1_slaves[] = { + { + .slave_id = SHDMA_SLAVE_SCIF2_TX, + .addr = 0x1f4b000c, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x21, + }, + { + .slave_id = SHDMA_SLAVE_SCIF2_RX, + .addr = 0x1f4b0014, + .chcr = 
DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x22, + }, + { + .slave_id = SHDMA_SLAVE_SCIF3_TX, + .addr = 0x1f4c000c, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x29, + }, + { + .slave_id = SHDMA_SLAVE_SCIF3_RX, + .addr = 0x1f4c0014, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2a, + }, + { + .slave_id = SHDMA_SLAVE_SCIF4_TX, + .addr = 0x1f4d000c, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x41, + }, + { + .slave_id = SHDMA_SLAVE_SCIF4_RX, + .addr = 0x1f4d0014, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x42, + }, + { + .slave_id = SHDMA_SLAVE_RSPI_TX, + .addr = 0xfe480004, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc1, + }, + { + .slave_id = SHDMA_SLAVE_RSPI_RX, + .addr = 0xfe480004, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_16BIT), + .mid_rid = 0xc2, + }, +}; + +static const struct sh_dmae_slave_config sh7757_dmae2_slaves[] = { + { + .slave_id = SHDMA_SLAVE_RIIC0_TX, + .addr = 0x1e500012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x21, + }, + { + .slave_id = SHDMA_SLAVE_RIIC0_RX, + .addr = 0x1e500013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x22, + }, + { + .slave_id = SHDMA_SLAVE_RIIC1_TX, + .addr = 0x1e510012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x29, + }, + { + .slave_id = SHDMA_SLAVE_RIIC1_RX, + .addr = 0x1e510013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2a, + }, + { + .slave_id = SHDMA_SLAVE_RIIC2_TX, + .addr = 0x1e520012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xa1, + }, + { + .slave_id = SHDMA_SLAVE_RIIC2_RX, + .addr = 0x1e520013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xa2, + }, + { + .slave_id = SHDMA_SLAVE_RIIC3_TX, + .addr = 0x1e530012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xa9, + }, + { + .slave_id = SHDMA_SLAVE_RIIC3_RX, + .addr = 0x1e530013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xaf, + }, + { + .slave_id = SHDMA_SLAVE_RIIC4_TX, + .addr = 0x1e540012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xc5, + }, + { + .slave_id = SHDMA_SLAVE_RIIC4_RX, + .addr = 0x1e540013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0xc6, + }, +}; + +static const struct sh_dmae_slave_config sh7757_dmae3_slaves[] = { + { + .slave_id = SHDMA_SLAVE_RIIC5_TX, + .addr = 0x1e550012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x21, + }, + { + .slave_id = SHDMA_SLAVE_RIIC5_RX, + .addr = 0x1e550013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x22, + }, + { + .slave_id = SHDMA_SLAVE_RIIC6_TX, + .addr = 0x1e560012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x29, + }, + { + .slave_id = SHDMA_SLAVE_RIIC6_RX, + .addr = 0x1e560013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x2a, + }, + { + .slave_id = SHDMA_SLAVE_RIIC7_TX, + .addr = 0x1e570012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x41, + }, + { + .slave_id = 
SHDMA_SLAVE_RIIC7_RX, + .addr = 0x1e570013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x42, + }, + { + .slave_id = SHDMA_SLAVE_RIIC8_TX, + .addr = 0x1e580012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x45, + }, + { + .slave_id = SHDMA_SLAVE_RIIC8_RX, + .addr = 0x1e580013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x46, + }, + { + .slave_id = SHDMA_SLAVE_RIIC9_TX, + .addr = 0x1e590012, + .chcr = SM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x51, + }, + { + .slave_id = SHDMA_SLAVE_RIIC9_RX, + .addr = 0x1e590013, + .chcr = DM_INC | RS_ERS | 0x40000000 | + TS_INDEX2VAL(XMIT_SZ_8BIT), + .mid_rid = 0x52, + }, +}; + +static const struct sh_dmae_channel sh7757_dmae_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma0_platform_data = { + .slave = sh7757_dmae0_slaves, + .slave_num = ARRAY_SIZE(sh7757_dmae0_slaves), + .channel = sh7757_dmae_channels, + .channel_num = ARRAY_SIZE(sh7757_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .slave = sh7757_dmae1_slaves, + .slave_num = ARRAY_SIZE(sh7757_dmae1_slaves), + .channel = sh7757_dmae_channels, + .channel_num = ARRAY_SIZE(sh7757_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct sh_dmae_pdata dma2_platform_data = { + .slave = sh7757_dmae2_slaves, + .slave_num = ARRAY_SIZE(sh7757_dmae2_slaves), + .channel = sh7757_dmae_channels, + .channel_num = ARRAY_SIZE(sh7757_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct sh_dmae_pdata dma3_platform_data = { + .slave = sh7757_dmae3_slaves, + .slave_num = ARRAY_SIZE(sh7757_dmae3_slaves), + .channel = sh7757_dmae_channels, + .channel_num = ARRAY_SIZE(sh7757_dmae_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +/* channel 0 to 5 */ +static struct resource sh7757_dmae0_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xff608020, + .end = 0xff60808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xff609000, + .end = 0xff60900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x640), + .end = evt2irq(0x640), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +/* channel 6 to 11 
*/ +static struct resource sh7757_dmae1_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xff618020, + .end = 0xff61808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xff619000, + .end = 0xff61900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x640), + .end = evt2irq(0x640), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 4 */ + .start = evt2irq(0x7c0), + .end = evt2irq(0x7c0), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 5 */ + .start = evt2irq(0x7c0), + .end = evt2irq(0x7c0), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 6 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 7 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 8 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 9 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 10 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, + { + /* IRQ for channels 11 */ + .start = evt2irq(0xd00), + .end = evt2irq(0xd00), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +/* channel 12 to 17 */ +static struct resource sh7757_dmae2_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xff708020, + .end = 0xff70808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xff709000, + .end = 0xff70900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x2a60), + .end = evt2irq(0x2a60), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 12 to 16 */ + .start = evt2irq(0x2400), + .end = evt2irq(0x2480), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channel 17 */ + .start = evt2irq(0x24e0), + .end = evt2irq(0x24e0), + .flags = IORESOURCE_IRQ, + }, +}; + +/* channel 18 to 23 */ +static struct resource sh7757_dmae3_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xff718020, + .end = 0xff71808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xff719000, + .end = 0xff71900b, + .flags = IORESOURCE_MEM, + }, + { + .name = "error_irq", + .start = evt2irq(0x2a80), + .end = evt2irq(0x2a80), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channels 18 to 22 */ + .start = evt2irq(0x2500), + .end = evt2irq(0x2580), + .flags = IORESOURCE_IRQ, + }, + { + /* IRQ for channel 23 */ + .start = evt2irq(0x2600), + .end = evt2irq(0x2600), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = sh7757_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7757_dmae0_resources), + .dev = { + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7757_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7757_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, + }, +}; + +static struct platform_device dma2_device = { + .name = "sh-dma-engine", + .id = 2, + .resource = sh7757_dmae2_resources, + .num_resources = ARRAY_SIZE(sh7757_dmae2_resources), + .dev = { + 
.platform_data = &dma2_platform_data, + }, +}; + +static struct platform_device dma3_device = { + .name = "sh-dma-engine", + .id = 3, + .resource = sh7757_dmae3_resources, + .num_resources = ARRAY_SIZE(sh7757_dmae3_resources), + .dev = { + .platform_data = &dma3_platform_data, + }, +}; + +static struct platform_device spi0_device = { + .name = "sh_spi", + .id = 0, + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + }, + .num_resources = ARRAY_SIZE(spi0_resources), + .resource = spi0_resources, +}; + +static struct resource spi1_resources[] = { + { + .start = 0xffd8ee70, + .end = 0xffd8eeff, + .flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT, + }, + { + .start = evt2irq(0x8c0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device spi1_device = { + .name = "sh_spi", + .id = 1, + .num_resources = ARRAY_SIZE(spi1_resources), + .resource = spi1_resources, +}; + +static struct resource rspi_resources[] = { + { + .start = 0xfe480000, + .end = 0xfe4800ff, + .flags = IORESOURCE_MEM, + }, + { + .start = evt2irq(0x1d80), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rspi_device = { + .name = "rspi", + .id = 2, + .num_resources = ARRAY_SIZE(rspi_resources), + .resource = rspi_resources, +}; + +static struct resource usb_ehci_resources[] = { + [0] = { + .start = 0xfe4f1000, + .end = 0xfe4f10ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x920), + .end = evt2irq(0x920), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_ehci_device = { + .name = "sh_ehci", + .id = -1, + .dev = { + .dma_mask = &usb_ehci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .num_resources = ARRAY_SIZE(usb_ehci_resources), + .resource = usb_ehci_resources, +}; + +static struct resource usb_ohci_resources[] = { + [0] = { + .start = 0xfe4f1800, + .end = 0xfe4f18ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0x920), + .end = evt2irq(0x920), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct usb_ohci_pdata usb_ohci_pdata; + +static struct platform_device usb_ohci_device = { + .name = "ohci-platform", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &usb_ohci_pdata, + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + +static struct platform_device *sh7757_devices[] __initdata = { + &scif2_device, + &scif3_device, + &scif4_device, + &tmu0_device, + &dma0_device, + &dma1_device, + &dma2_device, + &dma3_device, + &spi0_device, + &spi1_device, + &rspi_device, + &usb_ehci_device, + &usb_ohci_device, +}; + +static int __init sh7757_devices_setup(void) +{ + return platform_add_devices(sh7757_devices, + ARRAY_SIZE(sh7757_devices)); +} +arch_initcall(sh7757_devices_setup); + +static struct platform_device *sh7757_early_devices[] __initdata = { + &scif2_device, + &scif3_device, + &scif4_device, + &tmu0_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7757_early_devices, + ARRAY_SIZE(sh7757_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, + IRQ0, IRQ1, IRQ2, IRQ3, 
IRQ4, IRQ5, IRQ6, IRQ7, + + SDHI, DVC, + IRQ8, IRQ9, IRQ11, IRQ10, IRQ12, IRQ13, IRQ14, IRQ15, + TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, + HUDI, + ARC4, + DMAC0_5, DMAC6_7, DMAC8_11, + SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, + USB0, USB1, + JMC, + SPI0, SPI1, + TMR01, TMR23, TMR45, + FRT, + LPC, LPC5, LPC6, LPC7, LPC8, + PECI0, PECI1, PECI2, PECI3, PECI4, PECI5, + ETHERC, + ADC0, ADC1, + SIM, + IIC0_0, IIC0_1, IIC0_2, IIC0_3, + IIC1_0, IIC1_1, IIC1_2, IIC1_3, + IIC2_0, IIC2_1, IIC2_2, IIC2_3, + IIC3_0, IIC3_1, IIC3_2, IIC3_3, + IIC4_0, IIC4_1, IIC4_2, IIC4_3, + IIC5_0, IIC5_1, IIC5_2, IIC5_3, + IIC6_0, IIC6_1, IIC6_2, IIC6_3, + IIC7_0, IIC7_1, IIC7_2, IIC7_3, + IIC8_0, IIC8_1, IIC8_2, IIC8_3, + IIC9_0, IIC9_1, IIC9_2, IIC9_3, + ONFICTL, + MMC1, MMC2, + ECCU, + PCIC, + G200, + RSPI, + SGPIO, + DMINT12, DMINT13, DMINT14, DMINT15, DMINT16, DMINT17, DMINT18, DMINT19, + DMINT20, DMINT21, DMINT22, DMINT23, + DDRECC, + TSIP, + PCIE_BRIDGE, + WDT0B, WDT1B, WDT2B, WDT3B, WDT4B, WDT5B, WDT6B, WDT7B, WDT8B, + GETHER0, GETHER1, GETHER2, + PBIA, PBIB, PBIC, + DMAE2, DMAE3, + SERMUX2, SERMUX3, + + /* interrupt groups */ + + TMU012, TMU345, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(SDHI, 0x480), INTC_VECT(SDHI, 0x04a0), + INTC_VECT(SDHI, 0x4c0), + INTC_VECT(DVC, 0x4e0), + INTC_VECT(IRQ8, 0x500), INTC_VECT(IRQ9, 0x520), + INTC_VECT(IRQ10, 0x540), + INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), + INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), + INTC_VECT(HUDI, 0x600), + INTC_VECT(ARC4, 0x620), + INTC_VECT(DMAC0_5, 0x640), INTC_VECT(DMAC0_5, 0x660), + INTC_VECT(DMAC0_5, 0x680), INTC_VECT(DMAC0_5, 0x6a0), + INTC_VECT(DMAC0_5, 0x6c0), + INTC_VECT(IRQ11, 0x6e0), + INTC_VECT(SCIF2, 0x700), INTC_VECT(SCIF2, 0x720), + INTC_VECT(SCIF2, 0x740), INTC_VECT(SCIF2, 0x760), + INTC_VECT(DMAC0_5, 0x780), INTC_VECT(DMAC0_5, 0x7a0), + INTC_VECT(DMAC6_7, 0x7c0), INTC_VECT(DMAC6_7, 0x7e0), + INTC_VECT(USB0, 0x840), + INTC_VECT(IRQ12, 0x880), + INTC_VECT(JMC, 0x8a0), + INTC_VECT(SPI1, 0x8c0), + INTC_VECT(IRQ13, 0x8e0), INTC_VECT(IRQ14, 0x900), + INTC_VECT(USB1, 0x920), + INTC_VECT(TMR01, 0xa00), INTC_VECT(TMR23, 0xa20), + INTC_VECT(TMR45, 0xa40), + INTC_VECT(FRT, 0xa80), + INTC_VECT(LPC, 0xaa0), INTC_VECT(LPC, 0xac0), + INTC_VECT(LPC, 0xae0), INTC_VECT(LPC, 0xb00), + INTC_VECT(LPC, 0xb20), + INTC_VECT(SCIF0, 0xb40), INTC_VECT(SCIF1, 0xb60), + INTC_VECT(SCIF3, 0xb80), INTC_VECT(SCIF3, 0xba0), + INTC_VECT(SCIF3, 0xbc0), INTC_VECT(SCIF3, 0xbe0), + INTC_VECT(PECI0, 0xc00), INTC_VECT(PECI1, 0xc20), + INTC_VECT(PECI2, 0xc40), + INTC_VECT(IRQ15, 0xc60), + INTC_VECT(ETHERC, 0xc80), INTC_VECT(ETHERC, 0xca0), + INTC_VECT(SPI0, 0xcc0), + INTC_VECT(ADC1, 0xce0), + INTC_VECT(DMAC8_11, 0xd00), INTC_VECT(DMAC8_11, 0xd20), + INTC_VECT(DMAC8_11, 0xd40), INTC_VECT(DMAC8_11, 0xd60), + INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0), + INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0), + INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), + INTC_VECT(TMU5, 0xe40), + INTC_VECT(ADC0, 0xe60), + INTC_VECT(SCIF4, 0xf00), INTC_VECT(SCIF4, 0xf20), + INTC_VECT(SCIF4, 0xf40), INTC_VECT(SCIF4, 0xf60), + INTC_VECT(IIC0_0, 0x1400), INTC_VECT(IIC0_1, 0x1420), + INTC_VECT(IIC0_2, 0x1440), INTC_VECT(IIC0_3, 0x1460), + INTC_VECT(IIC1_0, 0x1480), INTC_VECT(IIC1_1, 0x14e0), + INTC_VECT(IIC1_2, 0x1500), INTC_VECT(IIC1_3, 0x1520), + INTC_VECT(IIC2_0, 0x1540), INTC_VECT(IIC2_1, 0x1560), + INTC_VECT(IIC2_2, 0x1580), INTC_VECT(IIC2_3, 0x1600), + INTC_VECT(IIC3_0, 0x1620), INTC_VECT(IIC3_1, 0x1640), + INTC_VECT(IIC3_2, 0x16e0), 
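/*
 * How the INTEVT codes in this table become Linux IRQ numbers: each
 * INTC_VECT() entry ties a source ID to its INTEVT exception code, and the
 * platform device resources earlier in this file translate the same codes
 * with evt2irq().  A minimal sketch of that mapping, assuming the usual
 * SuperH definition (shared via <linux/sh_intc.h> / <asm/irq.h>):
 *
 *   #define evt2irq(evt)   (((evt) >> 5) - 16)
 *   #define irq2evt(irq)   (((irq) + 16) << 5)
 *
 *   evt2irq(0x700) == 40   - SCIF2 vector above, IRQ used by scif2_resources
 *   evt2irq(0x580) == 28   - TMU0 vector above, IRQ used by tmu0_resources
 */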
INTC_VECT(IIC3_3, 0x1700), + INTC_VECT(IIC4_0, 0x17c0), INTC_VECT(IIC4_1, 0x1800), + INTC_VECT(IIC4_2, 0x1820), INTC_VECT(IIC4_3, 0x1840), + INTC_VECT(IIC5_0, 0x1860), INTC_VECT(IIC5_1, 0x1880), + INTC_VECT(IIC5_2, 0x18a0), INTC_VECT(IIC5_3, 0x18c0), + INTC_VECT(IIC6_0, 0x18e0), INTC_VECT(IIC6_1, 0x1900), + INTC_VECT(IIC6_2, 0x1920), + INTC_VECT(ONFICTL, 0x1960), + INTC_VECT(IIC6_3, 0x1980), + INTC_VECT(IIC7_0, 0x19a0), INTC_VECT(IIC7_1, 0x1a00), + INTC_VECT(IIC7_2, 0x1a20), INTC_VECT(IIC7_3, 0x1a40), + INTC_VECT(IIC8_0, 0x1a60), INTC_VECT(IIC8_1, 0x1a80), + INTC_VECT(IIC8_2, 0x1aa0), INTC_VECT(IIC8_3, 0x1b40), + INTC_VECT(IIC9_0, 0x1b60), INTC_VECT(IIC9_1, 0x1b80), + INTC_VECT(IIC9_2, 0x1c00), INTC_VECT(IIC9_3, 0x1c20), + INTC_VECT(MMC1, 0x1c60), INTC_VECT(MMC2, 0x1c80), + INTC_VECT(ECCU, 0x1cc0), + INTC_VECT(PCIC, 0x1ce0), + INTC_VECT(G200, 0x1d00), + INTC_VECT(RSPI, 0x1d80), INTC_VECT(RSPI, 0x1da0), + INTC_VECT(RSPI, 0x1dc0), INTC_VECT(RSPI, 0x1de0), + INTC_VECT(PECI3, 0x1ec0), INTC_VECT(PECI4, 0x1ee0), + INTC_VECT(PECI5, 0x1f00), + INTC_VECT(SGPIO, 0x1f80), INTC_VECT(SGPIO, 0x1fa0), + INTC_VECT(SGPIO, 0x1fc0), + INTC_VECT(DMINT12, 0x2400), INTC_VECT(DMINT13, 0x2420), + INTC_VECT(DMINT14, 0x2440), INTC_VECT(DMINT15, 0x2460), + INTC_VECT(DMINT16, 0x2480), INTC_VECT(DMINT17, 0x24e0), + INTC_VECT(DMINT18, 0x2500), INTC_VECT(DMINT19, 0x2520), + INTC_VECT(DMINT20, 0x2540), INTC_VECT(DMINT21, 0x2560), + INTC_VECT(DMINT22, 0x2580), INTC_VECT(DMINT23, 0x2600), + INTC_VECT(DDRECC, 0x2620), + INTC_VECT(TSIP, 0x2640), + INTC_VECT(PCIE_BRIDGE, 0x27c0), + INTC_VECT(WDT0B, 0x2800), INTC_VECT(WDT1B, 0x2820), + INTC_VECT(WDT2B, 0x2840), INTC_VECT(WDT3B, 0x2860), + INTC_VECT(WDT4B, 0x2880), INTC_VECT(WDT5B, 0x28a0), + INTC_VECT(WDT6B, 0x28c0), INTC_VECT(WDT7B, 0x28e0), + INTC_VECT(WDT8B, 0x2900), + INTC_VECT(GETHER0, 0x2960), INTC_VECT(GETHER1, 0x2980), + INTC_VECT(GETHER2, 0x29a0), + INTC_VECT(PBIA, 0x2a00), INTC_VECT(PBIB, 0x2a20), + INTC_VECT(PBIC, 0x2a40), + INTC_VECT(DMAE2, 0x2a60), INTC_VECT(DMAE3, 0x2a80), + INTC_VECT(SERMUX2, 0x2aa0), INTC_VECT(SERMUX3, 0x2b40), + INTC_VECT(LPC5, 0x2b60), INTC_VECT(LPC6, 0x2b80), + INTC_VECT(LPC7, 0x2c00), INTC_VECT(LPC8, 0x2c20), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), + INTC_GROUP(TMU345, TMU3, TMU4, TMU5), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, + + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0, + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, + + { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ + { 0, 0, 0, 0, 0, 0, 0, 0, + 0, DMAC8_11, 0, PECI0, LPC, FRT, 0, TMR45, + TMR23, TMR01, 0, 0, 0, 0, 0, DMAC0_5, + HUDI, 0, 0, SCIF3, SCIF2, SDHI, TMU345, TMU012 + } }, + + { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */ + { IRQ15, IRQ14, IRQ13, IRQ12, IRQ11, IRQ10, SCIF4, ETHERC, + IRQ9, IRQ8, SCIF1, SCIF0, USB0, 0, 0, USB1, + ADC1, 0, DMAC6_7, ADC0, SPI0, SIM, PECI2, PECI1, + ARC4, 0, SPI1, JMC, 0, 0, 0, DVC + } }, + + { 0xffd10038, 0xffd1003c, 32, /* INT2MSKR2 / INT2MSKCR2 */ + { IIC4_1, IIC4_2, IIC5_0, ONFICTL, 0, 0, SGPIO, 
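/*
 * Reading these mask tables: each struct intc_mask_reg entry gives a set
 * register (writing 1 masks a source), the matching clear register (writing
 * 1 unmasks it), the register width, and then one source ID per bit listed
 * MSB first, so index 0 is bit 31 and zeros stand for reserved bits.  A
 * small worked example, assuming that layout; it matches how
 * plat_irq_setup() below masks the external pins:
 *
 *   bit = (reg_width - 1) - index;
 *   // IRQ0..IRQ7 occupy indexes 0..7 of INTMSK0, i.e. bits 31..24,
 *   // so all eight are masked with a single write:
 *   __raw_writel(0xff000000, INTC_INTMSK0);
 */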
0, + 0, G200, 0, IIC9_2, IIC8_2, IIC8_1, IIC8_0, IIC7_3, + IIC7_2, IIC7_1, IIC6_3, IIC0_0, IIC0_1, IIC0_2, IIC0_3, IIC3_1, + IIC2_3, 0, IIC2_1, IIC9_1, IIC3_3, IIC1_0, 0, IIC2_2 + } }, + + { 0xffd100d0, 0xffd100d4, 32, /* INT2MSKR3 / INT2MSKCR3 */ + { MMC1, IIC6_1, IIC6_0, IIC5_1, IIC3_2, IIC2_0, PECI5, MMC2, + IIC1_3, IIC1_2, IIC9_0, IIC8_3, IIC4_3, IIC7_0, 0, IIC6_2, + PCIC, 0, IIC4_0, 0, ECCU, RSPI, 0, IIC9_3, + IIC3_0, 0, IIC5_3, IIC5_2, 0, 0, 0, IIC1_1 + } }, + + { 0xffd20038, 0xffd2003c, 32, /* INT2MSKR4 / INT2MSKCR4 */ + { WDT0B, WDT1B, WDT3B, GETHER0, 0, 0, 0, 0, + 0, 0, 0, LPC7, SERMUX2, DMAE3, DMAE2, PBIC, + PBIB, PBIA, GETHER1, DMINT12, DMINT13, DMINT14, DMINT15, TSIP, + DMINT23, 0, DMINT21, LPC6, 0, DMINT16, 0, DMINT22 + } }, + + { 0xffd200d0, 0xffd200d4, 32, /* INT2MSKR5 / INT2MSKCR5 */ + { 0, WDT8B, WDT7B, WDT4B, 0, DMINT20, 0, 0, + DMINT19, DMINT18, LPC5, SERMUX3, WDT2B, GETHER2, 0, 0, + 0, 0, PCIE_BRIDGE, 0, 0, 0, 0, LPC8, + DDRECC, 0, WDT6B, WDT5B, 0, 0, 0, DMINT17 + } }, +}; + +#define INTPRI 0xffd00010 +#define INT2PRI0 0xffd40000 +#define INT2PRI1 0xffd40004 +#define INT2PRI2 0xffd40008 +#define INT2PRI3 0xffd4000c +#define INT2PRI4 0xffd40010 +#define INT2PRI5 0xffd40014 +#define INT2PRI6 0xffd40018 +#define INT2PRI7 0xffd4001c +#define INT2PRI8 0xffd400a0 +#define INT2PRI9 0xffd400a4 +#define INT2PRI10 0xffd400a8 +#define INT2PRI11 0xffd400ac +#define INT2PRI12 0xffd400b0 +#define INT2PRI13 0xffd400b4 +#define INT2PRI14 0xffd400b8 +#define INT2PRI15 0xffd400bc +#define INT2PRI16 0xffd10000 +#define INT2PRI17 0xffd10004 +#define INT2PRI18 0xffd10008 +#define INT2PRI19 0xffd1000c +#define INT2PRI20 0xffd10010 +#define INT2PRI21 0xffd10014 +#define INT2PRI22 0xffd10018 +#define INT2PRI23 0xffd1001c +#define INT2PRI24 0xffd100a0 +#define INT2PRI25 0xffd100a4 +#define INT2PRI26 0xffd100a8 +#define INT2PRI27 0xffd100ac +#define INT2PRI28 0xffd100b0 +#define INT2PRI29 0xffd100b4 +#define INT2PRI30 0xffd100b8 +#define INT2PRI31 0xffd100bc +#define INT2PRI32 0xffd20000 +#define INT2PRI33 0xffd20004 +#define INT2PRI34 0xffd20008 +#define INT2PRI35 0xffd2000c +#define INT2PRI36 0xffd20010 +#define INT2PRI37 0xffd20014 +#define INT2PRI38 0xffd20018 +#define INT2PRI39 0xffd2001c +#define INT2PRI40 0xffd200a0 +#define INT2PRI41 0xffd200a4 +#define INT2PRI42 0xffd200a8 +#define INT2PRI43 0xffd200ac +#define INT2PRI44 0xffd200b0 +#define INT2PRI45 0xffd200b4 +#define INT2PRI46 0xffd200b8 +#define INT2PRI47 0xffd200bc + +static struct intc_prio_reg prio_registers[] __initdata = { + { INTPRI, 0, 32, 4, { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, + + { INT2PRI0, 0, 32, 8, { TMU0, TMU1, TMU2, TMU2_TICPI } }, + { INT2PRI1, 0, 32, 8, { TMU3, TMU4, TMU5, SDHI } }, + { INT2PRI2, 0, 32, 8, { SCIF2, SCIF3, 0, IRQ8 } }, + { INT2PRI3, 0, 32, 8, { HUDI, DMAC0_5, ADC0, IRQ9 } }, + { INT2PRI4, 0, 32, 8, { IRQ10, 0, TMR01, TMR23 } }, + { INT2PRI5, 0, 32, 8, { TMR45, 0, FRT, LPC } }, + { INT2PRI6, 0, 32, 8, { PECI0, ETHERC, DMAC8_11, 0 } }, + { INT2PRI7, 0, 32, 8, { SCIF4, 0, IRQ11, IRQ12 } }, + { INT2PRI8, 0, 32, 8, { 0, 0, 0, DVC } }, + { INT2PRI9, 0, 32, 8, { ARC4, 0, SPI1, JMC } }, + { INT2PRI10, 0, 32, 8, { SPI0, SIM, PECI2, PECI1 } }, + { INT2PRI11, 0, 32, 8, { ADC1, IRQ13, DMAC6_7, IRQ14 } }, + { INT2PRI12, 0, 32, 8, { USB0, 0, IRQ15, USB1 } }, + { INT2PRI13, 0, 32, 8, { 0, 0, SCIF1, SCIF0 } }, + + { INT2PRI16, 0, 32, 8, { IIC2_2, 0, 0, 0 } }, + { INT2PRI17, 0, 32, 8, { 0, 0, 0, IIC1_0 } }, + { INT2PRI18, 0, 32, 8, { IIC3_3, IIC9_1, IIC2_1, IIC1_2 } }, + { INT2PRI19, 0, 32, 8, { 
IIC2_3, IIC3_1, 0, IIC1_3 } }, + { INT2PRI20, 0, 32, 8, { IIC2_0, IIC6_3, IIC7_1, IIC7_2 } }, + { INT2PRI21, 0, 32, 8, { IIC7_3, IIC8_0, IIC8_1, IIC8_2 } }, + { INT2PRI22, 0, 32, 8, { IIC9_2, MMC2, G200, 0 } }, + { INT2PRI23, 0, 32, 8, { PECI5, SGPIO, IIC3_2, IIC5_1 } }, + { INT2PRI24, 0, 32, 8, { PECI4, PECI3, 0, IIC1_1 } }, + { INT2PRI25, 0, 32, 8, { IIC3_0, 0, IIC5_3, IIC5_2 } }, + { INT2PRI26, 0, 32, 8, { ECCU, RSPI, 0, IIC9_3 } }, + { INT2PRI27, 0, 32, 8, { PCIC, IIC6_0, IIC4_0, IIC6_1 } }, + { INT2PRI28, 0, 32, 8, { IIC4_3, IIC7_0, MMC1, IIC6_2 } }, + { INT2PRI29, 0, 32, 8, { 0, 0, IIC9_0, IIC8_3 } }, + { INT2PRI30, 0, 32, 8, { IIC4_1, IIC4_2, IIC5_0, ONFICTL } }, + { INT2PRI31, 0, 32, 8, { IIC0_0, IIC0_1, IIC0_2, IIC0_3 } }, + { INT2PRI32, 0, 32, 8, { DMINT22, 0, 0, 0 } }, + { INT2PRI33, 0, 32, 8, { 0, 0, 0, DMINT16 } }, + { INT2PRI34, 0, 32, 8, { 0, LPC6, DMINT21, DMINT18 } }, + { INT2PRI35, 0, 32, 8, { DMINT23, TSIP, 0, DMINT19 } }, + { INT2PRI36, 0, 32, 8, { DMINT20, GETHER1, PBIA, PBIB } }, + { INT2PRI37, 0, 32, 8, { PBIC, DMAE2, DMAE3, SERMUX2 } }, + { INT2PRI38, 0, 32, 8, { LPC7, 0, 0, 0 } }, + { INT2PRI39, 0, 32, 8, { 0, 0, 0, WDT4B } }, + { INT2PRI40, 0, 32, 8, { 0, 0, 0, DMINT17 } }, + { INT2PRI41, 0, 32, 8, { DDRECC, 0, WDT6B, WDT5B } }, + { INT2PRI42, 0, 32, 8, { 0, 0, 0, LPC8 } }, + { INT2PRI43, 0, 32, 8, { 0, WDT7B, PCIE_BRIDGE, WDT8B } }, + { INT2PRI44, 0, 32, 8, { WDT2B, GETHER2, 0, 0 } }, + { INT2PRI45, 0, 32, 8, { 0, 0, LPC5, SERMUX3 } }, + { INT2PRI46, 0, 32, 8, { WDT0B, WDT1B, WDT3B, GETHER0 } }, + { INT2PRI47, 0, 32, 8, { DMINT12, DMINT13, DMINT14, DMINT15 } }, +}; + +static struct intc_sense_reg sense_registers_irq8to15[] __initdata = { + { 0xffd100f8, 32, 2, /* ICR2 */ { IRQ15, IRQ14, IRQ13, IRQ12, + IRQ11, IRQ10, IRQ9, IRQ8 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7757", vectors, groups, + mask_registers, prio_registers, + sense_registers_irq8to15); + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240), + INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0), +}; + +static struct intc_vect vectors_irq4567[] __initdata = { + INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340), + INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0), +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xffd00024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7757-irq0123", + vectors_irq0123, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7757-irq4567", + vectors_irq4567, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +/* External interrupt pins in IRL mode */ +static struct intc_vect vectors_irl0123[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static struct intc_vect vectors_irl4567[] 
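/*
 * The IRL tables encode levels, not separate lines: in IRL mode the four
 * IRL pins carry a 4-bit encoded interrupt level, so each INTC_VECT() here
 * names one of the fifteen possible codes rather than an individual pin.
 * A sketch of the encoding, assuming the usual active-low SH-4 IRL
 * behaviour:
 *
 *   IRL3..IRL0 = LLLL  -> vector 0x200  (highest level)
 *   IRL3..IRL0 = HHHL  -> vector 0x3c0  (lowest level)
 *   IRL3..IRL0 = HHHH  -> no interrupt pending (hence only 15 vectors)
 *
 * Board code picks the decoding variant at init time, for example:
 *
 *   plat_irq_setup_pins(IRQ_MODE_IRL3210);  // or IRQ_MODE_IRL3210_MASK, etc.
 */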
__initdata = { + INTC_VECT(IRL4_LLLL, 0x200), INTC_VECT(IRL4_LLLH, 0x220), + INTC_VECT(IRL4_LLHL, 0x240), INTC_VECT(IRL4_LLHH, 0x260), + INTC_VECT(IRL4_LHLL, 0x280), INTC_VECT(IRL4_LHLH, 0x2a0), + INTC_VECT(IRL4_LHHL, 0x2c0), INTC_VECT(IRL4_LHHH, 0x2e0), + INTC_VECT(IRL4_HLLL, 0x300), INTC_VECT(IRL4_HLLH, 0x320), + INTC_VECT(IRL4_HLHL, 0x340), INTC_VECT(IRL4_HLHH, 0x360), + INTC_VECT(IRL4_HHLL, 0x380), INTC_VECT(IRL4_HHLH, 0x3a0), + INTC_VECT(IRL4_HHHL, 0x3c0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7757-irl0123", vectors_irl0123, + NULL, mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7757-irl4567", vectors_irl4567, + NULL, mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 + IRQ7-4 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode" */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ7654: + /* select IRQ mode for IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); + register_intc_controller(&intc_desc_irq4567); + break; + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq0123); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl4567); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl0123); + break; + default: + BUG(); + } +} + +void __init plat_mem_setup(void) +{ +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7763.c b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c new file mode 100644 index 000000000..b06086647 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7763.c @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7763 Setup + * + * Copyright (C) 2006 Paul Mundt + * Copyright (C) 2007 Yoshihiro Shimoda + * Copyright (C) 2008, 2009 Nobuhiro Iwamatsu + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> +#include <linux/serial_sci.h> +#include <linux/usb/ohci_pdriver.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 
0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe08000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb80)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xf00)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffe80000, + .end = 0xffe80000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +static struct resource usb_ohci_resources[] = { + [0] = { + .start = 0xffec8000, + .end = 0xffec80ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xc60), + .end = evt2irq(0xc60), + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 usb_ohci_dma_mask = 0xffffffffUL; + +static struct usb_ohci_pdata usb_ohci_pdata; + +static struct platform_device usb_ohci_device = { + .name = "ohci-platform", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_dma_mask, + .coherent_dma_mask = 0xffffffff, + .platform_data = &usb_ohci_pdata, + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + +static struct resource usbf_resources[] = { + [0] = { + .start = 0xffec0000, + .end = 0xffec00ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xc80), + .end = evt2irq(0xc80), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usbf_device = { + .name = "sh_udc", + .id = -1, + .dev = { + .dma_mask = NULL, + .coherent_dma_mask = 0xffffffff, + }, + .num_resources = ARRAY_SIZE(usbf_resources), + .resource = usbf_resources, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x580)), + DEFINE_RES_IRQ(evt2irq(0x5a0)), + DEFINE_RES_IRQ(evt2irq(0x5c0)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffd88000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0xe00)), + DEFINE_RES_IRQ(evt2irq(0xe20)), + DEFINE_RES_IRQ(evt2irq(0xe40)), +}; + +static struct platform_device 
tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct platform_device *sh7763_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &tmu0_device, + &tmu1_device, + &rtc_device, + &usb_ohci_device, + &usbf_device, +}; + +static int __init sh7763_devices_setup(void) +{ + return platform_add_devices(sh7763_devices, + ARRAY_SIZE(sh7763_devices)); +} +arch_initcall(sh7763_devices_setup); + +static struct platform_device *sh7763_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &tmu0_device, + &tmu1_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7763_early_devices, + ARRAY_SIZE(sh7763_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, LCDC, DMAC, SCIF0, IIC0, IIC1, CMT, GETHER, HAC, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + STIF0, STIF1, SCIF1, SIOF0, SIOF1, SIOF2, + USBH, USBF, TPU, PCC, MMCIF, SIM, + TMU3, TMU4, TMU5, ADC, SSI0, SSI1, SSI2, SSI3, + SCIF2, GPIO, + + /* interrupt groups */ + + TMU012, TMU345, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(WDT, 0x560), INTC_VECT(TMU0, 0x580), + INTC_VECT(TMU1, 0x5a0), INTC_VECT(TMU2, 0x5c0), + INTC_VECT(TMU2_TICPI, 0x5e0), INTC_VECT(HUDI, 0x600), + INTC_VECT(LCDC, 0x620), + INTC_VECT(DMAC, 0x640), INTC_VECT(DMAC, 0x660), + INTC_VECT(DMAC, 0x680), INTC_VECT(DMAC, 0x6a0), + INTC_VECT(DMAC, 0x6c0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(DMAC, 0x780), INTC_VECT(DMAC, 0x7a0), + INTC_VECT(IIC0, 0x8A0), INTC_VECT(IIC1, 0x8C0), + INTC_VECT(CMT, 0x900), INTC_VECT(GETHER, 0x920), + INTC_VECT(GETHER, 0x940), INTC_VECT(GETHER, 0x960), + INTC_VECT(HAC, 0x980), + INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), + INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), + INTC_VECT(STIF0, 0xb40), INTC_VECT(STIF1, 0xb60), + INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0), + INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0), + INTC_VECT(SIOF0, 0xc00), INTC_VECT(SIOF1, 0xc20), + INTC_VECT(USBH, 0xc60), INTC_VECT(USBF, 0xc80), + INTC_VECT(USBF, 0xca0), + INTC_VECT(TPU, 0xcc0), INTC_VECT(PCC, 0xce0), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), + INTC_VECT(SIM, 0xd80), INTC_VECT(SIM, 0xda0), + INTC_VECT(SIM, 0xdc0), INTC_VECT(SIM, 0xde0), + INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), + INTC_VECT(TMU5, 0xe40), INTC_VECT(ADC, 0xe60), + INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0), + INTC_VECT(SSI2, 0xec0), INTC_VECT(SSI3, 0xee0), + INTC_VECT(SCIF2, 0xf00), INTC_VECT(SCIF2, 0xf20), + INTC_VECT(SCIF2, 0xf40), INTC_VECT(SCIF2, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), + 
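/*
 * INTC_GROUP() folds several vectors into one logical ID: TMU012 stands for
 * TMU0, TMU1, TMU2 and TMU2_TICPI wherever the mask table below names it,
 * so one mask bit covers the whole group while each channel keeps its own
 * vector and handler.  The tables are then bundled and registered, as this
 * file does further down; a condensed sketch of that flow:
 *
 *   static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups,
 *                            mask_registers, prio_registers, NULL);
 *   ...
 *   register_intc_controller(&intc_desc);   // from plat_irq_setup()
 */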
INTC_GROUP(TMU345, TMU3, TMU4, TMU5), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ + { 0, 0, 0, 0, 0, 0, GPIO, 0, + SSI0, MMCIF, 0, SIOF0, PCIC5, PCIINTD, PCIINTC, PCIINTB, + PCIINTA, PCISERR, HAC, CMT, 0, 0, 0, DMAC, + HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } }, + { 0xffd400d0, 0xffd400d4, 32, /* INT2MSKR1 / INT2MSKCR1 */ + { 0, 0, 0, 0, 0, 0, SCIF2, USBF, + 0, 0, STIF1, STIF0, 0, 0, USBH, GETHER, + PCC, 0, 0, ADC, TPU, SIM, SIOF2, SIOF1, + LCDC, 0, IIC1, IIC0, SSI3, SSI2, SSI1, 0 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1, + TMU2, TMU2_TICPI } }, + { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } }, + { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } }, + { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC, ADC } }, + { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC, + PCISERR, PCIINTA } }, + { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC, + PCIINTD, PCIC5 } }, + { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF0, USBF, MMCIF, SSI0 } }, + { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SCIF2, GPIO } }, + { 0xffd400a0, 0, 32, 8, /* INT2PRI8 */ { SSI3, SSI2, SSI1, 0 } }, + { 0xffd400a4, 0, 32, 8, /* INT2PRI9 */ { LCDC, 0, IIC1, IIC0 } }, + { 0xffd400a8, 0, 32, 8, /* INT2PRI10 */ { TPU, SIM, SIOF2, SIOF1 } }, + { 0xffd400ac, 0, 32, 8, /* INT2PRI11 */ { PCC } }, + { 0xffd400b0, 0, 32, 8, /* INT2PRI12 */ { 0, 0, USBH, GETHER } }, + { 0xffd400b4, 0, 32, 8, /* INT2PRI13 */ { 0, 0, STIF1, STIF0 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7763", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect irq_vectors[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), + INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), + INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), +}; + +static struct intc_mask_reg irq_mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg irq_prio_registers[] __initdata = { + { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg irq_sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg irq_ack_registers[] __initdata = { + { 0xffd00024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7763-irq", irq_vectors, + NULL, irq_mask_registers, irq_prio_registers, + irq_sense_registers, irq_ack_registers); + + +/* External interrupt pins in IRL mode */ +static struct intc_vect irl_vectors[] __initdata = { + INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), + INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), + INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), + INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), + INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), + INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), + INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), + INTC_VECT(IRL_HHHL, 0x3c0), +}; + +static struct intc_mask_reg irl3210_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 
*/ + { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static struct intc_mask_reg irl7654_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7763-irl7654", irl_vectors, + NULL, irl7654_mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7763-irl3210", irl_vectors, + NULL, irl3210_mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void __init plat_irq_setup(void) +{ + /* disable IRQ7-0 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: + /* select IRQ mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); + register_intc_controller(&intc_irq_desc); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl7654_desc); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl3210_desc); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7770.c b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c new file mode 100644 index 000000000..5efec6ceb --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7770.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7770 Setup + * + * Copyright (C) 2006 - 2008 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <linux/io.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xff923000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9a0)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xff924000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9c0)), +}; + +static struct platform_device scif1_device = { + 
.name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xff925000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9e0)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xff926000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xa00)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xff927000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xa20)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xff928000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xa40)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct plat_sci_port scif6_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif6_resources[] = { + DEFINE_RES_MEM(0xff929000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xa60)), +}; + +static struct platform_device scif6_device = { + .name = "sh-sci", + .id = 6, + .resource = scif6_resources, + .num_resources = ARRAY_SIZE(scif6_resources), + .dev = { + .platform_data = &scif6_platform_data, + }, +}; + +static struct plat_sci_port scif7_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif7_resources[] = { + DEFINE_RES_MEM(0xff92a000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xa80)), +}; + +static struct platform_device scif7_device = { + .name = "sh-sci", + .id = 7, + .resource = scif7_resources, + .num_resources = ARRAY_SIZE(scif7_resources), + .dev = { + .platform_data = &scif7_platform_data, + }, +}; + +static struct plat_sci_port scif8_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif8_resources[] = { + DEFINE_RES_MEM(0xff92b000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xaa0)), +}; + +static struct platform_device scif8_device = { + .name = "sh-sci", + .id = 8, + .resource = scif8_resources, + .num_resources = ARRAY_SIZE(scif8_resources), + .dev = { + .platform_data = &scif8_platform_data, + }, +}; + +static struct plat_sci_port scif9_platform_data = { + .scscr = SCSCR_REIE | SCSCR_TOIE, + .type = PORT_SCIF, +}; + +static struct resource scif9_resources[] = { + 
DEFINE_RES_MEM(0xff92c000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xac0)), +}; + +static struct platform_device scif9_device = { + .name = "sh-sci", + .id = 9, + .resource = scif9_resources, + .num_resources = ARRAY_SIZE(scif9_resources), + .dev = { + .platform_data = &scif9_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffd81000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x460)), + DEFINE_RES_IRQ(evt2irq(0x480)), + DEFINE_RES_IRQ(evt2irq(0x4a0)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct sh_timer_config tmu2_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu2_resources[] = { + DEFINE_RES_MEM(0xffd82000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x4c0)), + DEFINE_RES_IRQ(evt2irq(0x4e0)), + DEFINE_RES_IRQ(evt2irq(0x500)), +}; + +static struct platform_device tmu2_device = { + .name = "sh-tmu", + .id = 2, + .dev = { + .platform_data = &tmu2_platform_data, + }, + .resource = tmu2_resources, + .num_resources = ARRAY_SIZE(tmu2_resources), +}; + +static struct platform_device *sh7770_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &scif8_device, + &scif9_device, + &tmu0_device, + &tmu1_device, + &tmu2_device, +}; + +static int __init sh7770_devices_setup(void) +{ + return platform_add_devices(sh7770_devices, + ARRAY_SIZE(sh7770_devices)); +} +arch_initcall(sh7770_devices_setup); + +static struct platform_device *sh7770_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &scif6_device, + &scif7_device, + &scif8_device, + &scif9_device, + &tmu0_device, + &tmu1_device, + &tmu2_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7770_early_devices, + ARRAY_SIZE(sh7770_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, + + GPIO, + TMU0, TMU1, TMU2, TMU2_TICPI, + TMU3, TMU4, TMU5, TMU5_TICPI, + TMU6, TMU7, TMU8, + HAC, IPI, SPDIF, HUDI, I2C, + DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, + I2S0, I2S1, I2S2, I2S3, + SRC_RX, SRC_TX, SRC_SPDIF, + DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D, + GFX3D_MBX, GFX3D_DMAC, + EXBUS_ATA, + SPI0, SPI1, + SCIF089, SCIF1234, SCIF567, + ADC, + BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, + BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, + BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31, + + /* interrupt groups */ + TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC, +}; + +static struct intc_vect vectors[] __initdata = 
{ + INTC_VECT(GPIO, 0x3e0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460), + INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0), + INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0), + INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520), + INTC_VECT(TMU8, 0x540), + INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0), + INTC_VECT(SPDIF, 0x5e0), + INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620), + INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660), + INTC_VECT(DMAC0_DMINT2, 0x680), + INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0), + INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700), + INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740), + INTC_VECT(SRC_SPDIF, 0x760), + INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0), + INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0), + INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860), + INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0), + INTC_VECT(GFX2D, 0x8c0), + INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920), + INTC_VECT(EXBUS_ATA, 0x940), + INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980), + INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0), + INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00), + INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40), + INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80), + INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0), + INTC_VECT(ADC, 0xb20), + INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0), + INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00), + INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40), + INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80), + INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0), + INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00), + INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40), + INTC_VECT(BBDMAC_11_14, 0xd60), INTC_VECT(BBDMAC_15_18, 0xd80), + INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0), + INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00), + INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40), + INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80), + INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0), + INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00), + INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40), + INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, + TMU5_TICPI, TMU6, TMU7, TMU8), + INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2), + INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3), + INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF), + INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC), + INTC_GROUP(SPI, SPI0, SPI1), + INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567), + INTC_GROUP(BBDMAC, + BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, + BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, + BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */ + { 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D, + GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S, + DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } }, + { 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } }, + { 0xffe00008, 0, 32, 8, 
/* INT2PRI2 */ { DMAC, I2S, SRC, DU } }, + { 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } }, + { 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } }, + { 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } }, + { 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } }, + { 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } }, + { 0xffe00020, 0, 32, 8, /* INT2PRI8 */ + { BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } }, + { 0xffe00024, 0, 32, 8, /* INT2PRI9 */ + { BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } }, + { 0xffe00028, 0, 32, 8, /* INT2PRI10 */ + { BBDMAC_29, BBDMAC_30, BBDMAC_31 } }, + { 0xffe0002c, 0, 32, 8, /* INT2PRI11 */ + { TMU1, TMU2, TMU2_TICPI, TMU3 } }, + { 0xffe00030, 0, 32, 8, /* INT2PRI12 */ + { TMU4, TMU5, TMU5_TICPI, TMU6 } }, + { 0xffe00034, 0, 32, 8, /* INT2PRI13 */ + { TMU7, TMU8 } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect irq_vectors[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), + INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), +}; + +static struct intc_mask_reg irq_mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, +}; + +static struct intc_prio_reg irq_prio_registers[] __initdata = { + { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, } }, +}; + +static struct intc_sense_reg irq_sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, } }, +}; + +static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors, + NULL, irq_mask_registers, irq_prio_registers, + irq_sense_registers); + +/* External interrupt pins in IRL mode */ +static struct intc_vect irl_vectors[] __initdata = { + INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), + INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), + INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), + INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), + INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), + INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), + INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), + INTC_VECT(IRL_HHHL, 0x3c0), +}; + +static struct intc_mask_reg irl3210_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static struct intc_mask_reg irl7654_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors, + NULL, irl7654_mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, + NULL, irl3210_mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void 
__init plat_irq_setup(void) +{ + /* disable IRQ7-0 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode" */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: + /* select IRQ mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); + register_intc_controller(&intc_irq_desc); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl7654_desc); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl3210_desc); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c new file mode 100644 index 000000000..c818b788e --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c @@ -0,0 +1,505 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7780 Setup + * + * Copyright (C) 2006 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/io.h> +#include <linux/serial_sci.h> +#include <linux/sh_dma.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <cpu/dma-register.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffe00000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffe10000, 0x100), + DEFINE_RES_IRQ(evt2irq(0xb80)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x580)), + DEFINE_RES_IRQ(evt2irq(0x5a0)), + DEFINE_RES_IRQ(evt2irq(0x5c0)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + 
.num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffdc0000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0xe00)), + DEFINE_RES_IRQ(evt2irq(0xe20)), + DEFINE_RES_IRQ(evt2irq(0xe40)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct resource rtc_resources[] = { + [0] = { + .start = 0xffe80000, + .end = 0xffe80000 + 0x58 - 1, + .flags = IORESOURCE_IO, + }, + [1] = { + /* Shared Period/Carry/Alarm IRQ */ + .start = evt2irq(0x480), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device rtc_device = { + .name = "sh-rtc", + .id = -1, + .num_resources = ARRAY_SIZE(rtc_resources), + .resource = rtc_resources, +}; + +/* DMA */ +static const struct sh_dmae_channel sh7780_dmae0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const struct sh_dmae_channel sh7780_dmae1_channels[] = { + { + .offset = 0, + }, { + .offset = 0x10, + }, { + .offset = 0x20, + }, { + .offset = 0x30, + }, { + .offset = 0x50, + }, { + .offset = 0x60, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = sh7780_dmae0_channels, + .channel_num = ARRAY_SIZE(sh7780_dmae0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .channel = sh7780_dmae1_channels, + .channel_num = ARRAY_SIZE(sh7780_dmae1_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct resource sh7780_dmae0_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfc808020, + .end = 0xfc80808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfc809000, + .end = 0xfc80900b, + .flags = IORESOURCE_MEM, + }, + { + /* + * Real DMA error vector is 0x6c0, and channel + * vectors are 0x640-0x6a0, 0x780-0x7a0 + */ + .name = "error_irq", + .start = evt2irq(0x640), + .end = evt2irq(0x640), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct resource sh7780_dmae1_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfc818020, + .end = 0xfc81808f, + .flags = IORESOURCE_MEM, + }, + /* DMAC1 has no DMARS */ + { + /* + * Real DMA error vector is 0x6c0, and channel + * vectors are 0x7c0-0x7e0, 0xd80-0xde0 + */ + .name = "error_irq", + .start = evt2irq(0x7c0), + .end = evt2irq(0x7c0), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = sh7780_dmae0_resources, + .num_resources = 
ARRAY_SIZE(sh7780_dmae0_resources), + .dev = { + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7780_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7780_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, + }, +}; + +static struct platform_device *sh7780_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, + &tmu1_device, + &rtc_device, + &dma0_device, + &dma1_device, +}; + +static int __init sh7780_devices_setup(void) +{ + return platform_add_devices(sh7780_devices, + ARRAY_SIZE(sh7780_devices)); +} +arch_initcall(sh7780_devices_setup); + +static struct platform_device *sh7780_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &tmu0_device, + &tmu1_device, +}; + +void __init plat_early_device_setup(void) +{ + if (mach_is_sh2007()) { + scif0_platform_data.scscr &= ~SCSCR_CKE1; + scif1_platform_data.scscr &= ~SCSCR_CKE1; + } + + sh_early_platform_add_devices(sh7780_early_devices, + ARRAY_SIZE(sh7780_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + RTC, WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, DMAC0, SCIF0, DMAC1, CMT, HAC, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + SCIF1, SIOF, HSPI, MMCIF, TMU3, TMU4, TMU5, SSI, FLCTL, GPIO, + + /* interrupt groups */ + + TMU012, TMU345, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(RTC, 0x480), INTC_VECT(RTC, 0x4a0), + INTC_VECT(RTC, 0x4c0), + INTC_VECT(WDT, 0x560), + INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), + INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), + INTC_VECT(HUDI, 0x600), + INTC_VECT(DMAC0, 0x640), INTC_VECT(DMAC0, 0x660), + INTC_VECT(DMAC0, 0x680), INTC_VECT(DMAC0, 0x6a0), + INTC_VECT(DMAC0, 0x6c0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(DMAC0, 0x780), INTC_VECT(DMAC0, 0x7a0), + INTC_VECT(DMAC1, 0x7c0), INTC_VECT(DMAC1, 0x7e0), + INTC_VECT(CMT, 0x900), INTC_VECT(HAC, 0x980), + INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), + INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), + INTC_VECT(SCIF1, 0xb80), INTC_VECT(SCIF1, 0xba0), + INTC_VECT(SCIF1, 0xbc0), INTC_VECT(SCIF1, 0xbe0), + INTC_VECT(SIOF, 0xc00), INTC_VECT(HSPI, 0xc80), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), + INTC_VECT(DMAC1, 0xd80), INTC_VECT(DMAC1, 0xda0), + INTC_VECT(DMAC1, 0xdc0), INTC_VECT(DMAC1, 0xde0), + INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), + INTC_VECT(TMU5, 0xe40), + INTC_VECT(SSI, 0xe80), + INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20), + INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), + INTC_GROUP(TMU345, TMU3, TMU4, TMU5), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ + { 0, 0, 0, 0, 0, 0, GPIO, FLCTL, + SSI, MMCIF, HSPI, SIOF, PCIC5, 
PCIINTD, PCIINTC, PCIINTB, + PCIINTA, PCISERR, HAC, CMT, 0, 0, DMAC1, DMAC0, + HUDI, 0, WDT, SCIF1, SCIF0, RTC, TMU345, TMU012 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1, + TMU2, TMU2_TICPI } }, + { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, RTC } }, + { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, WDT } }, + { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { HUDI, DMAC0, DMAC1 } }, + { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { CMT, HAC, + PCISERR, PCIINTA, } }, + { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { PCIINTB, PCIINTC, + PCIINTD, PCIC5 } }, + { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { SIOF, HSPI, MMCIF, SSI } }, + { 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { FLCTL, GPIO } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7780", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ + +static struct intc_vect irq_vectors[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), + INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), + INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), +}; + +static struct intc_mask_reg irq_mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_prio_reg irq_prio_registers[] __initdata = { + { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_sense_reg irq_sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg irq_ack_registers[] __initdata = { + { 0xffd00024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_irq_desc, "sh7780-irq", irq_vectors, + NULL, irq_mask_registers, irq_prio_registers, + irq_sense_registers, irq_ack_registers); + +/* External interrupt pins in IRL mode */ + +static struct intc_vect irl_vectors[] __initdata = { + INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), + INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), + INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), + INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), + INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), + INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), + INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), + INTC_VECT(IRL_HHHL, 0x3c0), +}; + +static struct intc_mask_reg irl3210_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static struct intc_mask_reg irl7654_mask_registers[] __initdata = { + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, +}; + +static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors, + NULL, irl7654_mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, + NULL, irl3210_mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define 
INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void __init plat_irq_setup(void) +{ + /* disable IRQ7-0 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode" */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ: + /* select IRQ mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); + register_intc_controller(&intc_irq_desc); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl7654_desc); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_irl3210_desc); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c new file mode 100644 index 000000000..3b4a414d6 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7785 Setup + * + * Copyright (C) 2007 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/sh_dma.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <asm/mmzone.h> +#include <asm/platform_early.h> +#include <cpu/dma-register.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffea0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffeb0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x780)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffec0000, 0x100), + 
DEFINE_RES_IRQ(evt2irq(0x980)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xffed0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9a0)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xffee0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9c0)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xffef0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x9e0)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x580)), + DEFINE_RES_IRQ(evt2irq(0x5a0)), + DEFINE_RES_IRQ(evt2irq(0x5c0)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffdc0000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0xe00)), + DEFINE_RES_IRQ(evt2irq(0xe20)), + DEFINE_RES_IRQ(evt2irq(0xe40)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +/* DMA */ +static const struct sh_dmae_channel sh7785_dmae0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const struct sh_dmae_channel sh7785_dmae1_channels[] = { + { + .offset = 0, + }, { + .offset = 0x10, + }, { + .offset = 0x20, + }, { + .offset = 0x30, + }, { + .offset = 0x50, + }, { + .offset = 0x60, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = sh7785_dmae0_channels, + .channel_num = 
ARRAY_SIZE(sh7785_dmae0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct sh_dmae_pdata dma1_platform_data = { + .channel = sh7785_dmae1_channels, + .channel_num = ARRAY_SIZE(sh7785_dmae1_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +static struct resource sh7785_dmae0_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfc808020, + .end = 0xfc80808f, + .flags = IORESOURCE_MEM, + }, + [1] = { + /* DMARSx */ + .start = 0xfc809000, + .end = 0xfc80900b, + .flags = IORESOURCE_MEM, + }, + { + /* + * Real DMA error vector is 0x6e0, and channel + * vectors are 0x620-0x6c0 + */ + .name = "error_irq", + .start = evt2irq(0x620), + .end = evt2irq(0x620), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct resource sh7785_dmae1_resources[] = { + [0] = { + /* Channel registers and DMAOR */ + .start = 0xfcc08020, + .end = 0xfcc0808f, + .flags = IORESOURCE_MEM, + }, + /* DMAC1 has no DMARS */ + { + /* + * Real DMA error vector is 0x940, and channel + * vectors are 0x880-0x920 + */ + .name = "error_irq", + .start = evt2irq(0x880), + .end = evt2irq(0x880), + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = sh7785_dmae0_resources, + .num_resources = ARRAY_SIZE(sh7785_dmae0_resources), + .dev = { + .platform_data = &dma0_platform_data, + }, +}; + +static struct platform_device dma1_device = { + .name = "sh-dma-engine", + .id = 1, + .resource = sh7785_dmae1_resources, + .num_resources = ARRAY_SIZE(sh7785_dmae1_resources), + .dev = { + .platform_data = &dma1_platform_data, + }, +}; + +static struct platform_device *sh7785_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &tmu0_device, + &tmu1_device, + &dma0_device, + &dma1_device, +}; + +static int __init sh7785_devices_setup(void) +{ + return platform_add_devices(sh7785_devices, + ARRAY_SIZE(sh7785_devices)); +} +arch_initcall(sh7785_devices_setup); + +static struct platform_device *sh7785_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &tmu0_device, + &tmu1_device, +}; + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7785_early_devices, + ARRAY_SIZE(sh7785_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + WDT, TMU0, TMU1, TMU2, TMU2_TICPI, + HUDI, DMAC0, SCIF0, SCIF1, DMAC1, HSPI, + SCIF2, SCIF3, SCIF4, SCIF5, + PCISERR, PCIINTA, PCIINTB, PCIINTC, PCIINTD, PCIC5, + SIOF, MMCIF, DU, GDTA, + TMU3, TMU4, TMU5, + SSI0, SSI1, + HAC0, HAC1, + FLCTL, GPIO, + + /* 
interrupt groups */ + + TMU012, TMU345 +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(WDT, 0x560), + INTC_VECT(TMU0, 0x580), INTC_VECT(TMU1, 0x5a0), + INTC_VECT(TMU2, 0x5c0), INTC_VECT(TMU2_TICPI, 0x5e0), + INTC_VECT(HUDI, 0x600), + INTC_VECT(DMAC0, 0x620), INTC_VECT(DMAC0, 0x640), + INTC_VECT(DMAC0, 0x660), INTC_VECT(DMAC0, 0x680), + INTC_VECT(DMAC0, 0x6a0), INTC_VECT(DMAC0, 0x6c0), + INTC_VECT(DMAC0, 0x6e0), + INTC_VECT(SCIF0, 0x700), INTC_VECT(SCIF0, 0x720), + INTC_VECT(SCIF0, 0x740), INTC_VECT(SCIF0, 0x760), + INTC_VECT(SCIF1, 0x780), INTC_VECT(SCIF1, 0x7a0), + INTC_VECT(SCIF1, 0x7c0), INTC_VECT(SCIF1, 0x7e0), + INTC_VECT(DMAC1, 0x880), INTC_VECT(DMAC1, 0x8a0), + INTC_VECT(DMAC1, 0x8c0), INTC_VECT(DMAC1, 0x8e0), + INTC_VECT(DMAC1, 0x900), INTC_VECT(DMAC1, 0x920), + INTC_VECT(DMAC1, 0x940), + INTC_VECT(HSPI, 0x960), + INTC_VECT(SCIF2, 0x980), INTC_VECT(SCIF3, 0x9a0), + INTC_VECT(SCIF4, 0x9c0), INTC_VECT(SCIF5, 0x9e0), + INTC_VECT(PCISERR, 0xa00), INTC_VECT(PCIINTA, 0xa20), + INTC_VECT(PCIINTB, 0xa40), INTC_VECT(PCIINTC, 0xa60), + INTC_VECT(PCIINTD, 0xa80), INTC_VECT(PCIC5, 0xaa0), + INTC_VECT(PCIC5, 0xac0), INTC_VECT(PCIC5, 0xae0), + INTC_VECT(PCIC5, 0xb00), INTC_VECT(PCIC5, 0xb20), + INTC_VECT(SIOF, 0xc00), + INTC_VECT(MMCIF, 0xd00), INTC_VECT(MMCIF, 0xd20), + INTC_VECT(MMCIF, 0xd40), INTC_VECT(MMCIF, 0xd60), + INTC_VECT(DU, 0xd80), + INTC_VECT(GDTA, 0xda0), INTC_VECT(GDTA, 0xdc0), + INTC_VECT(GDTA, 0xde0), + INTC_VECT(TMU3, 0xe00), INTC_VECT(TMU4, 0xe20), + INTC_VECT(TMU5, 0xe40), + INTC_VECT(SSI0, 0xe80), INTC_VECT(SSI1, 0xea0), + INTC_VECT(HAC0, 0xec0), INTC_VECT(HAC1, 0xee0), + INTC_VECT(FLCTL, 0xf00), INTC_VECT(FLCTL, 0xf20), + INTC_VECT(FLCTL, 0xf40), INTC_VECT(FLCTL, 0xf60), + INTC_VECT(GPIO, 0xf80), INTC_VECT(GPIO, 0xfa0), + INTC_VECT(GPIO, 0xfc0), INTC_VECT(GPIO, 0xfe0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(TMU012, TMU0, TMU1, TMU2, TMU2_TICPI), + INTC_GROUP(TMU345, TMU3, TMU4, TMU5), +}; + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, + + { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ + { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0, + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, + + { 0xffd40038, 0xffd4003c, 32, /* INT2MSKR / INT2MSKCR */ + { 0, 0, 0, GDTA, DU, SSI0, SSI1, GPIO, + FLCTL, MMCIF, HSPI, SIOF, PCIC5, PCIINTD, PCIINTC, PCIINTB, + PCIINTA, PCISERR, HAC1, HAC0, DMAC1, DMAC0, HUDI, WDT, + SCIF5, SCIF4, SCIF3, SCIF2, SCIF1, SCIF0, TMU345, TMU012 } }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xffd40000, 0, 32, 8, /* INT2PRI0 */ { TMU0, TMU1, + TMU2, TMU2_TICPI } }, + { 0xffd40004, 0, 32, 8, /* INT2PRI1 */ { TMU3, TMU4, TMU5, } }, + { 0xffd40008, 0, 32, 8, /* INT2PRI2 */ { SCIF0, SCIF1, + SCIF2, SCIF3 } }, + { 0xffd4000c, 0, 32, 8, /* INT2PRI3 */ { SCIF4, SCIF5, WDT, } }, + { 0xffd40010, 0, 32, 8, /* INT2PRI4 */ { HUDI, DMAC0, DMAC1, } }, + { 0xffd40014, 0, 32, 8, /* INT2PRI5 */ { HAC0, HAC1, + PCISERR, PCIINTA } }, + { 0xffd40018, 0, 32, 8, /* INT2PRI6 */ { PCIINTB, PCIINTC, + PCIINTD, PCIC5 } }, + 
{ 0xffd4001c, 0, 32, 8, /* INT2PRI7 */ { SIOF, HSPI, MMCIF, } }, + { 0xffd40020, 0, 32, 8, /* INT2PRI8 */ { FLCTL, GPIO, SSI0, SSI1, } }, + { 0xffd40024, 0, 32, 8, /* INT2PRI9 */ { DU, GDTA, } }, +}; + +static DECLARE_INTC_DESC(intc_desc, "sh7785", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ + +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), +}; + +static struct intc_vect vectors_irq4567[] __initdata = { + INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), + INTC_VECT(IRQ6, 0x3c0), INTC_VECT(IRQ7, 0x200), +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg ack_registers[] __initdata = { + { 0xffd00024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7785-irq0123", + vectors_irq0123, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7785-irq4567", + vectors_irq4567, NULL, mask_registers, + prio_registers, sense_registers, ack_registers); + +/* External interrupt pins in IRL mode */ + +static struct intc_vect vectors_irl0123[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static struct intc_vect vectors_irl4567[] __initdata = { + INTC_VECT(IRL4_LLLL, 0xb00), INTC_VECT(IRL4_LLLH, 0xb20), + INTC_VECT(IRL4_LLHL, 0xb40), INTC_VECT(IRL4_LLHH, 0xb60), + INTC_VECT(IRL4_LHLL, 0xb80), INTC_VECT(IRL4_LHLH, 0xba0), + INTC_VECT(IRL4_LHHL, 0xbc0), INTC_VECT(IRL4_LHHH, 0xbe0), + INTC_VECT(IRL4_HLLL, 0xc00), INTC_VECT(IRL4_HLLH, 0xc20), + INTC_VECT(IRL4_HLHL, 0xc40), INTC_VECT(IRL4_HLHH, 0xc60), + INTC_VECT(IRL4_HHLL, 0xc80), INTC_VECT(IRL4_HHLH, 0xca0), + INTC_VECT(IRL4_HHHL, 0xcc0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7785-irl0123", vectors_irl0123, + NULL, mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7785-irl4567", vectors_irl4567, + NULL, mask_registers, NULL, NULL); + +#define INTC_ICR0 0xffd00000 +#define INTC_INTMSK0 0xffd00044 +#define INTC_INTMSK1 0xffd00048 +#define INTC_INTMSK2 0xffd40080 +#define INTC_INTMSKCLR1 0xffd00068 +#define INTC_INTMSKCLR2 0xffd40084 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 + IRQ7-4 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + /* disable holding function, ie enable "SH-4 Mode" */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); + + register_intc_controller(&intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ7654: + /* select IRQ mode for IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); + 
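		/*
		 * ICR0 mode-select bits, as used here and in plat_irq_setup()
		 * above: clearing 0x00c00000 leaves both external pin groups in
		 * IRL mode, setting 0x00800000 switches IRL3-0 to individual
		 * IRQ pins, and setting 0x00400000 does the same for IRL7-4.
		 */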
register_intc_controller(&intc_desc_irq4567); + break; + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq0123); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl4567); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl0123); + break; + default: + BUG(); + } +} + +void __init plat_mem_setup(void) +{ + /* Register the URAM space as Node 1 */ + setup_bootmem_node(1, 0xe55f0000, 0xe5610000); +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7786.c b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c new file mode 100644 index 000000000..74620f30b --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-sh7786.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH7786 Setup + * + * Copyright (C) 2009 - 2011 Renesas Solutions Corp. + * Kuninori Morimoto <morimoto.kuninori@renesas.com> + * Paul Mundt <paul.mundt@renesas.com> + * + * Based on SH7785 Setup + * + * Copyright (C) 2007 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/sh_timer.h> +#include <linux/sh_dma.h> +#include <linux/sh_intc.h> +#include <linux/usb/ohci_pdriver.h> +#include <cpu/dma-register.h> +#include <asm/mmzone.h> +#include <asm/platform_early.h> + +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffea0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), + DEFINE_RES_IRQ(evt2irq(0x720)), + DEFINE_RES_IRQ(evt2irq(0x760)), + DEFINE_RES_IRQ(evt2irq(0x740)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +/* + * The rest of these all have multiplexed IRQs + */ +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffeb0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x780)), +}; + +static struct resource scif1_demux_resources[] = { + DEFINE_RES_MEM(0xffeb0000, 0x100), + /* Placeholders, see sh7786_devices_setup() */ + DEFINE_RES_IRQ(0), + DEFINE_RES_IRQ(0), + DEFINE_RES_IRQ(0), + DEFINE_RES_IRQ(0), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype 
= SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffec0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x840)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct plat_sci_port scif3_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif3_resources[] = { + DEFINE_RES_MEM(0xffed0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x860)), +}; + +static struct platform_device scif3_device = { + .name = "sh-sci", + .id = 3, + .resource = scif3_resources, + .num_resources = ARRAY_SIZE(scif3_resources), + .dev = { + .platform_data = &scif3_platform_data, + }, +}; + +static struct plat_sci_port scif4_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif4_resources[] = { + DEFINE_RES_MEM(0xffee0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x880)), +}; + +static struct platform_device scif4_device = { + .name = "sh-sci", + .id = 4, + .resource = scif4_resources, + .num_resources = ARRAY_SIZE(scif4_resources), + .dev = { + .platform_data = &scif4_platform_data, + }, +}; + +static struct plat_sci_port scif5_platform_data = { + .scscr = SCSCR_REIE | SCSCR_CKE1, + .type = PORT_SCIF, + .regtype = SCIx_SH4_SCIF_FIFODATA_REGTYPE, +}; + +static struct resource scif5_resources[] = { + DEFINE_RES_MEM(0xffef0000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x8a0)), +}; + +static struct platform_device scif5_device = { + .name = "sh-sci", + .id = 5, + .resource = scif5_resources, + .num_resources = ARRAY_SIZE(scif5_resources), + .dev = { + .platform_data = &scif5_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffd80000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + +static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffda0000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x480)), + DEFINE_RES_IRQ(evt2irq(0x4a0)), + DEFINE_RES_IRQ(evt2irq(0x4c0)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct sh_timer_config tmu2_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu2_resources[] = { + DEFINE_RES_MEM(0xffdc0000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x7a0)), + DEFINE_RES_IRQ(evt2irq(0x7a0)), + DEFINE_RES_IRQ(evt2irq(0x7a0)), +}; + +static struct platform_device tmu2_device = { + .name = "sh-tmu", + .id = 2, + .dev = { + .platform_data = &tmu2_platform_data, + }, + .resource = tmu2_resources, + .num_resources = ARRAY_SIZE(tmu2_resources), +}; + +static struct sh_timer_config tmu3_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu3_resources[] = { + DEFINE_RES_MEM(0xffde0000, 0x2c), + 
DEFINE_RES_IRQ(evt2irq(0x7c0)), + DEFINE_RES_IRQ(evt2irq(0x7c0)), + DEFINE_RES_IRQ(evt2irq(0x7c0)), +}; + +static struct platform_device tmu3_device = { + .name = "sh-tmu", + .id = 3, + .dev = { + .platform_data = &tmu3_platform_data, + }, + .resource = tmu3_resources, + .num_resources = ARRAY_SIZE(tmu3_resources), +}; + +static const struct sh_dmae_channel dmac0_channels[] = { + { + .offset = 0, + .dmars = 0, + .dmars_bit = 0, + }, { + .offset = 0x10, + .dmars = 0, + .dmars_bit = 8, + }, { + .offset = 0x20, + .dmars = 4, + .dmars_bit = 0, + }, { + .offset = 0x30, + .dmars = 4, + .dmars_bit = 8, + }, { + .offset = 0x50, + .dmars = 8, + .dmars_bit = 0, + }, { + .offset = 0x60, + .dmars = 8, + .dmars_bit = 8, + } +}; + +static const unsigned int ts_shift[] = TS_SHIFT; + +static struct sh_dmae_pdata dma0_platform_data = { + .channel = dmac0_channels, + .channel_num = ARRAY_SIZE(dmac0_channels), + .ts_low_shift = CHCR_TS_LOW_SHIFT, + .ts_low_mask = CHCR_TS_LOW_MASK, + .ts_high_shift = CHCR_TS_HIGH_SHIFT, + .ts_high_mask = CHCR_TS_HIGH_MASK, + .ts_shift = ts_shift, + .ts_shift_num = ARRAY_SIZE(ts_shift), + .dmaor_init = DMAOR_INIT, +}; + +/* Resource order important! */ +static struct resource dmac0_resources[] = { + { + /* Channel registers and DMAOR */ + .start = 0xfe008020, + .end = 0xfe00808f, + .flags = IORESOURCE_MEM, + }, { + /* DMARSx */ + .start = 0xfe009000, + .end = 0xfe00900b, + .flags = IORESOURCE_MEM, + }, { + .name = "error_irq", + .start = evt2irq(0x5c0), + .end = evt2irq(0x5c0), + .flags = IORESOURCE_IRQ, + }, { + /* IRQ for channels 0-5 */ + .start = evt2irq(0x500), + .end = evt2irq(0x5a0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device dma0_device = { + .name = "sh-dma-engine", + .id = 0, + .resource = dmac0_resources, + .num_resources = ARRAY_SIZE(dmac0_resources), + .dev = { + .platform_data = &dma0_platform_data, + }, +}; + +#define USB_EHCI_START 0xffe70000 +#define USB_OHCI_START 0xffe70400 + +static struct resource usb_ehci_resources[] = { + [0] = { + .start = USB_EHCI_START, + .end = USB_EHCI_START + 0x3ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xba0), + .end = evt2irq(0xba0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device usb_ehci_device = { + .name = "sh_ehci", + .id = -1, + .dev = { + .dma_mask = &usb_ehci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + }, + .num_resources = ARRAY_SIZE(usb_ehci_resources), + .resource = usb_ehci_resources, +}; + +static struct resource usb_ohci_resources[] = { + [0] = { + .start = USB_OHCI_START, + .end = USB_OHCI_START + 0x3ff, + .flags = IORESOURCE_MEM, + }, + [1] = { + .start = evt2irq(0xba0), + .end = evt2irq(0xba0), + .flags = IORESOURCE_IRQ, + }, +}; + +static struct usb_ohci_pdata usb_ohci_pdata; + +static struct platform_device usb_ohci_device = { + .name = "ohci-platform", + .id = -1, + .dev = { + .dma_mask = &usb_ohci_device.dev.coherent_dma_mask, + .coherent_dma_mask = DMA_BIT_MASK(32), + .platform_data = &usb_ohci_pdata, + }, + .num_resources = ARRAY_SIZE(usb_ohci_resources), + .resource = usb_ohci_resources, +}; + +static struct platform_device *sh7786_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &scif3_device, + &scif4_device, + &scif5_device, + &tmu0_device, + &tmu1_device, + &tmu2_device, + &tmu3_device, +}; + +static struct platform_device *sh7786_devices[] __initdata = { + &dma0_device, + &usb_ehci_device, + &usb_ohci_device, +}; + +/* + * Please call this function if your platform 
board + * use external clock for USB + * */ +#define USBCTL0 0xffe70858 +#define CLOCK_MODE_MASK 0xffffff7f +#define EXT_CLOCK_MODE 0x00000080 + +void __init sh7786_usb_use_exclock(void) +{ + u32 val = __raw_readl(USBCTL0) & CLOCK_MODE_MASK; + __raw_writel(val | EXT_CLOCK_MODE, USBCTL0); +} + +#define USBINITREG1 0xffe70094 +#define USBINITREG2 0xffe7009c +#define USBINITVAL1 0x00ff0040 +#define USBINITVAL2 0x00000001 + +#define USBPCTL1 0xffe70804 +#define USBST 0xffe70808 +#define PHY_ENB 0x00000001 +#define PLL_ENB 0x00000002 +#define PHY_RST 0x00000004 +#define ACT_PLL_STATUS 0xc0000000 + +static void __init sh7786_usb_setup(void) +{ + int i = 1000000; + + /* + * USB initial settings + * + * The following settings are necessary + * for using the USB modules. + * + * see "USB Initial Settings" for detail + */ + __raw_writel(USBINITVAL1, USBINITREG1); + __raw_writel(USBINITVAL2, USBINITREG2); + + /* + * Set the PHY and PLL enable bit + */ + __raw_writel(PHY_ENB | PLL_ENB, USBPCTL1); + while (i--) { + if (ACT_PLL_STATUS == (__raw_readl(USBST) & ACT_PLL_STATUS)) { + /* Set the PHY RST bit */ + __raw_writel(PHY_ENB | PLL_ENB | PHY_RST, USBPCTL1); + printk(KERN_INFO "sh7786 usb setup done\n"); + break; + } + cpu_relax(); + } +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, + + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, + + IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, + WDT, + TMU0_0, TMU0_1, TMU0_2, TMU0_3, + TMU1_0, TMU1_1, TMU1_2, + DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6, + HUDI1, HUDI0, + DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3, + HPB_0, HPB_1, HPB_2, + SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3, + SCIF1, + TMU2, TMU3, + SCIF2, SCIF3, SCIF4, SCIF5, + Eth_0, Eth_1, + PCIeC0_0, PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1, PCIeC1_2, + USB, + I2C0, I2C1, + DU, + SSI0, SSI1, SSI2, SSI3, + PCIeC2_0, PCIeC2_1, PCIeC2_2, + HAC0, HAC1, + FLCTL, + HSPI, + GPIO0, GPIO1, + Thermal, + INTICI0, INTICI1, INTICI2, INTICI3, + INTICI4, INTICI5, INTICI6, INTICI7, + + /* Muxed sub-events */ + TXI1, BRI1, RXI1, ERI1, +}; + +static struct intc_vect sh7786_vectors[] __initdata = { + INTC_VECT(WDT, 0x3e0), + INTC_VECT(TMU0_0, 0x400), INTC_VECT(TMU0_1, 0x420), + INTC_VECT(TMU0_2, 0x440), INTC_VECT(TMU0_3, 0x460), + INTC_VECT(TMU1_0, 0x480), INTC_VECT(TMU1_1, 0x4a0), + INTC_VECT(TMU1_2, 0x4c0), + INTC_VECT(DMAC0_0, 0x500), INTC_VECT(DMAC0_1, 0x520), + INTC_VECT(DMAC0_2, 0x540), INTC_VECT(DMAC0_3, 0x560), + INTC_VECT(DMAC0_4, 0x580), INTC_VECT(DMAC0_5, 0x5a0), + INTC_VECT(DMAC0_6, 0x5c0), + INTC_VECT(HUDI1, 0x5e0), INTC_VECT(HUDI0, 0x600), + INTC_VECT(DMAC1_0, 0x620), INTC_VECT(DMAC1_1, 0x640), + INTC_VECT(DMAC1_2, 0x660), INTC_VECT(DMAC1_3, 0x680), + INTC_VECT(HPB_0, 0x6a0), INTC_VECT(HPB_1, 0x6c0), + INTC_VECT(HPB_2, 0x6e0), + INTC_VECT(SCIF0_0, 0x700), INTC_VECT(SCIF0_1, 0x720), + INTC_VECT(SCIF0_2, 0x740), INTC_VECT(SCIF0_3, 0x760), + INTC_VECT(SCIF1, 0x780), + INTC_VECT(TMU2, 0x7a0), INTC_VECT(TMU3, 0x7c0), + INTC_VECT(SCIF2, 0x840), INTC_VECT(SCIF3, 0x860), + INTC_VECT(SCIF4, 0x880), INTC_VECT(SCIF5, 0x8a0), + INTC_VECT(Eth_0, 0x8c0), INTC_VECT(Eth_1, 0x8e0), + INTC_VECT(PCIeC0_0, 0xae0), INTC_VECT(PCIeC0_1, 0xb00), + INTC_VECT(PCIeC0_2, 0xb20), + INTC_VECT(PCIeC1_0, 0xb40), INTC_VECT(PCIeC1_1, 
0xb60), + INTC_VECT(PCIeC1_2, 0xb80), + INTC_VECT(USB, 0xba0), + INTC_VECT(I2C0, 0xcc0), INTC_VECT(I2C1, 0xce0), + INTC_VECT(DU, 0xd00), + INTC_VECT(SSI0, 0xd20), INTC_VECT(SSI1, 0xd40), + INTC_VECT(SSI2, 0xd60), INTC_VECT(SSI3, 0xd80), + INTC_VECT(PCIeC2_0, 0xda0), INTC_VECT(PCIeC2_1, 0xdc0), + INTC_VECT(PCIeC2_2, 0xde0), + INTC_VECT(HAC0, 0xe00), INTC_VECT(HAC1, 0xe20), + INTC_VECT(FLCTL, 0xe40), + INTC_VECT(HSPI, 0xe80), + INTC_VECT(GPIO0, 0xea0), INTC_VECT(GPIO1, 0xec0), + INTC_VECT(Thermal, 0xee0), + INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20), + INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60), + INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0), + INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0), +}; + +#define CnINTMSK0 0xfe410030 +#define CnINTMSK1 0xfe410040 +#define CnINTMSKCLR0 0xfe410050 +#define CnINTMSKCLR1 0xfe410060 +#define CnINT2MSKR0 0xfe410a20 +#define CnINT2MSKR1 0xfe410a24 +#define CnINT2MSKR2 0xfe410a28 +#define CnINT2MSKR3 0xfe410a2c +#define CnINT2MSKCR0 0xfe410a30 +#define CnINT2MSKCR1 0xfe410a34 +#define CnINT2MSKCR2 0xfe410a38 +#define CnINT2MSKCR3 0xfe410a3c +#define INTMSK2 0xfe410068 +#define INTMSKCLR2 0xfe41006c + +#define INTDISTCR0 0xfe4100b0 +#define INTDISTCR1 0xfe4100b4 +#define INT2DISTCR0 0xfe410900 +#define INT2DISTCR1 0xfe410904 +#define INT2DISTCR2 0xfe410908 +#define INT2DISTCR3 0xfe41090c + +static struct intc_mask_reg sh7786_mask_registers[] __initdata = { + { CnINTMSK0, CnINTMSKCLR0, 32, + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 }, + INTC_SMP_BALANCING(INTDISTCR0) }, + { INTMSK2, INTMSKCLR2, 32, + { IRL0_LLLL, IRL0_LLLH, IRL0_LLHL, IRL0_LLHH, + IRL0_LHLL, IRL0_LHLH, IRL0_LHHL, IRL0_LHHH, + IRL0_HLLL, IRL0_HLLH, IRL0_HLHL, IRL0_HLHH, + IRL0_HHLL, IRL0_HHLH, IRL0_HHHL, 0, + IRL4_LLLL, IRL4_LLLH, IRL4_LLHL, IRL4_LLHH, + IRL4_LHLL, IRL4_LHLH, IRL4_LHHL, IRL4_LHHH, + IRL4_HLLL, IRL4_HLLH, IRL4_HLHL, IRL4_HLHH, + IRL4_HHLL, IRL4_HHLH, IRL4_HHHL, 0, } }, + { CnINT2MSKR0, CnINT2MSKCR0 , 32, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, WDT }, + INTC_SMP_BALANCING(INT2DISTCR0) }, + { CnINT2MSKR1, CnINT2MSKCR1, 32, + { TMU0_0, TMU0_1, TMU0_2, TMU0_3, TMU1_0, TMU1_1, TMU1_2, 0, + DMAC0_0, DMAC0_1, DMAC0_2, DMAC0_3, DMAC0_4, DMAC0_5, DMAC0_6, + HUDI1, HUDI0, + DMAC1_0, DMAC1_1, DMAC1_2, DMAC1_3, + HPB_0, HPB_1, HPB_2, + SCIF0_0, SCIF0_1, SCIF0_2, SCIF0_3, + SCIF1, + TMU2, TMU3, 0, }, INTC_SMP_BALANCING(INT2DISTCR1) }, + { CnINT2MSKR2, CnINT2MSKCR2, 32, + { 0, 0, SCIF2, SCIF3, SCIF4, SCIF5, + Eth_0, Eth_1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + PCIeC0_0, PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1, PCIeC1_2, + USB, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR2) }, + { CnINT2MSKR3, CnINT2MSKCR3, 32, + { 0, 0, 0, 0, 0, 0, + I2C0, I2C1, + DU, SSI0, SSI1, SSI2, SSI3, + PCIeC2_0, PCIeC2_1, PCIeC2_2, + HAC0, HAC1, + FLCTL, 0, + HSPI, GPIO0, GPIO1, Thermal, + 0, 0, 0, 0, 0, 0, 0, 0 }, INTC_SMP_BALANCING(INT2DISTCR3) }, +}; + +static struct intc_prio_reg sh7786_prio_registers[] __initdata = { + { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, + { 0xfe410800, 0, 32, 8, /* INT2PRI0 */ { 0, 0, 0, WDT } }, + { 0xfe410804, 0, 32, 8, /* INT2PRI1 */ { TMU0_0, TMU0_1, + TMU0_2, TMU0_3 } }, + { 0xfe410808, 0, 32, 8, /* INT2PRI2 */ { TMU1_0, TMU1_1, + TMU1_2, 0 } }, + { 0xfe41080c, 0, 32, 8, /* INT2PRI3 */ { DMAC0_0, DMAC0_1, + DMAC0_2, DMAC0_3 } }, + { 0xfe410810, 0, 32, 8, /* INT2PRI4 */ { DMAC0_4, DMAC0_5, + DMAC0_6, HUDI1 } }, + { 0xfe410814, 0, 
32, 8, /* INT2PRI5 */ { HUDI0, DMAC1_0, + DMAC1_1, DMAC1_2 } }, + { 0xfe410818, 0, 32, 8, /* INT2PRI6 */ { DMAC1_3, HPB_0, + HPB_1, HPB_2 } }, + { 0xfe41081c, 0, 32, 8, /* INT2PRI7 */ { SCIF0_0, SCIF0_1, + SCIF0_2, SCIF0_3 } }, + { 0xfe410820, 0, 32, 8, /* INT2PRI8 */ { SCIF1, TMU2, TMU3, 0 } }, + { 0xfe410824, 0, 32, 8, /* INT2PRI9 */ { 0, 0, SCIF2, SCIF3 } }, + { 0xfe410828, 0, 32, 8, /* INT2PRI10 */ { SCIF4, SCIF5, + Eth_0, Eth_1 } }, + { 0xfe41082c, 0, 32, 8, /* INT2PRI11 */ { 0, 0, 0, 0 } }, + { 0xfe410830, 0, 32, 8, /* INT2PRI12 */ { 0, 0, 0, 0 } }, + { 0xfe410834, 0, 32, 8, /* INT2PRI13 */ { 0, 0, 0, 0 } }, + { 0xfe410838, 0, 32, 8, /* INT2PRI14 */ { 0, 0, 0, PCIeC0_0 } }, + { 0xfe41083c, 0, 32, 8, /* INT2PRI15 */ { PCIeC0_1, PCIeC0_2, + PCIeC1_0, PCIeC1_1 } }, + { 0xfe410840, 0, 32, 8, /* INT2PRI16 */ { PCIeC1_2, USB, 0, 0 } }, + { 0xfe410844, 0, 32, 8, /* INT2PRI17 */ { 0, 0, 0, 0 } }, + { 0xfe410848, 0, 32, 8, /* INT2PRI18 */ { 0, 0, I2C0, I2C1 } }, + { 0xfe41084c, 0, 32, 8, /* INT2PRI19 */ { DU, SSI0, SSI1, SSI2 } }, + { 0xfe410850, 0, 32, 8, /* INT2PRI20 */ { SSI3, PCIeC2_0, + PCIeC2_1, PCIeC2_2 } }, + { 0xfe410854, 0, 32, 8, /* INT2PRI21 */ { HAC0, HAC1, FLCTL, 0 } }, + { 0xfe410858, 0, 32, 8, /* INT2PRI22 */ { HSPI, GPIO0, + GPIO1, Thermal } }, + { 0xfe41085c, 0, 32, 8, /* INT2PRI23 */ { 0, 0, 0, 0 } }, + { 0xfe410860, 0, 32, 8, /* INT2PRI24 */ { 0, 0, 0, 0 } }, + { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */ + { INTICI7, INTICI6, INTICI5, INTICI4, + INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 2) }, +}; + +static struct intc_subgroup sh7786_subgroups[] __initdata = { + { 0xfe410c20, 32, SCIF1, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, TXI1, BRI1, RXI1, ERI1 } }, +}; + +static struct intc_desc sh7786_intc_desc __initdata = { + .name = "sh7786", + .hw = { + .vectors = sh7786_vectors, + .nr_vectors = ARRAY_SIZE(sh7786_vectors), + .mask_regs = sh7786_mask_registers, + .nr_mask_regs = ARRAY_SIZE(sh7786_mask_registers), + .subgroups = sh7786_subgroups, + .nr_subgroups = ARRAY_SIZE(sh7786_subgroups), + .prio_regs = sh7786_prio_registers, + .nr_prio_regs = ARRAY_SIZE(sh7786_prio_registers), + }, +}; + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect vectors_irq0123[] __initdata = { + INTC_VECT(IRQ0, 0x200), INTC_VECT(IRQ1, 0x240), + INTC_VECT(IRQ2, 0x280), INTC_VECT(IRQ3, 0x2c0), +}; + +static struct intc_vect vectors_irq4567[] __initdata = { + INTC_VECT(IRQ4, 0x300), INTC_VECT(IRQ5, 0x340), + INTC_VECT(IRQ6, 0x380), INTC_VECT(IRQ7, 0x3c0), +}; + +static struct intc_sense_reg sh7786_sense_registers[] __initdata = { + { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, + IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static struct intc_mask_reg sh7786_ack_registers[] __initdata = { + { 0xfe410024, 0, 32, /* INTREQ */ + { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, +}; + +static DECLARE_INTC_DESC_ACK(intc_desc_irq0123, "sh7786-irq0123", + vectors_irq0123, NULL, sh7786_mask_registers, + sh7786_prio_registers, sh7786_sense_registers, + sh7786_ack_registers); + +static DECLARE_INTC_DESC_ACK(intc_desc_irq4567, "sh7786-irq4567", + vectors_irq4567, NULL, sh7786_mask_registers, + sh7786_prio_registers, sh7786_sense_registers, + sh7786_ack_registers); + +/* External interrupt pins in IRL mode */ + +static struct intc_vect vectors_irl0123[] __initdata = { + INTC_VECT(IRL0_LLLL, 0x200), INTC_VECT(IRL0_LLLH, 0x220), + INTC_VECT(IRL0_LLHL, 0x240), INTC_VECT(IRL0_LLHH, 0x260), + 
INTC_VECT(IRL0_LHLL, 0x280), INTC_VECT(IRL0_LHLH, 0x2a0), + INTC_VECT(IRL0_LHHL, 0x2c0), INTC_VECT(IRL0_LHHH, 0x2e0), + INTC_VECT(IRL0_HLLL, 0x300), INTC_VECT(IRL0_HLLH, 0x320), + INTC_VECT(IRL0_HLHL, 0x340), INTC_VECT(IRL0_HLHH, 0x360), + INTC_VECT(IRL0_HHLL, 0x380), INTC_VECT(IRL0_HHLH, 0x3a0), + INTC_VECT(IRL0_HHHL, 0x3c0), +}; + +static struct intc_vect vectors_irl4567[] __initdata = { + INTC_VECT(IRL4_LLLL, 0x900), INTC_VECT(IRL4_LLLH, 0x920), + INTC_VECT(IRL4_LLHL, 0x940), INTC_VECT(IRL4_LLHH, 0x960), + INTC_VECT(IRL4_LHLL, 0x980), INTC_VECT(IRL4_LHLH, 0x9a0), + INTC_VECT(IRL4_LHHL, 0x9c0), INTC_VECT(IRL4_LHHH, 0x9e0), + INTC_VECT(IRL4_HLLL, 0xa00), INTC_VECT(IRL4_HLLH, 0xa20), + INTC_VECT(IRL4_HLHL, 0xa40), INTC_VECT(IRL4_HLHH, 0xa60), + INTC_VECT(IRL4_HHLL, 0xa80), INTC_VECT(IRL4_HHLH, 0xaa0), + INTC_VECT(IRL4_HHHL, 0xac0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl0123, "sh7786-irl0123", vectors_irl0123, + NULL, sh7786_mask_registers, NULL, NULL); + +static DECLARE_INTC_DESC(intc_desc_irl4567, "sh7786-irl4567", vectors_irl4567, + NULL, sh7786_mask_registers, NULL, NULL); + +#define INTC_ICR0 0xfe410000 +#define INTC_INTMSK0 CnINTMSK0 +#define INTC_INTMSK1 CnINTMSK1 +#define INTC_INTMSK2 INTMSK2 +#define INTC_INTMSKCLR1 CnINTMSKCLR1 +#define INTC_INTMSKCLR2 INTMSKCLR2 + +void __init plat_irq_setup(void) +{ + /* disable IRQ3-0 + IRQ7-4 */ + __raw_writel(0xff000000, INTC_INTMSK0); + + /* disable IRL3-0 + IRL7-4 */ + __raw_writel(0xc0000000, INTC_INTMSK1); + __raw_writel(0xfffefffe, INTC_INTMSK2); + + /* select IRL mode for IRL3-0 + IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); + + register_intc_controller(&sh7786_intc_desc); +} + +void __init plat_irq_setup_pins(int mode) +{ + switch (mode) { + case IRQ_MODE_IRQ7654: + /* select IRQ mode for IRL7-4 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00400000, INTC_ICR0); + register_intc_controller(&intc_desc_irq4567); + break; + case IRQ_MODE_IRQ3210: + /* select IRQ mode for IRL3-0 */ + __raw_writel(__raw_readl(INTC_ICR0) | 0x00800000, INTC_ICR0); + register_intc_controller(&intc_desc_irq0123); + break; + case IRQ_MODE_IRL7654: + /* enable IRL7-4 but don't provide any masking */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + __raw_writel(0x0000fffe, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL3210: + /* enable IRL0-3 but don't provide any masking */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + __raw_writel(0xfffe0000, INTC_INTMSKCLR2); + break; + case IRQ_MODE_IRL7654_MASK: + /* enable IRL7-4 and mask using cpu intc controller */ + __raw_writel(0x40000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl4567); + break; + case IRQ_MODE_IRL3210_MASK: + /* enable IRL0-3 and mask using cpu intc controller */ + __raw_writel(0x80000000, INTC_INTMSKCLR1); + register_intc_controller(&intc_desc_irl0123); + break; + default: + BUG(); + } +} + +void __init plat_mem_setup(void) +{ +} + +static int __init sh7786_devices_setup(void) +{ + int ret, irq; + + sh7786_usb_setup(); + + /* + * De-mux SCIF1 IRQs if possible + */ + irq = intc_irq_lookup(sh7786_intc_desc.name, TXI1); + if (irq > 0) { + scif1_demux_resources[1].start = + intc_irq_lookup(sh7786_intc_desc.name, ERI1); + scif1_demux_resources[2].start = + intc_irq_lookup(sh7786_intc_desc.name, RXI1); + scif1_demux_resources[3].start = irq; + scif1_demux_resources[4].start = + intc_irq_lookup(sh7786_intc_desc.name, BRI1); + + scif1_device.resource = scif1_demux_resources; + scif1_device.num_resources = ARRAY_SIZE(scif1_demux_resources); + } + + ret = 
platform_add_devices(sh7786_early_devices, + ARRAY_SIZE(sh7786_early_devices)); + if (unlikely(ret != 0)) + return ret; + + return platform_add_devices(sh7786_devices, + ARRAY_SIZE(sh7786_devices)); +} +arch_initcall(sh7786_devices_setup); + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(sh7786_early_devices, + ARRAY_SIZE(sh7786_early_devices)); +} diff --git a/arch/sh/kernel/cpu/sh4a/setup-shx3.c b/arch/sh/kernel/cpu/sh4a/setup-shx3.c new file mode 100644 index 000000000..7014d6d19 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/setup-shx3.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH-X3 Prototype Setup + * + * Copyright (C) 2007 - 2010 Paul Mundt + */ +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/serial.h> +#include <linux/serial_sci.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/sh_timer.h> +#include <linux/sh_intc.h> +#include <cpu/shx3.h> +#include <asm/mmzone.h> +#include <asm/platform_early.h> + +/* + * This intentionally only registers SCIF ports 0, 1, and 3. SCIF 2 + * INTEVT values overlap with the FPU EXPEVT ones, requiring special + * demuxing in the exception dispatch path. + * + * As this overlap is something that never should have made it in to + * silicon in the first place, we just refuse to deal with the port at + * all rather than adding infrastructure to hack around it. + */ +static struct plat_sci_port scif0_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif0_resources[] = { + DEFINE_RES_MEM(0xffc30000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x700)), + DEFINE_RES_IRQ(evt2irq(0x720)), + DEFINE_RES_IRQ(evt2irq(0x760)), + DEFINE_RES_IRQ(evt2irq(0x740)), +}; + +static struct platform_device scif0_device = { + .name = "sh-sci", + .id = 0, + .resource = scif0_resources, + .num_resources = ARRAY_SIZE(scif0_resources), + .dev = { + .platform_data = &scif0_platform_data, + }, +}; + +static struct plat_sci_port scif1_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif1_resources[] = { + DEFINE_RES_MEM(0xffc40000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x780)), + DEFINE_RES_IRQ(evt2irq(0x7a0)), + DEFINE_RES_IRQ(evt2irq(0x7e0)), + DEFINE_RES_IRQ(evt2irq(0x7c0)), +}; + +static struct platform_device scif1_device = { + .name = "sh-sci", + .id = 1, + .resource = scif1_resources, + .num_resources = ARRAY_SIZE(scif1_resources), + .dev = { + .platform_data = &scif1_platform_data, + }, +}; + +static struct plat_sci_port scif2_platform_data = { + .scscr = SCSCR_REIE, + .type = PORT_SCIF, +}; + +static struct resource scif2_resources[] = { + DEFINE_RES_MEM(0xffc60000, 0x100), + DEFINE_RES_IRQ(evt2irq(0x880)), + DEFINE_RES_IRQ(evt2irq(0x8a0)), + DEFINE_RES_IRQ(evt2irq(0x8e0)), + DEFINE_RES_IRQ(evt2irq(0x8c0)), +}; + +static struct platform_device scif2_device = { + .name = "sh-sci", + .id = 2, + .resource = scif2_resources, + .num_resources = ARRAY_SIZE(scif2_resources), + .dev = { + .platform_data = &scif2_platform_data, + }, +}; + +static struct sh_timer_config tmu0_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu0_resources[] = { + DEFINE_RES_MEM(0xffc10000, 0x30), + DEFINE_RES_IRQ(evt2irq(0x400)), + DEFINE_RES_IRQ(evt2irq(0x420)), + DEFINE_RES_IRQ(evt2irq(0x440)), +}; + +static struct platform_device tmu0_device = { + .name = "sh-tmu", + .id = 0, + .dev = { + .platform_data = &tmu0_platform_data, + }, + .resource = tmu0_resources, + .num_resources = ARRAY_SIZE(tmu0_resources), +}; + 
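The SCIF and TMU resources in this file express their interrupt lines as INTEVT exception codes wrapped in evt2irq(). As a rough aside — assuming the conventional SuperH definition of evt2irq() from <asm/irq.h>, irq = (evt >> 5) - 16 — the mapping can be sketched like this (the EXAMPLE_* names are illustrative only, not part of the patch):

	/* Minimal sketch of the assumed INTEVT <-> IRQ conversion. */
	#define EXAMPLE_EVT2IRQ(evt)	(((evt) >> 5) - 16)
	#define EXAMPLE_IRQ2EVT(irq)	(((irq) + 16) << 5)

	/* e.g. the SCIF0 ERI vector above: 0x700 >> 5 = 56, 56 - 16 = IRQ 40 */
	static const int example_scif0_eri_irq = EXAMPLE_EVT2IRQ(0x700);

Under that assumption, DEFINE_RES_IRQ(evt2irq(0x700)) hands the sh-sci driver Linux IRQ 40, and the same arithmetic applies to every DEFINE_RES_IRQ() entry in these resource tables.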
+static struct sh_timer_config tmu1_platform_data = { + .channels_mask = 7, +}; + +static struct resource tmu1_resources[] = { + DEFINE_RES_MEM(0xffc20000, 0x2c), + DEFINE_RES_IRQ(evt2irq(0x460)), + DEFINE_RES_IRQ(evt2irq(0x480)), + DEFINE_RES_IRQ(evt2irq(0x4a0)), +}; + +static struct platform_device tmu1_device = { + .name = "sh-tmu", + .id = 1, + .dev = { + .platform_data = &tmu1_platform_data, + }, + .resource = tmu1_resources, + .num_resources = ARRAY_SIZE(tmu1_resources), +}; + +static struct platform_device *shx3_early_devices[] __initdata = { + &scif0_device, + &scif1_device, + &scif2_device, + &tmu0_device, + &tmu1_device, +}; + +static int __init shx3_devices_setup(void) +{ + return platform_add_devices(shx3_early_devices, + ARRAY_SIZE(shx3_early_devices)); +} +arch_initcall(shx3_devices_setup); + +void __init plat_early_device_setup(void) +{ + sh_early_platform_add_devices(shx3_early_devices, + ARRAY_SIZE(shx3_early_devices)); +} + +enum { + UNUSED = 0, + + /* interrupt sources */ + IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL, + IRQ0, IRQ1, IRQ2, IRQ3, + HUDII, + TMU0, TMU1, TMU2, TMU3, TMU4, TMU5, + PCII0, PCII1, PCII2, PCII3, PCII4, + PCII5, PCII6, PCII7, PCII8, PCII9, + SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI, + SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI, + SCIF2_ERI, SCIF2_RXI, SCIF2_BRI, SCIF2_TXI, + SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI, + DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, DMAC0_DMINT3, + DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE, + DU, + DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, DMAC1_DMINT9, + DMAC1_DMINT10, DMAC1_DMINT11, DMAC1_DMAE, + IIC, VIN0, VIN1, VCORE0, ATAPI, + DTU0, DTU1, DTU2, DTU3, + FE0, FE1, + GPIO0, GPIO1, GPIO2, GPIO3, + PAM, IRM, + INTICI0, INTICI1, INTICI2, INTICI3, + INTICI4, INTICI5, INTICI6, INTICI7, + + /* interrupt groups */ + IRL, PCII56789, SCIF0, SCIF1, SCIF2, SCIF3, + DMAC0, DMAC1, +}; + +static struct intc_vect vectors[] __initdata = { + INTC_VECT(HUDII, 0x3e0), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2, 0x440), INTC_VECT(TMU3, 0x460), + INTC_VECT(TMU4, 0x480), INTC_VECT(TMU5, 0x4a0), + INTC_VECT(PCII0, 0x500), INTC_VECT(PCII1, 0x520), + INTC_VECT(PCII2, 0x540), INTC_VECT(PCII3, 0x560), + INTC_VECT(PCII4, 0x580), INTC_VECT(PCII5, 0x5a0), + INTC_VECT(PCII6, 0x5c0), INTC_VECT(PCII7, 0x5e0), + INTC_VECT(PCII8, 0x600), INTC_VECT(PCII9, 0x620), + INTC_VECT(SCIF0_ERI, 0x700), INTC_VECT(SCIF0_RXI, 0x720), + INTC_VECT(SCIF0_BRI, 0x740), INTC_VECT(SCIF0_TXI, 0x760), + INTC_VECT(SCIF1_ERI, 0x780), INTC_VECT(SCIF1_RXI, 0x7a0), + INTC_VECT(SCIF1_BRI, 0x7c0), INTC_VECT(SCIF1_TXI, 0x7e0), + INTC_VECT(SCIF3_ERI, 0x880), INTC_VECT(SCIF3_RXI, 0x8a0), + INTC_VECT(SCIF3_BRI, 0x8c0), INTC_VECT(SCIF3_TXI, 0x8e0), + INTC_VECT(DMAC0_DMINT0, 0x900), INTC_VECT(DMAC0_DMINT1, 0x920), + INTC_VECT(DMAC0_DMINT2, 0x940), INTC_VECT(DMAC0_DMINT3, 0x960), + INTC_VECT(DMAC0_DMINT4, 0x980), INTC_VECT(DMAC0_DMINT5, 0x9a0), + INTC_VECT(DMAC0_DMAE, 0x9c0), + INTC_VECT(DU, 0x9e0), + INTC_VECT(DMAC1_DMINT6, 0xa00), INTC_VECT(DMAC1_DMINT7, 0xa20), + INTC_VECT(DMAC1_DMINT8, 0xa40), INTC_VECT(DMAC1_DMINT9, 0xa60), + INTC_VECT(DMAC1_DMINT10, 0xa80), INTC_VECT(DMAC1_DMINT11, 0xaa0), + INTC_VECT(DMAC1_DMAE, 0xac0), + INTC_VECT(IIC, 0xae0), + INTC_VECT(VIN0, 0xb00), INTC_VECT(VIN1, 0xb20), + INTC_VECT(VCORE0, 0xb00), INTC_VECT(ATAPI, 0xb60), + INTC_VECT(DTU0, 0xc00), INTC_VECT(DTU0, 0xc20), + INTC_VECT(DTU0, 0xc40), + INTC_VECT(DTU1, 0xc60), 
INTC_VECT(DTU1, 0xc80), + INTC_VECT(DTU1, 0xca0), + INTC_VECT(DTU2, 0xcc0), INTC_VECT(DTU2, 0xce0), + INTC_VECT(DTU2, 0xd00), + INTC_VECT(DTU3, 0xd20), INTC_VECT(DTU3, 0xd40), + INTC_VECT(DTU3, 0xd60), + INTC_VECT(FE0, 0xe00), INTC_VECT(FE1, 0xe20), + INTC_VECT(GPIO0, 0xe40), INTC_VECT(GPIO1, 0xe60), + INTC_VECT(GPIO2, 0xe80), INTC_VECT(GPIO3, 0xea0), + INTC_VECT(PAM, 0xec0), INTC_VECT(IRM, 0xee0), + INTC_VECT(INTICI0, 0xf00), INTC_VECT(INTICI1, 0xf20), + INTC_VECT(INTICI2, 0xf40), INTC_VECT(INTICI3, 0xf60), + INTC_VECT(INTICI4, 0xf80), INTC_VECT(INTICI5, 0xfa0), + INTC_VECT(INTICI6, 0xfc0), INTC_VECT(INTICI7, 0xfe0), +}; + +static struct intc_group groups[] __initdata = { + INTC_GROUP(IRL, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, + IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, + IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, + IRL_HHLL, IRL_HHLH, IRL_HHHL), + INTC_GROUP(PCII56789, PCII5, PCII6, PCII7, PCII8, PCII9), + INTC_GROUP(SCIF0, SCIF0_ERI, SCIF0_RXI, SCIF0_BRI, SCIF0_TXI), + INTC_GROUP(SCIF1, SCIF1_ERI, SCIF1_RXI, SCIF1_BRI, SCIF1_TXI), + INTC_GROUP(SCIF3, SCIF3_ERI, SCIF3_RXI, SCIF3_BRI, SCIF3_TXI), + INTC_GROUP(DMAC0, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, + DMAC0_DMINT3, DMAC0_DMINT4, DMAC0_DMINT5, DMAC0_DMAE), + INTC_GROUP(DMAC1, DMAC1_DMINT6, DMAC1_DMINT7, DMAC1_DMINT8, + DMAC1_DMINT9, DMAC1_DMINT10, DMAC1_DMINT11), +}; + +#define INT2DISTCR0 0xfe4108a0 +#define INT2DISTCR1 0xfe4108a4 +#define INT2DISTCR2 0xfe4108a8 + +static struct intc_mask_reg mask_registers[] __initdata = { + { 0xfe410030, 0xfe410050, 32, /* CnINTMSK0 / CnINTMSKCLR0 */ + { IRQ0, IRQ1, IRQ2, IRQ3 } }, + { 0xfe410040, 0xfe410060, 32, /* CnINTMSK1 / CnINTMSKCLR1 */ + { IRL } }, + { 0xfe410820, 0xfe410850, 32, /* CnINT2MSK0 / CnINT2MSKCLR0 */ + { FE1, FE0, 0, ATAPI, VCORE0, VIN1, VIN0, IIC, + DU, GPIO3, GPIO2, GPIO1, GPIO0, PAM, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, /* HUDI bits ignored */ + 0, TMU5, TMU4, TMU3, TMU2, TMU1, TMU0, 0, }, + INTC_SMP_BALANCING(INT2DISTCR0) }, + { 0xfe410830, 0xfe410860, 32, /* CnINT2MSK1 / CnINT2MSKCLR1 */ + { 0, 0, 0, 0, DTU3, DTU2, DTU1, DTU0, /* IRM bits ignored */ + PCII9, PCII8, PCII7, PCII6, PCII5, PCII4, PCII3, PCII2, + PCII1, PCII0, DMAC1_DMAE, DMAC1_DMINT11, + DMAC1_DMINT10, DMAC1_DMINT9, DMAC1_DMINT8, DMAC1_DMINT7, + DMAC1_DMINT6, DMAC0_DMAE, DMAC0_DMINT5, DMAC0_DMINT4, + DMAC0_DMINT3, DMAC0_DMINT2, DMAC0_DMINT1, DMAC0_DMINT0 }, + INTC_SMP_BALANCING(INT2DISTCR1) }, + { 0xfe410840, 0xfe410870, 32, /* CnINT2MSK2 / CnINT2MSKCLR2 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + SCIF3_TXI, SCIF3_BRI, SCIF3_RXI, SCIF3_ERI, + SCIF2_TXI, SCIF2_BRI, SCIF2_RXI, SCIF2_ERI, + SCIF1_TXI, SCIF1_BRI, SCIF1_RXI, SCIF1_ERI, + SCIF0_TXI, SCIF0_BRI, SCIF0_RXI, SCIF0_ERI }, + INTC_SMP_BALANCING(INT2DISTCR2) }, +}; + +static struct intc_prio_reg prio_registers[] __initdata = { + { 0xfe410010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, + + { 0xfe410800, 0, 32, 4, /* INT2PRI0 */ { 0, HUDII, TMU5, TMU4, + TMU3, TMU2, TMU1, TMU0 } }, + { 0xfe410804, 0, 32, 4, /* INT2PRI1 */ { DTU3, DTU2, DTU1, DTU0, + SCIF3, SCIF2, + SCIF1, SCIF0 } }, + { 0xfe410808, 0, 32, 4, /* INT2PRI2 */ { DMAC1, DMAC0, + PCII56789, PCII4, + PCII3, PCII2, + PCII1, PCII0 } }, + { 0xfe41080c, 0, 32, 4, /* INT2PRI3 */ { FE1, FE0, ATAPI, VCORE0, + VIN1, VIN0, IIC, DU} }, + { 0xfe410810, 0, 32, 4, /* INT2PRI4 */ { 0, 0, PAM, GPIO3, + GPIO2, GPIO1, GPIO0, IRM } }, + { 0xfe410090, 0xfe4100a0, 32, 4, /* CnICIPRI / CnICIPRICLR */ + { INTICI7, INTICI6, INTICI5, INTICI4, + INTICI3, INTICI2, INTICI1, INTICI0 }, INTC_SMP(4, 4) }, +}; + 
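For readers decoding the tables above: per the sh_intc API, each intc_prio_reg entry is laid out as { set_reg, clr_reg, reg_width, field_width, { enum_ids... } }, and the listed sources appear to be packed into field_width-sized priority slots starting at the most significant bits of the register. A hedged sketch of that assumption, using illustrative names:

	/*
	 * Assumed packing: enum_ids[field] occupies bits
	 * [reg_width - field_width*field - 1 .. reg_width - field_width*(field+1)].
	 * For the INTPRI entry at 0xfe410010 (32-bit register, 4-bit fields) that
	 * would put IRQ0 in bits 31..28, IRQ1 in bits 27..24, and so on.
	 */
	static inline unsigned int example_prio_shift(unsigned int reg_width,
						      unsigned int field_width,
						      unsigned int field)
	{
		return reg_width - field_width * (field + 1);
	}

The mask register tables follow the same idea with one bit per source, which is why unused bit positions are padded with 0 entries.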
+static DECLARE_INTC_DESC(intc_desc, "shx3", vectors, groups, + mask_registers, prio_registers, NULL); + +/* Support for external interrupt pins in IRQ mode */ +static struct intc_vect vectors_irq[] __initdata = { + INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), + INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), +}; + +static struct intc_sense_reg sense_registers[] __initdata = { + { 0xfe41001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, +}; + +static DECLARE_INTC_DESC(intc_desc_irq, "shx3-irq", vectors_irq, groups, + mask_registers, prio_registers, sense_registers); + +/* External interrupt pins in IRL mode */ +static struct intc_vect vectors_irl[] __initdata = { + INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), + INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), + INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), + INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), + INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), + INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), + INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), + INTC_VECT(IRL_HHHL, 0x3c0), +}; + +static DECLARE_INTC_DESC(intc_desc_irl, "shx3-irl", vectors_irl, groups, + mask_registers, prio_registers, NULL); + +void __init plat_irq_setup_pins(int mode) +{ + int ret = 0; + + switch (mode) { + case IRQ_MODE_IRQ: + ret |= gpio_request(GPIO_FN_IRQ3, intc_desc_irq.name); + ret |= gpio_request(GPIO_FN_IRQ2, intc_desc_irq.name); + ret |= gpio_request(GPIO_FN_IRQ1, intc_desc_irq.name); + ret |= gpio_request(GPIO_FN_IRQ0, intc_desc_irq.name); + + if (unlikely(ret)) { + pr_err("Failed to set IRQ mode\n"); + return; + } + + register_intc_controller(&intc_desc_irq); + break; + case IRQ_MODE_IRL3210: + ret |= gpio_request(GPIO_FN_IRL3, intc_desc_irl.name); + ret |= gpio_request(GPIO_FN_IRL2, intc_desc_irl.name); + ret |= gpio_request(GPIO_FN_IRL1, intc_desc_irl.name); + ret |= gpio_request(GPIO_FN_IRL0, intc_desc_irl.name); + + if (unlikely(ret)) { + pr_err("Failed to set IRL mode\n"); + return; + } + + register_intc_controller(&intc_desc_irl); + break; + default: + BUG(); + } +} + +void __init plat_irq_setup(void) +{ + register_intc_controller(&intc_desc); +} + +void __init plat_mem_setup(void) +{ + unsigned int nid = 1; + + /* Register CPU#0 URAM space as Node 1 */ + setup_bootmem_node(nid++, 0x145f0000, 0x14610000); /* CPU0 */ + +#if 0 + /* XXX: Not yet.. 
*/ + setup_bootmem_node(nid++, 0x14df0000, 0x14e10000); /* CPU1 */ + setup_bootmem_node(nid++, 0x155f0000, 0x15610000); /* CPU2 */ + setup_bootmem_node(nid++, 0x15df0000, 0x15e10000); /* CPU3 */ +#endif + + setup_bootmem_node(nid++, 0x16000000, 0x16020000); /* CSM */ +} diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c new file mode 100644 index 000000000..1261dc7b8 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SH-X3 SMP + * + * Copyright (C) 2007 - 2010 Paul Mundt + * Copyright (C) 2007 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/cpumask.h> +#include <linux/smp.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/cpu.h> +#include <asm/sections.h> + +#define STBCR_REG(phys_id) (0xfe400004 | (phys_id << 12)) +#define RESET_REG(phys_id) (0xfe400008 | (phys_id << 12)) + +#define STBCR_MSTP 0x00000001 +#define STBCR_RESET 0x00000002 +#define STBCR_SLEEP 0x00000004 +#define STBCR_LTSLP 0x80000000 + +static irqreturn_t ipi_interrupt_handler(int irq, void *arg) +{ + unsigned int message = (unsigned int)(long)arg; + unsigned int cpu = hard_smp_processor_id(); + unsigned int offs = 4 * cpu; + unsigned int x; + + x = __raw_readl(0xfe410070 + offs); /* C0INITICI..CnINTICI */ + x &= (1 << (message << 2)); + __raw_writel(x, 0xfe410080 + offs); /* C0INTICICLR..CnINTICICLR */ + + smp_message_recv(message); + + return IRQ_HANDLED; +} + +static void shx3_smp_setup(void) +{ + unsigned int cpu = 0; + int i, num; + + init_cpu_possible(cpumask_of(cpu)); + + /* Enable light sleep for the boot CPU */ + __raw_writel(__raw_readl(STBCR_REG(cpu)) | STBCR_LTSLP, STBCR_REG(cpu)); + + __cpu_number_map[0] = 0; + __cpu_logical_map[0] = 0; + + /* + * Do this stupidly for now.. we don't have an easy way to probe + * for the total number of cores. 
+ */ + for (i = 1, num = 0; i < NR_CPUS; i++) { + set_cpu_possible(i, true); + __cpu_number_map[i] = ++num; + __cpu_logical_map[num] = i; + } + + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); +} + +static void shx3_prepare_cpus(unsigned int max_cpus) +{ + int i; + + BUILD_BUG_ON(SMP_MSG_NR >= 8); + + for (i = 0; i < SMP_MSG_NR; i++) + if (request_irq(104 + i, ipi_interrupt_handler, + IRQF_PERCPU, "IPI", (void *)(long)i)) + pr_err("Failed to request irq %d\n", i); + + for (i = 0; i < max_cpus; i++) + set_cpu_present(i, true); +} + +static void shx3_start_cpu(unsigned int cpu, unsigned long entry_point) +{ + if (__in_29bit_mode()) + __raw_writel(entry_point, RESET_REG(cpu)); + else + __raw_writel(virt_to_phys(entry_point), RESET_REG(cpu)); + + if (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) + __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); + + while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) + cpu_relax(); + + /* Start up secondary processor by sending a reset */ + __raw_writel(STBCR_RESET | STBCR_LTSLP, STBCR_REG(cpu)); +} + +static unsigned int shx3_smp_processor_id(void) +{ + return __raw_readl(0xff000048); /* CPIDR */ +} + +static void shx3_send_ipi(unsigned int cpu, unsigned int message) +{ + unsigned long addr = 0xfe410070 + (cpu * 4); + + BUG_ON(cpu >= 4); + + __raw_writel(1 << (message << 2), addr); /* C0INTICI..CnINTICI */ +} + +static void shx3_update_boot_vector(unsigned int cpu) +{ + __raw_writel(STBCR_MSTP, STBCR_REG(cpu)); + while (!(__raw_readl(STBCR_REG(cpu)) & STBCR_MSTP)) + cpu_relax(); + __raw_writel(STBCR_RESET, STBCR_REG(cpu)); +} + +static int shx3_cpu_prepare(unsigned int cpu) +{ + shx3_update_boot_vector(cpu); + return 0; +} + +static int register_shx3_cpu_notifier(void) +{ + cpuhp_setup_state_nocalls(CPUHP_SH_SH3X_PREPARE, "sh/shx3:prepare", + shx3_cpu_prepare, NULL); + return 0; +} +late_initcall(register_shx3_cpu_notifier); + +struct plat_smp_ops shx3_smp_ops = { + .smp_setup = shx3_smp_setup, + .prepare_cpus = shx3_prepare_cpus, + .start_cpu = shx3_start_cpu, + .smp_processor_id = shx3_smp_processor_id, + .send_ipi = shx3_send_ipi, + .cpu_die = native_cpu_die, + .cpu_disable = native_cpu_disable, + .play_dead = native_play_dead, +}; diff --git a/arch/sh/kernel/cpu/sh4a/ubc.c b/arch/sh/kernel/cpu/sh4a/ubc.c new file mode 100644 index 000000000..25eacd9c4 --- /dev/null +++ b/arch/sh/kernel/cpu/sh4a/ubc.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/sh4a/ubc.c + * + * On-chip UBC support for SH-4A CPUs. 
+ * + * Copyright (C) 2009 - 2010 Paul Mundt + */ +#include <linux/init.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <asm/hw_breakpoint.h> + +#define UBC_CBR(idx) (0xff200000 + (0x20 * idx)) +#define UBC_CRR(idx) (0xff200004 + (0x20 * idx)) +#define UBC_CAR(idx) (0xff200008 + (0x20 * idx)) +#define UBC_CAMR(idx) (0xff20000c + (0x20 * idx)) + +#define UBC_CCMFR 0xff200600 +#define UBC_CBCR 0xff200620 + +/* CRR */ +#define UBC_CRR_PCB (1 << 1) +#define UBC_CRR_BIE (1 << 0) + +/* CBR */ +#define UBC_CBR_CE (1 << 0) + +static struct sh_ubc sh4a_ubc; + +static void sh4a_ubc_enable(struct arch_hw_breakpoint *info, int idx) +{ + __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR(idx)); + __raw_writel(info->address, UBC_CAR(idx)); +} + +static void sh4a_ubc_disable(struct arch_hw_breakpoint *info, int idx) +{ + __raw_writel(0, UBC_CBR(idx)); + __raw_writel(0, UBC_CAR(idx)); +} + +static void sh4a_ubc_enable_all(unsigned long mask) +{ + int i; + + for (i = 0; i < sh4a_ubc.num_events; i++) + if (mask & (1 << i)) + __raw_writel(__raw_readl(UBC_CBR(i)) | UBC_CBR_CE, + UBC_CBR(i)); +} + +static void sh4a_ubc_disable_all(void) +{ + int i; + + for (i = 0; i < sh4a_ubc.num_events; i++) + __raw_writel(__raw_readl(UBC_CBR(i)) & ~UBC_CBR_CE, + UBC_CBR(i)); +} + +static unsigned long sh4a_ubc_active_mask(void) +{ + unsigned long active = 0; + int i; + + for (i = 0; i < sh4a_ubc.num_events; i++) + if (__raw_readl(UBC_CBR(i)) & UBC_CBR_CE) + active |= (1 << i); + + return active; +} + +static unsigned long sh4a_ubc_triggered_mask(void) +{ + return __raw_readl(UBC_CCMFR); +} + +static void sh4a_ubc_clear_triggered_mask(unsigned long mask) +{ + __raw_writel(__raw_readl(UBC_CCMFR) & ~mask, UBC_CCMFR); +} + +static struct sh_ubc sh4a_ubc = { + .name = "SH-4A", + .num_events = 2, + .trap_nr = 0x1e0, + .enable = sh4a_ubc_enable, + .disable = sh4a_ubc_disable, + .enable_all = sh4a_ubc_enable_all, + .disable_all = sh4a_ubc_disable_all, + .active_mask = sh4a_ubc_active_mask, + .triggered_mask = sh4a_ubc_triggered_mask, + .clear_triggered_mask = sh4a_ubc_clear_triggered_mask, +}; + +static int __init sh4a_ubc_init(void) +{ + struct clk *ubc_iclk = clk_get(NULL, "ubc0"); + int i; + + /* + * The UBC MSTP bit is optional, as not all platforms will have + * it. Just ignore it if we can't find it. + */ + if (IS_ERR(ubc_iclk)) + ubc_iclk = NULL; + + clk_enable(ubc_iclk); + + __raw_writel(0, UBC_CBCR); + + for (i = 0; i < sh4a_ubc.num_events; i++) { + __raw_writel(0, UBC_CAMR(i)); + __raw_writel(0, UBC_CBR(i)); + + __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR(i)); + + /* dummy read for write posting */ + (void)__raw_readl(UBC_CRR(i)); + } + + clk_disable(ubc_iclk); + + sh4a_ubc.clk = ubc_iclk; + + return register_sh_ubc(&sh4a_ubc); +} +arch_initcall(sh4a_ubc_init); diff --git a/arch/sh/kernel/cpu/shmobile/Makefile b/arch/sh/kernel/cpu/shmobile/Makefile new file mode 100644 index 000000000..7581d5f03 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux/SuperH SH-Mobile backends. 
+# + +# Power Management & Sleep mode +obj-$(CONFIG_PM) += pm.o sleep.o +obj-$(CONFIG_CPU_IDLE) += cpuidle.o diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c new file mode 100644 index 000000000..b0f9c8f8f --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/shmobile/cpuidle.c + * + * Cpuidle support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/suspend.h> +#include <linux/cpuidle.h> +#include <linux/export.h> +#include <asm/suspend.h> +#include <linux/uaccess.h> + +static unsigned long cpuidle_mode[] = { + SUSP_SH_SLEEP, /* regular sleep mode */ + SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ + SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */ +}; + +static int cpuidle_sleep_enter(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + unsigned long allowed_mode = SUSP_SH_SLEEP; + int requested_state = index; + int allowed_state; + int k; + + /* convert allowed mode to allowed state */ + for (k = ARRAY_SIZE(cpuidle_mode) - 1; k > 0; k--) + if (cpuidle_mode[k] == allowed_mode) + break; + + allowed_state = k; + + /* take the following into account for sleep mode selection: + * - allowed_state: best mode allowed by hardware (clock deps) + * - requested_state: best mode allowed by software (latencies) + */ + k = min_t(int, allowed_state, requested_state); + + sh_mobile_call_standby(cpuidle_mode[k]); + + return k; +} + +static struct cpuidle_driver cpuidle_driver = { + .name = "sh_idle", + .owner = THIS_MODULE, + .states = { + { + .exit_latency = 1, + .target_residency = 1 * 2, + .power_usage = 3, + .enter = cpuidle_sleep_enter, + .name = "C1", + .desc = "SuperH Sleep Mode", + }, + { + .exit_latency = 100, + .target_residency = 1 * 2, + .power_usage = 1, + .enter = cpuidle_sleep_enter, + .name = "C2", + .desc = "SuperH Sleep Mode [SF]", + .flags = CPUIDLE_FLAG_UNUSABLE, + }, + { + .exit_latency = 2300, + .target_residency = 1 * 2, + .power_usage = 1, + .enter = cpuidle_sleep_enter, + .name = "C3", + .desc = "SuperH Mobile Standby Mode [SF]", + .flags = CPUIDLE_FLAG_UNUSABLE, + }, + }, + .safe_state_index = 0, + .state_count = 3, +}; + +int __init sh_mobile_setup_cpuidle(void) +{ + if (sh_mobile_sleep_supported & SUSP_SH_SF) + cpuidle_driver.states[1].flags = CPUIDLE_FLAG_NONE; + + if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) + cpuidle_driver.states[2].flags = CPUIDLE_FLAG_NONE; + + return cpuidle_register(&cpuidle_driver, NULL); +} diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c new file mode 100644 index 000000000..ca9945f51 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/cpu/shmobile/pm.c + * + * Power management support code for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/suspend.h> +#include <asm/suspend.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> +#include <asm/bl_bit.h> + +/* + * Notifier lists for pre/post sleep notification + */ +ATOMIC_NOTIFIER_HEAD(sh_mobile_pre_sleep_notifier_list); +ATOMIC_NOTIFIER_HEAD(sh_mobile_post_sleep_notifier_list); + +/* + * Sleep modes available on SuperH Mobile: + * + * Sleep mode is just plain "sleep" instruction + * Sleep 
Self-Refresh mode is above plus RAM put in Self-Refresh + * Standby Self-Refresh mode is above plus stopped clocks + */ +#define SUSP_MODE_SLEEP (SUSP_SH_SLEEP) +#define SUSP_MODE_SLEEP_SF (SUSP_SH_SLEEP | SUSP_SH_SF) +#define SUSP_MODE_STANDBY_SF (SUSP_SH_STANDBY | SUSP_SH_SF) +#define SUSP_MODE_RSTANDBY_SF \ + (SUSP_SH_RSTANDBY | SUSP_SH_MMU | SUSP_SH_REGS | SUSP_SH_SF) + /* + * U-standby mode is unsupported since it needs bootloader hacks + */ + +#ifdef CONFIG_CPU_SUBTYPE_SH7724 +#define RAM_BASE 0xfd800000 /* RSMEM */ +#else +#define RAM_BASE 0xe5200000 /* ILRAM */ +#endif + +void sh_mobile_call_standby(unsigned long mode) +{ + void *onchip_mem = (void *)RAM_BASE; + struct sh_sleep_data *sdp = onchip_mem; + void (*standby_onchip_mem)(unsigned long, unsigned long); + + /* code located directly after data structure */ + standby_onchip_mem = (void *)(sdp + 1); + + atomic_notifier_call_chain(&sh_mobile_pre_sleep_notifier_list, + mode, NULL); + + /* flush the caches if MMU flag is set */ + if (mode & SUSP_SH_MMU) + flush_cache_all(); + + /* Let assembly snippet in on-chip memory handle the rest */ + standby_onchip_mem(mode, RAM_BASE); + + atomic_notifier_call_chain(&sh_mobile_post_sleep_notifier_list, + mode, NULL); +} + +extern char sh_mobile_sleep_enter_start; +extern char sh_mobile_sleep_enter_end; + +extern char sh_mobile_sleep_resume_start; +extern char sh_mobile_sleep_resume_end; + +unsigned long sh_mobile_sleep_supported = SUSP_SH_SLEEP; + +void sh_mobile_register_self_refresh(unsigned long flags, + void *pre_start, void *pre_end, + void *post_start, void *post_end) +{ + void *onchip_mem = (void *)RAM_BASE; + void *vp; + struct sh_sleep_data *sdp; + int n; + + /* part 0: data area */ + sdp = onchip_mem; + sdp->addr.stbcr = 0xa4150020; /* STBCR */ + sdp->addr.bar = 0xa4150040; /* BAR */ + sdp->addr.pteh = 0xff000000; /* PTEH */ + sdp->addr.ptel = 0xff000004; /* PTEL */ + sdp->addr.ttb = 0xff000008; /* TTB */ + sdp->addr.tea = 0xff00000c; /* TEA */ + sdp->addr.mmucr = 0xff000010; /* MMUCR */ + sdp->addr.ptea = 0xff000034; /* PTEA */ + sdp->addr.pascr = 0xff000070; /* PASCR */ + sdp->addr.irmcr = 0xff000078; /* IRMCR */ + sdp->addr.ccr = 0xff00001c; /* CCR */ + sdp->addr.ramcr = 0xff000074; /* RAMCR */ + vp = sdp + 1; + + /* part 1: common code to enter sleep mode */ + n = &sh_mobile_sleep_enter_end - &sh_mobile_sleep_enter_start; + memcpy(vp, &sh_mobile_sleep_enter_start, n); + vp += roundup(n, 4); + + /* part 2: board specific code to enter self-refresh mode */ + n = pre_end - pre_start; + memcpy(vp, pre_start, n); + sdp->sf_pre = (unsigned long)vp; + vp += roundup(n, 4); + + /* part 3: board specific code to resume from self-refresh mode */ + n = post_end - post_start; + memcpy(vp, post_start, n); + sdp->sf_post = (unsigned long)vp; + vp += roundup(n, 4); + + /* part 4: common code to resume from sleep mode */ + WARN_ON(vp > (onchip_mem + 0x600)); + vp = onchip_mem + 0x600; /* located at interrupt vector */ + n = &sh_mobile_sleep_resume_end - &sh_mobile_sleep_resume_start; + memcpy(vp, &sh_mobile_sleep_resume_start, n); + sdp->resume = (unsigned long)vp; + + sh_mobile_sleep_supported |= flags; +} + +static int sh_pm_enter(suspend_state_t state) +{ + if (!(sh_mobile_sleep_supported & SUSP_MODE_STANDBY_SF)) + return -ENXIO; + + local_irq_disable(); + set_bl_bit(); + sh_mobile_call_standby(SUSP_MODE_STANDBY_SF); + local_irq_disable(); + clear_bl_bit(); + return 0; +} + +static const struct platform_suspend_ops sh_pm_ops = { + .enter = sh_pm_enter, + .valid = suspend_valid_only_mem, +}; 
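The registration helper above is normally invoked from board support code, which supplies its own SDRAM self-refresh enter/leave snippets (typically assembled in the board's sdram.S) together with the flags describing which sleep modes those snippets make safe. A rough usage sketch, with purely illustrative symbol and function names:

	/* Hypothetical board code; the *_start/*_end markers would bracket the
	 * board's self-refresh snippets copied into on-chip RAM by the code above. */
	extern char example_sdram_enter_start, example_sdram_enter_end;
	extern char example_sdram_leave_start, example_sdram_leave_end;

	static void __init example_board_resume_init(void)
	{
		sh_mobile_register_self_refresh(SUSP_SH_STANDBY | SUSP_SH_SF,
						&example_sdram_enter_start,
						&example_sdram_enter_end,
						&example_sdram_leave_start,
						&example_sdram_leave_end);
	}

Once a board has registered snippets covering SUSP_MODE_STANDBY_SF, sh_pm_enter() above no longer returns -ENXIO and suspend-to-RAM becomes usable.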
+ +static int __init sh_pm_init(void) +{ + suspend_set_ops(&sh_pm_ops); + return sh_mobile_setup_cpuidle(); +} + +late_initcall(sh_pm_init); diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S new file mode 100644 index 000000000..f928c0315 --- /dev/null +++ b/arch/sh/kernel/cpu/shmobile/sleep.S @@ -0,0 +1,402 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/cpu/sh4a/sleep-sh_mobile.S + * + * Sleep mode and Standby modes support for SuperH Mobile + * + * Copyright (C) 2009 Magnus Damm + */ + +#include <linux/sys.h> +#include <linux/errno.h> +#include <linux/linkage.h> +#include <asm/asm-offsets.h> +#include <asm/suspend.h> + +/* + * Kernel mode register usage, see entry.S: + * k0 scratch + * k1 scratch + */ +#define k0 r0 +#define k1 r1 + +/* manage self-refresh and enter standby mode. must be self-contained. + * this code will be copied to on-chip memory and executed from there. + */ + .balign 4 +ENTRY(sh_mobile_sleep_enter_start) + + /* save mode flags */ + mov.l r4, @(SH_SLEEP_MODE, r5) + + /* save original vbr */ + stc vbr, r0 + mov.l r0, @(SH_SLEEP_VBR, r5) + + /* point vbr to our on-chip memory page */ + ldc r5, vbr + + /* save return address */ + sts pr, r0 + mov.l r0, @(SH_SLEEP_SPC, r5) + + /* save sr */ + stc sr, r0 + mov.l r0, @(SH_SLEEP_SR, r5) + + /* save general purpose registers to stack if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_REGS, r0 + bt skip_regs_save + + sts.l pr, @-r15 + mov.l r14, @-r15 + mov.l r13, @-r15 + mov.l r12, @-r15 + mov.l r11, @-r15 + mov.l r10, @-r15 + mov.l r9, @-r15 + mov.l r8, @-r15 + + /* make sure bank0 is selected, save low registers */ + mov.l rb_bit, r9 + not r9, r9 + bsr set_sr + mov #0, r10 + + bsr save_low_regs + nop + + /* switch to bank 1, save low registers */ + mov.l rb_bit, r10 + bsr set_sr + mov #-1, r9 + + bsr save_low_regs + nop + + /* switch back to bank 0 */ + mov.l rb_bit, r9 + not r9, r9 + bsr set_sr + mov #0, r10 + +skip_regs_save: + + /* save sp, also set to internal ram */ + mov.l r15, @(SH_SLEEP_SP, r5) + mov r5, r15 + + /* save stbcr */ + bsr save_register + mov #SH_SLEEP_REG_STBCR, r0 + + /* save mmu and cache context if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_MMU, r0 + bt skip_mmu_save_disable + + /* save mmu state */ + bsr save_register + mov #SH_SLEEP_REG_PTEH, r0 + + bsr save_register + mov #SH_SLEEP_REG_PTEL, r0 + + bsr save_register + mov #SH_SLEEP_REG_TTB, r0 + + bsr save_register + mov #SH_SLEEP_REG_TEA, r0 + + bsr save_register + mov #SH_SLEEP_REG_MMUCR, r0 + + bsr save_register + mov #SH_SLEEP_REG_PTEA, r0 + + bsr save_register + mov #SH_SLEEP_REG_PASCR, r0 + + bsr save_register + mov #SH_SLEEP_REG_IRMCR, r0 + + /* invalidate TLBs and disable the MMU */ + bsr get_register + mov #SH_SLEEP_REG_MMUCR, r0 + mov #4, r1 + mov.l r1, @r0 + icbi @r0 + + /* save cache registers and disable caches */ + bsr save_register + mov #SH_SLEEP_REG_CCR, r0 + + bsr save_register + mov #SH_SLEEP_REG_RAMCR, r0 + + bsr get_register + mov #SH_SLEEP_REG_CCR, r0 + mov #0, r1 + mov.l r1, @r0 + icbi @r0 + +skip_mmu_save_disable: + /* call self-refresh entering code if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_SF, r0 + bt skip_set_sf + + mov.l @(SH_SLEEP_SF_PRE, r5), r0 + jsr @r0 + nop + +skip_set_sf: + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_STANDBY, r0 + bt test_rstandby + + /* set mode to "software standby mode" */ + bra do_sleep + mov #0x80, r1 + +test_rstandby: + tst #SUSP_SH_RSTANDBY, r0 + bt test_ustandby + + /* setup BAR register */ 
+ bsr get_register + mov #SH_SLEEP_REG_BAR, r0 + mov.l @(SH_SLEEP_RESUME, r5), r1 + mov.l r1, @r0 + + /* set mode to "r-standby mode" */ + bra do_sleep + mov #0x20, r1 + +test_ustandby: + tst #SUSP_SH_USTANDBY, r0 + bt force_sleep + + /* set mode to "u-standby mode" */ + bra do_sleep + mov #0x10, r1 + +force_sleep: + + /* set mode to "sleep mode" */ + mov #0x00, r1 + +do_sleep: + /* setup and enter selected standby mode */ + bsr get_register + mov #SH_SLEEP_REG_STBCR, r0 + mov.l r1, @r0 +again: + sleep + bra again + nop + +save_register: + add #SH_SLEEP_BASE_ADDR, r0 + mov.l @(r0, r5), r1 + add #-SH_SLEEP_BASE_ADDR, r0 + mov.l @r1, r1 + add #SH_SLEEP_BASE_DATA, r0 + mov.l r1, @(r0, r5) + add #-SH_SLEEP_BASE_DATA, r0 + rts + nop + +get_register: + add #SH_SLEEP_BASE_ADDR, r0 + mov.l @(r0, r5), r0 + rts + nop + +set_sr: + stc sr, r8 + and r9, r8 + or r10, r8 + ldc r8, sr + rts + nop + +save_low_regs: + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + rts + mov.l r0, @-r15 + + .balign 4 +rb_bit: .long 0x20000000 ! RB=1 + +ENTRY(sh_mobile_sleep_enter_end) + + .balign 4 +ENTRY(sh_mobile_sleep_resume_start) + + /* figure out start address */ + bsr 0f + nop +0: + sts pr, k1 + mov.l 1f, k0 + and k0, k1 + + /* store pointer to data area in VBR */ + ldc k1, vbr + + /* setup sr with saved sr */ + mov.l @(SH_SLEEP_SR, k1), k0 + ldc k0, sr + + /* now: user register set! */ + stc vbr, r5 + + /* setup spc with return address to c code */ + mov.l @(SH_SLEEP_SPC, r5), r0 + ldc r0, spc + + /* restore vbr */ + mov.l @(SH_SLEEP_VBR, r5), r0 + ldc r0, vbr + + /* setup ssr with saved sr */ + mov.l @(SH_SLEEP_SR, r5), r0 + ldc r0, ssr + + /* restore sp */ + mov.l @(SH_SLEEP_SP, r5), r15 + + /* restore sleep mode register */ + bsr restore_register + mov #SH_SLEEP_REG_STBCR, r0 + + /* call self-refresh resume code if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_SF, r0 + bt skip_restore_sf + + mov.l @(SH_SLEEP_SF_POST, r5), r0 + jsr @r0 + nop + +skip_restore_sf: + /* restore mmu and cache state if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_MMU, r0 + bt skip_restore_mmu + + /* restore mmu state */ + bsr restore_register + mov #SH_SLEEP_REG_PTEH, r0 + + bsr restore_register + mov #SH_SLEEP_REG_PTEL, r0 + + bsr restore_register + mov #SH_SLEEP_REG_TTB, r0 + + bsr restore_register + mov #SH_SLEEP_REG_TEA, r0 + + bsr restore_register + mov #SH_SLEEP_REG_PTEA, r0 + + bsr restore_register + mov #SH_SLEEP_REG_PASCR, r0 + + bsr restore_register + mov #SH_SLEEP_REG_IRMCR, r0 + + bsr restore_register + mov #SH_SLEEP_REG_MMUCR, r0 + icbi @r0 + + /* restore cache settings */ + bsr restore_register + mov #SH_SLEEP_REG_RAMCR, r0 + icbi @r0 + + bsr restore_register + mov #SH_SLEEP_REG_CCR, r0 + icbi @r0 + +skip_restore_mmu: + + /* restore general purpose registers if needed */ + mov.l @(SH_SLEEP_MODE, r5), r0 + tst #SUSP_SH_REGS, r0 + bt skip_restore_regs + + /* switch to bank 1, restore low registers */ + mov.l _rb_bit, r10 + bsr _set_sr + mov #-1, r9 + + bsr restore_low_regs + nop + + /* switch to bank0, restore low registers */ + mov.l _rb_bit, r9 + not r9, r9 + bsr _set_sr + mov #0, r10 + + bsr restore_low_regs + nop + + /* restore the rest of the registers */ + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + lds.l @r15+, pr + +skip_restore_regs: + rte + nop + +restore_register: + add #SH_SLEEP_BASE_DATA, r0 + mov.l @(r0, r5), r1 + add 
#-SH_SLEEP_BASE_DATA, r0 + add #SH_SLEEP_BASE_ADDR, r0 + mov.l @(r0, r5), r0 + mov.l r1, @r0 + rts + nop + +_set_sr: + stc sr, r8 + and r9, r8 + or r10, r8 + ldc r8, sr + rts + nop + +restore_low_regs: + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + rts + mov.l @r15+, r7 + + .balign 4 +_rb_bit: .long 0x20000000 ! RB=1 +1: .long ~0x7ff +ENTRY(sh_mobile_sleep_resume_end) diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c new file mode 100644 index 000000000..19ce6a950 --- /dev/null +++ b/arch/sh/kernel/crash_dump.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * crash_dump.c - Memory preserving reboot related code. + * + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + */ +#include <linux/errno.h> +#include <linux/crash_dump.h> +#include <linux/io.h> +#include <linux/uio.h> +#include <linux/uaccess.h> + +ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, + size_t csize, unsigned long offset) +{ + void __iomem *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + csize = copy_to_iter(vaddr + offset, csize, iter); + iounmap(vaddr); + + return csize; +} diff --git a/arch/sh/kernel/debugtraps.S b/arch/sh/kernel/debugtraps.S new file mode 100644 index 000000000..ad07527e2 --- /dev/null +++ b/arch/sh/kernel/debugtraps.S @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/debugtraps.S + * + * Debug trap jump tables for SuperH + * + * Copyright (C) 2006 - 2008 Paul Mundt + */ +#include <linux/sys.h> +#include <linux/linkage.h> + +#if !defined(CONFIG_KGDB) +#define singlestep_trap_handler debug_trap_handler +#endif + +#if !defined(CONFIG_SH_STANDARD_BIOS) +#define sh_bios_handler debug_trap_handler +#endif + + .data + +ENTRY(debug_trap_table) + .long debug_trap_handler /* 0x30 */ + .long debug_trap_handler /* 0x31 */ + .long debug_trap_handler /* 0x32 */ + .long debug_trap_handler /* 0x33 */ + .long debug_trap_handler /* 0x34 */ + .long debug_trap_handler /* 0x35 */ + .long debug_trap_handler /* 0x36 */ + .long debug_trap_handler /* 0x37 */ + .long debug_trap_handler /* 0x38 */ + .long debug_trap_handler /* 0x39 */ + .long debug_trap_handler /* 0x3a */ + .long debug_trap_handler /* 0x3b */ + .long breakpoint_trap_handler /* 0x3c */ + .long singlestep_trap_handler /* 0x3d */ + .long bug_trap_handler /* 0x3e */ + .long sh_bios_handler /* 0x3f */ diff --git a/arch/sh/kernel/disassemble.c b/arch/sh/kernel/disassemble.c new file mode 100644 index 000000000..34e25a439 --- /dev/null +++ b/arch/sh/kernel/disassemble.c @@ -0,0 +1,572 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Disassemble SuperH instructions. + * + * Copyright (C) 1999 kaz Kojima + * Copyright (C) 2008 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/uaccess.h> + +#include <asm/ptrace.h> + +/* + * Format of an instruction in memory. 
+ */ +typedef enum { + HEX_0, HEX_1, HEX_2, HEX_3, HEX_4, HEX_5, HEX_6, HEX_7, + HEX_8, HEX_9, HEX_A, HEX_B, HEX_C, HEX_D, HEX_E, HEX_F, + REG_N, REG_M, REG_NM, REG_B, + BRANCH_12, BRANCH_8, + DISP_8, DISP_4, + IMM_4, IMM_4BY2, IMM_4BY4, PCRELIMM_8BY2, PCRELIMM_8BY4, + IMM_8, IMM_8BY2, IMM_8BY4, +} sh_nibble_type; + +typedef enum { + A_END, A_BDISP12, A_BDISP8, + A_DEC_M, A_DEC_N, + A_DISP_GBR, A_DISP_PC, A_DISP_REG_M, A_DISP_REG_N, + A_GBR, + A_IMM, + A_INC_M, A_INC_N, + A_IND_M, A_IND_N, A_IND_R0_REG_M, A_IND_R0_REG_N, + A_MACH, A_MACL, + A_PR, A_R0, A_R0_GBR, A_REG_M, A_REG_N, A_REG_B, + A_SR, A_VBR, A_SSR, A_SPC, A_SGR, A_DBR, + F_REG_N, F_REG_M, D_REG_N, D_REG_M, + X_REG_N, /* Only used for argument parsing */ + X_REG_M, /* Only used for argument parsing */ + DX_REG_N, DX_REG_M, V_REG_N, V_REG_M, + FD_REG_N, + XMTRX_M4, + F_FR0, + FPUL_N, FPUL_M, FPSCR_N, FPSCR_M, +} sh_arg_type; + +static struct sh_opcode_info { + char *name; + sh_arg_type arg[7]; + sh_nibble_type nibbles[4]; +} sh_table[] = { + {"add",{A_IMM,A_REG_N},{HEX_7,REG_N,IMM_8}}, + {"add",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_C}}, + {"addc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_E}}, + {"addv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_F}}, + {"and",{A_IMM,A_R0},{HEX_C,HEX_9,IMM_8}}, + {"and",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_9}}, + {"and.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_D,IMM_8}}, + {"bra",{A_BDISP12},{HEX_A,BRANCH_12}}, + {"bsr",{A_BDISP12},{HEX_B,BRANCH_12}}, + {"bt",{A_BDISP8},{HEX_8,HEX_9,BRANCH_8}}, + {"bf",{A_BDISP8},{HEX_8,HEX_B,BRANCH_8}}, + {"bt.s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}}, + {"bt/s",{A_BDISP8},{HEX_8,HEX_D,BRANCH_8}}, + {"bf.s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}}, + {"bf/s",{A_BDISP8},{HEX_8,HEX_F,BRANCH_8}}, + {"clrmac",{0},{HEX_0,HEX_0,HEX_2,HEX_8}}, + {"clrs",{0},{HEX_0,HEX_0,HEX_4,HEX_8}}, + {"clrt",{0},{HEX_0,HEX_0,HEX_0,HEX_8}}, + {"cmp/eq",{A_IMM,A_R0},{HEX_8,HEX_8,IMM_8}}, + {"cmp/eq",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_0}}, + {"cmp/ge",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_3}}, + {"cmp/gt",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_7}}, + {"cmp/hi",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_6}}, + {"cmp/hs",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_2}}, + {"cmp/pl",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_5}}, + {"cmp/pz",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_1}}, + {"cmp/str",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_C}}, + {"div0s",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_7}}, + {"div0u",{0},{HEX_0,HEX_0,HEX_1,HEX_9}}, + {"div1",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_4}}, + {"exts.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_E}}, + {"exts.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_F}}, + {"extu.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_C}}, + {"extu.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_D}}, + {"jmp",{A_IND_N},{HEX_4,REG_N,HEX_2,HEX_B}}, + {"jsr",{A_IND_N},{HEX_4,REG_N,HEX_0,HEX_B}}, + {"ldc",{A_REG_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_E}}, + {"ldc",{A_REG_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_E}}, + {"ldc",{A_REG_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_E}}, + {"ldc",{A_REG_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_E}}, + {"ldc",{A_REG_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_E}}, + {"ldc",{A_REG_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_E}}, + {"ldc",{A_REG_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_E}}, + {"ldc.l",{A_INC_N,A_SR},{HEX_4,REG_N,HEX_0,HEX_7}}, + {"ldc.l",{A_INC_N,A_GBR},{HEX_4,REG_N,HEX_1,HEX_7}}, + {"ldc.l",{A_INC_N,A_VBR},{HEX_4,REG_N,HEX_2,HEX_7}}, + {"ldc.l",{A_INC_N,A_SSR},{HEX_4,REG_N,HEX_3,HEX_7}}, + {"ldc.l",{A_INC_N,A_SPC},{HEX_4,REG_N,HEX_4,HEX_7}}, + {"ldc.l",{A_INC_N,A_DBR},{HEX_4,REG_N,HEX_7,HEX_7}}, 
+ {"ldc.l",{A_INC_N,A_REG_B},{HEX_4,REG_N,REG_B,HEX_7}}, + {"lds",{A_REG_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_A}}, + {"lds",{A_REG_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_A}}, + {"lds",{A_REG_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_A}}, + {"lds",{A_REG_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_A}}, + {"lds",{A_REG_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_A}}, + {"lds.l",{A_INC_N,A_MACH},{HEX_4,REG_N,HEX_0,HEX_6}}, + {"lds.l",{A_INC_N,A_MACL},{HEX_4,REG_N,HEX_1,HEX_6}}, + {"lds.l",{A_INC_N,A_PR},{HEX_4,REG_N,HEX_2,HEX_6}}, + {"lds.l",{A_INC_M,FPUL_N},{HEX_4,REG_M,HEX_5,HEX_6}}, + {"lds.l",{A_INC_M,FPSCR_N},{HEX_4,REG_M,HEX_6,HEX_6}}, + {"ldtlb",{0},{HEX_0,HEX_0,HEX_3,HEX_8}}, + {"mac.w",{A_INC_M,A_INC_N},{HEX_4,REG_N,REG_M,HEX_F}}, + {"mov",{A_IMM,A_REG_N},{HEX_E,REG_N,IMM_8}}, + {"mov",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_3}}, + {"mov.b",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_4}}, + {"mov.b",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_4}}, + {"mov.b",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_0}}, + {"mov.b",{A_DISP_REG_M,A_R0},{HEX_8,HEX_4,REG_M,IMM_4}}, + {"mov.b",{A_DISP_GBR,A_R0},{HEX_C,HEX_4,IMM_8}}, + {"mov.b",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_C}}, + {"mov.b",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_4}}, + {"mov.b",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_0}}, + {"mov.b",{A_R0,A_DISP_REG_M},{HEX_8,HEX_0,REG_M,IMM_4}}, + {"mov.b",{A_R0,A_DISP_GBR},{HEX_C,HEX_0,IMM_8}}, + {"mov.l",{ A_REG_M,A_DISP_REG_N},{HEX_1,REG_N,REG_M,IMM_4BY4}}, + {"mov.l",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_6}}, + {"mov.l",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_6}}, + {"mov.l",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_2}}, + {"mov.l",{A_DISP_REG_M,A_REG_N},{HEX_5,REG_N,REG_M,IMM_4BY4}}, + {"mov.l",{A_DISP_GBR,A_R0},{HEX_C,HEX_6,IMM_8BY4}}, + {"mov.l",{A_DISP_PC,A_REG_N},{HEX_D,REG_N,PCRELIMM_8BY4}}, + {"mov.l",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_E}}, + {"mov.l",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_6}}, + {"mov.l",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_2}}, + {"mov.l",{A_R0,A_DISP_GBR},{HEX_C,HEX_2,IMM_8BY4}}, + {"mov.w",{ A_REG_M,A_IND_R0_REG_N},{HEX_0,REG_N,REG_M,HEX_5}}, + {"mov.w",{ A_REG_M,A_DEC_N},{HEX_2,REG_N,REG_M,HEX_5}}, + {"mov.w",{ A_REG_M,A_IND_N},{HEX_2,REG_N,REG_M,HEX_1}}, + {"mov.w",{A_DISP_REG_M,A_R0},{HEX_8,HEX_5,REG_M,IMM_4BY2}}, + {"mov.w",{A_DISP_GBR,A_R0},{HEX_C,HEX_5,IMM_8BY2}}, + {"mov.w",{A_DISP_PC,A_REG_N},{HEX_9,REG_N,PCRELIMM_8BY2}}, + {"mov.w",{A_IND_R0_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_D}}, + {"mov.w",{A_INC_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_5}}, + {"mov.w",{A_IND_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_1}}, + {"mov.w",{A_R0,A_DISP_REG_M},{HEX_8,HEX_1,REG_M,IMM_4BY2}}, + {"mov.w",{A_R0,A_DISP_GBR},{HEX_C,HEX_1,IMM_8BY2}}, + {"mova",{A_DISP_PC,A_R0},{HEX_C,HEX_7,PCRELIMM_8BY4}}, + {"movca.l",{A_R0,A_IND_N},{HEX_0,REG_N,HEX_C,HEX_3}}, + {"movt",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_9}}, + {"muls",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_F}}, + {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}}, + {"mulu",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_E}}, + {"neg",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_B}}, + {"negc",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_A}}, + {"nop",{0},{HEX_0,HEX_0,HEX_0,HEX_9}}, + {"not",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_7}}, + {"ocbi",{A_IND_N},{HEX_0,REG_N,HEX_9,HEX_3}}, + {"ocbp",{A_IND_N},{HEX_0,REG_N,HEX_A,HEX_3}}, + {"ocbwb",{A_IND_N},{HEX_0,REG_N,HEX_B,HEX_3}}, + {"or",{A_IMM,A_R0},{HEX_C,HEX_B,IMM_8}}, + {"or",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_B}}, + {"or.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_F,IMM_8}}, + 
{"pref",{A_IND_N},{HEX_0,REG_N,HEX_8,HEX_3}}, + {"rotcl",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_4}}, + {"rotcr",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_5}}, + {"rotl",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_4}}, + {"rotr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_5}}, + {"rte",{0},{HEX_0,HEX_0,HEX_2,HEX_B}}, + {"rts",{0},{HEX_0,HEX_0,HEX_0,HEX_B}}, + {"sets",{0},{HEX_0,HEX_0,HEX_5,HEX_8}}, + {"sett",{0},{HEX_0,HEX_0,HEX_1,HEX_8}}, + {"shad",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_C}}, + {"shld",{ A_REG_M,A_REG_N},{HEX_4,REG_N,REG_M,HEX_D}}, + {"shal",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_0}}, + {"shar",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_1}}, + {"shll",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_0}}, + {"shll16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_8}}, + {"shll2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_8}}, + {"shll8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_8}}, + {"shlr",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_1}}, + {"shlr16",{A_REG_N},{HEX_4,REG_N,HEX_2,HEX_9}}, + {"shlr2",{A_REG_N},{HEX_4,REG_N,HEX_0,HEX_9}}, + {"shlr8",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_9}}, + {"sleep",{0},{HEX_0,HEX_0,HEX_1,HEX_B}}, + {"stc",{A_SR,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_2}}, + {"stc",{A_GBR,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_2}}, + {"stc",{A_VBR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_2}}, + {"stc",{A_SSR,A_REG_N},{HEX_0,REG_N,HEX_3,HEX_2}}, + {"stc",{A_SPC,A_REG_N},{HEX_0,REG_N,HEX_4,HEX_2}}, + {"stc",{A_SGR,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_2}}, + {"stc",{A_DBR,A_REG_N},{HEX_0,REG_N,HEX_7,HEX_2}}, + {"stc",{A_REG_B,A_REG_N},{HEX_0,REG_N,REG_B,HEX_2}}, + {"stc.l",{A_SR,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_3}}, + {"stc.l",{A_GBR,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_3}}, + {"stc.l",{A_VBR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_3}}, + {"stc.l",{A_SSR,A_DEC_N},{HEX_4,REG_N,HEX_3,HEX_3}}, + {"stc.l",{A_SPC,A_DEC_N},{HEX_4,REG_N,HEX_4,HEX_3}}, + {"stc.l",{A_SGR,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_3}}, + {"stc.l",{A_DBR,A_DEC_N},{HEX_4,REG_N,HEX_7,HEX_3}}, + {"stc.l",{A_REG_B,A_DEC_N},{HEX_4,REG_N,REG_B,HEX_3}}, + {"sts",{A_MACH,A_REG_N},{HEX_0,REG_N,HEX_0,HEX_A}}, + {"sts",{A_MACL,A_REG_N},{HEX_0,REG_N,HEX_1,HEX_A}}, + {"sts",{A_PR,A_REG_N},{HEX_0,REG_N,HEX_2,HEX_A}}, + {"sts",{FPUL_M,A_REG_N},{HEX_0,REG_N,HEX_5,HEX_A}}, + {"sts",{FPSCR_M,A_REG_N},{HEX_0,REG_N,HEX_6,HEX_A}}, + {"sts.l",{A_MACH,A_DEC_N},{HEX_4,REG_N,HEX_0,HEX_2}}, + {"sts.l",{A_MACL,A_DEC_N},{HEX_4,REG_N,HEX_1,HEX_2}}, + {"sts.l",{A_PR,A_DEC_N},{HEX_4,REG_N,HEX_2,HEX_2}}, + {"sts.l",{FPUL_M,A_DEC_N},{HEX_4,REG_N,HEX_5,HEX_2}}, + {"sts.l",{FPSCR_M,A_DEC_N},{HEX_4,REG_N,HEX_6,HEX_2}}, + {"sub",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_8}}, + {"subc",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_A}}, + {"subv",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_B}}, + {"swap.b",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_8}}, + {"swap.w",{ A_REG_M,A_REG_N},{HEX_6,REG_N,REG_M,HEX_9}}, + {"tas.b",{A_IND_N},{HEX_4,REG_N,HEX_1,HEX_B}}, + {"trapa",{A_IMM},{HEX_C,HEX_3,IMM_8}}, + {"tst",{A_IMM,A_R0},{HEX_C,HEX_8,IMM_8}}, + {"tst",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_8}}, + {"tst.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_C,IMM_8}}, + {"xor",{A_IMM,A_R0},{HEX_C,HEX_A,IMM_8}}, + {"xor",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_A}}, + {"xor.b",{A_IMM,A_R0_GBR},{HEX_C,HEX_E,IMM_8}}, + {"xtrct",{ A_REG_M,A_REG_N},{HEX_2,REG_N,REG_M,HEX_D}}, + {"mul.l",{ A_REG_M,A_REG_N},{HEX_0,REG_N,REG_M,HEX_7}}, + {"dt",{A_REG_N},{HEX_4,REG_N,HEX_1,HEX_0}}, + {"dmuls.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_D}}, + {"dmulu.l",{ A_REG_M,A_REG_N},{HEX_3,REG_N,REG_M,HEX_5}}, + {"mac.l",{A_INC_M,A_INC_N},{HEX_0,REG_N,REG_M,HEX_F}}, + {"braf",{A_REG_N},{HEX_0,REG_N,HEX_2,HEX_3}}, + 
{"bsrf",{A_REG_N},{HEX_0,REG_N,HEX_0,HEX_3}}, + {"fabs",{FD_REG_N},{HEX_F,REG_N,HEX_5,HEX_D}}, + {"fadd",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_0}}, + {"fadd",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_0}}, + {"fcmp/eq",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_4}}, + {"fcmp/eq",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_4}}, + {"fcmp/gt",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_5}}, + {"fcmp/gt",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_5}}, + {"fcnvds",{D_REG_N,FPUL_M},{HEX_F,REG_N,HEX_B,HEX_D}}, + {"fcnvsd",{FPUL_M,D_REG_N},{HEX_F,REG_N,HEX_A,HEX_D}}, + {"fdiv",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_3}}, + {"fdiv",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_3}}, + {"fipr",{V_REG_M,V_REG_N},{HEX_F,REG_NM,HEX_E,HEX_D}}, + {"fldi0",{F_REG_N},{HEX_F,REG_N,HEX_8,HEX_D}}, + {"fldi1",{F_REG_N},{HEX_F,REG_N,HEX_9,HEX_D}}, + {"flds",{F_REG_N,FPUL_M},{HEX_F,REG_N,HEX_1,HEX_D}}, + {"float",{FPUL_M,FD_REG_N},{HEX_F,REG_N,HEX_2,HEX_D}}, + {"fmac",{F_FR0,F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_E}}, + {"fmov",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_C}}, + {"fmov",{DX_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_C}}, + {"fmov",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov.d",{A_IND_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov.d",{DX_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov.d",{A_INC_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov.d",{DX_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov.d",{A_IND_R0_REG_M,DX_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov.d",{DX_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmov.s",{A_IND_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_8}}, + {"fmov.s",{F_REG_M,A_IND_N},{HEX_F,REG_N,REG_M,HEX_A}}, + {"fmov.s",{A_INC_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_9}}, + {"fmov.s",{F_REG_M,A_DEC_N},{HEX_F,REG_N,REG_M,HEX_B}}, + {"fmov.s",{A_IND_R0_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_6}}, + {"fmov.s",{F_REG_M,A_IND_R0_REG_N},{HEX_F,REG_N,REG_M,HEX_7}}, + {"fmul",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_2}}, + {"fmul",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_2}}, + {"fneg",{FD_REG_N},{HEX_F,REG_N,HEX_4,HEX_D}}, + {"frchg",{0},{HEX_F,HEX_B,HEX_F,HEX_D}}, + {"fschg",{0},{HEX_F,HEX_3,HEX_F,HEX_D}}, + {"fsqrt",{FD_REG_N},{HEX_F,REG_N,HEX_6,HEX_D}}, + {"fsts",{FPUL_M,F_REG_N},{HEX_F,REG_N,HEX_0,HEX_D}}, + {"fsub",{F_REG_M,F_REG_N},{HEX_F,REG_N,REG_M,HEX_1}}, + {"fsub",{D_REG_M,D_REG_N},{HEX_F,REG_N,REG_M,HEX_1}}, + {"ftrc",{FD_REG_N,FPUL_M},{HEX_F,REG_N,HEX_3,HEX_D}}, + {"ftrv",{XMTRX_M4,V_REG_N},{HEX_F,REG_NM,HEX_F,HEX_D}}, + { 0 }, +}; + +static void print_sh_insn(u32 memaddr, u16 insn) +{ + int relmask = ~0; + int nibs[4] = { (insn >> 12) & 0xf, (insn >> 8) & 0xf, (insn >> 4) & 0xf, insn & 0xf}; + int lastsp; + struct sh_opcode_info *op = sh_table; + + for (; op->name; op++) { + int n; + int imm = 0; + int rn = 0; + int rm = 0; + int rb = 0; + int disp_pc; + int disp_pc_addr = 0; + + for (n = 0; n < 4; n++) { + int i = op->nibbles[n]; + + if 
(i < 16) { + if (nibs[n] == i) + continue; + goto fail; + } + switch (i) { + case BRANCH_8: + imm = (nibs[2] << 4) | (nibs[3]); + if (imm & 0x80) + imm |= ~0xff; + imm = ((char)imm) * 2 + 4 ; + goto ok; + case BRANCH_12: + imm = ((nibs[1]) << 8) | (nibs[2] << 4) | (nibs[3]); + if (imm & 0x800) + imm |= ~0xfff; + imm = imm * 2 + 4; + goto ok; + case IMM_4: + imm = nibs[3]; + goto ok; + case IMM_4BY2: + imm = nibs[3] <<1; + goto ok; + case IMM_4BY4: + imm = nibs[3] <<2; + goto ok; + case IMM_8: + imm = (nibs[2] << 4) | nibs[3]; + goto ok; + case PCRELIMM_8BY2: + imm = ((nibs[2] << 4) | nibs[3]) <<1; + relmask = ~1; + goto ok; + case PCRELIMM_8BY4: + imm = ((nibs[2] << 4) | nibs[3]) <<2; + relmask = ~3; + goto ok; + case IMM_8BY2: + imm = ((nibs[2] << 4) | nibs[3]) <<1; + goto ok; + case IMM_8BY4: + imm = ((nibs[2] << 4) | nibs[3]) <<2; + goto ok; + case DISP_8: + imm = (nibs[2] << 4) | (nibs[3]); + goto ok; + case DISP_4: + imm = nibs[3]; + goto ok; + case REG_N: + rn = nibs[n]; + break; + case REG_M: + rm = nibs[n]; + break; + case REG_NM: + rn = (nibs[n] & 0xc) >> 2; + rm = (nibs[n] & 0x3); + break; + case REG_B: + rb = nibs[n] & 0x07; + break; + default: + return; + } + } + + ok: + pr_cont("%-8s ", op->name); + lastsp = (op->arg[0] == A_END); + disp_pc = 0; + for (n = 0; n < 6 && op->arg[n] != A_END; n++) { + if (n && op->arg[1] != A_END) + pr_cont(", "); + switch (op->arg[n]) { + case A_IMM: + pr_cont("#%d", (char)(imm)); + break; + case A_R0: + pr_cont("r0"); + break; + case A_REG_N: + pr_cont("r%d", rn); + break; + case A_INC_N: + pr_cont("@r%d+", rn); + break; + case A_DEC_N: + pr_cont("@-r%d", rn); + break; + case A_IND_N: + pr_cont("@r%d", rn); + break; + case A_DISP_REG_N: + pr_cont("@(%d,r%d)", imm, rn); + break; + case A_REG_M: + pr_cont("r%d", rm); + break; + case A_INC_M: + pr_cont("@r%d+", rm); + break; + case A_DEC_M: + pr_cont("@-r%d", rm); + break; + case A_IND_M: + pr_cont("@r%d", rm); + break; + case A_DISP_REG_M: + pr_cont("@(%d,r%d)", imm, rm); + break; + case A_REG_B: + pr_cont("r%d_bank", rb); + break; + case A_DISP_PC: + disp_pc = 1; + disp_pc_addr = imm + 4 + (memaddr & relmask); + pr_cont("%08x <%pS>", disp_pc_addr, + (void *)disp_pc_addr); + break; + case A_IND_R0_REG_N: + pr_cont("@(r0,r%d)", rn); + break; + case A_IND_R0_REG_M: + pr_cont("@(r0,r%d)", rm); + break; + case A_DISP_GBR: + pr_cont("@(%d,gbr)", imm); + break; + case A_R0_GBR: + pr_cont("@(r0,gbr)"); + break; + case A_BDISP12: + case A_BDISP8: + pr_cont("%08x", imm + memaddr); + break; + case A_SR: + pr_cont("sr"); + break; + case A_GBR: + pr_cont("gbr"); + break; + case A_VBR: + pr_cont("vbr"); + break; + case A_SSR: + pr_cont("ssr"); + break; + case A_SPC: + pr_cont("spc"); + break; + case A_MACH: + pr_cont("mach"); + break; + case A_MACL: + pr_cont("macl"); + break; + case A_PR: + pr_cont("pr"); + break; + case A_SGR: + pr_cont("sgr"); + break; + case A_DBR: + pr_cont("dbr"); + break; + case FD_REG_N: + case F_REG_N: + pr_cont("fr%d", rn); + break; + case F_REG_M: + pr_cont("fr%d", rm); + break; + case DX_REG_N: + if (rn & 1) { + pr_cont("xd%d", rn & ~1); + break; + } + fallthrough; + case D_REG_N: + pr_cont("dr%d", rn); + break; + case DX_REG_M: + if (rm & 1) { + pr_cont("xd%d", rm & ~1); + break; + } + fallthrough; + case D_REG_M: + pr_cont("dr%d", rm); + break; + case FPSCR_M: + case FPSCR_N: + pr_cont("fpscr"); + break; + case FPUL_M: + case FPUL_N: + pr_cont("fpul"); + break; + case F_FR0: + pr_cont("fr0"); + break; + case V_REG_N: + pr_cont("fv%d", rn*4); + break; + case V_REG_M: + 
pr_cont("fv%d", rm*4); + break; + case XMTRX_M4: + pr_cont("xmtrx"); + break; + default: + return; + } + } + + if (disp_pc && strcmp(op->name, "mova") != 0) { + u32 val; + + if (relmask == ~1) + __get_user(val, (u16 *)disp_pc_addr); + else + __get_user(val, (u32 *)disp_pc_addr); + + pr_cont(" ! %08x <%pS>", val, (void *)val); + } + + return; + fail: + ; + + } + + pr_info(".word 0x%x%x%x%x", nibs[0], nibs[1], nibs[2], nibs[3]); +} + +void show_code(struct pt_regs *regs) +{ + unsigned short *pc = (unsigned short *)regs->pc; + long i; + + if (regs->pc & 0x1) + return; + + pr_info("Code:\n"); + + for (i = -3 ; i < 6 ; i++) { + unsigned short insn; + + if (__get_user(insn, pc + i)) { + pr_err(" (Bad address in pc)\n"); + break; + } + + pr_info("%s%08lx: ", (i ? " " : "->"), + (unsigned long)(pc + i)); + print_sh_insn((unsigned long)(pc + i), insn); + pr_cont("\n"); + } + + pr_info("\n"); +} diff --git a/arch/sh/kernel/dma-coherent.c b/arch/sh/kernel/dma-coherent.c new file mode 100644 index 000000000..6a44c0e7b --- /dev/null +++ b/arch/sh/kernel/dma-coherent.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2004 - 2007 Paul Mundt + */ +#include <linux/mm.h> +#include <linux/dma-map-ops.h> +#include <asm/cacheflush.h> +#include <asm/addrspace.h> + +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + __flush_purge_region(page_address(page), size); +} + +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + void *addr = sh_cacheop_vaddr(phys_to_virt(paddr)); + + switch (dir) { + case DMA_FROM_DEVICE: /* invalidate only */ + __flush_invalidate_region(addr, size); + break; + case DMA_TO_DEVICE: /* writeback only */ + __flush_wback_region(addr, size); + break; + case DMA_BIDIRECTIONAL: /* writeback and invalidate */ + __flush_purge_region(addr, size); + break; + default: + BUG(); + } +} diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c new file mode 100644 index 000000000..758a6c89e --- /dev/null +++ b/arch/sh/kernel/dumpstack.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs + * Copyright (C) 2009 Matt Fleming + * Copyright (C) 2002 - 2012 Paul Mundt + */ +#include <linux/kallsyms.h> +#include <linux/ftrace.h> +#include <linux/debug_locks.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/kdebug.h> +#include <linux/export.h> +#include <linux/uaccess.h> +#include <asm/unwinder.h> +#include <asm/stacktrace.h> + +void dump_mem(const char *str, const char *loglvl, unsigned long bottom, + unsigned long top) +{ + unsigned long p; + int i; + + printk("%s%s(0x%08lx to 0x%08lx)\n", loglvl, str, bottom, top); + + for (p = bottom & ~31; p < top; ) { + printk("%s%04lx: ", loglvl, p & 0xffff); + + for (i = 0; i < 8; i++, p += 4) { + unsigned int val; + + if (p < bottom || p >= top) + pr_cont(" "); + else { + if (__get_user(val, (unsigned int __user *)p)) { + pr_cont("\n"); + return; + } + pr_cont("%08x ", val); + } + } + pr_cont("\n"); + } +} + +void printk_address(unsigned long address, int reliable) +{ + pr_cont(" [<%px>] %s%pS\n", (void *) address, + reliable ? "" : "? 
", (void *) address); +} + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +static void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ + struct task_struct *task = tinfo->task; + struct ftrace_ret_stack *ret_stack; + unsigned long ret_addr; + + if (addr != (unsigned long)return_to_handler) + return; + + if (!task->ret_stack) + return; + + ret_stack = ftrace_graph_get_ret_stack(task, *graph); + if (!ret_stack) + return; + + ret_addr = ret_stack->ret; + + ops->address(data, ret_addr, 1); + + (*graph)++; +} +#else +static inline void +print_ftrace_graph_addr(unsigned long addr, void *data, + const struct stacktrace_ops *ops, + struct thread_info *tinfo, int *graph) +{ } +#endif + +void +stack_reader_dump(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + struct thread_info *context; + int graph = 0; + + context = (struct thread_info *) + ((unsigned long)sp & (~(THREAD_SIZE - 1))); + + while (!kstack_end(sp)) { + unsigned long addr = *sp++; + + if (__kernel_text_address(addr)) { + ops->address(data, addr, 1); + + print_ftrace_graph_addr(addr, data, ops, + context, &graph); + } + } +} + +/* + * Print one address/symbol entries per line. + */ +static void print_trace_address(void *data, unsigned long addr, int reliable) +{ + printk("%s", (char *)data); + printk_address(addr, reliable); +} + +static const struct stacktrace_ops print_trace_ops = { + .address = print_trace_address, +}; + +void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs, const char *loglvl) +{ + if (regs && user_mode(regs)) + return; + + printk("%s\nCall trace:\n", loglvl); + + unwind_stack(tsk, regs, sp, &print_trace_ops, (void *)loglvl); + + pr_cont("\n"); + + if (!tsk) + tsk = current; + + debug_show_held_locks(tsk); +} + +void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) +{ + unsigned long stack; + + if (!tsk) + tsk = current; + if (tsk == current) + sp = (unsigned long *)current_stack_pointer; + else + sp = (unsigned long *)tsk->thread.sp; + + stack = (unsigned long)sp; + dump_mem("Stack: ", loglvl, stack, THREAD_SIZE + + (unsigned long)task_stack_page(tsk)); + show_trace(tsk, sp, NULL, loglvl); +} diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c new file mode 100644 index 000000000..bf8682e71 --- /dev/null +++ b/arch/sh/kernel/dwarf.c @@ -0,0 +1,1206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org> + * + * This is an implementation of a DWARF unwinder. Its main purpose is + * for generating stacktrace information. Based on the DWARF 3 + * specification from http://www.dwarfstd.org. + * + * TODO: + * - DWARF64 doesn't work. + * - Registers with DWARF_VAL_OFFSET rules aren't handled properly. + */ + +/* #define DEBUG */ +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/list.h> +#include <linux/mempool.h> +#include <linux/mm.h> +#include <linux/elf.h> +#include <linux/ftrace.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <asm/dwarf.h> +#include <asm/unwinder.h> +#include <asm/sections.h> +#include <asm/unaligned.h> +#include <asm/stacktrace.h> + +/* Reserve enough memory for two stack frames */ +#define DWARF_FRAME_MIN_REQ 2 +/* ... with 4 registers per frame. 
*/
+#define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
+
+static struct kmem_cache *dwarf_frame_cachep;
+static mempool_t *dwarf_frame_pool;
+
+static struct kmem_cache *dwarf_reg_cachep;
+static mempool_t *dwarf_reg_pool;
+
+static struct rb_root cie_root;
+static DEFINE_SPINLOCK(dwarf_cie_lock);
+
+static struct rb_root fde_root;
+static DEFINE_SPINLOCK(dwarf_fde_lock);
+
+static struct dwarf_cie *cached_cie;
+
+static unsigned int dwarf_unwinder_ready;
+
+/**
+ * dwarf_frame_alloc_reg - allocate memory for a DWARF register
+ * @frame: the DWARF frame whose list of registers we insert on
+ * @reg_num: the register number
+ *
+ * Allocate space for, and initialise, a dwarf reg from
+ * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
+ * dwarf registers for @frame.
+ *
+ * Return the initialised DWARF reg.
+ */
+static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
+ if (!reg) {
+ printk(KERN_WARNING "Unable to allocate a DWARF register\n");
+ /*
+ * Let's just bomb hard here, we have no way to
+ * gracefully recover.
+ */
+ UNWINDER_BUG();
+ }
+
+ reg->number = reg_num;
+ reg->addr = 0;
+ reg->flags = 0;
+
+ list_add(&reg->link, &frame->reg_list);
+
+ return reg;
+}
+
+static void dwarf_frame_free_regs(struct dwarf_frame *frame)
+{
+ struct dwarf_reg *reg, *n;
+
+ list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
+ list_del(&reg->link);
+ mempool_free(reg, dwarf_reg_pool);
+ }
+}
+
+/**
+ * dwarf_frame_reg - return a DWARF register
+ * @frame: the DWARF frame to search in for @reg_num
+ * @reg_num: the register number to search for
+ *
+ * Lookup and return the dwarf reg @reg_num for this frame. Return
+ * NULL if @reg_num is an invalid register number.
+ */
+static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
+ unsigned int reg_num)
+{
+ struct dwarf_reg *reg;
+
+ list_for_each_entry(reg, &frame->reg_list, link) {
+ if (reg->number == reg_num)
+ return reg;
+ }
+
+ return NULL;
+}
+
+/**
+ * dwarf_read_addr - read dwarf data
+ * @src: source address of data
+ * @dst: destination address to store the data to
+ *
+ * Read 'n' bytes from @src, where 'n' is the size of an address on
+ * the native machine. We return the number of bytes read, which
+ * should always be 'n'. We also have to be careful when reading
+ * from @src and writing to @dst, because they can be arbitrarily
+ * aligned. Return 'n' - the number of bytes read.
+ */
+static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
+{
+ u32 val = get_unaligned(src);
+ put_unaligned(val, dst);
+ return sizeof(unsigned long *);
+}
+
+/**
+ * dwarf_read_uleb128 - read unsigned LEB128 data
+ * @addr: the address where the ULEB128 data is stored
+ * @ret: address to store the result
+ *
+ * Decode an unsigned LEB128 encoded datum. The algorithm is taken
+ * from Appendix C of the DWARF 3 spec. For information on the
+ * encodings refer to section "7.6 - Variable Length Data". Return
+ * the number of bytes read.
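+ *
+ * A worked example, shown here purely for illustration: with this
+ * encoding the byte sequence 0xe5 0x8e 0x26 decodes as
+ * 0x65 | (0x0e << 7) | (0x26 << 14) = 624485, and three bytes are
+ * consumed.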
+ */ +static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret) +{ + unsigned int result; + unsigned char byte; + int shift, count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + count++; + + result |= (byte & 0x7f) << shift; + shift += 7; + + if (!(byte & 0x80)) + break; + } + + *ret = result; + + return count; +} + +/** + * dwarf_read_leb128 - read signed LEB128 data + * @addr: the address of the LEB128 encoded data + * @ret: address to store the result + * + * Decode signed LEB128 data. The algorithm is taken from Appendix + * C of the DWARF 3 spec. Return the number of bytes read. + */ +static inline unsigned long dwarf_read_leb128(char *addr, int *ret) +{ + unsigned char byte; + int result, shift; + int num_bits; + int count; + + result = 0; + shift = 0; + count = 0; + + while (1) { + byte = __raw_readb(addr); + addr++; + result |= (byte & 0x7f) << shift; + shift += 7; + count++; + + if (!(byte & 0x80)) + break; + } + + /* The number of bits in a signed integer. */ + num_bits = 8 * sizeof(result); + + if ((shift < num_bits) && (byte & 0x40)) + result |= (-1 << shift); + + *ret = result; + + return count; +} + +/** + * dwarf_read_encoded_value - return the decoded value at @addr + * @addr: the address of the encoded value + * @val: where to write the decoded value + * @encoding: the encoding with which we can decode @addr + * + * GCC emits encoded address in the .eh_frame FDE entries. Decode + * the value at @addr using @encoding. The decoded value is written + * to @val and the number of bytes read is returned. + */ +static int dwarf_read_encoded_value(char *addr, unsigned long *val, + char encoding) +{ + unsigned long decoded_addr = 0; + int count = 0; + + switch (encoding & 0x70) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + decoded_addr = (unsigned long)addr; + break; + default: + pr_debug("encoding=0x%x\n", (encoding & 0x70)); + UNWINDER_BUG(); + } + + if ((encoding & 0x07) == 0x00) + encoding |= DW_EH_PE_udata4; + + switch (encoding & 0x0f) { + case DW_EH_PE_sdata4: + case DW_EH_PE_udata4: + count += 4; + decoded_addr += get_unaligned((u32 *)addr); + __raw_writel(decoded_addr, val); + break; + default: + pr_debug("encoding=0x%x\n", encoding); + UNWINDER_BUG(); + } + + return count; +} + +/** + * dwarf_entry_len - return the length of an FDE or CIE + * @addr: the address of the entry + * @len: the length of the entry + * + * Read the initial_length field of the entry and store the size of + * the entry in @len. We return the number of bytes read. Return a + * count of 0 on error. + */ +static inline int dwarf_entry_len(char *addr, unsigned long *len) +{ + u32 initial_len; + int count; + + initial_len = get_unaligned((u32 *)addr); + count = 4; + + /* + * An initial length field value in the range DW_LEN_EXT_LO - + * DW_LEN_EXT_HI indicates an extension, and should not be + * interpreted as a length. The only extension that we currently + * understand is the use of DWARF64 addresses. + */ + if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) { + /* + * The 64-bit length field immediately follows the + * compulsory 32-bit length field. 
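+ *
+ * (In other words, a DWARF64 entry announces itself with the 32-bit
+ * escape value 0xffffffff and the real length is the 64-bit quantity
+ * that follows it; noted here for illustration.)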
+ */ + if (initial_len == DW_EXT_DWARF64) { + *len = get_unaligned((u64 *)addr + 4); + count = 12; + } else { + printk(KERN_WARNING "Unknown DWARF extension\n"); + count = 0; + } + } else + *len = initial_len; + + return count; +} + +/** + * dwarf_lookup_cie - locate the cie + * @cie_ptr: pointer to help with lookup + */ +static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr) +{ + struct rb_node **rb_node = &cie_root.rb_node; + struct dwarf_cie *cie = NULL; + unsigned long flags; + + spin_lock_irqsave(&dwarf_cie_lock, flags); + + /* + * We've cached the last CIE we looked up because chances are + * that the FDE wants this CIE. + */ + if (cached_cie && cached_cie->cie_pointer == cie_ptr) { + cie = cached_cie; + goto out; + } + + while (*rb_node) { + struct dwarf_cie *cie_tmp; + + cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); + BUG_ON(!cie_tmp); + + if (cie_ptr == cie_tmp->cie_pointer) { + cie = cie_tmp; + cached_cie = cie_tmp; + goto out; + } else { + if (cie_ptr < cie_tmp->cie_pointer) + rb_node = &(*rb_node)->rb_left; + else + rb_node = &(*rb_node)->rb_right; + } + } + +out: + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + return cie; +} + +/** + * dwarf_lookup_fde - locate the FDE that covers pc + * @pc: the program counter + */ +struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) +{ + struct rb_node **rb_node = &fde_root.rb_node; + struct dwarf_fde *fde = NULL; + unsigned long flags; + + spin_lock_irqsave(&dwarf_fde_lock, flags); + + while (*rb_node) { + struct dwarf_fde *fde_tmp; + unsigned long tmp_start, tmp_end; + + fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); + BUG_ON(!fde_tmp); + + tmp_start = fde_tmp->initial_location; + tmp_end = fde_tmp->initial_location + fde_tmp->address_range; + + if (pc < tmp_start) { + rb_node = &(*rb_node)->rb_left; + } else { + if (pc < tmp_end) { + fde = fde_tmp; + goto out; + } else + rb_node = &(*rb_node)->rb_right; + } + } + +out: + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return fde; +} + +/** + * dwarf_cfa_execute_insns - execute instructions to calculate a CFA + * @insn_start: address of the first instruction + * @insn_end: address of the last instruction + * @cie: the CIE for this function + * @fde: the FDE for this function + * @frame: the instructions calculate the CFA for this frame + * @pc: the program counter of the address we're interested in + * + * Execute the Call Frame instruction sequence starting at + * @insn_start and ending at @insn_end. The instructions describe + * how to calculate the Canonical Frame Address of a stackframe. + * Store the results in @frame. + */ +static int dwarf_cfa_execute_insns(unsigned char *insn_start, + unsigned char *insn_end, + struct dwarf_cie *cie, + struct dwarf_fde *fde, + struct dwarf_frame *frame, + unsigned long pc) +{ + unsigned char insn; + unsigned char *current_insn; + unsigned int count, delta, reg, expr_len, offset; + struct dwarf_reg *regp; + + current_insn = insn_start; + + while (current_insn < insn_end && frame->pc <= pc) { + insn = __raw_readb(current_insn++); + + /* + * Firstly, handle the opcodes that embed their operands + * in the instructions. 
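+ *
+ * For example (purely illustrative, following the DWARF encoding):
+ * DW_CFA_advance_loc keeps its delta in the low six bits of the
+ * opcode byte, so a byte of 0x45 means "advance the location by
+ * 5 * code_alignment_factor".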
+ */
+ switch (DW_CFA_opcode(insn)) {
+ case DW_CFA_advance_loc:
+ delta = DW_CFA_operand(insn);
+ delta *= cie->code_alignment_factor;
+ frame->pc += delta;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_offset:
+ reg = DW_CFA_operand(insn);
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->addr = offset;
+ regp->flags |= DWARF_REG_OFFSET;
+ continue;
+ /* NOTREACHED */
+ case DW_CFA_restore:
+ reg = DW_CFA_operand(insn);
+ continue;
+ /* NOTREACHED */
+ }
+
+ /*
+ * Secondly, handle the opcodes that don't embed their
+ * operands in the instruction.
+ */
+ switch (insn) {
+ case DW_CFA_nop:
+ continue;
+ case DW_CFA_advance_loc1:
+ delta = *current_insn++;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc2:
+ delta = get_unaligned((u16 *)current_insn);
+ current_insn += 2;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_advance_loc4:
+ delta = get_unaligned((u32 *)current_insn);
+ current_insn += 4;
+ frame->pc += delta * cie->code_alignment_factor;
+ break;
+ case DW_CFA_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ break;
+ case DW_CFA_restore_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ break;
+ case DW_CFA_undefined:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_UNDEFINED;
+ break;
+ case DW_CFA_def_cfa:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_offset);
+ current_insn += count;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_register:
+ count = dwarf_read_uleb128(current_insn,
+ &frame->cfa_register);
+ current_insn += count;
+ frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
+ break;
+ case DW_CFA_def_cfa_offset:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ frame->cfa_offset = offset;
+ break;
+ case DW_CFA_def_cfa_expression:
+ count = dwarf_read_uleb128(current_insn, &expr_len);
+ current_insn += count;
+
+ frame->cfa_expr = current_insn;
+ frame->cfa_expr_len = expr_len;
+ current_insn += expr_len;
+
+ frame->flags |= DWARF_FRAME_CFA_REG_EXP;
+ break;
+ case DW_CFA_offset_extended_sf:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ current_insn += count;
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_val_offset:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_leb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+ regp = dwarf_frame_alloc_reg(frame, reg);
+ regp->flags |= DWARF_VAL_OFFSET;
+ regp->addr = offset;
+ break;
+ case DW_CFA_GNU_args_size:
+ count = dwarf_read_uleb128(current_insn, &offset);
+ current_insn += count;
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ count = dwarf_read_uleb128(current_insn, &reg);
+ current_insn += count;
+ count = dwarf_read_uleb128(current_insn, &offset);
+ offset *= cie->data_alignment_factor;
+
+ regp = dwarf_frame_alloc_reg(frame, reg);
+
regp->flags |= DWARF_REG_OFFSET;
+ regp->addr = -offset;
+ break;
+ default:
+ pr_debug("unhandled DWARF instruction 0x%x\n", insn);
+ UNWINDER_BUG();
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * dwarf_free_frame - free the memory allocated for @frame
+ * @frame: the frame to free
+ */
+void dwarf_free_frame(struct dwarf_frame *frame)
+{
+ dwarf_frame_free_regs(frame);
+ mempool_free(frame, dwarf_frame_pool);
+}
+
+extern void ret_from_irq(void);
+
+/**
+ * dwarf_unwind_stack - unwind the stack
+ *
+ * @pc: address of the function to unwind
+ * @prev: struct dwarf_frame of the previous stackframe on the callstack
+ *
+ * Return a struct dwarf_frame representing the most recent frame
+ * on the callstack. Each of the lower (older) stack frames are
+ * linked via the "prev" member.
+ */
+struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
+ struct dwarf_frame *prev)
+{
+ struct dwarf_frame *frame;
+ struct dwarf_cie *cie;
+ struct dwarf_fde *fde;
+ struct dwarf_reg *reg;
+ unsigned long addr;
+
+ /*
+ * If we've been called before initialization has
+ * completed, bail out immediately.
+ */
+ if (!dwarf_unwinder_ready)
+ return NULL;
+
+ /*
+ * If we're starting at the top of the stack we need to get the
+ * contents of a physical register to get the CFA in order to
+ * begin the virtual unwinding of the stack.
+ *
+ * NOTE: the return address is guaranteed to be setup by the
+ * time this function makes its first function call.
+ */
+ if (!pc || !prev)
+ pc = _THIS_IP_;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /*
+ * If our stack has been patched by the function graph tracer
+ * then we might see the address of return_to_handler() where we
+ * expected to find the real return address.
+ */
+ if (pc == (unsigned long)&return_to_handler) {
+ struct ftrace_ret_stack *ret_stack;
+
+ ret_stack = ftrace_graph_get_ret_stack(current, 0);
+ if (ret_stack)
+ pc = ret_stack->ret;
+ /*
+ * We currently have no way of tracking how many
+ * return_to_handler()'s we've seen. If there is more
+ * than one patched return address on our stack,
+ * complain loudly.
+ */
+ WARN_ON(ftrace_graph_get_ret_stack(current, 1));
+ }
+#endif
+
+ frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
+ if (!frame) {
+ printk(KERN_ERR "Unable to allocate a dwarf frame\n");
+ UNWINDER_BUG();
+ }
+
+ INIT_LIST_HEAD(&frame->reg_list);
+ frame->flags = 0;
+ frame->prev = prev;
+ frame->return_addr = 0;
+
+ fde = dwarf_lookup_fde(pc);
+ if (!fde) {
+ /*
+ * This is our normal exit path. There are two reasons
+ * why we might exit here,
+ *
+ * a) pc has no associated DWARF frame info and so
+ * we don't know how to unwind this frame. This is
+ * usually the case when we're trying to unwind a
+ * frame that was called from some assembly code
+ * that has no DWARF info, e.g. syscalls.
+ *
+ * b) the DEBUG info for pc is bogus. There's
+ * really no way to distinguish this case from the
+ * case above, which sucks because we could print a
+ * warning here.
+ */ + goto bail; + } + + cie = dwarf_lookup_cie(fde->cie_pointer); + + frame->pc = fde->initial_location; + + /* CIE initial instructions */ + dwarf_cfa_execute_insns(cie->initial_instructions, + cie->instructions_end, cie, fde, + frame, pc); + + /* FDE instructions */ + dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, + fde, frame, pc); + + /* Calculate the CFA */ + switch (frame->flags) { + case DWARF_FRAME_CFA_REG_OFFSET: + if (prev) { + reg = dwarf_frame_reg(prev, frame->cfa_register); + UNWINDER_BUG_ON(!reg); + UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET); + + addr = prev->cfa + reg->addr; + frame->cfa = __raw_readl(addr); + + } else { + /* + * Again, we're starting from the top of the + * stack. We need to physically read + * the contents of a register in order to get + * the Canonical Frame Address for this + * function. + */ + frame->cfa = dwarf_read_arch_reg(frame->cfa_register); + } + + frame->cfa += frame->cfa_offset; + break; + default: + UNWINDER_BUG(); + } + + reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG); + + /* + * If we haven't seen the return address register or the return + * address column is undefined then we must assume that this is + * the end of the callstack. + */ + if (!reg || reg->flags == DWARF_UNDEFINED) + goto bail; + + UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET); + + addr = frame->cfa + reg->addr; + frame->return_addr = __raw_readl(addr); + + /* + * Ah, the joys of unwinding through interrupts. + * + * Interrupts are tricky - the DWARF info needs to be _really_ + * accurate and unfortunately I'm seeing a lot of bogus DWARF + * info. For example, I've seen interrupts occur in epilogues + * just after the frame pointer (r14) had been restored. The + * problem was that the DWARF info claimed that the CFA could be + * reached by using the value of the frame pointer before it was + * restored. + * + * So until the compiler can be trusted to produce reliable + * DWARF info when it really matters, let's stop unwinding once + * we've calculated the function that was interrupted. + */ + if (prev && prev->pc == (unsigned long)ret_from_irq) + frame->return_addr = 0; + + return frame; + +bail: + dwarf_free_frame(frame); + return NULL; +} + +static int dwarf_parse_cie(void *entry, void *p, unsigned long len, + unsigned char *end, struct module *mod) +{ + struct rb_node **rb_node = &cie_root.rb_node; + struct rb_node *parent = *rb_node; + struct dwarf_cie *cie; + unsigned long flags; + int count; + + cie = kzalloc(sizeof(*cie), GFP_KERNEL); + if (!cie) + return -ENOMEM; + + cie->length = len; + + /* + * Record the offset into the .eh_frame section + * for this CIE. It allows this CIE to be + * quickly and easily looked up from the + * corresponding FDE. + */ + cie->cie_pointer = (unsigned long)entry; + + cie->version = *(char *)p++; + UNWINDER_BUG_ON(cie->version != 1); + + cie->augmentation = p; + p += strlen(cie->augmentation) + 1; + + count = dwarf_read_uleb128(p, &cie->code_alignment_factor); + p += count; + + count = dwarf_read_leb128(p, &cie->data_alignment_factor); + p += count; + + /* + * Which column in the rule table contains the + * return address? 
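+ * (This is the column that dwarf_unwind_stack() later looks up via
+ * DWARF_ARCH_RA_REG when it recovers the return address.)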
+ */ + if (cie->version == 1) { + cie->return_address_reg = __raw_readb(p); + p++; + } else { + count = dwarf_read_uleb128(p, &cie->return_address_reg); + p += count; + } + + if (cie->augmentation[0] == 'z') { + unsigned int length, count; + cie->flags |= DWARF_CIE_Z_AUGMENTATION; + + count = dwarf_read_uleb128(p, &length); + p += count; + + UNWINDER_BUG_ON((unsigned char *)p > end); + + cie->initial_instructions = p + length; + cie->augmentation++; + } + + while (*cie->augmentation) { + /* + * "L" indicates a byte showing how the + * LSDA pointer is encoded. Skip it. + */ + if (*cie->augmentation == 'L') { + p++; + cie->augmentation++; + } else if (*cie->augmentation == 'R') { + /* + * "R" indicates a byte showing + * how FDE addresses are + * encoded. + */ + cie->encoding = *(char *)p++; + cie->augmentation++; + } else if (*cie->augmentation == 'P') { + /* + * "R" indicates a personality + * routine in the CIE + * augmentation. + */ + UNWINDER_BUG(); + } else if (*cie->augmentation == 'S') { + UNWINDER_BUG(); + } else { + /* + * Unknown augmentation. Assume + * 'z' augmentation. + */ + p = cie->initial_instructions; + UNWINDER_BUG_ON(!p); + break; + } + } + + cie->initial_instructions = p; + cie->instructions_end = end; + + /* Add to list */ + spin_lock_irqsave(&dwarf_cie_lock, flags); + + while (*rb_node) { + struct dwarf_cie *cie_tmp; + + cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node); + + parent = *rb_node; + + if (cie->cie_pointer < cie_tmp->cie_pointer) + rb_node = &parent->rb_left; + else if (cie->cie_pointer >= cie_tmp->cie_pointer) + rb_node = &parent->rb_right; + else + WARN_ON(1); + } + + rb_link_node(&cie->node, parent, rb_node); + rb_insert_color(&cie->node, &cie_root); + +#ifdef CONFIG_MODULES + if (mod != NULL) + list_add_tail(&cie->link, &mod->arch.cie_list); +#endif + + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + return 0; +} + +static int dwarf_parse_fde(void *entry, u32 entry_type, + void *start, unsigned long len, + unsigned char *end, struct module *mod) +{ + struct rb_node **rb_node = &fde_root.rb_node; + struct rb_node *parent = *rb_node; + struct dwarf_fde *fde; + struct dwarf_cie *cie; + unsigned long flags; + int count; + void *p = start; + + fde = kzalloc(sizeof(*fde), GFP_KERNEL); + if (!fde) + return -ENOMEM; + + fde->length = len; + + /* + * In a .eh_frame section the CIE pointer is the + * delta between the address within the FDE + */ + fde->cie_pointer = (unsigned long)(p - entry_type - 4); + + cie = dwarf_lookup_cie(fde->cie_pointer); + fde->cie = cie; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->initial_location, + cie->encoding); + else + count = dwarf_read_addr(p, &fde->initial_location); + + p += count; + + if (cie->encoding) + count = dwarf_read_encoded_value(p, &fde->address_range, + cie->encoding & 0x0f); + else + count = dwarf_read_addr(p, &fde->address_range); + + p += count; + + if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) { + unsigned int length; + count = dwarf_read_uleb128(p, &length); + p += count + length; + } + + /* Call frame instructions. */ + fde->instructions = p; + fde->end = end; + + /* Add to list. 
*/ + spin_lock_irqsave(&dwarf_fde_lock, flags); + + while (*rb_node) { + struct dwarf_fde *fde_tmp; + unsigned long tmp_start, tmp_end; + unsigned long start, end; + + fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node); + + start = fde->initial_location; + end = fde->initial_location + fde->address_range; + + tmp_start = fde_tmp->initial_location; + tmp_end = fde_tmp->initial_location + fde_tmp->address_range; + + parent = *rb_node; + + if (start < tmp_start) + rb_node = &parent->rb_left; + else if (start >= tmp_end) + rb_node = &parent->rb_right; + else + WARN_ON(1); + } + + rb_link_node(&fde->node, parent, rb_node); + rb_insert_color(&fde->node, &fde_root); + +#ifdef CONFIG_MODULES + if (mod != NULL) + list_add_tail(&fde->link, &mod->arch.fde_list); +#endif + + spin_unlock_irqrestore(&dwarf_fde_lock, flags); + + return 0; +} + +static void dwarf_unwinder_dump(struct task_struct *task, + struct pt_regs *regs, + unsigned long *sp, + const struct stacktrace_ops *ops, + void *data) +{ + struct dwarf_frame *frame, *_frame; + unsigned long return_addr; + + _frame = NULL; + return_addr = 0; + + while (1) { + frame = dwarf_unwind_stack(return_addr, _frame); + + if (_frame) + dwarf_free_frame(_frame); + + _frame = frame; + + if (!frame || !frame->return_addr) + break; + + return_addr = frame->return_addr; + ops->address(data, return_addr, 1); + } + + if (frame) + dwarf_free_frame(frame); +} + +static struct unwinder dwarf_unwinder = { + .name = "dwarf-unwinder", + .dump = dwarf_unwinder_dump, + .rating = 150, +}; + +static void __init dwarf_unwinder_cleanup(void) +{ + struct dwarf_fde *fde, *next_fde; + struct dwarf_cie *cie, *next_cie; + + /* + * Deallocate all the memory allocated for the DWARF unwinder. + * Traverse all the FDE/CIE lists and remove and free all the + * memory associated with those data structures. + */ + rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node) + kfree(fde); + + rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node) + kfree(cie); + + mempool_destroy(dwarf_reg_pool); + mempool_destroy(dwarf_frame_pool); + kmem_cache_destroy(dwarf_reg_cachep); + kmem_cache_destroy(dwarf_frame_cachep); +} + +/** + * dwarf_parse_section - parse DWARF section + * @eh_frame_start: start address of the .eh_frame section + * @eh_frame_end: end address of the .eh_frame section + * @mod: the kernel module containing the .eh_frame section + * + * Parse the information in a .eh_frame section. + */ +static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end, + struct module *mod) +{ + u32 entry_type; + void *p, *entry; + int count, err = 0; + unsigned long len = 0; + unsigned int c_entries, f_entries; + unsigned char *end; + + c_entries = 0; + f_entries = 0; + entry = eh_frame_start; + + while ((char *)entry < eh_frame_end) { + p = entry; + + count = dwarf_entry_len(p, &len); + if (count == 0) { + /* + * We read a bogus length field value. There is + * nothing we can do here apart from disabling + * the DWARF unwinder. We can't even skip this + * entry and move to the next one because 'len' + * tells us where our next entry is. 
+ */ + err = -EINVAL; + goto out; + } else + p += count; + + /* initial length does not include itself */ + end = p + len; + + entry_type = get_unaligned((u32 *)p); + p += 4; + + if (entry_type == DW_EH_FRAME_CIE) { + err = dwarf_parse_cie(entry, p, len, end, mod); + if (err < 0) + goto out; + else + c_entries++; + } else { + err = dwarf_parse_fde(entry, entry_type, p, len, + end, mod); + if (err < 0) + goto out; + else + f_entries++; + } + + entry = (char *)entry + len + 4; + } + + printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n", + c_entries, f_entries); + + return 0; + +out: + return err; +} + +#ifdef CONFIG_MODULES +int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, + struct module *me) +{ + unsigned int i, err; + unsigned long start, end; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + start = end = 0; + + for (i = 1; i < hdr->e_shnum; i++) { + /* Alloc bit cleared means "ignore it." */ + if ((sechdrs[i].sh_flags & SHF_ALLOC) + && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) { + start = sechdrs[i].sh_addr; + end = start + sechdrs[i].sh_size; + break; + } + } + + /* Did we find the .eh_frame section? */ + if (i != hdr->e_shnum) { + INIT_LIST_HEAD(&me->arch.cie_list); + INIT_LIST_HEAD(&me->arch.fde_list); + err = dwarf_parse_section((char *)start, (char *)end, me); + if (err) { + printk(KERN_WARNING "%s: failed to parse DWARF info\n", + me->name); + return err; + } + } + + return 0; +} + +/** + * module_dwarf_cleanup - remove FDE/CIEs associated with @mod + * @mod: the module that is being unloaded + * + * Remove any FDEs and CIEs from the global lists that came from + * @mod's .eh_frame section because @mod is being unloaded. + */ +void module_dwarf_cleanup(struct module *mod) +{ + struct dwarf_fde *fde, *ftmp; + struct dwarf_cie *cie, *ctmp; + unsigned long flags; + + spin_lock_irqsave(&dwarf_cie_lock, flags); + + list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) { + list_del(&cie->link); + rb_erase(&cie->node, &cie_root); + kfree(cie); + } + + spin_unlock_irqrestore(&dwarf_cie_lock, flags); + + spin_lock_irqsave(&dwarf_fde_lock, flags); + + list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) { + list_del(&fde->link); + rb_erase(&fde->node, &fde_root); + kfree(fde); + } + + spin_unlock_irqrestore(&dwarf_fde_lock, flags); +} +#endif /* CONFIG_MODULES */ + +/** + * dwarf_unwinder_init - initialise the dwarf unwinder + * + * Build the data structures describing the .dwarf_frame section to + * make it easier to lookup CIE and FDE entries. Because the + * .eh_frame section is packed as tightly as possible it is not + * easy to lookup the FDE for a given PC, so we build a list of FDE + * and CIE entries that make it easier. 
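+ *
+ * (The "list" built here is in practice a pair of red-black trees,
+ * cie_root and fde_root, populated by dwarf_parse_section(); noted
+ * for clarity.)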
+ */ +static int __init dwarf_unwinder_init(void) +{ + int err = -ENOMEM; + + dwarf_frame_cachep = kmem_cache_create("dwarf_frames", + sizeof(struct dwarf_frame), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); + + dwarf_reg_cachep = kmem_cache_create("dwarf_regs", + sizeof(struct dwarf_reg), 0, + SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL); + + dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ, + dwarf_frame_cachep); + if (!dwarf_frame_pool) + goto out; + + dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ, + dwarf_reg_cachep); + if (!dwarf_reg_pool) + goto out; + + err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL); + if (err) + goto out; + + err = unwinder_register(&dwarf_unwinder); + if (err) + goto out; + + dwarf_unwinder_ready = 1; + + return 0; + +out: + printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err); + dwarf_unwinder_cleanup(); + return err; +} +early_initcall(dwarf_unwinder_init); diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S new file mode 100644 index 000000000..91ab2607a --- /dev/null +++ b/arch/sh/kernel/entry-common.S @@ -0,0 +1,400 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2003 - 2008 Paul Mundt + */ + +! NOTE: +! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address +! to be jumped is too far, but it causes illegal slot exception. + +/* + * entry.S contains the system-call and fault low-level handling routines. + * This also contains the timer-interrupt handler, as well as all interrupts + * and faults that can result in a task-switch. + * + * NOTE: This code handles signal-recognition, which happens every time + * after a timer-interrupt and after each system call. + * + * NOTE: This code uses a convention that instructions in the delay slot + * of a transfer-control instruction are indented by an extra space, thus: + * + * jmp @k0 ! control-transfer instruction + * ldc k1, ssr ! delay slot + * + * Stack layout in 'ret_from_syscall': + * ptrace needs to have all regs on the stack. + * if the order here is changed, it needs to be + * updated in ptrace.c and ptrace.h + * + * r0 + * ... + * r15 = stack pointer + * spc + * pr + * ssr + * gbr + * mach + * macl + * syscall # + * + */ +#include <asm/dwarf.h> + +#if defined(CONFIG_PREEMPTION) +# define preempt_stop() cli ; TRACE_IRQS_OFF +#else +# define preempt_stop() +# define resume_kernel __restore_all +#endif + + + .align 2 +ENTRY(exception_error) + ! + TRACE_IRQS_ON + sti + mov.l 1f, r0 + jmp @r0 + nop + + .align 2 +1: .long do_exception_error + + .align 2 +ret_from_exception: + CFI_STARTPROC simple + CFI_DEF_CFA r14, 0 + CFI_REL_OFFSET 17, 64 + CFI_REL_OFFSET 15, 60 + CFI_REL_OFFSET 14, 56 + CFI_REL_OFFSET 13, 52 + CFI_REL_OFFSET 12, 48 + CFI_REL_OFFSET 11, 44 + CFI_REL_OFFSET 10, 40 + CFI_REL_OFFSET 9, 36 + CFI_REL_OFFSET 8, 32 + preempt_stop() +ENTRY(ret_from_irq) + ! + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + shll r0 + shll r0 ! kernel space? + get_current_thread_info r8, r0 + bt resume_kernel ! Yes, it's from kernel, go back soon + +#ifdef CONFIG_PREEMPTION + bra resume_userspace + nop +ENTRY(resume_kernel) + cli + TRACE_IRQS_OFF + mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count + tst r0, r0 + bf noresched +need_resched: + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #_TIF_NEED_RESCHED, r0 ! need_resched set? + bt noresched + + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! 
get status register + shlr r0 + and #(0xf0>>1), r0 ! interrupts off (exception path)? + cmp/eq #(0xf0>>1), r0 + bt noresched + mov.l 1f, r0 + jsr @r0 ! call preempt_schedule_irq + nop + bra need_resched + nop + +noresched: + bra __restore_all + nop + + .align 2 +1: .long preempt_schedule_irq +#endif + +ENTRY(resume_userspace) + ! r8: current_thread_info + cli + TRACE_IRQS_OFF + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #(_TIF_WORK_MASK & 0xff), r0 + bt/s __restore_all + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +work_pending: + ! r0: current_thread_info->flags + ! r8: current_thread_info + ! t: result of "tst #_TIF_NEED_RESCHED, r0" + bf/s work_resched + tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0 +work_notifysig: + bt/s __restore_all + mov r15, r4 + mov r12, r5 ! set arg1(save_r0) + mov r0, r6 + sti + mov.l 2f, r1 + mov.l 3f, r0 + jmp @r1 + lds r0, pr +work_resched: + mov.l 1f, r1 + jsr @r1 ! schedule + nop + cli + TRACE_IRQS_OFF + ! + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #(_TIF_WORK_MASK & 0xff), r0 + bt __restore_all + bra work_pending + tst #_TIF_NEED_RESCHED, r0 + + .align 2 +1: .long schedule +2: .long do_notify_resume +3: .long resume_userspace + + .align 2 +syscall_exit_work: + ! r0: current_thread_info->flags + ! r8: current_thread_info + tst #(_TIF_WORK_SYSCALL_MASK & 0xff), r0 + bt/s work_pending + tst #_TIF_NEED_RESCHED, r0 + TRACE_IRQS_ON + sti + mov r15, r4 + mov.l 8f, r0 ! do_syscall_trace_leave + jsr @r0 + nop + bra resume_userspace + nop + +__restore_all: + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 1f + TRACE_IRQS_ON + bra 2f + nop +1: + TRACE_IRQS_OFF +2: + mov.l 3f, r0 + jmp @r0 + nop + + .align 2 +3: .long restore_all + + .align 2 +syscall_badsys: ! Bad syscall number + get_current_thread_info r8, r0 + mov #-ENOSYS, r0 + bra resume_userspace + mov.l r0, @(OFF_R0,r15) ! Return value + +/* + * The main debug trap handler. + * + * r8=TRA (not the trap number!) + * + * Note: This assumes that the trapa value is left in its original + * form (without the shlr2 shift) so the calculation for the jump + * call table offset remains a simple in place mask. + */ +debug_trap: + mov r8, r0 + and #(0xf << 2), r0 + mov.l 1f, r8 + add r0, r8 + mov.l @r8, r8 + jsr @r8 + nop + bra ret_from_exception + nop + CFI_ENDPROC + + .align 2 +1: .long debug_trap_table + +/* + * Syscall interface: + * + * Syscall #: R3 + * Arguments #0 to #3: R4--R7 + * Arguments #4 to #6: R0, R1, R2 + * TRA: See following table. + * + * (TRA>>2) Purpose + * -------- ------- + * 0x00-0x0f original SH-3/4 syscall ABI (not in general use). + * 0x10-0x1f general SH-3/4 syscall ABI. + * 0x1f unified SH-2/3/4 syscall ABI (preferred). + * 0x20-0x2f original SH-2 syscall ABI. + * 0x30-0x3f debug traps used by the kernel. + * 0x40-0xff Not supported by all parts, so left unhandled. + * + * For making system calls, any trap number in the range for the + * given cpu model may be used, but the unified trap number 0x1f is + * preferred for compatibility with all models. + * + * The low bits of the trap number were once documented as matching + * the number of arguments, but they were never actually used as such + * by the kernel. SH-2 originally used its own separate trap range + * because several hardware exceptions fell in the range used for the + * SH-3/4 syscall ABI. + * + * This code also handles delegating other traps to the BIOS/gdb stub. 
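+ *
+ * As an illustration only (the exact instruction sequence is up to
+ * libc), a call such as write(fd, buf, count) made through the
+ * preferred unified ABI would load the syscall number into r3 and
+ * fd/buf/count into r4/r5/r6, then execute "trapa #0x1f".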
+ * + * Note: When we're first called, the TRA value must be shifted + * right 2 bits in order to get the value that was used as the "trapa" + * argument. + */ + + .align 2 + .globl ret_from_fork +ret_from_fork: + mov.l 1f, r8 + jsr @r8 + mov r0, r4 + bra syscall_exit + nop + + .align 2 + .globl ret_from_kernel_thread +ret_from_kernel_thread: + mov.l 1f, r8 + jsr @r8 + mov r0, r4 + mov.l @(OFF_R5,r15), r5 ! fn + jsr @r5 + mov.l @(OFF_R4,r15), r4 ! arg + bra syscall_exit + nop + + .align 2 +1: .long schedule_tail + +/* + * The poorly named main trapa decode and dispatch routine, for + * system calls and debug traps through their respective jump tables. + */ +ENTRY(system_call) + setup_frame_reg +#if !defined(CONFIG_CPU_SH2) + mov.l 1f, r9 + mov.l @r9, r8 ! Read from TRA (Trap Address) Register +#endif + + mov #OFF_TRA, r10 + add r15, r10 + mov.l r8, @r10 ! set TRA value to tra + + /* + * Check the trap type + */ + mov #((0x20 << 2) - 1), r9 + cmp/hi r9, r8 + bt/s debug_trap ! it's a debug trap.. + nop + + TRACE_IRQS_ON + sti + + ! + get_current_thread_info r8, r10 + mov.l @(TI_FLAGS,r8), r8 + mov #(_TIF_WORK_SYSCALL_MASK & 0xff), r10 + mov #(_TIF_WORK_SYSCALL_MASK >> 8), r9 + tst r10, r8 + shll8 r9 + bf syscall_trace_entry + tst r9, r8 + bf syscall_trace_entry + ! + mov.l 6f, r8 ! Number of syscalls + cmp/hs r8, r3 + bt syscall_badsys + ! +syscall_call: + shll2 r3 ! x4 + mov.l 3f, r8 ! Load the address of sys_call_table + add r8, r3 + mov.l @r3, r8 + mov.l @(OFF_R2,r15), r2 + mov.l @(OFF_R1,r15), r1 + mov.l @(OFF_R0,r15), r0 + mov.l r2, @-r15 + mov.l r1, @-r15 + mov.l r0, @-r15 + jsr @r8 ! jump to specific syscall handler + nop + add #12, r15 + mov.l @(OFF_R0,r15), r12 ! save r0 + mov.l r0, @(OFF_R0,r15) ! save the return value + ! +syscall_exit: + cli + TRACE_IRQS_OFF + ! + get_current_thread_info r8, r0 + mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags + tst #(_TIF_ALLWORK_MASK & 0xff), r0 + mov #(_TIF_ALLWORK_MASK >> 8), r1 + bf syscall_exit_work + shlr8 r0 + tst r0, r1 + bf syscall_exit_work + bra __restore_all + nop + + .align 2 +syscall_trace_entry: + ! Yes it is traced. + mov r15, r4 + mov.l 7f, r11 ! Call do_syscall_trace_enter which notifies + jsr @r11 ! superior (will chomp R[0-7]) + nop + cmp/eq #-1, r0 + bt syscall_exit + ! Reload R0-R4 from kernel stack, where the + ! parent may have modified them using + ! ptrace(POKEUSR). (Note that R0-R2 are + ! reloaded from the kernel stack by syscall_call + ! below, so don't need to be reloaded here.) + ! This allows the parent to rewrite system calls + ! and args on the fly. + mov.l @(OFF_R4,r15), r4 ! arg0 + mov.l @(OFF_R5,r15), r5 + mov.l @(OFF_R6,r15), r6 + mov.l @(OFF_R7,r15), r7 ! arg3 + mov.l @(OFF_R3,r15), r3 ! syscall_nr + ! + mov.l 6f, r10 ! Number of syscalls + cmp/hs r10, r3 + bf syscall_call + mov #-ENOSYS, r0 + bra syscall_exit + mov.l r0, @(OFF_R0,r15) ! Return value + + .align 2 +#if !defined(CONFIG_CPU_SH2) +1: .long TRA +#endif +6: .long NR_syscalls +3: .long sys_call_table +7: .long do_syscall_trace_enter +8: .long do_syscall_trace_leave diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c new file mode 100644 index 000000000..930001bb8 --- /dev/null +++ b/arch/sh/kernel/ftrace.c @@ -0,0 +1,365 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org> + * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org> + * + * Code for replacing ftrace calls with jumps. 
+ * + * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> + * + * Thanks goes to Ingo Molnar, for suggesting the idea. + * Mathieu Desnoyers, for suggesting postponing the modifications. + * Arjan van de Ven, for keeping me straight, and explaining to me + * the dangers of modifying code on the run. + */ +#include <linux/uaccess.h> +#include <linux/ftrace.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <asm/ftrace.h> +#include <asm/cacheflush.h> +#include <asm/unistd.h> +#include <trace/syscall.h> + +#ifdef CONFIG_DYNAMIC_FTRACE +static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE]; + +static unsigned char ftrace_nop[4]; +/* + * If we're trying to nop out a call to a function, we instead + * place a call to the address after the memory table. + * + * 8c011060 <a>: + * 8c011060: 02 d1 mov.l 8c01106c <a+0xc>,r1 + * 8c011062: 22 4f sts.l pr,@-r15 + * 8c011064: 02 c7 mova 8c011070 <a+0x10>,r0 + * 8c011066: 2b 41 jmp @r1 + * 8c011068: 2a 40 lds r0,pr + * 8c01106a: 09 00 nop + * 8c01106c: 68 24 .word 0x2468 <--- ip + * 8c01106e: 1d 8c .word 0x8c1d + * 8c011070: 26 4f lds.l @r15+,pr <--- ip + MCOUNT_INSN_SIZE + * + * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch + * past the _mcount call and continue executing code like normal. + */ +static unsigned char *ftrace_nop_replace(unsigned long ip) +{ + __raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop); + return ftrace_nop; +} + +static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) +{ + /* Place the address in the memory table. */ + __raw_writel(addr, ftrace_replaced_code); + + /* + * No locking needed, this must be called via kstop_machine + * which in essence is like running on a uniprocessor machine. + */ + return ftrace_replaced_code; +} + +/* + * Modifying code must take extra care. On an SMP machine, if + * the code being modified is also being executed on another CPU + * that CPU will have undefined results and possibly take a GPF. + * We use kstop_machine to stop other CPUS from executing code. + * But this does not stop NMIs from happening. We still need + * to protect against that. We separate out the modification of + * the code to take care of this. + * + * Two buffers are added: An IP buffer and a "code" buffer. + * + * 1) Put the instruction pointer into the IP buffer + * and the new code into the "code" buffer. + * 2) Wait for any running NMIs to finish and set a flag that says + * we are modifying code, it is done in an atomic operation. + * 3) Write the code + * 4) clear the flag. + * 5) Wait for any running NMIs to finish. + * + * If an NMI is executed, the first thing it does is to call + * "ftrace_nmi_enter". This will check if the flag is set to write + * and if it is, it will write what is in the IP and "code" buffers. + * + * The trick is, it does not matter if everyone is writing the same + * content to the code location. Also, if a CPU is executing code + * it is OK to write to that code location if the contents being written + * are the same as what exists. 
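+ *
+ * A rough, purely illustrative interleaving of the steps above:
+ *
+ *   modifying CPU                     CPU taking an NMI
+ *   fill the IP and "code" buffers
+ *   set MOD_CODE_WRITE_FLAG           ftrace_nmi_enter() sees the flag
+ *   write the code                    and writes the same bytes itself
+ *   clear the flag, wait for NMIs     ftrace_nmi_exit()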
+ */ +#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */ +static atomic_t nmi_running = ATOMIC_INIT(0); +static int mod_code_status; /* holds return value of text write */ +static void *mod_code_ip; /* holds the IP to write to */ +static void *mod_code_newcode; /* holds the text to write to the IP */ + +static void clear_mod_flag(void) +{ + int old = atomic_read(&nmi_running); + + for (;;) { + int new = old & ~MOD_CODE_WRITE_FLAG; + + if (old == new) + break; + + old = atomic_cmpxchg(&nmi_running, old, new); + } +} + +static void ftrace_mod_code(void) +{ + /* + * Yes, more than one CPU process can be writing to mod_code_status. + * (and the code itself) + * But if one were to fail, then they all should, and if one were + * to succeed, then they all should. + */ + mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode, + MCOUNT_INSN_SIZE); + + /* if we fail, then kill any new writers */ + if (mod_code_status) + clear_mod_flag(); +} + +void arch_ftrace_nmi_enter(void) +{ + if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { + smp_rmb(); + ftrace_mod_code(); + } + /* Must have previous changes seen before executions */ + smp_mb(); +} + +void arch_ftrace_nmi_exit(void) +{ + /* Finish all executions before clearing nmi_running */ + smp_mb(); + atomic_dec(&nmi_running); +} + +static void wait_for_nmi_and_set_mod_flag(void) +{ + if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)) + return; + + do { + cpu_relax(); + } while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG)); +} + +static void wait_for_nmi(void) +{ + if (!atomic_read(&nmi_running)) + return; + + do { + cpu_relax(); + } while (atomic_read(&nmi_running)); +} + +static int +do_ftrace_mod_code(unsigned long ip, void *new_code) +{ + mod_code_ip = (void *)ip; + mod_code_newcode = new_code; + + /* The buffers need to be visible before we let NMIs write them */ + smp_mb(); + + wait_for_nmi_and_set_mod_flag(); + + /* Make sure all running NMIs have finished before we write the code */ + smp_mb(); + + ftrace_mod_code(); + + /* Make sure the write happens before clearing the bit */ + smp_mb(); + + clear_mod_flag(); + wait_for_nmi(); + + return mod_code_status; +} + +static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, + unsigned char *new_code) +{ + unsigned char replaced[MCOUNT_INSN_SIZE]; + + /* + * Note: + * We are paranoid about modifying text, as if a bug was to happen, it + * could cause us to read or write to someplace that could cause harm. + * Carefully read and modify the code with probe_kernel_*(), and make + * sure what we read is what we expected it to be before modifying it. 
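+ *
+ * In other words (summarising the return values used below): -EFAULT
+ * if the call site cannot be read, -EINVAL if it does not contain the
+ * code we expected, and -EPERM if writing the new code fails.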
+ */
+
+ /* read the text we want to modify */
+ if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /* Make sure it is what we expect it to be */
+ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
+ return -EINVAL;
+
+ /* replace the text with the new text */
+ if (do_ftrace_mod_code(ip, new_code))
+ return -EPERM;
+
+ flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
+ unsigned char old[MCOUNT_INSN_SIZE], *new;
+
+ memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
+ new = ftrace_call_replace(ip, (unsigned long)func);
+
+ return ftrace_modify_code(ip, old, new);
+}
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_call_replace(ip, addr);
+ new = ftrace_nop_replace(ip);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned char *new, *old;
+ unsigned long ip = rec->ip;
+
+ old = ftrace_nop_replace(ip);
+ new = ftrace_call_replace(ip, addr);
+
+ return ftrace_modify_code(rec->ip, old, new);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+
+static int ftrace_mod(unsigned long ip, unsigned long old_addr,
+ unsigned long new_addr)
+{
+ unsigned char code[MCOUNT_INSN_SIZE];
+
+ if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (old_addr != __raw_readl((unsigned long *)code))
+ return -EINVAL;
+
+ __raw_writel(new_addr, ip);
+ return 0;
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&skip_trace);
+ new_addr = (unsigned long)(&ftrace_graph_caller);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned long ip, old_addr, new_addr;
+
+ ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
+ old_addr = (unsigned long)(&ftrace_graph_caller);
+ new_addr = (unsigned long)(&skip_trace);
+
+ return ftrace_mod(ip, old_addr, new_addr);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in the current thread info.
+ *
+ * This is the main routine for the function graph tracer. The function
+ * graph tracer essentially works like this:
+ *
+ * parent is the stack address containing self_addr's return address.
+ * We pull the real return address out of parent and store it in
+ * current's ret_stack. Then, we replace the return address on the stack
+ * with the address of return_to_handler. self_addr is the function that
+ * called mcount.
+ *
+ * When self_addr returns, it will jump to return_to_handler which calls
+ * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
+ * return address off of current's ret_stack and jump to it.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen.
This tool is too much intrusive to + * ignore such a protection. + */ + __asm__ __volatile__( + "1: \n\t" + "mov.l @%2, %0 \n\t" + "2: \n\t" + "mov.l %3, @%2 \n\t" + "mov #0, %1 \n\t" + "3: \n\t" + ".section .fixup, \"ax\" \n\t" + "4: \n\t" + "mov.l 5f, %0 \n\t" + "jmp @%0 \n\t" + " mov #1, %1 \n\t" + ".balign 4 \n\t" + "5: .long 3b \n\t" + ".previous \n\t" + ".section __ex_table,\"a\" \n\t" + ".long 1b, 4b \n\t" + ".long 2b, 4b \n\t" + ".previous \n\t" + : "=&r" (old), "=r" (faulted) + : "r" (parent), "r" (return_hooker) + ); + + if (unlikely(faulted)) { + ftrace_graph_stop(); + WARN_ON(1); + return; + } + + if (function_graph_enter(old, self_addr, 0, NULL)) + __raw_writel(old, parent); +} +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sh/kernel/head_32.S b/arch/sh/kernel/head_32.S new file mode 100644 index 000000000..b603b7968 --- /dev/null +++ b/arch/sh/kernel/head_32.S @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 + * $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $ + * + * arch/sh/kernel/head.S + * + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 2010 Matt Fleming + * + * Head.S contains the SH exception handlers and startup code. + */ +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/thread_info.h> +#include <asm/mmu.h> +#include <cpu/mmu_context.h> + +#ifdef CONFIG_CPU_SH4A +#define SYNCO() synco + +#define PREFI(label, reg) \ + mov.l label, reg; \ + prefi @reg +#else +#define SYNCO() +#define PREFI(label, reg) +#endif + + .section .empty_zero_page, "aw" +ENTRY(empty_zero_page) + .long 1 /* MOUNT_ROOT_RDONLY */ + .long 0 /* RAMDISK_FLAGS */ + .long 0x0200 /* ORIG_ROOT_DEV */ + .long 1 /* LOADER_TYPE */ + .long 0x00000000 /* INITRD_START */ + .long 0x00000000 /* INITRD_SIZE */ +#ifdef CONFIG_32BIT + .long 0x53453f00 + 32 /* "SE?" = 32 bit */ +#else + .long 0x53453f00 + 29 /* "SE?" = 29 bit */ +#endif +1: + .skip PAGE_SIZE - empty_zero_page - 1b + + __HEAD + +/* + * Condition at the entry of _stext: + * + * BSC has already been initialized. + * INTC may or may not be initialized. + * VBR may or may not be initialized. + * MMU may or may not be initialized. + * Cache may or may not be initialized. + * Hardware (including on-chip modules) may or may not be initialized. + * + */ +ENTRY(_stext) + ! Initialize Status Register + mov.l 1f, r0 ! MD=1, RB=0, BL=0, IMASK=0xF + ldc r0, sr + ! Initialize global interrupt mask +#ifdef CONFIG_CPU_HAS_SR_RB + mov #0, r0 + ldc r0, r6_bank +#endif + +#ifdef CONFIG_OF_EARLY_FLATTREE + mov r4, r12 ! Store device tree blob pointer in r12 +#endif + + /* + * Prefetch if possible to reduce cache miss penalty. + * + * We do this early on for SH-4A as a micro-optimization, + * as later on we will have speculative execution enabled + * and this will become less of an issue. + */ + PREFI(5f, r0) + PREFI(6f, r0) + + ! + mov.l 2f, r0 + mov r0, r15 ! Set initial r15 (stack pointer) +#ifdef CONFIG_CPU_HAS_SR_RB + mov.l 7f, r0 + ldc r0, r7_bank ! ... and initial thread_info +#endif + +#ifdef CONFIG_PMB +/* + * Reconfigure the initial PMB mappings setup by the hardware. + * + * When we boot in 32-bit MMU mode there are 2 PMB entries already + * setup for us. 
+ * + * Entry VPN PPN V SZ C UB WT + * --------------------------------------------------------------- + * 0 0x80000000 0x00000000 1 512MB 1 0 1 + * 1 0xA0000000 0x00000000 1 512MB 0 0 0 + * + * But we reprogram them here because we want complete control over + * our address space and the initial mappings may not map PAGE_OFFSET + * to __MEMORY_START (or even map all of our RAM). + * + * Once we've setup cached and uncached mappings we clear the rest of the + * PMB entries. This clearing also deals with the fact that PMB entries + * can persist across reboots. The PMB could have been left in any state + * when the reboot occurred, so to be safe we clear all entries and start + * with with a clean slate. + * + * The uncached mapping is constructed using the smallest possible + * mapping with a single unbufferable page. Only the kernel text needs to + * be covered via the uncached mapping so that certain functions can be + * run uncached. + * + * Drivers and the like that have previously abused the 1:1 identity + * mapping are unsupported in 32-bit mode and must specify their caching + * preference when page tables are constructed. + * + * This frees up the P2 space for more nefarious purposes. + * + * Register utilization is as follows: + * + * r0 = PMB_DATA data field + * r1 = PMB_DATA address field + * r2 = PMB_ADDR data field + * r3 = PMB_ADDR address field + * r4 = PMB_E_SHIFT + * r5 = remaining amount of RAM to map + * r6 = PMB mapping size we're trying to use + * r7 = cached_to_uncached + * r8 = scratch register + * r9 = scratch register + * r10 = number of PMB entries we've setup + * r11 = scratch register + */ + + mov.l .LMMUCR, r1 /* Flush the TLB */ + mov.l @r1, r0 + or #MMUCR_TI, r0 + mov.l r0, @r1 + + mov.l .LMEMORY_SIZE, r5 + + mov #PMB_E_SHIFT, r0 + mov #0x1, r4 + shld r0, r4 + + mov.l .LFIRST_DATA_ENTRY, r0 + mov.l .LPMB_DATA, r1 + mov.l .LFIRST_ADDR_ENTRY, r2 + mov.l .LPMB_ADDR, r3 + + /* + * First we need to walk the PMB and figure out if there are any + * existing mappings that match the initial mappings VPN/PPN. + * If these have already been established by the bootloader, we + * don't bother setting up new entries here, and let the late PMB + * initialization take care of things instead. + * + * Note that we may need to coalesce and merge entries in order + * to reclaim more available PMB slots, which is much more than + * we want to do at this early stage. + */ + mov #0, r10 + mov #NR_PMB_ENTRIES, r9 + + mov r1, r7 /* temporary PMB_DATA iter */ + +.Lvalidate_existing_mappings: + + mov.l .LPMB_DATA_MASK, r11 + mov.l @r7, r8 + and r11, r8 + cmp/eq r0, r8 /* Check for valid __MEMORY_START mappings */ + bt .Lpmb_done + + add #1, r10 /* Increment the loop counter */ + cmp/eq r9, r10 + bf/s .Lvalidate_existing_mappings + add r4, r7 /* Increment to the next PMB_DATA entry */ + + /* + * If we've fallen through, continue with setting up the initial + * mappings. + */ + + mov r5, r7 /* cached_to_uncached */ + mov #0, r10 + +#ifdef CONFIG_UNCACHED_MAPPING + /* + * Uncached mapping + */ + mov #(PMB_SZ_16M >> 2), r9 + shll2 r9 + + mov #(PMB_UB >> 8), r8 + shll8 r8 + + or r0, r8 + or r9, r8 + mov.l r8, @r1 + mov r2, r8 + add r7, r8 + mov.l r8, @r3 + + add r4, r1 + add r4, r3 + add #1, r10 +#endif + +/* + * Iterate over all of the available sizes from largest to + * smallest for constructing the cached mapping. 
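+ * Each size is retried until the remaining amount of RAM (r5) drops
+ * below it, at which point we fall through to the next smaller size.
+ * With 64MB of memory, for example, the 512MB and 128MB passes map
+ * nothing and a single 64MB entry covers everything.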
+ */ +#define __PMB_ITER_BY_SIZE(size) \ +.L##size: \ + mov #(size >> 4), r6; \ + shll16 r6; \ + shll8 r6; \ + \ + cmp/hi r5, r6; \ + bt 9999f; \ + \ + mov #(PMB_SZ_##size##M >> 2), r9; \ + shll2 r9; \ + \ + /* \ + * Cached mapping \ + */ \ + mov #PMB_C, r8; \ + or r0, r8; \ + or r9, r8; \ + mov.l r8, @r1; \ + mov.l r2, @r3; \ + \ + /* Increment to the next PMB_DATA entry */ \ + add r4, r1; \ + /* Increment to the next PMB_ADDR entry */ \ + add r4, r3; \ + /* Increment number of PMB entries */ \ + add #1, r10; \ + \ + sub r6, r5; \ + add r6, r0; \ + add r6, r2; \ + \ + bra .L##size; \ +9999: + + __PMB_ITER_BY_SIZE(512) + __PMB_ITER_BY_SIZE(128) + __PMB_ITER_BY_SIZE(64) + __PMB_ITER_BY_SIZE(16) + +#ifdef CONFIG_UNCACHED_MAPPING + /* + * Now that we can access it, update cached_to_uncached and + * uncached_size. + */ + mov.l .Lcached_to_uncached, r0 + mov.l r7, @r0 + + mov.l .Luncached_size, r0 + mov #1, r7 + shll16 r7 + shll8 r7 + mov.l r7, @r0 +#endif + + /* + * Clear the remaining PMB entries. + * + * r3 = entry to begin clearing from + * r10 = number of entries we've setup so far + */ + mov #0, r1 + mov #NR_PMB_ENTRIES, r0 + +.Lagain: + mov.l r1, @r3 /* Clear PMB_ADDR entry */ + add #1, r10 /* Increment the loop counter */ + cmp/eq r0, r10 + bf/s .Lagain + add r4, r3 /* Increment to the next PMB_ADDR entry */ + + mov.l 6f, r0 + icbi @r0 + +.Lpmb_done: +#endif /* CONFIG_PMB */ + +#ifndef CONFIG_SH_NO_BSS_INIT + /* + * Don't clear BSS if running on slow platforms such as an RTL simulation, + * remote memory via SHdebug link, etc. For these the memory can be guaranteed + * to be all zero on boot anyway. + */ + ! Clear BSS area +#ifdef CONFIG_SMP + mov.l 3f, r0 + cmp/eq #0, r0 ! skip clear if set to zero + bt 10f +#endif + + mov.l 3f, r1 + add #4, r1 + mov.l 4f, r2 + mov #0, r0 +9: cmp/hs r2, r1 + bf/s 9b ! while (r1 < r2) + mov.l r0,@-r2 + +10: +#endif + +#ifdef CONFIG_OF_EARLY_FLATTREE + mov.l 8f, r0 ! Make flat device tree available early. + jsr @r0 + mov r12, r4 +#endif + + ! Additional CPU initialization + mov.l 6f, r0 + jsr @r0 + nop + + SYNCO() ! Wait for pending instructions.. + + ! Start kernel + mov.l 5f, r0 + jmp @r0 + nop + + .balign 4 +#if defined(CONFIG_CPU_SH2) +1: .long 0x000000F0 ! IMASK=0xF +#else +1: .long 0x500080F0 ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF +#endif +ENTRY(stack_start) +2: .long init_thread_union+THREAD_SIZE +3: .long __bss_start +4: .long _end +5: .long start_kernel +6: .long cpu_init +7: .long init_thread_union +#if defined(CONFIG_OF_EARLY_FLATTREE) +8: .long sh_fdt_init +#endif + +#ifdef CONFIG_PMB +.LPMB_ADDR: .long PMB_ADDR +.LPMB_DATA: .long PMB_DATA +.LPMB_DATA_MASK: .long PMB_PFN_MASK | PMB_V +.LFIRST_ADDR_ENTRY: .long PAGE_OFFSET | PMB_V +.LFIRST_DATA_ENTRY: .long __MEMORY_START | PMB_V +.LMMUCR: .long MMUCR +.LMEMORY_SIZE: .long __MEMORY_SIZE +#ifdef CONFIG_UNCACHED_MAPPING +.Lcached_to_uncached: .long cached_to_uncached +.Luncached_size: .long uncached_size +#endif +#endif diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c new file mode 100644 index 000000000..f10d64311 --- /dev/null +++ b/arch/sh/kernel/hw_breakpoint.c @@ -0,0 +1,408 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/hw_breakpoint.c + * + * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC. 
+ * + * Copyright (C) 2009 - 2010 Paul Mundt + */ +#include <linux/init.h> +#include <linux/perf_event.h> +#include <linux/sched/signal.h> +#include <linux/hw_breakpoint.h> +#include <linux/percpu.h> +#include <linux/kallsyms.h> +#include <linux/notifier.h> +#include <linux/kprobes.h> +#include <linux/kdebug.h> +#include <linux/io.h> +#include <linux/clk.h> +#include <asm/hw_breakpoint.h> +#include <asm/mmu_context.h> +#include <asm/ptrace.h> +#include <asm/traps.h> + +/* + * Stores the breakpoints currently in use on each breakpoint address + * register for each cpus + */ +static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]); + +/* + * A dummy placeholder for early accesses until the CPUs get a chance to + * register their UBCs later in the boot process. + */ +static struct sh_ubc ubc_dummy = { .num_events = 0 }; + +static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy; + +/* + * Install a perf counter breakpoint. + * + * We seek a free UBC channel and use it for this breakpoint. + * + * Atomic: we hold the counter->ctx->lock and we only handle variables + * and registers local to this cpu. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int i; + + for (i = 0; i < sh_ubc->num_events; i++) { + struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); + + if (!*slot) { + *slot = bp; + break; + } + } + + if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) + return -EBUSY; + + clk_enable(sh_ubc->clk); + sh_ubc->enable(info, i); + + return 0; +} + +/* + * Uninstall the breakpoint contained in the given counter. + * + * First we search the debug address register it uses and then we disable + * it. + * + * Atomic: we hold the counter->ctx->lock and we only handle variables + * and registers local to this cpu. + */ +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int i; + + for (i = 0; i < sh_ubc->num_events; i++) { + struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]); + + if (*slot == bp) { + *slot = NULL; + break; + } + } + + if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot")) + return; + + sh_ubc->disable(info, i); + clk_disable(sh_ubc->clk); +} + +static int get_hbp_len(u16 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case SH_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case SH_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case SH_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; + case SH_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; + } + return len_in_bytes; +} + +/* + * Check for virtual address in kernel space. 
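+ * Both the start and the end of the monitored range must lie at or
+ * above TASK_SIZE for the breakpoint to be treated as a kernel one.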
+ */ +int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw) +{ + unsigned int len; + unsigned long va; + + va = hw->address; + len = get_hbp_len(hw->len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +int arch_bp_generic_fields(int sh_len, int sh_type, + int *gen_len, int *gen_type) +{ + /* Len */ + switch (sh_len) { + case SH_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; + break; + case SH_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; + break; + case SH_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; + break; + case SH_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + /* Type */ + switch (sh_type) { + case SH_BREAKPOINT_READ: + *gen_type = HW_BREAKPOINT_R; + break; + case SH_BREAKPOINT_WRITE: + *gen_type = HW_BREAKPOINT_W; + break; + case SH_BREAKPOINT_RW: + *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int arch_build_bp_info(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + hw->address = attr->bp_addr; + + /* Len */ + switch (attr->bp_len) { + case HW_BREAKPOINT_LEN_1: + hw->len = SH_BREAKPOINT_LEN_1; + break; + case HW_BREAKPOINT_LEN_2: + hw->len = SH_BREAKPOINT_LEN_2; + break; + case HW_BREAKPOINT_LEN_4: + hw->len = SH_BREAKPOINT_LEN_4; + break; + case HW_BREAKPOINT_LEN_8: + hw->len = SH_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + /* Type */ + switch (attr->bp_type) { + case HW_BREAKPOINT_R: + hw->type = SH_BREAKPOINT_READ; + break; + case HW_BREAKPOINT_W: + hw->type = SH_BREAKPOINT_WRITE; + break; + case HW_BREAKPOINT_W | HW_BREAKPOINT_R: + hw->type = SH_BREAKPOINT_RW; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Validate the arch-specific HW Breakpoint register settings + */ +int hw_breakpoint_arch_parse(struct perf_event *bp, + const struct perf_event_attr *attr, + struct arch_hw_breakpoint *hw) +{ + unsigned int align; + int ret; + + ret = arch_build_bp_info(bp, attr, hw); + if (ret) + return ret; + + ret = -EINVAL; + + switch (hw->len) { + case SH_BREAKPOINT_LEN_1: + align = 0; + break; + case SH_BREAKPOINT_LEN_2: + align = 1; + break; + case SH_BREAKPOINT_LEN_4: + align = 3; + break; + case SH_BREAKPOINT_LEN_8: + align = 7; + break; + default: + return ret; + } + + /* + * Check that the low-order bits of the address are appropriate + * for the alignment implied by len. + */ + if (hw->address & align) + return -EINVAL; + + return 0; +} + +/* + * Release the user breakpoints used by ptrace + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *t = &tsk->thread; + + for (i = 0; i < sh_ubc->num_events; i++) { + unregister_hw_breakpoint(t->ptrace_bps[i]); + t->ptrace_bps[i] = NULL; + } +} + +static int __kprobes hw_breakpoint_handler(struct die_args *args) +{ + int cpu, i, rc = NOTIFY_STOP; + struct perf_event *bp; + unsigned int cmf, resume_mask; + + /* + * Do an early return if none of the channels triggered. + */ + cmf = sh_ubc->triggered_mask(); + if (unlikely(!cmf)) + return NOTIFY_DONE; + + /* + * By default, resume all of the active channels. + */ + resume_mask = sh_ubc->active_mask(); + + /* + * Disable breakpoints during exception handling. 
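+ * The active channels are re-enabled with enable_all(resume_mask)
+ * once the triggered channels have been serviced below.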
+ */ + sh_ubc->disable_all(); + + cpu = get_cpu(); + for (i = 0; i < sh_ubc->num_events; i++) { + unsigned long event_mask = (1 << i); + + if (likely(!(cmf & event_mask))) + continue; + + /* + * The counter may be concurrently released but that can only + * occur from a call_rcu() path. We can then safely fetch + * the breakpoint, use its callback, touch its counter + * while we are in an rcu_read_lock() path. + */ + rcu_read_lock(); + + bp = per_cpu(bp_per_reg[i], cpu); + if (bp) + rc = NOTIFY_DONE; + + /* + * Reset the condition match flag to denote completion of + * exception handling. + */ + sh_ubc->clear_triggered_mask(event_mask); + + /* + * bp can be NULL due to concurrent perf counter + * removing. + */ + if (!bp) { + rcu_read_unlock(); + break; + } + + /* + * Don't restore the channel if the breakpoint is from + * ptrace, as it always operates in one-shot mode. + */ + if (bp->overflow_handler == ptrace_triggered) + resume_mask &= ~(1 << i); + + perf_bp_event(bp, args->regs); + + /* Deliver the signal to userspace */ + if (!arch_check_bp_in_kernelspace(&bp->hw.info)) { + force_sig_fault(SIGTRAP, TRAP_HWBKPT, + (void __user *)NULL); + } + + rcu_read_unlock(); + } + + if (cmf == 0) + rc = NOTIFY_DONE; + + sh_ubc->enable_all(resume_mask); + + put_cpu(); + + return rc; +} + +BUILD_TRAP_HANDLER(breakpoint) +{ + unsigned long ex = lookup_exception_vector(); + TRAP_HANDLER_DECL; + + notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP); +} + +/* + * Handle debug exception notifications. + */ +int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data) +{ + struct die_args *args = data; + + if (val != DIE_BREAKPOINT) + return NOTIFY_DONE; + + /* + * If the breakpoint hasn't been triggered by the UBC, it's + * probably from a debugger, so don't do anything more here. + * + * This also permits the UBC interface clock to remain off for + * non-UBC breakpoints, as we don't need to check the triggered + * or active channel masks. + */ + if (args->trapnr != sh_ubc->trap_nr) + return NOTIFY_DONE; + + return hw_breakpoint_handler(data); +} + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ + /* TODO */ +} + +int register_sh_ubc(struct sh_ubc *ubc) +{ + /* Bail if it's already assigned */ + if (sh_ubc != &ubc_dummy) + return -EBUSY; + sh_ubc = ubc; + + pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name); + + WARN_ON(ubc->num_events > HBP_NUM); + + return 0; +} diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c new file mode 100644 index 000000000..a80b2a5b2 --- /dev/null +++ b/arch/sh/kernel/idle.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The idle loop for all SuperH platforms. + * + * Copyright (C) 2002 - 2009 Paul Mundt + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/pm.h> +#include <linux/tick.h> +#include <linux/preempt.h> +#include <linux/thread_info.h> +#include <linux/irqflags.h> +#include <linux/smp.h> +#include <linux/atomic.h> +#include <asm/processor.h> +#include <asm/smp.h> +#include <asm/bl_bit.h> + +static void (*sh_idle)(void); + +void default_idle(void) +{ + set_bl_bit(); + raw_local_irq_enable(); + /* Isn't this racy ? */ + cpu_sleep(); + clear_bl_bit(); +} + +void arch_cpu_idle_dead(void) +{ + play_dead(); +} + +void arch_cpu_idle(void) +{ + sh_idle(); +} + +void __init select_idle_routine(void) +{ + /* + * If a platform has set its own idle routine, leave it alone. 
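+ * Otherwise fall back to default_idle(), which simply puts the CPU
+ * to sleep until the next interrupt.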
+ */ + if (!sh_idle) + sh_idle = default_idle; +} + +void stop_this_cpu(void *unused) +{ + local_irq_disable(); + set_cpu_online(smp_processor_id(), false); + + for (;;) + cpu_sleep(); +} diff --git a/arch/sh/kernel/io.c b/arch/sh/kernel/io.c new file mode 100644 index 000000000..da22f3b32 --- /dev/null +++ b/arch/sh/kernel/io.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/io.c - Machine independent I/O functions. + * + * Copyright (C) 2000 - 2009 Stuart Menefy + * Copyright (C) 2005 Paul Mundt + */ +#include <linux/module.h> +#include <linux/pci.h> +#include <asm/machvec.h> +#include <asm/io.h> + +/* + * Copy data from IO memory space to "real" memory space. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, unsigned long count) +{ + /* + * Would it be worthwhile doing byte and long transfers first + * to try and get aligned? + */ +#ifdef CONFIG_CPU_SH4 + if ((count >= 0x20) && + (((u32)to & 0x1f) == 0) && (((u32)from & 0x3) == 0)) { + int tmp2, tmp3, tmp4, tmp5, tmp6; + + __asm__ __volatile__( + "1: \n\t" + "mov.l @%7+, r0 \n\t" + "mov.l @%7+, %2 \n\t" + "movca.l r0, @%0 \n\t" + "mov.l @%7+, %3 \n\t" + "mov.l @%7+, %4 \n\t" + "mov.l @%7+, %5 \n\t" + "mov.l @%7+, %6 \n\t" + "mov.l @%7+, r7 \n\t" + "mov.l @%7+, r0 \n\t" + "mov.l %2, @(0x04,%0) \n\t" + "mov #0x20, %2 \n\t" + "mov.l %3, @(0x08,%0) \n\t" + "sub %2, %1 \n\t" + "mov.l %4, @(0x0c,%0) \n\t" + "cmp/hi %1, %2 ! T if 32 > count \n\t" + "mov.l %5, @(0x10,%0) \n\t" + "mov.l %6, @(0x14,%0) \n\t" + "mov.l r7, @(0x18,%0) \n\t" + "mov.l r0, @(0x1c,%0) \n\t" + "bf.s 1b \n\t" + " add #0x20, %0 \n\t" + : "=&r" (to), "=&r" (count), + "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4), + "=&r" (tmp5), "=&r" (tmp6), "=&r" (from) + : "7"(from), "0" (to), "1" (count) + : "r0", "r7", "t", "memory"); + } +#endif + + if ((((u32)to | (u32)from) & 0x3) == 0) { + for (; count > 3; count -= 4) { + *(u32 *)to = *(volatile u32 *)from; + to += 4; + from += 4; + } + } + + for (; count > 0; count--) { + *(u8 *)to = *(volatile u8 *)from; + to++; + from++; + } + + mb(); +} +EXPORT_SYMBOL(memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, unsigned long count) +{ + if ((((u32)to | (u32)from) & 0x3) == 0) { + for ( ; count > 3; count -= 4) { + *(volatile u32 *)to = *(u32 *)from; + to += 4; + from += 4; + } + } + + for (; count > 0; count--) { + *(volatile u8 *)to = *(u8 *)from; + to++; + from++; + } + + mb(); +} +EXPORT_SYMBOL(memcpy_toio); + +/* + * "memset" on IO memory space. + * This needs to be optimized. + */ +void memset_io(volatile void __iomem *dst, int c, unsigned long count) +{ + while (count) { + count--; + writeb(c, dst); + dst++; + } +} +EXPORT_SYMBOL(memset_io); diff --git a/arch/sh/kernel/io_trapped.c b/arch/sh/kernel/io_trapped.c new file mode 100644 index 000000000..e803b14ef --- /dev/null +++ b/arch/sh/kernel/io_trapped.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Trapped io support + * + * Copyright (C) 2008 Magnus Damm + * + * Intercept io operations by trapping. 
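+ * The registered resources are remapped with PAGE_NONE so that any
+ * access faults, and the faulting load or store is then emulated
+ * against the real device registers.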
+ */ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/bitops.h> +#include <linux/vmalloc.h> +#include <linux/module.h> +#include <linux/init.h> +#include <asm/mmu_context.h> +#include <linux/uaccess.h> +#include <asm/io.h> +#include <asm/io_trapped.h> + +#define TRAPPED_PAGES_MAX 16 + +#ifdef CONFIG_HAS_IOPORT_MAP +LIST_HEAD(trapped_io); +EXPORT_SYMBOL_GPL(trapped_io); +#endif +#ifdef CONFIG_HAS_IOMEM +LIST_HEAD(trapped_mem); +EXPORT_SYMBOL_GPL(trapped_mem); +#endif +static DEFINE_SPINLOCK(trapped_lock); + +static int trapped_io_disable __read_mostly; + +static int __init trapped_io_setup(char *__unused) +{ + trapped_io_disable = 1; + return 1; +} +__setup("noiotrap", trapped_io_setup); + +int register_trapped_io(struct trapped_io *tiop) +{ + struct resource *res; + unsigned long len = 0, flags = 0; + struct page *pages[TRAPPED_PAGES_MAX]; + int k, n; + + if (unlikely(trapped_io_disable)) + return 0; + + /* structure must be page aligned */ + if ((unsigned long)tiop & (PAGE_SIZE - 1)) + goto bad; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len += roundup(resource_size(res), PAGE_SIZE); + flags |= res->flags; + } + + /* support IORESOURCE_IO _or_ MEM, not both */ + if (hweight_long(flags) != 1) + goto bad; + + n = len >> PAGE_SHIFT; + + if (n >= TRAPPED_PAGES_MAX) + goto bad; + + for (k = 0; k < n; k++) + pages[k] = virt_to_page(tiop); + + tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE); + if (!tiop->virt_base) + goto bad; + + len = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n", + (unsigned long)(tiop->virt_base + len), + res->flags & IORESOURCE_IO ? "io" : "mmio", + (unsigned long)res->start); + len += roundup(resource_size(res), PAGE_SIZE); + } + + tiop->magic = IO_TRAPPED_MAGIC; + INIT_LIST_HEAD(&tiop->list); + spin_lock_irq(&trapped_lock); +#ifdef CONFIG_HAS_IOPORT_MAP + if (flags & IORESOURCE_IO) + list_add(&tiop->list, &trapped_io); +#endif +#ifdef CONFIG_HAS_IOMEM + if (flags & IORESOURCE_MEM) + list_add(&tiop->list, &trapped_mem); +#endif + spin_unlock_irq(&trapped_lock); + + return 0; + bad: + pr_warn("unable to install trapped io filter\n"); + return -1; +} + +void __iomem *match_trapped_io_handler(struct list_head *list, + unsigned long offset, + unsigned long size) +{ + unsigned long voffs; + struct trapped_io *tiop; + struct resource *res; + int k, len; + unsigned long flags; + + spin_lock_irqsave(&trapped_lock, flags); + list_for_each_entry(tiop, list, list) { + voffs = 0; + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + if (res->start == offset) { + spin_unlock_irqrestore(&trapped_lock, flags); + return tiop->virt_base + voffs; + } + + len = resource_size(res); + voffs += roundup(len, PAGE_SIZE); + } + } + spin_unlock_irqrestore(&trapped_lock, flags); + return NULL; +} + +static struct trapped_io *lookup_tiop(unsigned long address) +{ + pgd_t *pgd_k; + p4d_t *p4d_k; + pud_t *pud_k; + pmd_t *pmd_k; + pte_t *pte_k; + pte_t entry; + + pgd_k = swapper_pg_dir + pgd_index(address); + if (!pgd_present(*pgd_k)) + return NULL; + + p4d_k = p4d_offset(pgd_k, address); + if (!p4d_present(*p4d_k)) + return NULL; + + pud_k = pud_offset(p4d_k, address); + if (!pud_present(*pud_k)) + return NULL; + + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd_k)) + return NULL; + + pte_k = pte_offset_kernel(pmd_k, address); + entry = *pte_k; + + return pfn_to_kaddr(pte_pfn(entry)); +} + +static unsigned long 
lookup_address(struct trapped_io *tiop, + unsigned long address) +{ + struct resource *res; + unsigned long vaddr = (unsigned long)tiop->virt_base; + unsigned long len; + int k; + + for (k = 0; k < tiop->num_resources; k++) { + res = tiop->resource + k; + len = roundup(resource_size(res), PAGE_SIZE); + if (address < (vaddr + len)) + return res->start + (address - vaddr); + vaddr += len; + } + return 0; +} + +static unsigned long long copy_word(unsigned long src_addr, int src_len, + unsigned long dst_addr, int dst_len) +{ + unsigned long long tmp = 0; + + switch (src_len) { + case 1: + tmp = __raw_readb(src_addr); + break; + case 2: + tmp = __raw_readw(src_addr); + break; + case 4: + tmp = __raw_readl(src_addr); + break; + case 8: + tmp = __raw_readq(src_addr); + break; + } + + switch (dst_len) { + case 1: + __raw_writeb(tmp, dst_addr); + break; + case 2: + __raw_writew(tmp, dst_addr); + break; + case 4: + __raw_writel(tmp, dst_addr); + break; + case 8: + __raw_writeq(tmp, dst_addr); + break; + } + + return tmp; +} + +static unsigned long from_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long src_addr = (unsigned long)src; + unsigned long long tmp; + + pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt); + tiop = lookup_tiop(src_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + src_addr = lookup_address(tiop, src_addr); + if (!src_addr) + return cnt; + + tmp = copy_word(src_addr, + max_t(unsigned long, cnt, + (tiop->minimum_bus_width / 8)), + (unsigned long)dst, cnt); + + pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp); + return 0; +} + +static unsigned long to_device(void *dst, const void *src, unsigned long cnt) +{ + struct trapped_io *tiop; + unsigned long dst_addr = (unsigned long)dst; + unsigned long long tmp; + + pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt); + tiop = lookup_tiop(dst_addr); + WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC)); + + dst_addr = lookup_address(tiop, dst_addr); + if (!dst_addr) + return cnt; + + tmp = copy_word((unsigned long)src, cnt, + dst_addr, max_t(unsigned long, cnt, + (tiop->minimum_bus_width / 8))); + + pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp); + return 0; +} + +static struct mem_access trapped_io_access = { + from_device, + to_device, +}; + +int handle_trapped_io(struct pt_regs *regs, unsigned long address) +{ + insn_size_t instruction; + int tmp; + + if (trapped_io_disable) + return 0; + if (!lookup_tiop(address)) + return 0; + + WARN_ON(user_mode(regs)); + + if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc), + sizeof(instruction))) { + return 0; + } + + tmp = handle_unaligned_access(instruction, regs, + &trapped_io_access, 1, address); + return tmp == 0; +} diff --git a/arch/sh/kernel/iomap.c b/arch/sh/kernel/iomap.c new file mode 100644 index 000000000..0a0dff4e6 --- /dev/null +++ b/arch/sh/kernel/iomap.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/iomap.c + * + * Copyright (C) 2000 Niibe Yutaka + * Copyright (C) 2005 - 2007 Paul Mundt + */ +#include <linux/module.h> +#include <linux/io.h> + +unsigned int ioread8(const void __iomem *addr) +{ + return readb(addr); +} +EXPORT_SYMBOL(ioread8); + +unsigned int ioread16(const void __iomem *addr) +{ + return readw(addr); +} +EXPORT_SYMBOL(ioread16); + +unsigned int ioread16be(const void __iomem *addr) +{ + return be16_to_cpu(__raw_readw(addr)); +} +EXPORT_SYMBOL(ioread16be); + +unsigned int ioread32(const void __iomem *addr) +{ 
+ return readl(addr); +} +EXPORT_SYMBOL(ioread32); + +unsigned int ioread32be(const void __iomem *addr) +{ + return be32_to_cpu(__raw_readl(addr)); +} +EXPORT_SYMBOL(ioread32be); + +void iowrite8(u8 val, void __iomem *addr) +{ + writeb(val, addr); +} +EXPORT_SYMBOL(iowrite8); + +void iowrite16(u16 val, void __iomem *addr) +{ + writew(val, addr); +} +EXPORT_SYMBOL(iowrite16); + +void iowrite16be(u16 val, void __iomem *addr) +{ + __raw_writew(cpu_to_be16(val), addr); +} +EXPORT_SYMBOL(iowrite16be); + +void iowrite32(u32 val, void __iomem *addr) +{ + writel(val, addr); +} +EXPORT_SYMBOL(iowrite32); + +void iowrite32be(u32 val, void __iomem *addr) +{ + __raw_writel(cpu_to_be32(val), addr); +} +EXPORT_SYMBOL(iowrite32be); + +/* + * These are the "repeat MMIO read/write" functions. + * Note the "__raw" accesses, since we don't want to + * convert to CPU byte order. We write in "IO byte + * order" (we also don't have IO barriers). + */ +static inline void mmio_insb(const void __iomem *addr, u8 *dst, int count) +{ + while (--count >= 0) { + u8 data = __raw_readb(addr); + *dst = data; + dst++; + } +} + +static inline void mmio_insw(const void __iomem *addr, u16 *dst, int count) +{ + while (--count >= 0) { + u16 data = __raw_readw(addr); + *dst = data; + dst++; + } +} + +static inline void mmio_insl(const void __iomem *addr, u32 *dst, int count) +{ + while (--count >= 0) { + u32 data = __raw_readl(addr); + *dst = data; + dst++; + } +} + +static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) +{ + while (--count >= 0) { + __raw_writeb(*src, addr); + src++; + } +} + +static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) +{ + while (--count >= 0) { + __raw_writew(*src, addr); + src++; + } +} + +static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) +{ + while (--count >= 0) { + __raw_writel(*src, addr); + src++; + } +} + +void ioread8_rep(const void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insb(addr, dst, count); +} +EXPORT_SYMBOL(ioread8_rep); + +void ioread16_rep(const void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insw(addr, dst, count); +} +EXPORT_SYMBOL(ioread16_rep); + +void ioread32_rep(const void __iomem *addr, void *dst, unsigned long count) +{ + mmio_insl(addr, dst, count); +} +EXPORT_SYMBOL(ioread32_rep); + +void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsb(addr, src, count); +} +EXPORT_SYMBOL(iowrite8_rep); + +void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsw(addr, src, count); +} +EXPORT_SYMBOL(iowrite16_rep); + +void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) +{ + mmio_outsl(addr, src, count); +} +EXPORT_SYMBOL(iowrite32_rep); diff --git a/arch/sh/kernel/ioport.c b/arch/sh/kernel/ioport.c new file mode 100644 index 000000000..f39446a65 --- /dev/null +++ b/arch/sh/kernel/ioport.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/ioport.c + * + * Copyright (C) 2000 Niibe Yutaka + * Copyright (C) 2005 - 2007 Paul Mundt + */ +#include <linux/module.h> +#include <linux/io.h> +#include <asm/io_trapped.h> + +unsigned long sh_io_port_base __read_mostly = -1; +EXPORT_SYMBOL(sh_io_port_base); + +void __iomem *__ioport_map(unsigned long addr, unsigned int size) +{ + if (sh_mv.mv_ioport_map) + return sh_mv.mv_ioport_map(addr, size); + + return (void __iomem *)(addr + sh_io_port_base); +} +EXPORT_SYMBOL(__ioport_map); + +void __iomem *ioport_map(unsigned long port, 
unsigned int nr) +{ + void __iomem *ret; + + ret = __ioport_map_trapped(port, nr); + if (ret) + return ret; + + return __ioport_map(port, nr); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ + if (sh_mv.mv_ioport_unmap) + sh_mv.mv_ioport_unmap(addr); +} +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c new file mode 100644 index 000000000..4e6835de5 --- /dev/null +++ b/arch/sh/kernel/irq.c @@ -0,0 +1,249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sh/kernel/irq.c + * + * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar + * + * + * SuperH version: Copyright (C) 1999 Niibe Yutaka + */ +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/kernel_stat.h> +#include <linux/seq_file.h> +#include <linux/ftrace.h> +#include <linux/delay.h> +#include <linux/ratelimit.h> +#include <asm/processor.h> +#include <asm/machvec.h> +#include <linux/uaccess.h> +#include <asm/thread_info.h> +#include <cpu/mmu_context.h> +#include <asm/softirq_stack.h> + +atomic_t irq_err_count; + +/* + * 'what should we do if we get a hw irq event on an illegal vector'. + * each architecture has to answer this themselves, it doesn't deserve + * a generic callback i think. + */ +void ack_bad_irq(unsigned int irq) +{ + atomic_inc(&irq_err_count); + printk("unexpected IRQ trap at vector %02x\n", irq); +} + +#if defined(CONFIG_PROC_FS) +/* + * /proc/interrupts printing for arch specific interrupts + */ +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "NMI"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", per_cpu(irq_stat.__nmi_count, j)); + seq_printf(p, " Non-maskable interrupts\n"); + + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); + + return 0; +} +#endif + +#ifdef CONFIG_IRQSTACKS +/* + * per-CPU IRQ handling contexts (thread information and stack) + */ +union irq_ctx { + struct thread_info tinfo; + u32 stack[THREAD_SIZE/sizeof(u32)]; +}; + +static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; +static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; + +static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; +static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; + +static inline void handle_one_irq(unsigned int irq) +{ + union irq_ctx *curctx, *irqctx; + + curctx = (union irq_ctx *)current_thread_info(); + irqctx = hardirq_ctx[smp_processor_id()]; + + /* + * this is where we switch to the IRQ stack. However, if we are + * already using the IRQ stack (because we interrupted a hardirq + * handler) we can't do that and just have to keep using the + * current stack (which is the irq stack already after all) + */ + if (curctx != irqctx) { + u32 *isp; + + isp = (u32 *)((char *)irqctx + sizeof(*irqctx)); + irqctx->tinfo.task = curctx->tinfo.task; + irqctx->tinfo.previous_sp = current_stack_pointer; + + /* + * Copy the softirq bits in preempt_count so that the + * softirq checks work in the hardirq context. 
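+ * Only the SOFTIRQ_MASK bits are inherited from the interrupted
+ * context; everything else in preempt_count stays as set up for the
+ * IRQ stack itself.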
+ */ + irqctx->tinfo.preempt_count = + (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) | + (curctx->tinfo.preempt_count & SOFTIRQ_MASK); + + __asm__ __volatile__ ( + "mov %0, r4 \n" + "mov r15, r8 \n" + "jsr @%1 \n" + /* switch to the irq stack */ + " mov %2, r15 \n" + /* restore the stack (ring zero) */ + "mov r8, r15 \n" + : /* no outputs */ + : "r" (irq), "r" (generic_handle_irq), "r" (isp) + : "memory", "r0", "r1", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "t", "pr" + ); + } else + generic_handle_irq(irq); +} + +/* + * allocate per-cpu stacks for hardirq and for softirq processing + */ +void irq_ctx_init(int cpu) +{ + union irq_ctx *irqctx; + + if (hardirq_ctx[cpu]) + return; + + irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + hardirq_ctx[cpu] = irqctx; + + irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE]; + irqctx->tinfo.task = NULL; + irqctx->tinfo.cpu = cpu; + irqctx->tinfo.preempt_count = 0; + irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); + + softirq_ctx[cpu] = irqctx; + + printk("CPU %u irqstacks, hard=%p soft=%p\n", + cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); +} + +void irq_ctx_exit(int cpu) +{ + hardirq_ctx[cpu] = NULL; +} + +#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK +void do_softirq_own_stack(void) +{ + struct thread_info *curctx; + union irq_ctx *irqctx; + u32 *isp; + + curctx = current_thread_info(); + irqctx = softirq_ctx[smp_processor_id()]; + irqctx->tinfo.task = curctx->task; + irqctx->tinfo.previous_sp = current_stack_pointer; + + /* build the stack frame on the softirq stack */ + isp = (u32 *)((char *)irqctx + sizeof(*irqctx)); + + __asm__ __volatile__ ( + "mov r15, r9 \n" + "jsr @%0 \n" + /* switch to the softirq stack */ + " mov %1, r15 \n" + /* restore the thread stack */ + "mov r9, r15 \n" + : /* no outputs */ + : "r" (__do_softirq), "r" (isp) + : "memory", "r0", "r1", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" + ); +} +#endif +#else +static inline void handle_one_irq(unsigned int irq) +{ + generic_handle_irq(irq); +} +#endif + +asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + + irq = irq_demux(irq_lookup(irq)); + + if (irq != NO_IRQ_IGNORE) { + handle_one_irq(irq); + irq_finish(irq); + } + + irq_exit(); + + set_irq_regs(old_regs); + + return IRQ_HANDLED; +} + +void __init init_IRQ(void) +{ + plat_irq_setup(); + + /* Perform the machine specific initialisation */ + if (sh_mv.mv_init_irq) + sh_mv.mv_init_irq(); + + intc_finalize(); + + irq_ctx_init(smp_processor_id()); +} + +#ifdef CONFIG_HOTPLUG_CPU +/* + * The CPU has been marked offline. Migrate IRQs off this CPU. If + * the affinity settings do not allow other CPUs, force them onto any + * available CPU. 
+ */ +void migrate_irqs(void) +{ + unsigned int irq, cpu = smp_processor_id(); + + for_each_active_irq(irq) { + struct irq_data *data = irq_get_irq_data(irq); + + if (irq_data_get_node(data) == cpu) { + const struct cpumask *mask = irq_data_get_affinity_mask(data); + unsigned int newcpu = cpumask_any_and(mask, + cpu_online_mask); + if (newcpu >= nr_cpu_ids) { + pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", + irq, cpu); + + irq_set_affinity(irq, cpu_all_mask); + } else { + irq_set_affinity(irq, mask); + } + } + } +} +#endif diff --git a/arch/sh/kernel/irq_32.c b/arch/sh/kernel/irq_32.c new file mode 100644 index 000000000..e09cdc4ad --- /dev/null +++ b/arch/sh/kernel/irq_32.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SHcompact irqflags support + * + * Copyright (C) 2006 - 2009 Paul Mundt + */ +#include <linux/irqflags.h> +#include <linux/module.h> + +void notrace arch_local_irq_restore(unsigned long flags) +{ + unsigned long __dummy0, __dummy1; + + if (flags == ARCH_IRQ_DISABLED) { + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "or #0xf0, %0\n\t" + "ldc %0, sr\n\t" + : "=&z" (__dummy0) + : /* no inputs */ + : "memory" + ); + } else { + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and %1, %0\n\t" +#ifdef CONFIG_CPU_HAS_SR_RB + "stc r6_bank, %1\n\t" + "or %1, %0\n\t" +#endif + "ldc %0, sr\n\t" + : "=&r" (__dummy0), "=r" (__dummy1) + : "1" (~ARCH_IRQ_DISABLED) + : "memory" + ); + } +} +EXPORT_SYMBOL(arch_local_irq_restore); + +unsigned long notrace arch_local_save_flags(void) +{ + unsigned long flags; + + __asm__ __volatile__ ( + "stc sr, %0\n\t" + "and #0xf0, %0\n\t" + : "=&z" (flags) + : /* no inputs */ + : "memory" + ); + + return flags; +} +EXPORT_SYMBOL(arch_local_save_flags); diff --git a/arch/sh/kernel/kdebugfs.c b/arch/sh/kernel/kdebugfs.c new file mode 100644 index 000000000..8b505e155 --- /dev/null +++ b/arch/sh/kernel/kdebugfs.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> +#include <linux/init.h> +#include <linux/debugfs.h> + +struct dentry *arch_debugfs_dir; +EXPORT_SYMBOL(arch_debugfs_dir); + +static int __init arch_kdebugfs_init(void) +{ + arch_debugfs_dir = debugfs_create_dir("sh", NULL); + return 0; +} +arch_initcall(arch_kdebugfs_init); diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c new file mode 100644 index 000000000..e4147efa9 --- /dev/null +++ b/arch/sh/kernel/kgdb.c @@ -0,0 +1,378 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SuperH KGDB support + * + * Copyright (C) 2008 - 2012 Paul Mundt + * + * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel. + */ +#include <linux/kgdb.h> +#include <linux/kdebug.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> + +#include <asm/cacheflush.h> +#include <asm/traps.h> + +/* Macros for single step instruction identification */ +#define OPCODE_BT(op) (((op) & 0xff00) == 0x8900) +#define OPCODE_BF(op) (((op) & 0xff00) == 0x8b00) +#define OPCODE_BTF_DISP(op) (((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \ + (((op) & 0x7f ) << 1)) +#define OPCODE_BFS(op) (((op) & 0xff00) == 0x8f00) +#define OPCODE_BTS(op) (((op) & 0xff00) == 0x8d00) +#define OPCODE_BRA(op) (((op) & 0xf000) == 0xa000) +#define OPCODE_BRA_DISP(op) (((op) & 0x800) ? 
(((op) | 0xfffff800) << 1) : \ + (((op) & 0x7ff) << 1)) +#define OPCODE_BRAF(op) (((op) & 0xf0ff) == 0x0023) +#define OPCODE_BRAF_REG(op) (((op) & 0x0f00) >> 8) +#define OPCODE_BSR(op) (((op) & 0xf000) == 0xb000) +#define OPCODE_BSR_DISP(op) (((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \ + (((op) & 0x7ff) << 1)) +#define OPCODE_BSRF(op) (((op) & 0xf0ff) == 0x0003) +#define OPCODE_BSRF_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_JMP(op) (((op) & 0xf0ff) == 0x402b) +#define OPCODE_JMP_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_JSR(op) (((op) & 0xf0ff) == 0x400b) +#define OPCODE_JSR_REG(op) (((op) >> 8) & 0xf) +#define OPCODE_RTS(op) ((op) == 0xb) +#define OPCODE_RTE(op) ((op) == 0x2b) + +#define SR_T_BIT_MASK 0x1 +#define STEP_OPCODE 0xc33d + +/* Calculate the new address for after a step */ +static short *get_step_address(struct pt_regs *linux_regs) +{ + insn_size_t op = __raw_readw(linux_regs->pc); + long addr; + + /* BT */ + if (OPCODE_BT(op)) { + if (linux_regs->sr & SR_T_BIT_MASK) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 2; + } + + /* BTS */ + else if (OPCODE_BTS(op)) { + if (linux_regs->sr & SR_T_BIT_MASK) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 4; /* Not in delay slot */ + } + + /* BF */ + else if (OPCODE_BF(op)) { + if (!(linux_regs->sr & SR_T_BIT_MASK)) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 2; + } + + /* BFS */ + else if (OPCODE_BFS(op)) { + if (!(linux_regs->sr & SR_T_BIT_MASK)) + addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op); + else + addr = linux_regs->pc + 4; /* Not in delay slot */ + } + + /* BRA */ + else if (OPCODE_BRA(op)) + addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op); + + /* BRAF */ + else if (OPCODE_BRAF(op)) + addr = linux_regs->pc + 4 + + linux_regs->regs[OPCODE_BRAF_REG(op)]; + + /* BSR */ + else if (OPCODE_BSR(op)) + addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op); + + /* BSRF */ + else if (OPCODE_BSRF(op)) + addr = linux_regs->pc + 4 + + linux_regs->regs[OPCODE_BSRF_REG(op)]; + + /* JMP */ + else if (OPCODE_JMP(op)) + addr = linux_regs->regs[OPCODE_JMP_REG(op)]; + + /* JSR */ + else if (OPCODE_JSR(op)) + addr = linux_regs->regs[OPCODE_JSR_REG(op)]; + + /* RTS */ + else if (OPCODE_RTS(op)) + addr = linux_regs->pr; + + /* RTE */ + else if (OPCODE_RTE(op)) + addr = linux_regs->regs[15]; + + /* Other */ + else + addr = linux_regs->pc + instruction_size(op); + + flush_icache_range(addr, addr + instruction_size(op)); + return (short *)addr; +} + +/* + * Replace the instruction immediately after the current instruction + * (i.e. next in the expected flow of control) with a trap instruction, + * so that returning will cause only a single instruction to be executed. + * Note that this model is slightly broken for instructions with delay + * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the + * instruction in the delay slot will be executed. 
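+ * get_step_address() accounts for this by returning the address that
+ * will be reached after the delay slot rather than the slot itself.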
+ */ + +static unsigned long stepped_address; +static insn_size_t stepped_opcode; + +static void do_single_step(struct pt_regs *linux_regs) +{ + /* Determine where the target instruction will send us to */ + unsigned short *addr = get_step_address(linux_regs); + + stepped_address = (int)addr; + + /* Replace it */ + stepped_opcode = __raw_readw((long)addr); + *addr = STEP_OPCODE; + + /* Flush and return */ + flush_icache_range((long)addr, (long)addr + + instruction_size(stepped_opcode)); +} + +/* Undo a single step */ +static void undo_single_step(struct pt_regs *linux_regs) +{ + /* If we have stepped, put back the old instruction */ + /* Use stepped_address in case we stopped elsewhere */ + if (stepped_opcode != 0) { + __raw_writew(stepped_opcode, stepped_address); + flush_icache_range(stepped_address, stepped_address + 2); + } + + stepped_opcode = 0; +} + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) }, + { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) }, + { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) }, + { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) }, + { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) }, + { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) }, + { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) }, + { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) }, + { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) }, + { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) }, + { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) }, + { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) }, + { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) }, + { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) }, + { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) }, + { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) }, + { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) }, + { "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) }, + { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) }, + { "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) }, + { "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) }, + { "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) }, + { "vbr", GDB_SIZEOF_REG, -1 }, +}; + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno < 0 || regno >= DBG_MAX_REG_NUM) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + + return 0; +} + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].size != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + + switch (regno) { + case GDB_VBR: + __asm__ __volatile__ ("stc vbr, %0" : "=r" (mem)); + break; + } + + return dbg_reg_def[regno].name; +} + +void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) +{ + struct pt_regs *thread_regs = task_pt_regs(p); + int reg; + + /* Initialize to zero */ + for (reg = 0; reg < DBG_MAX_REG_NUM; reg++) + gdb_regs[reg] = 0; + + /* + * Copy out GP regs 8 to 14. + * + * switch_to() relies on SR.RB toggling, so regs 0->7 are banked + * and need privileged instructions to get to. The r15 value we + * fetch from the thread info directly. 
+ */ + for (reg = GDB_R8; reg < GDB_R15; reg++) + gdb_regs[reg] = thread_regs->regs[reg]; + + gdb_regs[GDB_R15] = p->thread.sp; + gdb_regs[GDB_PC] = p->thread.pc; + + /* + * Additional registers we have context for + */ + gdb_regs[GDB_PR] = thread_regs->pr; + gdb_regs[GDB_GBR] = thread_regs->gbr; +} + +int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, + char *remcomInBuffer, char *remcomOutBuffer, + struct pt_regs *linux_regs) +{ + unsigned long addr; + char *ptr; + + /* Undo any stepping we may have done */ + undo_single_step(linux_regs); + + switch (remcomInBuffer[0]) { + case 'c': + case 's': + /* try to read optional parameter, pc unchanged if no parm */ + ptr = &remcomInBuffer[1]; + if (kgdb_hex2long(&ptr, &addr)) + linux_regs->pc = addr; + fallthrough; + case 'D': + case 'k': + atomic_set(&kgdb_cpu_doing_single_step, -1); + + if (remcomInBuffer[0] == 's') { + do_single_step(linux_regs); + kgdb_single_step = 1; + + atomic_set(&kgdb_cpu_doing_single_step, + raw_smp_processor_id()); + } + + return 0; + } + + /* this means that we do not want to exit from the handler: */ + return -1; +} + +unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) +{ + if (exception == 60) + return instruction_pointer(regs) - 2; + return instruction_pointer(regs); +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->pc = ip; +} + +/* + * The primary entry points for the kgdb debug trap table entries. + */ +BUILD_TRAP_HANDLER(singlestep) +{ + unsigned long flags; + TRAP_HANDLER_DECL; + + local_irq_save(flags); + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + kgdb_handle_exception(0, SIGTRAP, 0, regs); + local_irq_restore(flags); +} + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + int ret; + + switch (cmd) { + case DIE_BREAKPOINT: + /* + * This means a user thread is single stepping + * a system call which should be ignored + */ + if (test_thread_flag(TIF_SINGLESTEP)) + return NOTIFY_DONE; + + ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr, + args->err, args->regs); + if (ret) + return NOTIFY_DONE; + + break; + } + + return NOTIFY_STOP; +} + +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, + + /* + * Lowest-prio notifier priority, we want to be notified last: + */ + .priority = -INT_MAX, +}; + +int kgdb_arch_init(void) +{ + return register_die_notifier(&kgdb_notifier); +} + +void kgdb_arch_exit(void) +{ + unregister_die_notifier(&kgdb_notifier); +} + +const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: trapa #0x3c */ +#ifdef CONFIG_CPU_LITTLE_ENDIAN + .gdb_bpt_instr = { 0x3c, 0xc3 }, +#else + .gdb_bpt_instr = { 0xc3, 0x3c }, +#endif +}; diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c new file mode 100644 index 000000000..aed1ea8e2 --- /dev/null +++ b/arch/sh/kernel/kprobes.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel probes (kprobes) for SuperH + * + * Copyright (C) 2007 Chris Smith <chris.smith@st.com> + * Copyright (C) 2006 Lineo Solutions, Inc. 
+ */ +#include <linux/kprobes.h> +#include <linux/extable.h> +#include <linux/ptrace.h> +#include <linux/preempt.h> +#include <linux/kdebug.h> +#include <linux/slab.h> +#include <asm/cacheflush.h> +#include <linux/uaccess.h> + +DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +static DEFINE_PER_CPU(struct kprobe, saved_current_opcode); +static DEFINE_PER_CPU(struct kprobe, saved_next_opcode); +static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2); + +#define OPCODE_JMP(x) (((x) & 0xF0FF) == 0x402b) +#define OPCODE_JSR(x) (((x) & 0xF0FF) == 0x400b) +#define OPCODE_BRA(x) (((x) & 0xF000) == 0xa000) +#define OPCODE_BRAF(x) (((x) & 0xF0FF) == 0x0023) +#define OPCODE_BSR(x) (((x) & 0xF000) == 0xb000) +#define OPCODE_BSRF(x) (((x) & 0xF0FF) == 0x0003) + +#define OPCODE_BF_S(x) (((x) & 0xFF00) == 0x8f00) +#define OPCODE_BT_S(x) (((x) & 0xFF00) == 0x8d00) + +#define OPCODE_BF(x) (((x) & 0xFF00) == 0x8b00) +#define OPCODE_BT(x) (((x) & 0xFF00) == 0x8900) + +#define OPCODE_RTS(x) (((x) & 0x000F) == 0x000b) +#define OPCODE_RTE(x) (((x) & 0xFFFF) == 0x002b) + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + kprobe_opcode_t opcode = *(kprobe_opcode_t *) (p->addr); + + if (OPCODE_RTE(opcode)) + return -EFAULT; /* Bad breakpoint */ + + p->opcode = opcode; + + return 0; +} + +void __kprobes arch_copy_kprobe(struct kprobe *p) +{ + memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + p->opcode = *p->addr; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + *p->addr = BREAKPOINT_INSTRUCTION; + flush_icache_range((unsigned long)p->addr, + (unsigned long)p->addr + sizeof(kprobe_opcode_t)); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + *p->addr = p->opcode; + flush_icache_range((unsigned long)p->addr, + (unsigned long)p->addr + sizeof(kprobe_opcode_t)); +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (*p->addr == BREAKPOINT_INSTRUCTION) + return 1; + + return 0; +} + +/** + * If an illegal slot instruction exception occurs for an address + * containing a kprobe, remove the probe. + * + * Returns 0 if the exception was handled successfully, 1 otherwise. + */ +int __kprobes kprobe_handle_illslot(unsigned long pc) +{ + struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1); + + if (p != NULL) { + printk("Warning: removing kprobe from delay slot: 0x%.8x\n", + (unsigned int)pc + 2); + unregister_kprobe(p); + return 0; + } + + return 1; +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + struct kprobe *saved = this_cpu_ptr(&saved_next_opcode); + + if (saved->addr) { + arch_disarm_kprobe(p); + arch_disarm_kprobe(saved); + + saved->addr = NULL; + saved->opcode = 0; + + saved = this_cpu_ptr(&saved_next_opcode2); + if (saved->addr) { + arch_disarm_kprobe(saved); + + saved->addr = NULL; + saved->opcode = 0; + } + } +} + +static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, p); +} + +/* + * Singlestep is implemented by disabling the current kprobe and setting one + * on the next instruction, following branches. 
Two probes are set if the + * branch is conditional. + */ +static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + __this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc); + + if (p != NULL) { + struct kprobe *op1, *op2; + + arch_disarm_kprobe(p); + + op1 = this_cpu_ptr(&saved_next_opcode); + op2 = this_cpu_ptr(&saved_next_opcode2); + + if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) { + unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); + op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr]; + } else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) { + unsigned long disp = (p->opcode & 0x0FFF); + op1->addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + + } else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) { + unsigned int reg_nr = ((p->opcode >> 8) & 0x000F); + op1->addr = + (kprobe_opcode_t *) (regs->pc + 4 + + regs->regs[reg_nr]); + + } else if (OPCODE_RTS(p->opcode)) { + op1->addr = (kprobe_opcode_t *) regs->pr; + + } else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) { + unsigned long disp = (p->opcode & 0x00FF); + /* case 1 */ + op1->addr = p->addr + 1; + /* case 2 */ + op2->addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + op2->opcode = *(op2->addr); + arch_arm_kprobe(op2); + + } else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) { + unsigned long disp = (p->opcode & 0x00FF); + /* case 1 */ + op1->addr = p->addr + 2; + /* case 2 */ + op2->addr = + (kprobe_opcode_t *) (regs->pc + 4 + disp * 2); + op2->opcode = *(op2->addr); + arch_arm_kprobe(op2); + + } else { + op1->addr = p->addr + 1; + } + + op1->opcode = *(op1->addr); + arch_arm_kprobe(op1); + } +} + +/* Called with kretprobe_lock held */ +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->pr; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->pr = (unsigned long)__kretprobe_trampoline; +} + +static int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + int ret = 0; + kprobe_opcode_t *addr = NULL; + struct kprobe_ctlblk *kcb; + + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + + addr = (kprobe_opcode_t *) (regs->pc); + + /* Check we're not actually recursing */ + if (kprobe_running()) { + p = get_kprobe(addr); + if (p) { + if (kcb->kprobe_status == KPROBE_HIT_SS && + *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { + goto no_kprobe; + } + /* We have reentered the kprobe_handler(), since + * another probe was hit while within the handler. + * We here save the original kprobes variables and + * just single step on the instruction of the new probe + * without calling any user handlers. + */ + save_previous_kprobe(kcb); + set_current_kprobe(p, regs, kcb); + kprobes_inc_nmissed_count(p); + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_REENTER; + return 1; + } + goto no_kprobe; + } + + p = get_kprobe(addr); + if (!p) { + /* Not one of ours: let kernel handle it */ + if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. Another cpu has removed + * either a probepoint or a debugger breakpoint + * at this address. In either case, no further + * handling of this interrupt is appropriate. 
+ */ + ret = 1; + } + + goto no_kprobe; + } + + set_current_kprobe(p, regs, kcb); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + if (p->pre_handler && p->pre_handler(p, regs)) { + /* handler has already set things up, so skip ss setup */ + reset_current_kprobe(); + preempt_enable_no_resched(); + return 1; + } + + prepare_singlestep(p, regs); + kcb->kprobe_status = KPROBE_HIT_SS; + return 1; + +no_kprobe: + preempt_enable_no_resched(); + return ret; +} + +/* + * For function-return probes, init_kprobes() establishes a probepoint + * here. When a retprobed function returns, this probe is hit and + * trampoline_probe_handler() runs, calling the kretprobe's handler. + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile (".globl __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" + "nop\n"); +} + +/* + * Called when we hit the probe point at __kretprobe_trampoline + */ +int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +{ + regs->pc = __kretprobe_trampoline_handler(regs, NULL); + + return 1; +} + +static int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + kprobe_opcode_t *addr = NULL; + struct kprobe *p = NULL; + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + p = this_cpu_ptr(&saved_next_opcode); + if (p->addr) { + arch_disarm_kprobe(p); + p->addr = NULL; + p->opcode = 0; + + addr = __this_cpu_read(saved_current_opcode.addr); + __this_cpu_write(saved_current_opcode.addr, NULL); + + p = get_kprobe(addr); + arch_arm_kprobe(p); + + p = this_cpu_ptr(&saved_next_opcode2); + if (p->addr) { + arch_disarm_kprobe(p); + p->addr = NULL; + p->opcode = 0; + } + } + + /* Restore back the original saved kprobes variables and continue. */ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + + reset_current_kprobe(); + +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + const struct exception_table_entry *entry; + + switch (kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe, point the pc back to the probe address + * and allow the page fault handler to continue as a + * normal page fault. + */ + regs->pc = (unsigned long)cur->addr; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); + preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * In case the user-specified fault handler returned + * zero, try to fix up. + */ + if ((entry = search_exception_tables(regs->pc)) != NULL) { + regs->pc = entry->fixup; + return 1; + } + + /* + * fixup_exception() could not handle it, + * Let do_page_fault() fix it. + */ + break; + default: + break; + } + + return 0; +} + +/* + * Wrapper routine to for handling exceptions. 
+ */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct kprobe *p = NULL; + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + kprobe_opcode_t *addr = NULL; + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + addr = (kprobe_opcode_t *) (args->regs->pc); + if (val == DIE_TRAP && + args->trapnr == (BREAKPOINT_INSTRUCTION & 0xff)) { + if (!kprobe_running()) { + if (kprobe_handler(args->regs)) { + ret = NOTIFY_STOP; + } else { + /* Not a kprobe trap */ + ret = NOTIFY_DONE; + } + } else { + p = get_kprobe(addr); + if ((kcb->kprobe_status == KPROBE_HIT_SS) || + (kcb->kprobe_status == KPROBE_REENTER)) { + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + } else { + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + } + } + } + + return ret; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)&__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c new file mode 100644 index 000000000..223c14f44 --- /dev/null +++ b/arch/sh/kernel/machine_kexec.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * machine_kexec.c - handle transition of Linux booting another kernel + * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> + * + * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz + * LANDISK/sh4 supported by kogiidena + */ +#include <linux/mm.h> +#include <linux/kexec.h> +#include <linux/delay.h> +#include <linux/reboot.h> +#include <linux/numa.h> +#include <linux/ftrace.h> +#include <linux/suspend.h> +#include <linux/memblock.h> +#include <asm/mmu_context.h> +#include <asm/io.h> +#include <asm/cacheflush.h> +#include <asm/sh_bios.h> +#include <asm/reboot.h> + +typedef void (*relocate_new_kernel_t)(unsigned long indirection_page, + unsigned long reboot_code_buffer, + unsigned long start_address); + +extern const unsigned char relocate_new_kernel[]; +extern const unsigned int relocate_new_kernel_size; +extern void *vbr_base; + +void native_machine_crash_shutdown(struct pt_regs *regs) +{ + /* Nothing to do for UP, but definitely broken for SMP.. */ +} + +/* + * Do what every setup is needed on image and the + * reboot code buffer to allow us to avoid allocations + * later. + */ +int machine_kexec_prepare(struct kimage *image) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *image) +{ +} + +static void kexec_info(struct kimage *image) +{ + int i; + printk("kexec information\n"); + for (i = 0; i < image->nr_segments; i++) { + printk(" segment[%d]: 0x%08x - 0x%08x (0x%08x)\n", + i, + (unsigned int)image->segment[i].mem, + (unsigned int)image->segment[i].mem + + image->segment[i].memsz, + (unsigned int)image->segment[i].memsz); + } + printk(" start : 0x%08x\n\n", (unsigned int)image->start); +} + +/* + * Do not allocate memory (or fail in any way) in machine_kexec(). + * We are past the point of no return, committed to rebooting now. + */ +void machine_kexec(struct kimage *image) +{ + unsigned long page_list; + unsigned long reboot_code_buffer; + relocate_new_kernel_t rnk; + unsigned long entry; + unsigned long *ptr; + int save_ftrace_enabled; + + /* + * Nicked from the mips version of machine_kexec(): + * The generic kexec code builds a page list with physical + * addresses. Use phys_to_virt() to convert them to virtual. 
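+	 * Only entries flagged IND_SOURCE, IND_INDIRECTION or
+	 * IND_DESTINATION are converted; the walk follows IND_INDIRECTION
+	 * pages and stops at IND_DONE.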
+ */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + +#ifdef CONFIG_KEXEC_JUMP + if (image->preserve_context) + save_processor_state(); +#endif + + save_ftrace_enabled = __ftrace_enabled_save(); + + /* Interrupts aren't acceptable while we reboot */ + local_irq_disable(); + + page_list = image->head; + + /* we need both effective and real address here */ + reboot_code_buffer = + (unsigned long)page_address(image->control_code_page); + + /* copy our kernel relocation code to the control code page */ + memcpy((void *)reboot_code_buffer, relocate_new_kernel, + relocate_new_kernel_size); + + kexec_info(image); + flush_cache_all(); + + sh_bios_vbr_reload(); + + /* now call it */ + rnk = (relocate_new_kernel_t) reboot_code_buffer; + (*rnk)(page_list, reboot_code_buffer, + (unsigned long)phys_to_virt(image->start)); + +#ifdef CONFIG_KEXEC_JUMP + asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); + + if (image->preserve_context) + restore_processor_state(); + + /* Convert page list back to physical addresses, what a mess. */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (*ptr & IND_INDIRECTION) ? + phys_to_virt(*ptr & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = virt_to_phys(*ptr); + } +#endif + + __ftrace_enabled_restore(save_ftrace_enabled); +} + +void arch_crash_save_vmcoreinfo(void) +{ +#ifdef CONFIG_NUMA + VMCOREINFO_SYMBOL(node_data); + VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); +#endif +#ifdef CONFIG_X2TLB + VMCOREINFO_CONFIG(X2TLB); +#endif +} + +void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + int ret; + + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base); + if (ret == 0 && crash_size > 0) { + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + } + + if (crashk_res.end == crashk_res.start) + goto disable; + + crash_size = PAGE_ALIGN(resource_size(&crashk_res)); + if (!crashk_res.start) { + unsigned long max = memblock_end_of_DRAM() - memory_limit; + crashk_res.start = memblock_phys_alloc_range(crash_size, + PAGE_SIZE, 0, max); + if (!crashk_res.start) { + pr_err("crashkernel allocation failed\n"); + goto disable; + } + } else { + ret = memblock_reserve(crashk_res.start, crash_size); + if (unlikely(ret < 0)) { + pr_err("crashkernel reservation failed - " + "memory is in use\n"); + goto disable; + } + } + + crashk_res.end = crashk_res.start + crash_size - 1; + + /* + * Crash kernel trumps memory limit + */ + if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) { + memory_limit = 0; + pr_info("Disabled memory limit for crashkernel\n"); + } + + pr_info("Reserving %ldMB of memory at 0x%08lx " + "for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crashk_res.start), + (unsigned long)(memblock_phys_mem_size() >> 20)); + + return; + +disable: + crashk_res.start = crashk_res.end = 0; +} diff --git a/arch/sh/kernel/machvec.c b/arch/sh/kernel/machvec.c new file mode 100644 index 000000000..57efaf5b8 --- /dev/null +++ b/arch/sh/kernel/machvec.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/machvec.c + * + * The SuperH machine vector setup handlers, yanked from setup.c + * + 
* Copyright (C) 1999 Niibe Yutaka + * Copyright (C) 2002 - 2007 Paul Mundt + */ +#include <linux/init.h> +#include <linux/string.h> +#include <asm/machvec.h> +#include <asm/sections.h> +#include <asm/addrspace.h> +#include <asm/setup.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/processor.h> + +#define MV_NAME_SIZE 32 + +#define for_each_mv(mv) \ + for ((mv) = (struct sh_machine_vector *)__machvec_start; \ + (mv) && (unsigned long)(mv) < (unsigned long)__machvec_end; \ + (mv)++) + +static struct sh_machine_vector * __init get_mv_byname(const char *name) +{ + struct sh_machine_vector *mv; + + for_each_mv(mv) + if (strcasecmp(name, mv->mv_name) == 0) + return mv; + + return NULL; +} + +static unsigned int __initdata machvec_selected; + +static int __init early_parse_mv(char *from) +{ + char mv_name[MV_NAME_SIZE] = ""; + char *mv_end; + char *mv_comma; + int mv_len; + struct sh_machine_vector *mvp; + + mv_end = strchr(from, ' '); + if (mv_end == NULL) + mv_end = from + strlen(from); + + mv_comma = strchr(from, ','); + mv_len = mv_end - from; + if (mv_len > (MV_NAME_SIZE-1)) + mv_len = MV_NAME_SIZE-1; + memcpy(mv_name, from, mv_len); + mv_name[mv_len] = '\0'; + from = mv_end; + + machvec_selected = 1; + + /* Boot with the generic vector */ + if (strcmp(mv_name, "generic") == 0) + return 0; + + mvp = get_mv_byname(mv_name); + if (unlikely(!mvp)) { + pr_info("Available vectors:\n\n\t'%s', ", sh_mv.mv_name); + for_each_mv(mvp) + pr_cont("'%s', ", mvp->mv_name); + pr_cont("\n\n"); + panic("Failed to select machvec '%s' -- halting.\n", + mv_name); + } else + sh_mv = *mvp; + + return 0; +} +early_param("sh_mv", early_parse_mv); + +void __init sh_mv_setup(void) +{ + /* + * Only overload the machvec if one hasn't been selected on + * the command line with sh_mv= + */ + if (!machvec_selected) { + unsigned long machvec_size; + + machvec_size = ((unsigned long)__machvec_end - + (unsigned long)__machvec_start); + + /* + * Sanity check for machvec section alignment. Ensure + * __initmv hasn't been misused. + */ + if (machvec_size % sizeof(struct sh_machine_vector)) + panic("machvec misaligned, invalid __initmv use?"); + + /* + * If the machvec hasn't been preselected, use the first + * vector (usually the only one) from .machvec.init. + */ + if (machvec_size >= sizeof(struct sh_machine_vector)) + sh_mv = *(struct sh_machine_vector *)__machvec_start; + } + + pr_notice("Booting machvec: %s\n", get_system_type()); + + /* + * Manually walk the vec, fill in anything that the board hasn't yet + * by hand, wrapping to the generic implementation. + */ +#define mv_set(elem) do { \ + if (!sh_mv.mv_##elem) \ + sh_mv.mv_##elem = generic_##elem; \ +} while (0) + + mv_set(irq_demux); + mv_set(mode_pins); + mv_set(mem_init); +} diff --git a/arch/sh/kernel/module.c b/arch/sh/kernel/module.c new file mode 100644 index 000000000..b9cee98a7 --- /dev/null +++ b/arch/sh/kernel/module.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Kernel module help for SH. + + SHcompact version by Kaz Kojima and Paul Mundt. + + SHmedia bits: + + Copyright 2004 SuperH (UK) Ltd + Author: Richard Curnow + + Based on the sh version, and on code from the sh64-specific parts of + modutils, originally written by Richard Curnow and Ben Gaster. 
+*/ +#include <linux/moduleloader.h> +#include <linux/elf.h> +#include <linux/vmalloc.h> +#include <linux/bug.h> +#include <linux/fs.h> +#include <linux/string.h> +#include <linux/kernel.h> +#include <asm/unaligned.h> +#include <asm/dwarf.h> + +int apply_relocate_add(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + Elf32_Addr relocation; + uint32_t *location; + uint32_t value; + + pr_debug("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + relocation = sym->st_value + rel[i].r_addend; + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_SH_NONE: + break; + case R_SH_DIR32: + value = get_unaligned(location); + value += relocation; + put_unaligned(value, location); + break; + case R_SH_REL32: + relocation = (relocation - (Elf32_Addr) location); + value = get_unaligned(location); + value += relocation; + put_unaligned(value, location); + break; + case R_SH_IMM_LOW16: + *location = (*location & ~0x3fffc00) | + ((relocation & 0xffff) << 10); + break; + case R_SH_IMM_MEDLOW16: + *location = (*location & ~0x3fffc00) | + (((relocation >> 16) & 0xffff) << 10); + break; + case R_SH_IMM_LOW16_PCREL: + relocation -= (Elf32_Addr) location; + *location = (*location & ~0x3fffc00) | + ((relocation & 0xffff) << 10); + break; + case R_SH_IMM_MEDLOW16_PCREL: + relocation -= (Elf32_Addr) location; + *location = (*location & ~0x3fffc00) | + (((relocation >> 16) & 0xffff) << 10); + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + int ret = 0; + + ret |= module_dwarf_finalize(hdr, sechdrs, me); + + return ret; +} + +void module_arch_cleanup(struct module *mod) +{ + module_dwarf_cleanup(mod); +} diff --git a/arch/sh/kernel/nmi_debug.c b/arch/sh/kernel/nmi_debug.c new file mode 100644 index 000000000..a212b645b --- /dev/null +++ b/arch/sh/kernel/nmi_debug.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2007 Atmel Corporation + */ +#include <linux/delay.h> +#include <linux/kdebug.h> +#include <linux/notifier.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/hardirq.h> + +enum nmi_action { + NMI_SHOW_STATE = 1 << 0, + NMI_SHOW_REGS = 1 << 1, + NMI_DIE = 1 << 2, + NMI_DEBOUNCE = 1 << 3, +}; + +static unsigned long nmi_actions; + +static int nmi_debug_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + + if (likely(val != DIE_NMI)) + return NOTIFY_DONE; + + if (nmi_actions & NMI_SHOW_STATE) + show_state(); + if (nmi_actions & NMI_SHOW_REGS) + show_regs(args->regs); + if (nmi_actions & NMI_DEBOUNCE) + mdelay(10); + if (nmi_actions & NMI_DIE) + return NOTIFY_BAD; + + return NOTIFY_OK; +} + +static struct notifier_block nmi_debug_nb = { + .notifier_call = nmi_debug_notify, +}; + +static int __init nmi_debug_setup(char *str) +{ + char *p, *sep; + + 
register_die_notifier(&nmi_debug_nb); + + if (*str != '=') + return 1; + + for (p = str + 1; *p; p = sep + 1) { + sep = strchr(p, ','); + if (sep) + *sep = 0; + if (strcmp(p, "state") == 0) + nmi_actions |= NMI_SHOW_STATE; + else if (strcmp(p, "regs") == 0) + nmi_actions |= NMI_SHOW_REGS; + else if (strcmp(p, "debounce") == 0) + nmi_actions |= NMI_DEBOUNCE; + else if (strcmp(p, "die") == 0) + nmi_actions |= NMI_DIE; + else + printk(KERN_WARNING "NMI: Unrecognized action `%s'\n", + p); + if (!sep) + break; + } + + return 1; +} +__setup("nmi_debug", nmi_debug_setup); diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c new file mode 100644 index 000000000..c9d3aa187 --- /dev/null +++ b/arch/sh/kernel/perf_callchain.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event callchain support - SuperH architecture code + * + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/perf_event.h> +#include <linux/percpu.h> +#include <asm/unwinder.h> +#include <asm/ptrace.h> + +static void callchain_address(void *data, unsigned long addr, int reliable) +{ + struct perf_callchain_entry_ctx *entry = data; + + if (reliable) + perf_callchain_store(entry, addr); +} + +static const struct stacktrace_ops callchain_ops = { + .address = callchain_address, +}; + +void +perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +{ + perf_callchain_store(entry, regs->pc); + + unwind_stack(NULL, regs, NULL, &callchain_ops, entry); +} diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c new file mode 100644 index 000000000..1d2507f22 --- /dev/null +++ b/arch/sh/kernel/perf_event.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance event support framework for SuperH hardware counters. + * + * Copyright (C) 2009 Paul Mundt + * + * Heavily based on the x86 and PowerPC implementations. + * + * x86: + * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> + * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar + * Copyright (C) 2009 Jaswinder Singh Rajput + * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter + * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra + * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> + * + * ppc: + * Copyright 2008-2009 Paul Mackerras, IBM Corporation. + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/perf_event.h> +#include <linux/export.h> +#include <asm/processor.h> + +struct cpu_hw_events { + struct perf_event *events[MAX_HWEVENTS]; + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; +}; + +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +static struct sh_pmu *sh_pmu __read_mostly; + +/* Number of perf_events counting hardware events */ +static atomic_t num_events; +/* Used to avoid races in calling reserve/release_pmc_hardware */ +static DEFINE_MUTEX(pmc_reserve_mutex); + +/* + * Stub these out for now, do something more profound later. + */ +int reserve_pmc_hardware(void) +{ + return 0; +} + +void release_pmc_hardware(void) +{ +} + +static inline int sh_pmu_initialized(void) +{ + return !!sh_pmu; +} + +/* + * Release the PMU if this is the last perf_event. 
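+ * (The atomic_add_unless() fast path below avoids taking the mutex
+ * except when the reference count may actually drop to zero.)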
+ */ +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (!atomic_add_unless(&num_events, -1, 1)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_dec_return(&num_events) == 0) + release_pmc_hardware(); + mutex_unlock(&pmc_reserve_mutex); + } +} + +static int hw_perf_cache_event(int config, int *evp) +{ + unsigned long type, op, result; + int ev; + + if (!sh_pmu->cache_events) + return -EINVAL; + + /* unpack config */ + type = config & 0xff; + op = (config >> 8) & 0xff; + result = (config >> 16) & 0xff; + + if (type >= PERF_COUNT_HW_CACHE_MAX || + op >= PERF_COUNT_HW_CACHE_OP_MAX || + result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ev = (*sh_pmu->cache_events)[type][op][result]; + if (ev == 0) + return -EOPNOTSUPP; + if (ev == -1) + return -EINVAL; + *evp = ev; + return 0; +} + +static int __hw_perf_event_init(struct perf_event *event) +{ + struct perf_event_attr *attr = &event->attr; + struct hw_perf_event *hwc = &event->hw; + int config = -1; + int err; + + if (!sh_pmu_initialized()) + return -ENODEV; + + /* + * See if we need to reserve the counter. + * + * If no events are currently in use, then we have to take a + * mutex to ensure that we don't race with another task doing + * reserve_pmc_hardware or release_pmc_hardware. + */ + err = 0; + if (!atomic_inc_not_zero(&num_events)) { + mutex_lock(&pmc_reserve_mutex); + if (atomic_read(&num_events) == 0 && + reserve_pmc_hardware()) + err = -EBUSY; + else + atomic_inc(&num_events); + mutex_unlock(&pmc_reserve_mutex); + } + + if (err) + return err; + + event->destroy = hw_perf_event_destroy; + + switch (attr->type) { + case PERF_TYPE_RAW: + config = attr->config & sh_pmu->raw_event_mask; + break; + case PERF_TYPE_HW_CACHE: + err = hw_perf_cache_event(attr->config, &config); + if (err) + return err; + break; + case PERF_TYPE_HARDWARE: + if (attr->config >= sh_pmu->max_events) + return -EINVAL; + + config = sh_pmu->event_map(attr->config); + break; + } + + if (config == -1) + return -EINVAL; + + hwc->config |= config; + + return 0; +} + +static void sh_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + u64 prev_raw_count, new_raw_count; + s64 delta; + int shift = 0; + + /* + * Depending on the counter configuration, they may or may not + * be chained, in which case the previous counter value can be + * updated underneath us if the lower-half overflows. + * + * Our tactic to handle this is to first atomically read and + * exchange a new raw count - then add that new-prev delta + * count to the generic counter atomically. + * + * As there is no interrupt associated with the overflow events, + * this is the simplest approach for maintaining consistency. + */ +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sh_pmu->read(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (counter-)time and add that to the generic counter. + * + * Careful, not all hw sign-extends above the physical width + * of the count. 
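+	 * The shift below is meant to sign-extend deltas from counters
+	 * narrower than 64 bits; with shift == 0, as used here, it is a
+	 * plain subtraction.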
+ */ + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +static void sh_pmu_stop(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (!(event->hw.state & PERF_HES_STOPPED)) { + sh_pmu->disable(hwc, idx); + cpuc->events[idx] = NULL; + event->hw.state |= PERF_HES_STOPPED; + } + + if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { + sh_perf_event_update(event, &event->hw, idx); + event->hw.state |= PERF_HES_UPTODATE; + } +} + +static void sh_pmu_start(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + if (WARN_ON_ONCE(idx == -1)) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); + + cpuc->events[idx] = event; + event->hw.state = 0; + sh_pmu->enable(hwc, idx); +} + +static void sh_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + + sh_pmu_stop(event, PERF_EF_UPDATE); + __clear_bit(event->hw.idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static int sh_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + int ret = -EAGAIN; + + perf_pmu_disable(event->pmu); + + if (__test_and_set_bit(idx, cpuc->used_mask)) { + idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events); + if (idx == sh_pmu->num_events) + goto out; + + __set_bit(idx, cpuc->used_mask); + hwc->idx = idx; + } + + sh_pmu->disable(hwc, idx); + + event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + if (flags & PERF_EF_START) + sh_pmu_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + ret = 0; +out: + perf_pmu_enable(event->pmu); + return ret; +} + +static void sh_pmu_read(struct perf_event *event) +{ + sh_perf_event_update(event, &event->hw, event->hw.idx); +} + +static int sh_pmu_event_init(struct perf_event *event) +{ + int err; + + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HW_CACHE: + case PERF_TYPE_HARDWARE: + err = __hw_perf_event_init(event); + break; + + default: + return -ENOENT; + } + + if (unlikely(err)) { + if (event->destroy) + event->destroy(event); + } + + return err; +} + +static void sh_pmu_enable(struct pmu *pmu) +{ + if (!sh_pmu_initialized()) + return; + + sh_pmu->enable_all(); +} + +static void sh_pmu_disable(struct pmu *pmu) +{ + if (!sh_pmu_initialized()) + return; + + sh_pmu->disable_all(); +} + +static struct pmu pmu = { + .pmu_enable = sh_pmu_enable, + .pmu_disable = sh_pmu_disable, + .event_init = sh_pmu_event_init, + .add = sh_pmu_add, + .del = sh_pmu_del, + .start = sh_pmu_start, + .stop = sh_pmu_stop, + .read = sh_pmu_read, +}; + +static int sh_pmu_prepare_cpu(unsigned int cpu) +{ + struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); + + memset(cpuhw, 0, sizeof(struct cpu_hw_events)); + return 0; +} + +int register_sh_pmu(struct sh_pmu *_pmu) +{ + if (sh_pmu) + return -EBUSY; + sh_pmu = _pmu; + + pr_info("Performance Events: %s support registered\n", _pmu->name); + + /* + * All of the on-chip counters are "limited", in that they have + * no interrupts, and are therefore 
unable to do sampling without + * further work and timer assistance. + */ + pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + + WARN_ON(_pmu->num_events > MAX_HWEVENTS); + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu, + NULL); + return 0; +} diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c new file mode 100644 index 000000000..169832fcf --- /dev/null +++ b/arch/sh/kernel/process.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/mm.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/sched/signal.h> +#include <linux/sched/task_stack.h> +#include <linux/export.h> +#include <linux/stackprotector.h> +#include <asm/fpu.h> +#include <asm/ptrace.h> + +struct kmem_cache *task_xstate_cachep = NULL; +unsigned int xstate_size; + +#ifdef CONFIG_STACKPROTECTOR +unsigned long __stack_chk_guard __read_mostly; +EXPORT_SYMBOL(__stack_chk_guard); +#endif + +/* + * this gets called so that we can store lazy state into memory and copy the + * current task into the new thread. + */ +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + unlazy_fpu(src, task_pt_regs(src)); + *dst = *src; + + if (src->thread.xstate) { + dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, + GFP_KERNEL); + if (!dst->thread.xstate) + return -ENOMEM; + memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); + } + + return 0; +} + +void free_thread_xstate(struct task_struct *tsk) +{ + if (tsk->thread.xstate) { + kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); + tsk->thread.xstate = NULL; + } +} + +void arch_release_task_struct(struct task_struct *tsk) +{ + free_thread_xstate(tsk); +} + +void arch_task_cache_init(void) +{ + if (!xstate_size) + return; + + task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, + __alignof__(union thread_xstate), + SLAB_PANIC, NULL); +} + +#ifdef CONFIG_SH_FPU_EMU +# define HAVE_SOFTFP 1 +#else +# define HAVE_SOFTFP 0 +#endif + +void init_thread_xstate(void) +{ + if (boot_cpu_data.flags & CPU_HAS_FPU) + xstate_size = sizeof(struct sh_fpu_hard_struct); + else if (HAVE_SOFTFP) + xstate_size = sizeof(struct sh_fpu_soft_struct); + else + xstate_size = 0; +} diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c new file mode 100644 index 000000000..92b6649d4 --- /dev/null +++ b/arch/sh/kernel/process_32.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/process.c + * + * This file handles the architecture-dependent parts of process handling.. + * + * Copyright (C) 1995 Linus Torvalds + * + * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 2006 Lineo Solutions Inc. 
support SH4A UBC + * Copyright (C) 2002 - 2008 Paul Mundt + */ +#include <linux/module.h> +#include <linux/mm.h> +#include <linux/sched/debug.h> +#include <linux/sched/task.h> +#include <linux/sched/task_stack.h> +#include <linux/slab.h> +#include <linux/elfcore.h> +#include <linux/fs.h> +#include <linux/ftrace.h> +#include <linux/hw_breakpoint.h> +#include <linux/prefetch.h> +#include <linux/stackprotector.h> +#include <linux/uaccess.h> +#include <asm/mmu_context.h> +#include <asm/fpu.h> +#include <asm/syscalls.h> +#include <asm/switch_to.h> + +void show_regs(struct pt_regs * regs) +{ + pr_info("\n"); + show_regs_print_info(KERN_DEFAULT); + + pr_info("PC is at %pS\n", (void *)instruction_pointer(regs)); + pr_info("PR is at %pS\n", (void *)regs->pr); + + pr_info("PC : %08lx SP : %08lx SR : %08lx ", regs->pc, + regs->regs[15], regs->sr); +#ifdef CONFIG_MMU + pr_cont("TEA : %08x\n", __raw_readl(MMU_TEA)); +#else + pr_cont("\n"); +#endif + + pr_info("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", + regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3]); + pr_info("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", + regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]); + pr_info("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n", + regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]); + pr_info("R12 : %08lx R13 : %08lx R14 : %08lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + pr_info("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n", + regs->mach, regs->macl, regs->gbr, regs->pr); + + show_trace(NULL, (unsigned long *)regs->regs[15], regs, KERN_DEFAULT); + show_code(regs); +} + +void start_thread(struct pt_regs *regs, unsigned long new_pc, + unsigned long new_sp) +{ + regs->pr = 0; + regs->sr = SR_FD; + regs->pc = new_pc; + regs->regs[15] = new_sp; + + free_thread_xstate(current); +} +EXPORT_SYMBOL(start_thread); + +void flush_thread(void) +{ + struct task_struct *tsk = current; + + flush_ptrace_hw_breakpoint(tsk); + +#if defined(CONFIG_SH_FPU) + /* Forget lazy FPU state */ + clear_fpu(tsk, task_pt_regs(tsk)); + clear_used_math(); +#endif +} + +asmlinkage void ret_from_fork(void); +asmlinkage void ret_from_kernel_thread(void); + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; + struct thread_info *ti = task_thread_info(p); + struct pt_regs *childregs; + +#if defined(CONFIG_SH_DSP) + struct task_struct *tsk = current; + + if (is_dsp_enabled(tsk)) { + /* We can use the __save_dsp or just copy the struct: + * __save_dsp(p); + * p->thread.dsp_status.status |= SR_DSP + */ + p->thread.dsp_status = tsk->thread.dsp_status; + } +#endif + + memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); + + childregs = task_pt_regs(p); + p->thread.sp = (unsigned long) childregs; + if (unlikely(args->fn)) { + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.pc = (unsigned long) ret_from_kernel_thread; + childregs->regs[4] = (unsigned long) args->fn_arg; + childregs->regs[5] = (unsigned long) args->fn; + childregs->sr = SR_MD; +#if defined(CONFIG_SH_FPU) + childregs->sr |= SR_FD; +#endif + ti->status &= ~TS_USEDFPU; + p->thread.fpu_counter = 0; + return 0; + } + *childregs = *current_pt_regs(); + + if (usp) + childregs->regs[15] = usp; + + if (clone_flags & CLONE_SETTLS) + childregs->gbr = tls; + + childregs->regs[0] = 0; /* Set return value for child */ + p->thread.pc = (unsigned long) ret_from_fork; + return 0; +} + +/* + * 
switch_to(x,y) should switch tasks from x to y. + * + */ +__notrace_funcgraph struct task_struct * +__switch_to(struct task_struct *prev, struct task_struct *next) +{ + struct thread_struct *next_t = &next->thread; + +#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) + __stack_chk_guard = next->stack_canary; +#endif + + unlazy_fpu(prev, task_pt_regs(prev)); + + /* we're going to use this soon, after a few expensive things */ + if (next->thread.fpu_counter > 5) + prefetch(next_t->xstate); + +#ifdef CONFIG_MMU + /* + * Restore the kernel mode register + * k7 (r7_bank1) + */ + asm volatile("ldc %0, r7_bank" + : /* no output */ + : "r" (task_thread_info(next))); +#endif + + /* + * If the task has used fpu the last 5 timeslices, just do a full + * restore of the math state immediately to avoid the trap; the + * chances of needing FPU soon are obviously high now + */ + if (next->thread.fpu_counter > 5) + __fpu_state_restore(); + + return prev; +} + +unsigned long __get_wchan(struct task_struct *p) +{ + unsigned long pc; + + /* + * The same comment as on the Alpha applies here, too ... + */ + pc = thread_saved_pc(p); + +#ifdef CONFIG_FRAME_POINTER + if (in_sched_functions(pc)) { + unsigned long schedule_frame = (unsigned long)p->thread.sp; + return ((unsigned long *)schedule_frame)[21]; + } +#endif + + return pc; +} diff --git a/arch/sh/kernel/ptrace.c b/arch/sh/kernel/ptrace.c new file mode 100644 index 000000000..bfc59f2c2 --- /dev/null +++ b/arch/sh/kernel/ptrace.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/ptrace.h> + +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} + +/** + * regs_query_register_name() - query register name from its offset + * @offset: the offset of a register in struct pt_regs. + * + * regs_query_register_name() returns the name of a register from its + * offset in struct pt_regs. 
If the @offset is invalid, this returns NULL; + */ +const char *regs_query_register_name(unsigned int offset) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (roff->offset == offset) + return roff->name; + return NULL; +} diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c new file mode 100644 index 000000000..d417988d9 --- /dev/null +++ b/arch/sh/kernel/ptrace_32.c @@ -0,0 +1,487 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SuperH process tracing + * + * Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka + * Copyright (C) 2002 - 2009 Paul Mundt + * + * Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp> + */ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/errno.h> +#include <linux/ptrace.h> +#include <linux/user.h> +#include <linux/security.h> +#include <linux/signal.h> +#include <linux/io.h> +#include <linux/audit.h> +#include <linux/seccomp.h> +#include <linux/elf.h> +#include <linux/regset.h> +#include <linux/hw_breakpoint.h> +#include <linux/uaccess.h> +#include <asm/processor.h> +#include <asm/mmu_context.h> +#include <asm/syscalls.h> +#include <asm/fpu.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/syscalls.h> + +/* + * This routine will get a word off of the process kernel stack. + */ +static inline int get_stack_long(struct task_struct *task, int offset) +{ + unsigned char *stack; + + stack = (unsigned char *)task_pt_regs(task); + stack += offset; + return (*((int *)stack)); +} + +/* + * This routine will put a word on the process kernel stack. + */ +static inline int put_stack_long(struct task_struct *task, int offset, + unsigned long data) +{ + unsigned char *stack; + + stack = (unsigned char *)task_pt_regs(task); + stack += offset; + *(unsigned long *) stack = data; + return 0; +} + +void ptrace_triggered(struct perf_event *bp, + struct perf_sample_data *data, struct pt_regs *regs) +{ + struct perf_event_attr attr; + + /* + * Disable the breakpoint request here since ptrace has defined a + * one-shot behaviour for breakpoint exceptions. + */ + attr = bp->attr; + attr.disabled = true; + modify_user_hw_breakpoint(bp, &attr); +} + +static int set_single_step(struct task_struct *tsk, unsigned long addr) +{ + struct thread_struct *thread = &tsk->thread; + struct perf_event *bp; + struct perf_event_attr attr; + + bp = thread->ptrace_bps[0]; + if (!bp) { + ptrace_breakpoint_init(&attr); + + attr.bp_addr = addr; + attr.bp_len = HW_BREAKPOINT_LEN_2; + attr.bp_type = HW_BREAKPOINT_R; + + bp = register_user_hw_breakpoint(&attr, ptrace_triggered, + NULL, tsk); + if (IS_ERR(bp)) + return PTR_ERR(bp); + + thread->ptrace_bps[0] = bp; + } else { + int err; + + attr = bp->attr; + attr.bp_addr = addr; + /* reenable breakpoint */ + attr.disabled = false; + err = modify_user_hw_breakpoint(bp, &attr); + if (unlikely(err)) + return err; + } + + return 0; +} + +void user_enable_single_step(struct task_struct *child) +{ + unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc)); + + set_tsk_thread_flag(child, TIF_SINGLESTEP); + + set_single_step(child, pc); +} + +void user_disable_single_step(struct task_struct *child) +{ + clear_tsk_thread_flag(child, TIF_SINGLESTEP); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure single step bits etc are not set. 
+ */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + +static int genregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + const struct pt_regs *regs = task_pt_regs(target); + + return membuf_write(&to, regs, sizeof(struct pt_regs)); +} + +static int genregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_regs *regs = task_pt_regs(target); + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + regs->regs, + 0, 16 * sizeof(unsigned long)); + if (!ret && count > 0) + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, + ®s->pc, + offsetof(struct pt_regs, pc), + sizeof(struct pt_regs)); + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_regs), -1); + + return ret; +} + +#ifdef CONFIG_SH_FPU +static int fpregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + return membuf_write(&to, target->thread.xstate, + sizeof(struct user_fpu_struct)); +} + +static int fpregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + + ret = init_fpu(target); + if (ret) + return ret; + + set_stopped_child_used_math(target); + + if ((boot_cpu_data.flags & CPU_HAS_FPU)) + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.xstate->hardfpu, 0, -1); + + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.xstate->softfpu, 0, -1); +} + +static int fpregs_active(struct task_struct *target, + const struct user_regset *regset) +{ + return tsk_used_math(target) ? regset->n : 0; +} +#endif + +#ifdef CONFIG_SH_DSP +static int dspregs_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + const struct pt_dspregs *regs = + (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs; + + return membuf_write(&to, regs, sizeof(struct pt_dspregs)); +} + +static int dspregs_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct pt_dspregs *regs = + (struct pt_dspregs *)&target->thread.dsp_status.dsp_regs; + int ret; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, + 0, sizeof(struct pt_dspregs)); + if (!ret) + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, + sizeof(struct pt_dspregs), -1); + + return ret; +} + +static int dspregs_active(struct task_struct *target, + const struct user_regset *regset) +{ + struct pt_regs *regs = task_pt_regs(target); + + return regs->sr & SR_DSP ? 
regset->n : 0; +} +#endif + +const struct pt_regs_offset regoffset_table[] = { + REGS_OFFSET_NAME(0), + REGS_OFFSET_NAME(1), + REGS_OFFSET_NAME(2), + REGS_OFFSET_NAME(3), + REGS_OFFSET_NAME(4), + REGS_OFFSET_NAME(5), + REGS_OFFSET_NAME(6), + REGS_OFFSET_NAME(7), + REGS_OFFSET_NAME(8), + REGS_OFFSET_NAME(9), + REGS_OFFSET_NAME(10), + REGS_OFFSET_NAME(11), + REGS_OFFSET_NAME(12), + REGS_OFFSET_NAME(13), + REGS_OFFSET_NAME(14), + REGS_OFFSET_NAME(15), + REG_OFFSET_NAME(pc), + REG_OFFSET_NAME(pr), + REG_OFFSET_NAME(sr), + REG_OFFSET_NAME(gbr), + REG_OFFSET_NAME(mach), + REG_OFFSET_NAME(macl), + REG_OFFSET_NAME(tra), + REG_OFFSET_END, +}; + +/* + * These are our native regset flavours. + */ +enum sh_regset { + REGSET_GENERAL, +#ifdef CONFIG_SH_FPU + REGSET_FPU, +#endif +#ifdef CONFIG_SH_DSP + REGSET_DSP, +#endif +}; + +static const struct user_regset sh_regsets[] = { + /* + * Format is: + * R0 --> R15 + * PC, PR, SR, GBR, MACH, MACL, TRA + */ + [REGSET_GENERAL] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(long), + .align = sizeof(long), + .regset_get = genregs_get, + .set = genregs_set, + }, + +#ifdef CONFIG_SH_FPU + [REGSET_FPU] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpu_struct) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .regset_get = fpregs_get, + .set = fpregs_set, + .active = fpregs_active, + }, +#endif + +#ifdef CONFIG_SH_DSP + [REGSET_DSP] = { + .n = sizeof(struct pt_dspregs) / sizeof(long), + .size = sizeof(long), + .align = sizeof(long), + .regset_get = dspregs_get, + .set = dspregs_set, + .active = dspregs_active, + }, +#endif +}; + +static const struct user_regset_view user_sh_native_view = { + .name = "sh", + .e_machine = EM_SH, + .regsets = sh_regsets, + .n = ARRAY_SIZE(sh_regsets), +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return &user_sh_native_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long __user *datap = (unsigned long __user *)data; + int ret; + + switch (request) { + /* read the word at location addr in the USER area. 
*/ + case PTRACE_PEEKUSR: { + unsigned long tmp; + + ret = -EIO; + if ((addr & 3) || addr < 0 || + addr > sizeof(struct user) - 3) + break; + + if (addr < sizeof(struct pt_regs)) + tmp = get_stack_long(child, addr); + else if (addr >= offsetof(struct user, fpu) && + addr < offsetof(struct user, u_fpvalid)) { + if (!tsk_used_math(child)) { + if (addr == offsetof(struct user, fpu.fpscr)) + tmp = FPSCR_INIT; + else + tmp = 0; + } else { + unsigned long index; + ret = init_fpu(child); + if (ret) + break; + index = addr - offsetof(struct user, fpu); + tmp = ((unsigned long *)child->thread.xstate) + [index >> 2]; + } + } else if (addr == offsetof(struct user, u_fpvalid)) + tmp = !!tsk_used_math(child); + else if (addr == PT_TEXT_ADDR) + tmp = child->mm->start_code; + else if (addr == PT_DATA_ADDR) + tmp = child->mm->start_data; + else if (addr == PT_TEXT_END_ADDR) + tmp = child->mm->end_code; + else if (addr == PT_TEXT_LEN) + tmp = child->mm->end_code - child->mm->start_code; + else + tmp = 0; + ret = put_user(tmp, datap); + break; + } + + case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ + ret = -EIO; + if ((addr & 3) || addr < 0 || + addr > sizeof(struct user) - 3) + break; + + if (addr < sizeof(struct pt_regs)) + ret = put_stack_long(child, addr, data); + else if (addr >= offsetof(struct user, fpu) && + addr < offsetof(struct user, u_fpvalid)) { + unsigned long index; + ret = init_fpu(child); + if (ret) + break; + index = addr - offsetof(struct user, fpu); + set_stopped_child_used_math(child); + ((unsigned long *)child->thread.xstate) + [index >> 2] = data; + ret = 0; + } else if (addr == offsetof(struct user, u_fpvalid)) { + conditional_stopped_child_used_math(data, child); + ret = 0; + } + break; + + case PTRACE_GETREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + datap); + case PTRACE_SETREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_GENERAL, + 0, sizeof(struct pt_regs), + datap); +#ifdef CONFIG_SH_FPU + case PTRACE_GETFPREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + datap); + case PTRACE_SETFPREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_FPU, + 0, sizeof(struct user_fpu_struct), + datap); +#endif +#ifdef CONFIG_SH_DSP + case PTRACE_GETDSPREGS: + return copy_regset_to_user(child, &user_sh_native_view, + REGSET_DSP, + 0, sizeof(struct pt_dspregs), + datap); + case PTRACE_SETDSPREGS: + return copy_regset_from_user(child, &user_sh_native_view, + REGSET_DSP, + 0, sizeof(struct pt_dspregs), + datap); +#endif + default: + ret = ptrace_request(child, request, addr, data); + break; + } + + return ret; +} + +asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) +{ + if (test_thread_flag(TIF_SYSCALL_TRACE) && + ptrace_report_syscall_entry(regs)) { + regs->regs[0] = -ENOSYS; + return -1; + } + + if (secure_computing() == -1) + return -1; + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); + + audit_syscall_entry(regs->regs[3], regs->regs[4], regs->regs[5], + regs->regs[6], regs->regs[7]); + + return 0; +} + +asmlinkage void do_syscall_trace_leave(struct pt_regs *regs) +{ + int step; + + audit_syscall_exit(regs); + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs->regs[0]); + + step = test_thread_flag(TIF_SINGLESTEP); + if (step || test_thread_flag(TIF_SYSCALL_TRACE)) + 
ptrace_report_syscall_exit(regs, step);
+}
diff --git a/arch/sh/kernel/reboot.c b/arch/sh/kernel/reboot.c
new file mode 100644
index 000000000..e8eeedc9b
--- /dev/null
+++ b/arch/sh/kernel/reboot.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pm.h>
+#include <linux/kexec.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/module.h>
+#include <asm/watchdog.h>
+#include <asm/addrspace.h>
+#include <asm/reboot.h>
+#include <asm/tlbflush.h>
+#include <asm/traps.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+static void watchdog_trigger_immediate(void)
+{
+	sh_wdt_write_cnt(0xFF);
+	sh_wdt_write_csr(0xC2);
+}
+
+static void native_machine_restart(char * __unused)
+{
+	local_irq_disable();
+
+	/* Destroy all of the TLBs in preparation for reset by MMU */
+	__flush_tlb_global();
+
+	/* Address error with SR.BL=1 first. */
+	trigger_address_error();
+
+	/* If that fails or is unsupported, go for the watchdog next. */
+	watchdog_trigger_immediate();
+
+	/*
+	 * Give up and sleep.
+	 */
+	while (1)
+		cpu_sleep();
+}
+
+static void native_machine_shutdown(void)
+{
+	smp_send_stop();
+}
+
+static void native_machine_power_off(void)
+{
+	do_kernel_power_off();
+}
+
+static void native_machine_halt(void)
+{
+	/* stop other cpus */
+	machine_shutdown();
+
+	/* stop this cpu */
+	stop_this_cpu(NULL);
+}
+
+struct machine_ops machine_ops = {
+	.power_off = native_machine_power_off,
+	.shutdown = native_machine_shutdown,
+	.restart = native_machine_restart,
+	.halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+	.crash_shutdown = native_machine_crash_shutdown,
+#endif
+};
+
+void machine_power_off(void)
+{
+	machine_ops.power_off();
+}
+
+void machine_shutdown(void)
+{
+	machine_ops.shutdown();
+}
+
+void machine_restart(char *cmd)
+{
+	machine_ops.restart(cmd);
+}
+
+void machine_halt(void)
+{
+	machine_ops.halt();
+}
+
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	machine_ops.crash_shutdown(regs);
+}
+#endif
diff --git a/arch/sh/kernel/relocate_kernel.S b/arch/sh/kernel/relocate_kernel.S
new file mode 100644
index 000000000..d9bf2b727
--- /dev/null
+++ b/arch/sh/kernel/relocate_kernel.S
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * relocate_kernel.S - put the kernel image in place to boot
+ * 2005.9.17 kogiidena@eggplant.ddo.jp
+ *
+ * LANDISK/sh4 is supported. Other SH systems may work as well.
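+ *
+ * The code below is copied to the control page by machine_kexec() and
+ * runs from there; it saves both register banks and swaps (rather than
+ * just copies) pages so that the kexec jump case can return to the old
+ * kernel.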
+ * + * 2009-03-18 Magnus Damm - Added Kexec Jump support + */ +#include <linux/linkage.h> +#include <asm/addrspace.h> +#include <asm/page.h> + + .globl relocate_new_kernel +relocate_new_kernel: + /* r4 = indirection_page */ + /* r5 = reboot_code_buffer */ + /* r6 = start_address */ + + mov.l 10f, r0 /* PAGE_SIZE */ + add r5, r0 /* setup new stack at end of control page */ + + /* save r15->r8 to new stack */ + mov.l r15, @-r0 + mov r0, r15 + mov.l r14, @-r15 + mov.l r13, @-r15 + mov.l r12, @-r15 + mov.l r11, @-r15 + mov.l r10, @-r15 + mov.l r9, @-r15 + mov.l r8, @-r15 + + /* save other random registers */ + sts.l macl, @-r15 + sts.l mach, @-r15 + stc.l gbr, @-r15 + stc.l ssr, @-r15 + stc.l sr, @-r15 + sts.l pr, @-r15 + stc.l spc, @-r15 + + /* switch to bank1 and save r7->r0 */ + mov.l 12f, r9 + stc sr, r8 + or r9, r8 + ldc r8, sr + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + mov.l r0, @-r15 + + /* switch to bank0 and save r7->r0 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + mov.l r7, @-r15 + mov.l r6, @-r15 + mov.l r5, @-r15 + mov.l r4, @-r15 + mov.l r3, @-r15 + mov.l r2, @-r15 + mov.l r1, @-r15 + mov.l r0, @-r15 + + mov.l r4, @-r15 /* save indirection page again */ + + bsr swap_pages /* swap pages before jumping to new kernel */ + nop + + mova 11f, r0 + mov.l r15, @r0 /* save pointer to stack */ + + jsr @r6 /* hand over control to new kernel */ + nop + + mov.l 11f, r15 /* get pointer to stack */ + mov.l @r15+, r4 /* restore r4 to get indirection page */ + + bsr swap_pages /* swap pages back to previous state */ + nop + + /* make sure bank0 is active and restore r0->r7 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + /* switch to bank1 and restore r0->r7 */ + mov.l 12f, r9 + stc sr, r8 + or r9, r8 + ldc r8, sr + mov.l @r15+, r0 + mov.l @r15+, r1 + mov.l @r15+, r2 + mov.l @r15+, r3 + mov.l @r15+, r4 + mov.l @r15+, r5 + mov.l @r15+, r6 + mov.l @r15+, r7 + + /* switch back to bank0 */ + mov.l 12f, r9 + not r9, r9 + stc sr, r8 + and r9, r8 + ldc r8, sr + + /* restore other random registers */ + ldc.l @r15+, spc + lds.l @r15+, pr + ldc.l @r15+, sr + ldc.l @r15+, ssr + ldc.l @r15+, gbr + lds.l @r15+, mach + lds.l @r15+, macl + + /* restore r8->r15 */ + mov.l @r15+, r8 + mov.l @r15+, r9 + mov.l @r15+, r10 + mov.l @r15+, r11 + mov.l @r15+, r12 + mov.l @r15+, r13 + mov.l @r15+, r14 + mov.l @r15+, r15 + rts + nop + +swap_pages: + bra 1f + mov r4,r0 /* cmd = indirection_page */ +0: + mov.l @r4+,r0 /* cmd = *ind++ */ + +1: /* addr = cmd & 0xfffffff0 */ + mov r0,r2 + mov #-16,r1 + and r1,r2 + + /* if(cmd & IND_DESTINATION) dst = addr */ + tst #1,r0 + bt 2f + bra 0b + mov r2,r5 + +2: /* else if(cmd & IND_INDIRECTION) ind = addr */ + tst #2,r0 + bt 3f + bra 0b + mov r2,r4 + +3: /* else if(cmd & IND_DONE) return */ + tst #4,r0 + bt 4f + rts + nop + +4: /* else if(cmd & IND_SOURCE) memcpy(dst,addr,PAGE_SIZE) */ + tst #8,r0 + bt 0b + + mov.l 10f,r3 /* PAGE_SIZE */ + shlr2 r3 + shlr2 r3 +5: + dt r3 + + /* regular kexec just overwrites the destination page + * with the contents of the source page. + * for the kexec jump case we need to swap the contents + * of the pages. + * to keep it simple swap the contents for both cases. 
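+	 * Each pass of the loop below swaps 16 bytes (four longwords);
+	 * r3 was preloaded with PAGE_SIZE / 16, so one call handles a
+	 * whole page.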
+ */ + mov.l @(0, r2), r8 + mov.l @(0, r5), r1 + mov.l r8, @(0, r5) + mov.l r1, @(0, r2) + + mov.l @(4, r2), r8 + mov.l @(4, r5), r1 + mov.l r8, @(4, r5) + mov.l r1, @(4, r2) + + mov.l @(8, r2), r8 + mov.l @(8, r5), r1 + mov.l r8, @(8, r5) + mov.l r1, @(8, r2) + + mov.l @(12, r2), r8 + mov.l @(12, r5), r1 + mov.l r8, @(12, r5) + mov.l r1, @(12, r2) + + add #16,r5 + add #16,r2 + bf 5b + + bra 0b + nop + + .align 2 +10: + .long PAGE_SIZE +11: + .long 0 +12: + .long 0x20000000 ! RB=1 + +relocate_new_kernel_end: + + .globl relocate_new_kernel_size +relocate_new_kernel_size: + .long relocate_new_kernel_end - relocate_new_kernel diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c new file mode 100644 index 000000000..8838094c9 --- /dev/null +++ b/arch/sh/kernel/return_address.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/return_address.c + * + * Copyright (C) 2009 Matt Fleming + * Copyright (C) 2009 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <asm/dwarf.h> + +#ifdef CONFIG_DWARF_UNWINDER + +void *return_address(unsigned int depth) +{ + struct dwarf_frame *frame; + unsigned long ra; + int i; + + for (i = 0, frame = NULL, ra = 0; i <= depth; i++) { + struct dwarf_frame *tmp; + + tmp = dwarf_unwind_stack(ra, frame); + if (!tmp) + return NULL; + + if (frame) + dwarf_free_frame(frame); + + frame = tmp; + + if (!frame || !frame->return_addr) + break; + + ra = frame->return_addr; + } + + /* Failed to unwind the stack to the specified depth. */ + WARN_ON(i != depth + 1); + + if (frame) + dwarf_free_frame(frame); + + return (void *)ra; +} + +#else + +void *return_address(unsigned int depth) +{ + return NULL; +} + +#endif + +EXPORT_SYMBOL_GPL(return_address); diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c new file mode 100644 index 000000000..cf7c0f72f --- /dev/null +++ b/arch/sh/kernel/setup.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/setup.c + * + * This file handles the architecture-dependent parts of initialization + * + * Copyright (C) 1999 Niibe Yutaka + * Copyright (C) 2002 - 2010 Paul Mundt + */ +#include <linux/screen_info.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/initrd.h> +#include <linux/console.h> +#include <linux/root_dev.h> +#include <linux/utsname.h> +#include <linux/nodemask.h> +#include <linux/cpu.h> +#include <linux/pfn.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/kexec.h> +#include <linux/module.h> +#include <linux/smp.h> +#include <linux/err.h> +#include <linux/crash_dump.h> +#include <linux/mmzone.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/memblock.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/uaccess.h> +#include <uapi/linux/mount.h> +#include <asm/io.h> +#include <asm/page.h> +#include <asm/elf.h> +#include <asm/sections.h> +#include <asm/irq.h> +#include <asm/setup.h> +#include <asm/clock.h> +#include <asm/smp.h> +#include <asm/mmu_context.h> +#include <asm/mmzone.h> +#include <asm/processor.h> +#include <asm/sparsemem.h> +#include <asm/platform_early.h> + +/* + * Initialize loops_per_jiffy as 10000000 (1000MIPS). + * This value will be used at the very early stage of serial setup. + * The bigger value means no problem. 
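+ * (An overestimate only makes early udelay() waits longer than
+ * requested; the value is replaced once calibrate_delay() runs.)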
+ */ +struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = { + [0] = { + .type = CPU_SH_NONE, + .family = CPU_FAMILY_UNKNOWN, + .loops_per_jiffy = 10000000, + .phys_bits = MAX_PHYSMEM_BITS, + }, +}; +EXPORT_SYMBOL(cpu_data); + +/* + * The machine vector. First entry in .machvec.init, or clobbered by + * sh_mv= on the command line, prior to .machvec.init teardown. + */ +struct sh_machine_vector sh_mv = { .mv_name = "generic", }; +EXPORT_SYMBOL(sh_mv); + +#ifdef CONFIG_VT +struct screen_info screen_info; +#endif + +extern int root_mountflags; + +#define RAMDISK_IMAGE_START_MASK 0x07FF +#define RAMDISK_PROMPT_FLAG 0x8000 +#define RAMDISK_LOAD_FLAG 0x4000 + +static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, }; + +static struct resource code_resource = { + .name = "Kernel code", + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, +}; + +static struct resource data_resource = { + .name = "Kernel data", + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, +}; + +static struct resource bss_resource = { + .name = "Kernel bss", + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, +}; + +unsigned long memory_start; +EXPORT_SYMBOL(memory_start); +unsigned long memory_end = 0; +EXPORT_SYMBOL(memory_end); +unsigned long memory_limit = 0; + +static struct resource mem_resources[MAX_NUMNODES]; + +int l1i_cache_shape, l1d_cache_shape, l2_cache_shape; + +static int __init early_parse_mem(char *p) +{ + if (!p) + return 1; + + memory_limit = PAGE_ALIGN(memparse(p, &p)); + + pr_notice("Memory limited to %ldMB\n", memory_limit >> 20); + + return 0; +} +early_param("mem", early_parse_mem); + +void __init check_for_initrd(void) +{ +#ifdef CONFIG_BLK_DEV_INITRD + unsigned long start, end; + + /* + * Check for the rare cases where boot loaders adhere to the boot + * ABI. + */ + if (!LOADER_TYPE || !INITRD_START || !INITRD_SIZE) + goto disable; + + start = INITRD_START + __MEMORY_START; + end = start + INITRD_SIZE; + + if (unlikely(end <= start)) + goto disable; + if (unlikely(start & ~PAGE_MASK)) { + pr_err("initrd must be page aligned\n"); + goto disable; + } + + if (unlikely(start < __MEMORY_START)) { + pr_err("initrd start (%08lx) < __MEMORY_START(%x)\n", + start, __MEMORY_START); + goto disable; + } + + if (unlikely(end > memblock_end_of_DRAM())) { + pr_err("initrd extends beyond end of memory " + "(0x%08lx > 0x%08lx)\ndisabling initrd\n", + end, (unsigned long)memblock_end_of_DRAM()); + goto disable; + } + + /* + * If we got this far in spite of the boot loader's best efforts + * to the contrary, assume we actually have a valid initrd and + * fix up the root dev. + */ + ROOT_DEV = Root_RAM0; + + /* + * Address sanitization + */ + initrd_start = (unsigned long)__va(start); + initrd_end = initrd_start + INITRD_SIZE; + + memblock_reserve(__pa(initrd_start), INITRD_SIZE); + + return; + +disable: + pr_info("initrd disabled\n"); + initrd_start = initrd_end = 0; +#endif +} + +#ifndef CONFIG_GENERIC_CALIBRATE_DELAY +void calibrate_delay(void) +{ + struct clk *clk = clk_get(NULL, "cpu_clk"); + + if (IS_ERR(clk)) + panic("Need a sane CPU clock definition!"); + + loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ; + + printk(KERN_INFO "Calibrating delay loop (skipped)... 
" + "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n", + loops_per_jiffy/(500000/HZ), + (loops_per_jiffy/(5000/HZ)) % 100, + loops_per_jiffy); +} +#endif + +void __init __add_active_range(unsigned int nid, unsigned long start_pfn, + unsigned long end_pfn) +{ + struct resource *res = &mem_resources[nid]; + unsigned long start, end; + + WARN_ON(res->name); /* max one active range per node for now */ + + start = start_pfn << PAGE_SHIFT; + end = end_pfn << PAGE_SHIFT; + + res->name = "System RAM"; + res->start = start; + res->end = end - 1; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + + if (request_resource(&iomem_resource, res)) { + pr_err("unable to request memory_resource 0x%lx 0x%lx\n", + start_pfn, end_pfn); + return; + } + + /* + * We don't know which RAM region contains kernel data or + * the reserved crashkernel region, so try it repeatedly + * and let the resource manager test it. + */ + request_resource(res, &code_resource); + request_resource(res, &data_resource); + request_resource(res, &bss_resource); +#ifdef CONFIG_KEXEC + request_resource(res, &crashk_res); +#endif + + /* + * Also make sure that there is a PMB mapping that covers this + * range before we attempt to activate it, to avoid reset by MMU. + * We can hit this path with NUMA or memory hot-add. + */ + pmb_bolt_mapping((unsigned long)__va(start), start, end - start, + PAGE_KERNEL); + + memblock_set_node(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), + &memblock.memory, nid); +} + +void __init __weak plat_early_device_setup(void) +{ +} + +#ifdef CONFIG_OF_EARLY_FLATTREE +void __ref sh_fdt_init(phys_addr_t dt_phys) +{ + static int done = 0; + void *dt_virt; + + /* Avoid calling an __init function on secondary cpus. */ + if (done) return; + +#ifdef CONFIG_USE_BUILTIN_DTB + dt_virt = __dtb_start; +#else + dt_virt = phys_to_virt(dt_phys); +#endif + + if (!dt_virt || !early_init_dt_scan(dt_virt)) { + pr_crit("Error: invalid device tree blob" + " at physical address %p\n", (void *)dt_phys); + + while (true) + cpu_relax(); + } + + done = 1; +} +#endif + +void __init setup_arch(char **cmdline_p) +{ + enable_mmu(); + + ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV); + + printk(KERN_NOTICE "Boot params:\n" + "... MOUNT_ROOT_RDONLY - %08lx\n" + "... RAMDISK_FLAGS - %08lx\n" + "... ORIG_ROOT_DEV - %08lx\n" + "... LOADER_TYPE - %08lx\n" + "... INITRD_START - %08lx\n" + "... 
INITRD_SIZE - %08lx\n",
+ MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
+ ORIG_ROOT_DEV, LOADER_TYPE,
+ INITRD_START, INITRD_SIZE);
+
+#ifdef CONFIG_BLK_DEV_RAM
+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+#endif
+
+ if (!MOUNT_ROOT_RDONLY)
+ root_mountflags &= ~MS_RDONLY;
+ setup_initial_init_mm(_text, _etext, _edata, _end);
+
+ code_resource.start = virt_to_phys(_text);
+ code_resource.end = virt_to_phys(_etext)-1;
+ data_resource.start = virt_to_phys(_etext);
+ data_resource.end = virt_to_phys(_edata)-1;
+ bss_resource.start = virt_to_phys(__bss_start);
+ bss_resource.end = virt_to_phys(__bss_stop)-1;
+
+#ifdef CONFIG_CMDLINE_OVERWRITE
+ strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#else
+ strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
+#ifdef CONFIG_CMDLINE_EXTEND
+ strlcat(command_line, " ", sizeof(command_line));
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+#endif
+#endif
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
+ plat_early_device_setup();
+
+ sh_mv_setup();
+
+ /* Let earlyprintk output early console messages */
+ sh_early_platform_driver_probe("earlyprintk", 1, 1);
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+#ifdef CONFIG_USE_BUILTIN_DTB
+ unflatten_and_copy_device_tree();
+#else
+ unflatten_device_tree();
+#endif
+#endif
+
+ paging_init();
+
+ /* Perform the machine specific initialisation */
+ if (likely(sh_mv.mv_setup))
+ sh_mv.mv_setup(cmdline_p);
+
+ plat_smp_setup();
+}
+
+/* processor boot mode configuration */
+int generic_mode_pins(void)
+{
+ pr_warn("generic_mode_pins(): missing mode pin configuration\n");
+ return 0;
+}
+
+int test_mode_pin(int pin)
+{
+ return sh_mv.mv_mode_pins() & pin;
+}
+
+void __init arch_cpu_finalize_init(void)
+{
+ char *p = &init_utsname()->machine[2]; /* "sh" */
+
+ select_idle_routine();
+
+ current_cpu_data.loops_per_jiffy = loops_per_jiffy;
+
+ switch (current_cpu_data.family) {
+ case CPU_FAMILY_SH2:
+ *p++ = '2';
+ break;
+ case CPU_FAMILY_SH2A:
+ *p++ = '2';
+ *p++ = 'a';
+ break;
+ case CPU_FAMILY_SH3:
+ *p++ = '3';
+ break;
+ case CPU_FAMILY_SH4:
+ *p++ = '4';
+ break;
+ case CPU_FAMILY_SH4A:
+ *p++ = '4';
+ *p++ = 'a';
+ break;
+ case CPU_FAMILY_SH4AL_DSP:
+ *p++ = '4';
+ *p++ = 'a';
+ *p++ = 'l';
+ *p++ = '-';
+ *p++ = 'd';
+ *p++ = 's';
+ *p++ = 'p';
+ break;
+ case CPU_FAMILY_UNKNOWN:
+ /*
+ * Specifically use CPU_FAMILY_UNKNOWN rather than
+ * default:, so we're able to have the compiler whine
+ * about unhandled enumerations.
+ */
+ break;
+ }
+
+ pr_info("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
+
+#ifndef __LITTLE_ENDIAN__
+ /* 'eb' means 'Endian Big' */
+ *p++ = 'e';
+ *p++ = 'b';
+#endif
+ *p = '\0';
+}
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
new file mode 100644
index 000000000..250dbdf3f
--- /dev/null
+++ b/arch/sh/kernel/sh_bios.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * C interface for trapping into the standard LinuxSH BIOS.
+ *
+ * Copyright (C) 2000 Greg Banks, Mitch Davis
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 M. R.
Brown + * Copyright (C) 2004 - 2010 Paul Mundt + */ +#include <linux/module.h> +#include <linux/console.h> +#include <linux/tty.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <asm/sh_bios.h> + +#define BIOS_CALL_CONSOLE_WRITE 0 +#define BIOS_CALL_ETH_NODE_ADDR 10 +#define BIOS_CALL_SHUTDOWN 11 +#define BIOS_CALL_GDB_DETACH 0xff + +void *gdb_vbr_vector = NULL; + +static inline long sh_bios_call(long func, long arg0, long arg1, long arg2, + long arg3) +{ + register long r0 __asm__("r0") = func; + register long r4 __asm__("r4") = arg0; + register long r5 __asm__("r5") = arg1; + register long r6 __asm__("r6") = arg2; + register long r7 __asm__("r7") = arg3; + + if (!gdb_vbr_vector) + return -ENOSYS; + + __asm__ __volatile__("trapa #0x3f":"=z"(r0) + :"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7) + :"memory"); + return r0; +} + +void sh_bios_console_write(const char *buf, unsigned int len) +{ + sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0); +} + +void sh_bios_gdb_detach(void) +{ + sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0); +} +EXPORT_SYMBOL_GPL(sh_bios_gdb_detach); + +void sh_bios_get_node_addr(unsigned char *node_addr) +{ + sh_bios_call(BIOS_CALL_ETH_NODE_ADDR, 0, (long)node_addr, 0, 0); +} +EXPORT_SYMBOL_GPL(sh_bios_get_node_addr); + +void sh_bios_shutdown(unsigned int how) +{ + sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0); +} + +/* + * Read the old value of the VBR register to initialise the vector + * through which debug and BIOS traps are delegated by the Linux trap + * handler. + */ +void sh_bios_vbr_init(void) +{ + unsigned long vbr; + + if (unlikely(gdb_vbr_vector)) + return; + + __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr)); + + if (vbr) { + gdb_vbr_vector = (void *)(vbr + 0x100); + printk(KERN_NOTICE "Setting GDB trap vector to %p\n", + gdb_vbr_vector); + } else + printk(KERN_NOTICE "SH-BIOS not detected\n"); +} + +/** + * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector. + * + * This can be used by save/restore code to reinitialize the system VBR + * from the fixed BIOS VBR. A no-op if no BIOS VBR is known. + */ +void sh_bios_vbr_reload(void) +{ + if (gdb_vbr_vector) + __asm__ __volatile__ ( + "ldc %0, vbr" + : + : "r" (((unsigned long) gdb_vbr_vector) - 0x100) + : "memory" + ); +} + +#ifdef CONFIG_EARLY_PRINTK +/* + * Print a string through the BIOS + */ +static void sh_console_write(struct console *co, const char *s, + unsigned count) +{ + sh_bios_console_write(s, count); +} + +/* + * Setup initial baud/bits/parity. We do two things here: + * - construct a cflag setting for the first rs_open() + * - initialize the serial port + * Return non-zero if we didn't find a serial port. + */ +static int __init sh_console_setup(struct console *co, char *options) +{ + int cflag = CREAD | HUPCL | CLOCAL; + + /* + * Now construct a cflag setting. + * TODO: this is a totally bogus cflag, as we have + * no idea what serial settings the BIOS is using, or + * even if its using the serial port at all. 
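+ *
+ * In practice the code below simply assumes 115200 8N1 and records that
+ * in co->cflag; no UART is programmed here, since all console output
+ * goes out through the BIOS trap anyway.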
+ */ + cflag |= B115200 | CS8 | /*no parity*/0; + + co->cflag = cflag; + + return 0; +} + +static struct console bios_console = { + .name = "bios", + .write = sh_console_write, + .setup = sh_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static int __init setup_early_printk(char *buf) +{ + int keep_early = 0; + + if (!buf) + return 0; + + if (strstr(buf, "keep")) + keep_early = 1; + + if (!strncmp(buf, "bios", 4)) + early_console = &bios_console; + + if (likely(early_console)) { + if (keep_early) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + register_console(early_console); + } + + return 0; +} +early_param("earlyprintk", setup_early_printk); +#endif diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c new file mode 100644 index 000000000..5858936cb --- /dev/null +++ b/arch/sh/kernel/sh_ksyms_32.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/module.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/delay.h> +#include <linux/mm.h> +#include <asm/checksum.h> +#include <asm/sections.h> + +EXPORT_SYMBOL(memchr); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(__udelay); +EXPORT_SYMBOL(__ndelay); +EXPORT_SYMBOL(__const_udelay); +EXPORT_SYMBOL(strlen); +EXPORT_SYMBOL(csum_partial); +EXPORT_SYMBOL(csum_partial_copy_generic); +EXPORT_SYMBOL(copy_page); +EXPORT_SYMBOL(__clear_user); +EXPORT_SYMBOL(empty_zero_page); +#ifdef CONFIG_FLATMEM +/* need in pfn_valid macro */ +EXPORT_SYMBOL(min_low_pfn); +EXPORT_SYMBOL(max_low_pfn); +#endif + +#define DECLARE_EXPORT(name) \ + extern void name(void);EXPORT_SYMBOL(name) + +DECLARE_EXPORT(__udivsi3); +DECLARE_EXPORT(__sdivsi3); +DECLARE_EXPORT(__lshrsi3); +DECLARE_EXPORT(__ashrsi3); +DECLARE_EXPORT(__ashlsi3); +DECLARE_EXPORT(__lshrsi3_r0); +DECLARE_EXPORT(__ashrsi3_r0); +DECLARE_EXPORT(__ashlsi3_r0); + +DECLARE_EXPORT(__ashiftrt_r4_0); +DECLARE_EXPORT(__ashiftrt_r4_1); +DECLARE_EXPORT(__ashiftrt_r4_2); +DECLARE_EXPORT(__ashiftrt_r4_3); +DECLARE_EXPORT(__ashiftrt_r4_4); +DECLARE_EXPORT(__ashiftrt_r4_5); +DECLARE_EXPORT(__ashiftrt_r4_6); +DECLARE_EXPORT(__ashiftrt_r4_7); +DECLARE_EXPORT(__ashiftrt_r4_8); +DECLARE_EXPORT(__ashiftrt_r4_9); +DECLARE_EXPORT(__ashiftrt_r4_10); +DECLARE_EXPORT(__ashiftrt_r4_11); +DECLARE_EXPORT(__ashiftrt_r4_12); +DECLARE_EXPORT(__ashiftrt_r4_13); +DECLARE_EXPORT(__ashiftrt_r4_14); +DECLARE_EXPORT(__ashiftrt_r4_15); +DECLARE_EXPORT(__ashiftrt_r4_16); +DECLARE_EXPORT(__ashiftrt_r4_17); +DECLARE_EXPORT(__ashiftrt_r4_18); +DECLARE_EXPORT(__ashiftrt_r4_19); +DECLARE_EXPORT(__ashiftrt_r4_20); +DECLARE_EXPORT(__ashiftrt_r4_21); +DECLARE_EXPORT(__ashiftrt_r4_22); +DECLARE_EXPORT(__ashiftrt_r4_23); +DECLARE_EXPORT(__ashiftrt_r4_24); +DECLARE_EXPORT(__ashiftrt_r4_25); +DECLARE_EXPORT(__ashiftrt_r4_26); +DECLARE_EXPORT(__ashiftrt_r4_27); +DECLARE_EXPORT(__ashiftrt_r4_28); +DECLARE_EXPORT(__ashiftrt_r4_29); +DECLARE_EXPORT(__ashiftrt_r4_30); +DECLARE_EXPORT(__ashiftrt_r4_31); +DECLARE_EXPORT(__ashiftrt_r4_32); +DECLARE_EXPORT(__movstr); +DECLARE_EXPORT(__movstrSI8); +DECLARE_EXPORT(__movstrSI12); +DECLARE_EXPORT(__movstrSI16); +DECLARE_EXPORT(__movstrSI20); +DECLARE_EXPORT(__movstrSI24); +DECLARE_EXPORT(__movstrSI28); +DECLARE_EXPORT(__movstrSI32); +DECLARE_EXPORT(__movstrSI36); +DECLARE_EXPORT(__movstrSI40); +DECLARE_EXPORT(__movstrSI44); +DECLARE_EXPORT(__movstrSI48); +DECLARE_EXPORT(__movstrSI52); +DECLARE_EXPORT(__movstrSI56); 
+DECLARE_EXPORT(__movstrSI60); +DECLARE_EXPORT(__movstr_i4_even); +DECLARE_EXPORT(__movstr_i4_odd); +DECLARE_EXPORT(__movstrSI12_i4); +DECLARE_EXPORT(__movmem); +DECLARE_EXPORT(__movmemSI8); +DECLARE_EXPORT(__movmemSI12); +DECLARE_EXPORT(__movmemSI16); +DECLARE_EXPORT(__movmemSI20); +DECLARE_EXPORT(__movmemSI24); +DECLARE_EXPORT(__movmemSI28); +DECLARE_EXPORT(__movmemSI32); +DECLARE_EXPORT(__movmemSI36); +DECLARE_EXPORT(__movmemSI40); +DECLARE_EXPORT(__movmemSI44); +DECLARE_EXPORT(__movmemSI48); +DECLARE_EXPORT(__movmemSI52); +DECLARE_EXPORT(__movmemSI56); +DECLARE_EXPORT(__movmemSI60); +DECLARE_EXPORT(__movmem_i4_even); +DECLARE_EXPORT(__movmem_i4_odd); +DECLARE_EXPORT(__movmemSI12_i4); +DECLARE_EXPORT(__udiv_qrnnd_16); +DECLARE_EXPORT(__sdivsi3_i4); +DECLARE_EXPORT(__udivsi3_i4); +DECLARE_EXPORT(__sdivsi3_i4i); +DECLARE_EXPORT(__udivsi3_i4i); +#ifdef CONFIG_MCOUNT +DECLARE_EXPORT(mcount); +#endif diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c new file mode 100644 index 000000000..a6bfc6f37 --- /dev/null +++ b/arch/sh/kernel/signal_32.c @@ -0,0 +1,510 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sh/kernel/signal.c + * + * Copyright (C) 1991, 1992 Linus Torvalds + * + * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson + * + * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * + */ +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/kernel.h> +#include <linux/signal.h> +#include <linux/errno.h> +#include <linux/wait.h> +#include <linux/ptrace.h> +#include <linux/unistd.h> +#include <linux/stddef.h> +#include <linux/tty.h> +#include <linux/elf.h> +#include <linux/personality.h> +#include <linux/binfmts.h> +#include <linux/io.h> +#include <linux/resume_user_mode.h> +#include <asm/ucontext.h> +#include <linux/uaccess.h> +#include <asm/cacheflush.h> +#include <asm/syscalls.h> +#include <asm/fpu.h> + +struct fdpic_func_descriptor { + unsigned long text; + unsigned long GOT; +}; + +/* + * The following define adds a 64 byte gap between the signal + * stack frame and previous contents of the stack. This allows + * frame unwinding in a function epilogue but only if a frame + * pointer is used in the function. This is necessary because + * current gcc compilers (<4.3) do not generate unwind info on + * SH for function epilogues. + */ +#define UNWINDGUARD 64 + +/* + * Do a signal return; undo the signal stack. 
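+ *
+ * The opcode macros below are used by setup_frame()/setup_rt_frame() to
+ * build an on-stack return trampoline when neither SA_RESTORER nor a
+ * vDSO stub is available: a PC-relative mov.w loads the sigreturn
+ * syscall number into r3, trapa enters the kernel, and the remaining
+ * slots are padded with "or r0,r0" to sidestep the hardware bug noted
+ * below.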
+ */ + +#define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */ +#if defined(CONFIG_CPU_SH2) +#define TRAP_NOARG 0xc320 /* Syscall w/no args (NR in R3) */ +#else +#define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) */ +#endif +#define OR_R0_R0 0x200b /* or r0,r0 (insert to avoid hardware bug) */ + +struct sigframe +{ + struct sigcontext sc; + unsigned long extramask[_NSIG_WORDS-1]; + u16 retcode[8]; +}; + +struct rt_sigframe +{ + struct siginfo info; + struct ucontext uc; + u16 retcode[8]; +}; + +#ifdef CONFIG_SH_FPU +static inline int restore_sigcontext_fpu(struct sigcontext __user *sc) +{ + struct task_struct *tsk = current; + + if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + return 0; + + set_used_math(); + return __copy_from_user(&tsk->thread.xstate->hardfpu, &sc->sc_fpregs[0], + sizeof(long)*(16*2+2)); +} + +static inline int save_sigcontext_fpu(struct sigcontext __user *sc, + struct pt_regs *regs) +{ + struct task_struct *tsk = current; + + if (!(boot_cpu_data.flags & CPU_HAS_FPU)) + return 0; + + if (!used_math()) + return __put_user(0, &sc->sc_ownedfp); + + if (__put_user(1, &sc->sc_ownedfp)) + return -EFAULT; + + /* This will cause a "finit" to be triggered by the next + attempted FPU operation by the 'current' process. + */ + clear_used_math(); + + unlazy_fpu(tsk, regs); + return __copy_to_user(&sc->sc_fpregs[0], &tsk->thread.xstate->hardfpu, + sizeof(long)*(16*2+2)); +} +#endif /* CONFIG_SH_FPU */ + +static int +restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) +{ + unsigned int err = 0; + unsigned int sr = regs->sr & ~SR_USER_MASK; + +#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) + COPY(regs[1]); + COPY(regs[2]); COPY(regs[3]); + COPY(regs[4]); COPY(regs[5]); + COPY(regs[6]); COPY(regs[7]); + COPY(regs[8]); COPY(regs[9]); + COPY(regs[10]); COPY(regs[11]); + COPY(regs[12]); COPY(regs[13]); + COPY(regs[14]); COPY(regs[15]); + COPY(gbr); COPY(mach); + COPY(macl); COPY(pr); + COPY(sr); COPY(pc); +#undef COPY + + regs->sr = (regs->sr & SR_USER_MASK) | sr; + +#ifdef CONFIG_SH_FPU + if (boot_cpu_data.flags & CPU_HAS_FPU) { + int owned_fp; + struct task_struct *tsk = current; + + regs->sr |= SR_FD; /* Release FPU */ + clear_fpu(tsk, regs); + clear_used_math(); + err |= __get_user (owned_fp, &sc->sc_ownedfp); + if (owned_fp) + err |= restore_sigcontext_fpu(sc); + } +#endif + + regs->tra = -1; /* disable syscall checks */ + err |= __get_user(*r0_p, &sc->sc_regs[0]); + return err; +} + +asmlinkage int sys_sigreturn(void) +{ + struct pt_regs *regs = current_pt_regs(); + struct sigframe __user *frame = (struct sigframe __user *)regs->regs[15]; + sigset_t set; + int r0; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + if (!access_ok(frame, sizeof(*frame))) + goto badframe; + + if (__get_user(set.sig[0], &frame->sc.oldmask) + || (_NSIG_WORDS > 1 + && __copy_from_user(&set.sig[1], &frame->extramask, + sizeof(frame->extramask)))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->sc, &r0)) + goto badframe; + return r0; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +asmlinkage int sys_rt_sigreturn(void) +{ + struct pt_regs *regs = current_pt_regs(); + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15]; + sigset_t set; + int r0; + + /* Always make any pending restarted system calls return -EINTR */ + current->restart_block.fn = do_no_restart_syscall; + + if (!access_ok(frame, sizeof(*frame))) + 
goto badframe; + + if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) + goto badframe; + + set_current_blocked(&set); + + if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) + goto badframe; + + if (restore_altstack(&frame->uc.uc_stack)) + goto badframe; + + return r0; + +badframe: + force_sig(SIGSEGV); + return 0; +} + +/* + * Set up a signal frame. + */ + +static int +setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, + unsigned long mask) +{ + int err = 0; + +#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) + COPY(regs[0]); COPY(regs[1]); + COPY(regs[2]); COPY(regs[3]); + COPY(regs[4]); COPY(regs[5]); + COPY(regs[6]); COPY(regs[7]); + COPY(regs[8]); COPY(regs[9]); + COPY(regs[10]); COPY(regs[11]); + COPY(regs[12]); COPY(regs[13]); + COPY(regs[14]); COPY(regs[15]); + COPY(gbr); COPY(mach); + COPY(macl); COPY(pr); + COPY(sr); COPY(pc); +#undef COPY + +#ifdef CONFIG_SH_FPU + err |= save_sigcontext_fpu(sc, regs); +#endif + + /* non-iBCS2 extensions.. */ + err |= __put_user(mask, &sc->oldmask); + + return err; +} + +/* + * Determine which stack to use.. + */ +static inline void __user * +get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) +{ + if (ka->sa.sa_flags & SA_ONSTACK) { + if (sas_ss_flags(sp) == 0) + sp = current->sas_ss_sp + current->sas_ss_size; + } + + return (void __user *)((sp - (frame_size+UNWINDGUARD)) & -8ul); +} + +/* These symbols are defined with the addresses in the vsyscall page. + See vsyscall-trapa.S. */ +extern void __kernel_sigreturn(void); +extern void __kernel_rt_sigreturn(void); + +static int setup_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct sigframe __user *frame; + int err = 0, sig = ksig->sig; + + frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); + + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + err |= setup_sigcontext(&frame->sc, regs, set->sig[0]); + + if (_NSIG_WORDS > 1) + err |= __copy_to_user(frame->extramask, &set->sig[1], + sizeof(frame->extramask)); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. 
*/ + if (ksig->ka.sa.sa_flags & SA_RESTORER) { + regs->pr = (unsigned long) ksig->ka.sa.sa_restorer; +#ifdef CONFIG_VSYSCALL + } else if (likely(current->mm->context.vdso)) { + regs->pr = VDSO_SYM(&__kernel_sigreturn); +#endif + } else { + /* Generate return code (system call to sigreturn) */ + err |= __put_user(MOVW(7), &frame->retcode[0]); + err |= __put_user(TRAP_NOARG, &frame->retcode[1]); + err |= __put_user(OR_R0_R0, &frame->retcode[2]); + err |= __put_user(OR_R0_R0, &frame->retcode[3]); + err |= __put_user(OR_R0_R0, &frame->retcode[4]); + err |= __put_user(OR_R0_R0, &frame->retcode[5]); + err |= __put_user(OR_R0_R0, &frame->retcode[6]); + err |= __put_user((__NR_sigreturn), &frame->retcode[7]); + regs->pr = (unsigned long) frame->retcode; + flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); + } + + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->regs[15] = (unsigned long) frame; + regs->regs[4] = sig; /* Arg for signal handler */ + regs->regs[5] = 0; + regs->regs[6] = (unsigned long) &frame->sc; + + if (current->personality & FDPIC_FUNCPTRS) { + struct fdpic_func_descriptor __user *funcptr = + (struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler; + + err |= __get_user(regs->pc, &funcptr->text); + err |= __get_user(regs->regs[12], &funcptr->GOT); + } else + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + + if (err) + return -EFAULT; + + pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", + current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); + + return 0; +} + +static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + struct rt_sigframe __user *frame; + int err = 0, sig = ksig->sig; + + frame = get_sigframe(&ksig->ka, regs->regs[15], sizeof(*frame)); + + if (!access_ok(frame, sizeof(*frame))) + return -EFAULT; + + err |= copy_siginfo_to_user(&frame->info, &ksig->info); + + /* Create the ucontext. */ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(NULL, &frame->uc.uc_link); + err |= __save_altstack(&frame->uc.uc_stack, regs->regs[15]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. If provided, use a stub + already in userspace. 
*/ + if (ksig->ka.sa.sa_flags & SA_RESTORER) { + regs->pr = (unsigned long) ksig->ka.sa.sa_restorer; +#ifdef CONFIG_VSYSCALL + } else if (likely(current->mm->context.vdso)) { + regs->pr = VDSO_SYM(&__kernel_rt_sigreturn); +#endif + } else { + /* Generate return code (system call to rt_sigreturn) */ + err |= __put_user(MOVW(7), &frame->retcode[0]); + err |= __put_user(TRAP_NOARG, &frame->retcode[1]); + err |= __put_user(OR_R0_R0, &frame->retcode[2]); + err |= __put_user(OR_R0_R0, &frame->retcode[3]); + err |= __put_user(OR_R0_R0, &frame->retcode[4]); + err |= __put_user(OR_R0_R0, &frame->retcode[5]); + err |= __put_user(OR_R0_R0, &frame->retcode[6]); + err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]); + regs->pr = (unsigned long) frame->retcode; + flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode)); + } + + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->regs[15] = (unsigned long) frame; + regs->regs[4] = sig; /* Arg for signal handler */ + regs->regs[5] = (unsigned long) &frame->info; + regs->regs[6] = (unsigned long) &frame->uc; + + if (current->personality & FDPIC_FUNCPTRS) { + struct fdpic_func_descriptor __user *funcptr = + (struct fdpic_func_descriptor __user *)ksig->ka.sa.sa_handler; + + err |= __get_user(regs->pc, &funcptr->text); + err |= __get_user(regs->regs[12], &funcptr->GOT); + } else + regs->pc = (unsigned long)ksig->ka.sa.sa_handler; + + if (err) + return -EFAULT; + + pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", + current->comm, task_pid_nr(current), frame, regs->pc, regs->pr); + + return 0; +} + +static inline void +handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs, + struct sigaction *sa) +{ + /* If we're not from a syscall, bail out */ + if (regs->tra < 0) + return; + + /* check for system call restart.. */ + switch (regs->regs[0]) { + case -ERESTART_RESTARTBLOCK: + case -ERESTARTNOHAND: + no_system_call_restart: + regs->regs[0] = -EINTR; + break; + + case -ERESTARTSYS: + if (!(sa->sa_flags & SA_RESTART)) + goto no_system_call_restart; + fallthrough; + case -ERESTARTNOINTR: + regs->regs[0] = save_r0; + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + break; + } +} + +/* + * OK, we're invoking a handler + */ +static void +handle_signal(struct ksignal *ksig, struct pt_regs *regs, unsigned int save_r0) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + /* Set up the stack frame */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + ret = setup_rt_frame(ksig, oldset, regs); + else + ret = setup_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void do_signal(struct pt_regs *regs, unsigned int save_r0) +{ + struct ksignal ksig; + + /* + * We want the common case to go fast, which + * is why we may in certain cases get here from + * kernel mode. Just return without doing anything + * if so. + */ + if (!user_mode(regs)) + return; + + if (get_signal(&ksig)) { + handle_syscall_restart(save_r0, regs, &ksig.ka.sa); + + /* Whee! Actually deliver the signal. */ + handle_signal(&ksig, regs, save_r0); + return; + } + + /* Did we come from a system call? 
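+ * (regs->tra is only non-negative when the kernel was entered through a
+ * syscall trap, see handle_syscall_restart() above, so the restart
+ * handling below applies only on the syscall path.)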
*/ + if (regs->tra >= 0) { + /* Restart the system call - no handlers present */ + if (regs->regs[0] == -ERESTARTNOHAND || + regs->regs[0] == -ERESTARTSYS || + regs->regs[0] == -ERESTARTNOINTR) { + regs->regs[0] = save_r0; + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + } else if (regs->regs[0] == -ERESTART_RESTARTBLOCK) { + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + regs->regs[3] = __NR_restart_syscall; + } + } + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back. + */ + restore_saved_sigmask(); +} + +asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, + unsigned long thread_info_flags) +{ + /* deal with pending signal delivery */ + if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs, save_r0); + + if (thread_info_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); +} diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c new file mode 100644 index 000000000..65924d9ec --- /dev/null +++ b/arch/sh/kernel/smp.c @@ -0,0 +1,471 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/smp.c + * + * SMP support for the SuperH processors. + * + * Copyright (C) 2002 - 2010 Paul Mundt + * Copyright (C) 2006 - 2007 Akio Idehara + */ +#include <linux/err.h> +#include <linux/cache.h> +#include <linux/cpumask.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/sched/mm.h> +#include <linux/sched/hotplug.h> +#include <linux/atomic.h> +#include <linux/clockchips.h> +#include <asm/processor.h> +#include <asm/mmu_context.h> +#include <asm/smp.h> +#include <asm/cacheflush.h> +#include <asm/sections.h> +#include <asm/setup.h> + +int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ +int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ + +struct plat_smp_ops *mp_ops = NULL; + +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + +void register_smp_ops(struct plat_smp_ops *ops) +{ + if (mp_ops) + printk(KERN_WARNING "Overriding previously set SMP ops\n"); + + mp_ops = ops; +} + +static inline void smp_store_cpu_info(unsigned int cpu) +{ + struct sh_cpuinfo *c = cpu_data + cpu; + + memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo)); + + c->loops_per_jiffy = loops_per_jiffy; +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned int cpu = smp_processor_id(); + + init_new_context(current, &init_mm); + current_thread_info()->cpu = cpu; + mp_ops->prepare_cpus(max_cpus); + +#ifndef CONFIG_HOTPLUG_CPU + init_cpu_present(cpu_possible_mask); +#endif +} + +void __init smp_prepare_boot_cpu(void) +{ + unsigned int cpu = smp_processor_id(); + + __cpu_number_map[0] = cpu; + __cpu_logical_map[0] = cpu; + + set_cpu_online(cpu, true); + set_cpu_possible(cpu, true); + + per_cpu(cpu_state, cpu) = CPU_ONLINE; +} + +#ifdef CONFIG_HOTPLUG_CPU +void native_cpu_die(unsigned int cpu) +{ + unsigned int i; + + for (i = 0; i < 10; i++) { + smp_rmb(); + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + + return; + } + + msleep(100); + } + + pr_err("CPU %u didn't die...\n", cpu); +} + +int native_cpu_disable(unsigned int cpu) +{ + return cpu == 0 ? 
-EPERM : 0; +} + +void play_dead_common(void) +{ + idle_task_exit(); + irq_ctx_exit(raw_smp_processor_id()); + mb(); + + __this_cpu_write(cpu_state, CPU_DEAD); + local_irq_disable(); +} + +void native_play_dead(void) +{ + play_dead_common(); +} + +int __cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + int ret; + + ret = mp_ops->cpu_disable(cpu); + if (ret) + return ret; + + /* + * Take this CPU offline. Once we clear this, we can't return, + * and we must not schedule until we're ready to give up the cpu. + */ + set_cpu_online(cpu, false); + + /* + * OK - migrate IRQs away from this CPU + */ + migrate_irqs(); + + /* + * Flush user cache and TLB mappings, and then remove this CPU + * from the vm mask set of all processes. + */ + flush_cache_all(); +#ifdef CONFIG_MMU + local_flush_tlb_all(); +#endif + + clear_tasks_mm_cpumask(cpu); + + return 0; +} +#else /* ... !CONFIG_HOTPLUG_CPU */ +int native_cpu_disable(unsigned int cpu) +{ + return -ENOSYS; +} + +void native_cpu_die(unsigned int cpu) +{ + /* We said "no" in __cpu_disable */ + BUG(); +} + +void native_play_dead(void) +{ + BUG(); +} +#endif + +asmlinkage void start_secondary(void) +{ + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = &init_mm; + + enable_mmu(); + mmgrab(mm); + mmget(mm); + current->active_mm = mm; +#ifdef CONFIG_MMU + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); +#endif + + per_cpu_trap_init(); + + notify_cpu_starting(cpu); + + local_irq_enable(); + + calibrate_delay(); + + smp_store_cpu_info(cpu); + + set_cpu_online(cpu, true); + per_cpu(cpu_state, cpu) = CPU_ONLINE; + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +extern struct { + unsigned long sp; + unsigned long bss_start; + unsigned long bss_end; + void *start_kernel_fn; + void *cpu_init_fn; + void *thread_info; +} stack_start; + +int __cpu_up(unsigned int cpu, struct task_struct *tsk) +{ + unsigned long timeout; + + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + + /* Fill in data in head.S for secondary cpus */ + stack_start.sp = tsk->thread.sp; + stack_start.thread_info = tsk->stack; + stack_start.bss_start = 0; /* don't clear bss for secondary cpus */ + stack_start.start_kernel_fn = start_secondary; + + flush_icache_range((unsigned long)&stack_start, + (unsigned long)&stack_start + sizeof(stack_start)); + wmb(); + + mp_ops->start_cpu(cpu, (unsigned long)_stext); + + timeout = jiffies + HZ; + while (time_before(jiffies, timeout)) { + if (cpu_online(cpu)) + break; + + udelay(10); + barrier(); + } + + if (cpu_online(cpu)) + return 0; + + return -ENOENT; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + unsigned long bogosum = 0; + int cpu; + + for_each_online_cpu(cpu) + bogosum += cpu_data[cpu].loops_per_jiffy; + + printk(KERN_INFO "SMP: Total of %d processors activated " + "(%lu.%02lu BogoMIPS).\n", num_online_cpus(), + bogosum / (500000/HZ), + (bogosum / (5000/HZ)) % 100); +} + +void smp_send_reschedule(int cpu) +{ + mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE); +} + +void smp_send_stop(void) +{ + smp_call_function(stop_this_cpu, 0, 0); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + int cpu; + + for_each_cpu(cpu, mask) + mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); +} + +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +void tick_broadcast(const struct cpumask *mask) +{ + int cpu; + + for_each_cpu(cpu, mask) + mp_ops->send_ipi(cpu, SMP_MSG_TIMER); +} + +static void ipi_timer(void) +{ + 
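+ /*
+ * Broadcast timer IPI (SMP_MSG_TIMER): enter interrupt context and
+ * hand the broadcast tick over to the clockevents core.
+ */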
irq_enter(); + tick_receive_broadcast(); + irq_exit(); +} +#endif + +void smp_message_recv(unsigned int msg) +{ + switch (msg) { + case SMP_MSG_FUNCTION: + generic_smp_call_function_interrupt(); + break; + case SMP_MSG_RESCHEDULE: + scheduler_ipi(); + break; + case SMP_MSG_FUNCTION_SINGLE: + generic_smp_call_function_single_interrupt(); + break; +#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST + case SMP_MSG_TIMER: + ipi_timer(); + break; +#endif + default: + printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n", + smp_processor_id(), __func__, msg); + break; + } +} + +/* Not really SMP stuff ... */ +int setup_profiling_timer(unsigned int multiplier) +{ + return 0; +} + +#ifdef CONFIG_MMU + +static void flush_tlb_all_ipi(void *info) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + on_each_cpu(flush_tlb_all_ipi, 0, 1); +} + +static void flush_tlb_mm_ipi(void *mm) +{ + local_flush_tlb_mm((struct mm_struct *)mm); +} + +/* + * The following tlb flush calls are invoked when old translations are + * being torn down, or pte attributes are changing. For single threaded + * address spaces, a new context is obtained on the current cpu, and tlb + * context on other cpus are invalidated to force a new context allocation + * at switch_mm time, should the mm ever be used on other cpus. For + * multithreaded address spaces, intercpu interrupts have to be sent. + * Another case where intercpu interrupts are required is when the target + * mm might be active on another cpu (eg debuggers doing the flushes on + * behalf of debugees, kswapd stealing pages from another process etc). + * Kanoj 07/00. + */ +void flush_tlb_mm(struct mm_struct *mm) +{ + preempt_disable(); + + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); + } else { + int i; + for_each_online_cpu(i) + if (smp_processor_id() != i) + cpu_context(i, mm) = 0; + } + local_flush_tlb_mm(mm); + + preempt_enable(); +} + +struct flush_tlb_data { + struct vm_area_struct *vma; + unsigned long addr1; + unsigned long addr2; +}; + +static void flush_tlb_range_ipi(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); +} + +void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { + struct flush_tlb_data fd; + + fd.vma = vma; + fd.addr1 = start; + fd.addr2 = end; + smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); + } else { + int i; + for_each_online_cpu(i) + if (smp_processor_id() != i) + cpu_context(i, mm) = 0; + } + local_flush_tlb_range(vma, start, end); + preempt_enable(); +} + +static void flush_tlb_kernel_range_ipi(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_kernel_range(fd->addr1, fd->addr2); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_data fd; + + fd.addr1 = start; + fd.addr2 = end; + on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); +} + +static void flush_tlb_page_ipi(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + + local_flush_tlb_page(fd->vma, fd->addr1); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) +{ + preempt_disable(); + if ((atomic_read(&vma->vm_mm->mm_users) != 1) || + (current->mm != vma->vm_mm)) { + struct flush_tlb_data fd; + + fd.vma = vma; + 
fd.addr1 = page; + smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); + } else { + int i; + for_each_online_cpu(i) + if (smp_processor_id() != i) + cpu_context(i, vma->vm_mm) = 0; + } + local_flush_tlb_page(vma, page); + preempt_enable(); +} + +static void flush_tlb_one_ipi(void *info) +{ + struct flush_tlb_data *fd = (struct flush_tlb_data *)info; + local_flush_tlb_one(fd->addr1, fd->addr2); +} + +void flush_tlb_one(unsigned long asid, unsigned long vaddr) +{ + struct flush_tlb_data fd; + + fd.addr1 = asid; + fd.addr2 = vaddr; + + smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); + local_flush_tlb_one(asid, vaddr); +} + +#endif diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c new file mode 100644 index 000000000..daf0b53ee --- /dev/null +++ b/arch/sh/kernel/stacktrace.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/stacktrace.c + * + * Stack trace management functions + * + * Copyright (C) 2006 - 2008 Paul Mundt + */ +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/stacktrace.h> +#include <linux/thread_info.h> +#include <linux/module.h> +#include <asm/unwinder.h> +#include <asm/ptrace.h> +#include <asm/stacktrace.h> + +/* + * Save stack-backtrace addresses into a stack_trace buffer. + */ +static void save_stack_address(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = data; + + if (!reliable) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops = { + .address = save_stack_address, +}; + +void save_stack_trace(struct stack_trace *trace) +{ + unsigned long *sp = (unsigned long *)current_stack_pointer; + + unwind_stack(current, NULL, sp, &save_stack_ops, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +static void +save_stack_address_nosched(void *data, unsigned long addr, int reliable) +{ + struct stack_trace *trace = (struct stack_trace *)data; + + if (!reliable) + return; + + if (in_sched_functions(addr)) + return; + + if (trace->skip > 0) { + trace->skip--; + return; + } + + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = addr; +} + +static const struct stacktrace_ops save_stack_ops_nosched = { + .address = save_stack_address_nosched, +}; + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + unsigned long *sp = (unsigned long *)tsk->thread.sp; + + unwind_stack(current, NULL, sp, &save_stack_ops_nosched, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); diff --git a/arch/sh/kernel/swsusp.c b/arch/sh/kernel/swsusp.c new file mode 100644 index 000000000..0b772d6d7 --- /dev/null +++ b/arch/sh/kernel/swsusp.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * swsusp.c - SuperH hibernation support + * + * Copyright (C) 2009 Magnus Damm + */ + +#include <linux/mm.h> +#include <linux/sched.h> +#include <linux/suspend.h> +#include <asm/suspend.h> +#include <asm/sections.h> +#include <asm/tlbflush.h> +#include <asm/page.h> +#include <asm/fpu.h> + +struct swsusp_arch_regs swsusp_arch_regs_cpu0; + +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; + unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; + + return (pfn >= begin_pfn) && (pfn < end_pfn); +} + +void save_processor_state(void) +{ + init_fpu(current); +} + +void restore_processor_state(void) +{ + 
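+ /*
+ * The restored image may not match the translations the MMU has
+ * cached, so drop any stale TLB entries before carrying on.
+ */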
local_flush_tlb_all(); +} diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c new file mode 100644 index 000000000..a5a7b33ed --- /dev/null +++ b/arch/sh/kernel/sys_sh.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sh/kernel/sys_sh.c + * + * This file contains various random system calls that + * have a non-standard calling sequence on the Linux/SuperH + * platform. + * + * Taken from i386 version. + */ +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/sem.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/stat.h> +#include <linux/syscalls.h> +#include <linux/mman.h> +#include <linux/file.h> +#include <linux/utsname.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/ipc.h> +#include <asm/syscalls.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/cacheflush.h> +#include <asm/cachectl.h> + +asmlinkage int old_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + int fd, unsigned long off) +{ + if (off & ~PAGE_MASK) + return -EINVAL; + return ksys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT); +} + +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff) +{ + /* + * The shift for mmap2 is constant, regardless of PAGE_SIZE + * setting. + */ + if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1)) + return -EINVAL; + + pgoff >>= PAGE_SHIFT - 12; + + return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff); +} + +/* sys_cacheflush -- flush (part of) the processor cache. */ +asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op) +{ + struct vm_area_struct *vma; + + if ((op <= 0) || (op > (CACHEFLUSH_D_PURGE|CACHEFLUSH_I))) + return -EINVAL; + + /* + * Verify that the specified address region actually belongs + * to this process. + */ + if (addr + len < addr) + return -EFAULT; + + mmap_read_lock(current->mm); + vma = find_vma (current->mm, addr); + if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) { + mmap_read_unlock(current->mm); + return -EFAULT; + } + + switch (op & CACHEFLUSH_D_PURGE) { + case CACHEFLUSH_D_INVAL: + __flush_invalidate_region((void *)addr, len); + break; + case CACHEFLUSH_D_WB: + __flush_wback_region((void *)addr, len); + break; + case CACHEFLUSH_D_PURGE: + __flush_purge_region((void *)addr, len); + break; + } + + if (op & CACHEFLUSH_I) + flush_icache_range(addr, addr+len); + + mmap_read_unlock(current->mm); + return 0; +} diff --git a/arch/sh/kernel/sys_sh32.c b/arch/sh/kernel/sys_sh32.c new file mode 100644 index 000000000..9dca56850 --- /dev/null +++ b/arch/sh/kernel/sys_sh32.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/mm.h> +#include <linux/smp.h> +#include <linux/sem.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/stat.h> +#include <linux/syscalls.h> +#include <linux/mman.h> +#include <linux/file.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/ipc.h> +#include <asm/cacheflush.h> +#include <linux/uaccess.h> +#include <asm/unistd.h> +#include <asm/syscalls.h> + +/* + * sys_pipe() is the normal C calling standard for creating + * a pipe. It's not the way Unix traditionally does this, though. 
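+ *
+ * (On SuperH both descriptors come back in registers: the return value
+ * carries fd[0] and regs[1] in the saved pt_regs carries fd[1], as the
+ * body below shows.)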
+ */ +asmlinkage int sys_sh_pipe(void) +{ + int fd[2]; + int error; + + error = do_pipe_flags(fd, 0); + if (!error) { + current_pt_regs()->regs[1] = fd[1]; + return fd[0]; + } + return error; +} + +asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf, + size_t count, long dummy, loff_t pos) +{ + return ksys_pread64(fd, buf, count, pos); +} + +asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf, + size_t count, long dummy, loff_t pos) +{ + return ksys_pwrite64(fd, buf, count, pos); +} + +asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1, + u32 len0, u32 len1, int advice) +{ +#ifdef __LITTLE_ENDIAN__ + return ksys_fadvise64_64(fd, (u64)offset1 << 32 | offset0, + (u64)len1 << 32 | len0, advice); +#else + return ksys_fadvise64_64(fd, (u64)offset0 << 32 | offset1, + (u64)len0 << 32 | len1, advice); +#endif +} diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile new file mode 100644 index 000000000..b265e4bc1 --- /dev/null +++ b/arch/sh/kernel/syscalls/Makefile @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: GPL-2.0 +kapi := arch/$(SRCARCH)/include/generated/asm +uapi := arch/$(SRCARCH)/include/generated/uapi/asm + +$(shell mkdir -p $(uapi) $(kapi)) + +syscall := $(src)/syscall.tbl +syshdr := $(srctree)/scripts/syscallhdr.sh +systbl := $(srctree)/scripts/syscalltbl.sh + +quiet_cmd_syshdr = SYSHDR $@ + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@ + +quiet_cmd_systbl = SYSTBL $@ + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ + +$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE + $(call if_changed,syshdr) + +$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE + $(call if_changed,systbl) + +uapisyshdr-y += unistd_32.h +kapisyshdr-y += syscall_table.h + +uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y)) +kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y)) +targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y)) + +PHONY += all +all: $(uapisyshdr-y) $(kapisyshdr-y) + @: diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl new file mode 100644 index 000000000..2de85c977 --- /dev/null +++ b/arch/sh/kernel/syscalls/syscall.tbl @@ -0,0 +1,455 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sh +# +# The format is: +# <number> <abi> <name> <entry point> +# +# The <abi> is always "common" for this file +# +0 common restart_syscall sys_restart_syscall +1 common exit sys_exit +2 common fork sys_fork +3 common read sys_read +4 common write sys_write +5 common open sys_open +6 common close sys_close +7 common waitpid sys_waitpid +8 common creat sys_creat +9 common link sys_link +10 common unlink sys_unlink +11 common execve sys_execve +12 common chdir sys_chdir +13 common time sys_time32 +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common lchown sys_lchown16 +# 17 was break +18 common oldstat sys_stat +19 common lseek sys_lseek +20 common getpid sys_getpid +21 common mount sys_mount +22 common umount sys_oldumount +23 common setuid sys_setuid16 +24 common getuid sys_getuid16 +25 common stime sys_stime32 +26 common ptrace sys_ptrace +27 common alarm sys_alarm +28 common oldfstat sys_fstat +29 common pause sys_pause +30 common utime sys_utime32 +# 31 was stty +# 32 was gtty +33 common access sys_access +34 common nice sys_nice +# 35 was ftime +36 common sync sys_sync +37 common kill sys_kill +38 common rename sys_rename +39 common mkdir sys_mkdir +40 common rmdir sys_rmdir +41 common 
dup sys_dup +42 common pipe sys_sh_pipe +43 common times sys_times +# 44 was prof +45 common brk sys_brk +46 common setgid sys_setgid16 +47 common getgid sys_getgid16 +48 common signal sys_signal +49 common geteuid sys_geteuid16 +50 common getegid sys_getegid16 +51 common acct sys_acct +52 common umount2 sys_umount +# 53 was lock +54 common ioctl sys_ioctl +55 common fcntl sys_fcntl +# 56 was mpx +57 common setpgid sys_setpgid +# 58 was ulimit +# 59 was olduname +60 common umask sys_umask +61 common chroot sys_chroot +62 common ustat sys_ustat +63 common dup2 sys_dup2 +64 common getppid sys_getppid +65 common getpgrp sys_getpgrp +66 common setsid sys_setsid +67 common sigaction sys_sigaction +68 common sgetmask sys_sgetmask +69 common ssetmask sys_ssetmask +70 common setreuid sys_setreuid16 +71 common setregid sys_setregid16 +72 common sigsuspend sys_sigsuspend +73 common sigpending sys_sigpending +74 common sethostname sys_sethostname +75 common setrlimit sys_setrlimit +76 common getrlimit sys_old_getrlimit +77 common getrusage sys_getrusage +78 common gettimeofday sys_gettimeofday +79 common settimeofday sys_settimeofday +80 common getgroups sys_getgroups16 +81 common setgroups sys_setgroups16 +# 82 was select +83 common symlink sys_symlink +84 common oldlstat sys_lstat +85 common readlink sys_readlink +86 common uselib sys_uselib +87 common swapon sys_swapon +88 common reboot sys_reboot +89 common readdir sys_old_readdir +90 common mmap old_mmap +91 common munmap sys_munmap +92 common truncate sys_truncate +93 common ftruncate sys_ftruncate +94 common fchmod sys_fchmod +95 common fchown sys_fchown16 +96 common getpriority sys_getpriority +97 common setpriority sys_setpriority +# 98 was profil +99 common statfs sys_statfs +100 common fstatfs sys_fstatfs +# 101 was ioperm +102 common socketcall sys_socketcall +103 common syslog sys_syslog +104 common setitimer sys_setitimer +105 common getitimer sys_getitimer +106 common stat sys_newstat +107 common lstat sys_newlstat +108 common fstat sys_newfstat +109 common olduname sys_uname +# 110 was iopl +111 common vhangup sys_vhangup +# 112 was idle +# 113 was vm86old +114 common wait4 sys_wait4 +115 common swapoff sys_swapoff +116 common sysinfo sys_sysinfo +117 common ipc sys_ipc +118 common fsync sys_fsync +119 common sigreturn sys_sigreturn +120 common clone sys_clone +121 common setdomainname sys_setdomainname +122 common uname sys_newuname +123 common cacheflush sys_cacheflush +124 common adjtimex sys_adjtimex_time32 +125 common mprotect sys_mprotect +126 common sigprocmask sys_sigprocmask +# 127 was create_module +128 common init_module sys_init_module +129 common delete_module sys_delete_module +# 130 was get_kernel_syms +131 common quotactl sys_quotactl +132 common getpgid sys_getpgid +133 common fchdir sys_fchdir +134 common bdflush sys_ni_syscall +135 common sysfs sys_sysfs +136 common personality sys_personality +# 137 was afs_syscall +138 common setfsuid sys_setfsuid16 +139 common setfsgid sys_setfsgid16 +140 common _llseek sys_llseek +141 common getdents sys_getdents +142 common _newselect sys_select +143 common flock sys_flock +144 common msync sys_msync +145 common readv sys_readv +146 common writev sys_writev +147 common getsid sys_getsid +148 common fdatasync sys_fdatasync +149 common _sysctl sys_ni_syscall +150 common mlock sys_mlock +151 common munlock sys_munlock +152 common mlockall sys_mlockall +153 common munlockall sys_munlockall +154 common sched_setparam sys_sched_setparam +155 common sched_getparam sys_sched_getparam 
+156 common sched_setscheduler sys_sched_setscheduler +157 common sched_getscheduler sys_sched_getscheduler +158 common sched_yield sys_sched_yield +159 common sched_get_priority_max sys_sched_get_priority_max +160 common sched_get_priority_min sys_sched_get_priority_min +161 common sched_rr_get_interval sys_sched_rr_get_interval_time32 +162 common nanosleep sys_nanosleep_time32 +163 common mremap sys_mremap +164 common setresuid sys_setresuid16 +165 common getresuid sys_getresuid16 +# 166 was vm86 +# 167 was query_module +168 common poll sys_poll +169 common nfsservctl sys_ni_syscall +170 common setresgid sys_setresgid16 +171 common getresgid sys_getresgid16 +172 common prctl sys_prctl +173 common rt_sigreturn sys_rt_sigreturn +174 common rt_sigaction sys_rt_sigaction +175 common rt_sigprocmask sys_rt_sigprocmask +176 common rt_sigpending sys_rt_sigpending +177 common rt_sigtimedwait sys_rt_sigtimedwait_time32 +178 common rt_sigqueueinfo sys_rt_sigqueueinfo +179 common rt_sigsuspend sys_rt_sigsuspend +180 common pread64 sys_pread_wrapper +181 common pwrite64 sys_pwrite_wrapper +182 common chown sys_chown16 +183 common getcwd sys_getcwd +184 common capget sys_capget +185 common capset sys_capset +186 common sigaltstack sys_sigaltstack +187 common sendfile sys_sendfile +# 188 is reserved for getpmsg +# 189 is reserved for putpmsg +190 common vfork sys_vfork +191 common ugetrlimit sys_getrlimit +192 common mmap2 sys_mmap2 +193 common truncate64 sys_truncate64 +194 common ftruncate64 sys_ftruncate64 +195 common stat64 sys_stat64 +196 common lstat64 sys_lstat64 +197 common fstat64 sys_fstat64 +198 common lchown32 sys_lchown +199 common getuid32 sys_getuid +200 common getgid32 sys_getgid +201 common geteuid32 sys_geteuid +202 common getegid32 sys_getegid +203 common setreuid32 sys_setreuid +204 common setregid32 sys_setregid +205 common getgroups32 sys_getgroups +206 common setgroups32 sys_setgroups +207 common fchown32 sys_fchown +208 common setresuid32 sys_setresuid +209 common getresuid32 sys_getresuid +210 common setresgid32 sys_setresgid +211 common getresgid32 sys_getresgid +212 common chown32 sys_chown +213 common setuid32 sys_setuid +214 common setgid32 sys_setgid +215 common setfsuid32 sys_setfsuid +216 common setfsgid32 sys_setfsgid +217 common pivot_root sys_pivot_root +218 common mincore sys_mincore +219 common madvise sys_madvise +220 common getdents64 sys_getdents64 +221 common fcntl64 sys_fcntl64 +# 222 is reserved for tux +# 223 is unused +224 common gettid sys_gettid +225 common readahead sys_readahead +226 common setxattr sys_setxattr +227 common lsetxattr sys_lsetxattr +228 common fsetxattr sys_fsetxattr +229 common getxattr sys_getxattr +230 common lgetxattr sys_lgetxattr +231 common fgetxattr sys_fgetxattr +232 common listxattr sys_listxattr +233 common llistxattr sys_llistxattr +234 common flistxattr sys_flistxattr +235 common removexattr sys_removexattr +236 common lremovexattr sys_lremovexattr +237 common fremovexattr sys_fremovexattr +238 common tkill sys_tkill +239 common sendfile64 sys_sendfile64 +240 common futex sys_futex_time32 +241 common sched_setaffinity sys_sched_setaffinity +242 common sched_getaffinity sys_sched_getaffinity +# 243 is reserved for set_thread_area +# 244 is reserved for get_thread_area +245 common io_setup sys_io_setup +246 common io_destroy sys_io_destroy +247 common io_getevents sys_io_getevents_time32 +248 common io_submit sys_io_submit +249 common io_cancel sys_io_cancel +250 common fadvise64 sys_fadvise64 +# 251 is unused +252 common 
exit_group sys_exit_group +253 common lookup_dcookie sys_lookup_dcookie +254 common epoll_create sys_epoll_create +255 common epoll_ctl sys_epoll_ctl +256 common epoll_wait sys_epoll_wait +257 common remap_file_pages sys_remap_file_pages +258 common set_tid_address sys_set_tid_address +259 common timer_create sys_timer_create +260 common timer_settime sys_timer_settime32 +261 common timer_gettime sys_timer_gettime32 +262 common timer_getoverrun sys_timer_getoverrun +263 common timer_delete sys_timer_delete +264 common clock_settime sys_clock_settime32 +265 common clock_gettime sys_clock_gettime32 +266 common clock_getres sys_clock_getres_time32 +267 common clock_nanosleep sys_clock_nanosleep_time32 +268 common statfs64 sys_statfs64 +269 common fstatfs64 sys_fstatfs64 +270 common tgkill sys_tgkill +271 common utimes sys_utimes_time32 +272 common fadvise64_64 sys_fadvise64_64_wrapper +# 273 is reserved for vserver +274 common mbind sys_mbind +275 common get_mempolicy sys_get_mempolicy +276 common set_mempolicy sys_set_mempolicy +277 common mq_open sys_mq_open +278 common mq_unlink sys_mq_unlink +279 common mq_timedsend sys_mq_timedsend_time32 +280 common mq_timedreceive sys_mq_timedreceive_time32 +281 common mq_notify sys_mq_notify +282 common mq_getsetattr sys_mq_getsetattr +283 common kexec_load sys_kexec_load +284 common waitid sys_waitid +285 common add_key sys_add_key +286 common request_key sys_request_key +287 common keyctl sys_keyctl +288 common ioprio_set sys_ioprio_set +289 common ioprio_get sys_ioprio_get +290 common inotify_init sys_inotify_init +291 common inotify_add_watch sys_inotify_add_watch +292 common inotify_rm_watch sys_inotify_rm_watch +# 293 is unused +294 common migrate_pages sys_migrate_pages +295 common openat sys_openat +296 common mkdirat sys_mkdirat +297 common mknodat sys_mknodat +298 common fchownat sys_fchownat +299 common futimesat sys_futimesat_time32 +300 common fstatat64 sys_fstatat64 +301 common unlinkat sys_unlinkat +302 common renameat sys_renameat +303 common linkat sys_linkat +304 common symlinkat sys_symlinkat +305 common readlinkat sys_readlinkat +306 common fchmodat sys_fchmodat +307 common faccessat sys_faccessat +308 common pselect6 sys_pselect6_time32 +309 common ppoll sys_ppoll_time32 +310 common unshare sys_unshare +311 common set_robust_list sys_set_robust_list +312 common get_robust_list sys_get_robust_list +313 common splice sys_splice +314 common sync_file_range sys_sync_file_range +315 common tee sys_tee +316 common vmsplice sys_vmsplice +317 common move_pages sys_move_pages +318 common getcpu sys_getcpu +319 common epoll_pwait sys_epoll_pwait +320 common utimensat sys_utimensat_time32 +321 common signalfd sys_signalfd +322 common timerfd_create sys_timerfd_create +323 common eventfd sys_eventfd +324 common fallocate sys_fallocate +325 common timerfd_settime sys_timerfd_settime32 +326 common timerfd_gettime sys_timerfd_gettime32 +327 common signalfd4 sys_signalfd4 +328 common eventfd2 sys_eventfd2 +329 common epoll_create1 sys_epoll_create1 +330 common dup3 sys_dup3 +331 common pipe2 sys_pipe2 +332 common inotify_init1 sys_inotify_init1 +333 common preadv sys_preadv +334 common pwritev sys_pwritev +335 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +336 common perf_event_open sys_perf_event_open +337 common fanotify_init sys_fanotify_init +338 common fanotify_mark sys_fanotify_mark +339 common prlimit64 sys_prlimit64 +340 common socket sys_socket +341 common bind sys_bind +342 common connect sys_connect +343 common listen sys_listen +344 
common accept sys_accept +345 common getsockname sys_getsockname +346 common getpeername sys_getpeername +347 common socketpair sys_socketpair +348 common send sys_send +349 common sendto sys_sendto +350 common recv sys_recv +351 common recvfrom sys_recvfrom +352 common shutdown sys_shutdown +353 common setsockopt sys_setsockopt +354 common getsockopt sys_getsockopt +355 common sendmsg sys_sendmsg +356 common recvmsg sys_recvmsg +357 common recvmmsg sys_recvmmsg_time32 +358 common accept4 sys_accept4 +359 common name_to_handle_at sys_name_to_handle_at +360 common open_by_handle_at sys_open_by_handle_at +361 common clock_adjtime sys_clock_adjtime32 +362 common syncfs sys_syncfs +363 common sendmmsg sys_sendmmsg +364 common setns sys_setns +365 common process_vm_readv sys_process_vm_readv +366 common process_vm_writev sys_process_vm_writev +367 common kcmp sys_kcmp +368 common finit_module sys_finit_module +369 common sched_getattr sys_sched_getattr +370 common sched_setattr sys_sched_setattr +371 common renameat2 sys_renameat2 +372 common seccomp sys_seccomp +373 common getrandom sys_getrandom +374 common memfd_create sys_memfd_create +375 common bpf sys_bpf +376 common execveat sys_execveat +377 common userfaultfd sys_userfaultfd +378 common membarrier sys_membarrier +379 common mlock2 sys_mlock2 +380 common copy_file_range sys_copy_file_range +381 common preadv2 sys_preadv2 +382 common pwritev2 sys_pwritev2 +383 common statx sys_statx +384 common pkey_mprotect sys_pkey_mprotect +385 common pkey_alloc sys_pkey_alloc +386 common pkey_free sys_pkey_free +387 common rseq sys_rseq +# room for arch specific syscalls +393 common semget sys_semget +394 common semctl sys_semctl +395 common shmget sys_shmget +396 common shmctl sys_shmctl +397 common shmat sys_shmat +398 common shmdt sys_shmdt +399 common msgget sys_msgget +400 common msgsnd sys_msgsnd +401 common msgrcv sys_msgrcv +402 common msgctl sys_msgctl +403 common clock_gettime64 sys_clock_gettime +404 common clock_settime64 sys_clock_settime +405 common clock_adjtime64 sys_clock_adjtime +406 common clock_getres_time64 sys_clock_getres +407 common clock_nanosleep_time64 sys_clock_nanosleep +408 common timer_gettime64 sys_timer_gettime +409 common timer_settime64 sys_timer_settime +410 common timerfd_gettime64 sys_timerfd_gettime +411 common timerfd_settime64 sys_timerfd_settime +412 common utimensat_time64 sys_utimensat +413 common pselect6_time64 sys_pselect6 +414 common ppoll_time64 sys_ppoll +416 common io_pgetevents_time64 sys_io_pgetevents +417 common recvmmsg_time64 sys_recvmmsg +418 common mq_timedsend_time64 sys_mq_timedsend +419 common mq_timedreceive_time64 sys_mq_timedreceive +420 common semtimedop_time64 sys_semtimedop +421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait +422 common futex_time64 sys_futex +423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval +424 common pidfd_send_signal sys_pidfd_send_signal +425 common io_uring_setup sys_io_uring_setup +426 common io_uring_enter sys_io_uring_enter +427 common io_uring_register sys_io_uring_register +428 common open_tree sys_open_tree +429 common move_mount sys_move_mount +430 common fsopen sys_fsopen +431 common fsconfig sys_fsconfig +432 common fsmount sys_fsmount +433 common fspick sys_fspick +434 common pidfd_open sys_pidfd_open +# 435 reserved for clone3 +436 common close_range sys_close_range +437 common openat2 sys_openat2 +438 common pidfd_getfd sys_pidfd_getfd +439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise +441 
common epoll_pwait2 sys_epoll_pwait2 +442 common mount_setattr sys_mount_setattr +443 common quotactl_fd sys_quotactl_fd +444 common landlock_create_ruleset sys_landlock_create_ruleset +445 common landlock_add_rule sys_landlock_add_rule +446 common landlock_restrict_self sys_landlock_restrict_self +# 447 reserved for memfd_secret +448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv +450 common set_mempolicy_home_node sys_set_mempolicy_home_node diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S new file mode 100644 index 000000000..bd1a9c544 --- /dev/null +++ b/arch/sh/kernel/syscalls_32.S @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * arch/sh/kernel/syscalls.S + * + * System call table for SuperH + * + * Copyright (C) 1999, 2000, 2002 Niibe Yutaka + * Copyright (C) 2003 Paul Mundt + */ +#include <linux/sys.h> +#include <linux/linkage.h> + +#define __SYSCALL(nr, entry) .long entry + .data +ENTRY(sys_call_table) +#include <asm/syscall_table.h> +#undef __SYSCALL diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c new file mode 100644 index 000000000..821a09cbd --- /dev/null +++ b/arch/sh/kernel/time.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/time.c + * + * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org> + * Copyright (C) 2002 - 2009 Paul Mundt + * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org> + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/profile.h> +#include <linux/timex.h> +#include <linux/sched.h> +#include <linux/clockchips.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/rtc.h> +#include <asm/clock.h> +#include <asm/rtc.h> +#include <asm/platform_early.h> + +static void __init sh_late_time_init(void) +{ + /* + * Make sure all compiled-in early timers register themselves. + * + * Run probe() for two "earlytimer" devices, these will be the + * clockevents and clocksource devices respectively. In the event + * that only a clockevents device is available, we -ENODEV on the + * clocksource and the jiffies clocksource is used transparently + * instead. No error handling is necessary here. + */ + sh_early_platform_driver_register_all("earlytimer"); + sh_early_platform_driver_probe("earlytimer", 2, 0); +} + +void __init time_init(void) +{ + timer_probe(); + + clk_init(); + + late_time_init = sh_late_time_init; +} diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c new file mode 100644 index 000000000..2d2a7509b --- /dev/null +++ b/arch/sh/kernel/topology.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/topology.c + * + * Copyright (C) 2007 Paul Mundt + */ +#include <linux/cpu.h> +#include <linux/cpumask.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/topology.h> +#include <linux/node.h> +#include <linux/nodemask.h> +#include <linux/export.h> + +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +cpumask_t cpu_core_map[NR_CPUS]; +EXPORT_SYMBOL(cpu_core_map); + +static cpumask_t cpu_coregroup_map(int cpu) +{ + /* + * Presently all SH-X3 SMP cores are multi-cores, so just keep it + * simple until we have a method for determining topology.. 
+ */ + return *cpu_possible_mask; +} + +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + return &cpu_core_map[cpu]; +} + +int arch_update_cpu_topology(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + cpu_core_map[cpu] = cpu_coregroup_map(cpu); + + return 0; +} + +static int __init topology_init(void) +{ + int i, ret; + + for_each_present_cpu(i) { + struct cpu *c = &per_cpu(cpu_devices, i); + + c->hotpluggable = 1; + + ret = register_cpu(c, i); + if (unlikely(ret)) + printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + +#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP) + /* + * In the UP case, make sure the CPU association is still + * registered under each node. Without this, sysfs fails + * to make the connection between nodes other than node0 + * and cpu0. + */ + for_each_online_node(i) + if (i != numa_node_id()) + register_cpu_under_node(raw_smp_processor_id(), i); +#endif + + return 0; +} +subsys_initcall(topology_init); diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c new file mode 100644 index 000000000..01884054a --- /dev/null +++ b/arch/sh/kernel/traps.c @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> +#include <linux/io.h> +#include <linux/types.h> +#include <linux/kdebug.h> +#include <linux/signal.h> +#include <linux/sched.h> +#include <linux/sched/debug.h> +#include <linux/sched/task_stack.h> +#include <linux/uaccess.h> +#include <linux/hardirq.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/sched/signal.h> + +#include <linux/extable.h> +#include <linux/module.h> /* print_modules */ +#include <asm/unwinder.h> +#include <asm/traps.h> + +static DEFINE_SPINLOCK(die_lock); + +void __noreturn die(const char *str, struct pt_regs *regs, long err) +{ + static int die_counter; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + + printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter); + print_modules(); + show_regs(regs); + + printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm, + task_pid_nr(current), task_stack_page(current) + 1); + + if (!user_mode(regs) || in_interrupt()) + dump_mem("Stack: ", KERN_DEFAULT, regs->regs[15], + THREAD_SIZE + (unsigned long)task_stack_page(current)); + + notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irq(&die_lock); + oops_exit(); + + if (kexec_should_crash(current)) + crash_kexec(regs); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + make_task_dead(SIGSEGV); +} + +void die_if_kernel(const char *str, struct pt_regs *regs, long err) +{ + if (!user_mode(regs)) + die(str, regs, err); +} + +/* + * try and fix up kernelspace address errors + * - userspace errors just cause EFAULT to be returned, resulting in SEGV + * - kernel/userspace interfaces cause a jump to an appropriate handler + * - other kernel errors are bad + */ +void die_if_no_fixup(const char *str, struct pt_regs *regs, long err) +{ + if (!user_mode(regs)) { + const struct exception_table_entry *fixup; + fixup = search_exception_tables(regs->pc); + if (fixup) { + regs->pc = fixup->fixup; + return; + } + + die(str, regs, err); + } +} + +#ifdef CONFIG_GENERIC_BUG +static void handle_BUG(struct pt_regs *regs) +{ + const struct bug_entry *bug; + unsigned long bugaddr = regs->pc; + enum bug_trap_type tt; + + if (!is_valid_bugaddr(bugaddr)) + goto invalid; + + bug = 
find_bug(bugaddr); + + /* Switch unwinders when unwind_stack() is called */ + if (bug->flags & BUGFLAG_UNWINDER) + unwinder_faulted = 1; + + tt = report_bug(bugaddr, regs); + if (tt == BUG_TRAP_TYPE_WARN) { + regs->pc += instruction_size(bugaddr); + return; + } + +invalid: + die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff); +} + +int is_valid_bugaddr(unsigned long addr) +{ + insn_size_t opcode; + + if (addr < PAGE_OFFSET) + return 0; + if (get_kernel_nofault(opcode, (insn_size_t *)addr)) + return 0; + if (opcode == TRAPA_BUG_OPCODE) + return 1; + + return 0; +} +#endif + +/* + * Generic trap handler. + */ +BUILD_TRAP_HANDLER(debug) +{ + TRAP_HANDLER_DECL; + + /* Rewind */ + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + + if (notify_die(DIE_TRAP, "debug trap", regs, 0, vec & 0xff, + SIGTRAP) == NOTIFY_STOP) + return; + + force_sig(SIGTRAP); +} + +/* + * Special handler for BUG() traps. + */ +BUILD_TRAP_HANDLER(bug) +{ + TRAP_HANDLER_DECL; + + /* Rewind */ + regs->pc -= instruction_size(__raw_readw(regs->pc - 4)); + + if (notify_die(DIE_TRAP, "bug trap", regs, 0, TRAPA_BUG_OPCODE & 0xff, + SIGTRAP) == NOTIFY_STOP) + return; + +#ifdef CONFIG_GENERIC_BUG + if (__kernel_text_address(instruction_pointer(regs))) { + insn_size_t insn = *(insn_size_t *)instruction_pointer(regs); + if (insn == TRAPA_BUG_OPCODE) + handle_BUG(regs); + return; + } +#endif + + force_sig(SIGTRAP); +} + +#ifdef CONFIG_DYNAMIC_FTRACE +extern void arch_ftrace_nmi_enter(void); +extern void arch_ftrace_nmi_exit(void); +#else +static inline void arch_ftrace_nmi_enter(void) { } +static inline void arch_ftrace_nmi_exit(void) { } +#endif + +BUILD_TRAP_HANDLER(nmi) +{ + TRAP_HANDLER_DECL; + + arch_ftrace_nmi_enter(); + + nmi_enter(); + this_cpu_inc(irq_stat.__nmi_count); + + switch (notify_die(DIE_NMI, "NMI", regs, 0, vec & 0xff, SIGINT)) { + case NOTIFY_OK: + case NOTIFY_STOP: + break; + case NOTIFY_BAD: + die("Fatal Non-Maskable Interrupt", regs, SIGINT); + default: + printk(KERN_ALERT "Got NMI, but nobody cared. Ignoring...\n"); + break; + } + + nmi_exit(); + + arch_ftrace_nmi_exit(); +} diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c new file mode 100644 index 000000000..6cdda3a62 --- /dev/null +++ b/arch/sh/kernel/traps_32.c @@ -0,0 +1,795 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * 'traps.c' handles hardware traps and faults after we have saved some + * state in 'entry.S'. 
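The BUG, debug and NMI trap handlers above all report through notify_die(), so other kernel code can observe these events by hooking the die notifier chain. A minimal, hypothetical module-style sketch (the callback and module names are illustrative, not part of this patch):

	#include <linux/kdebug.h>
	#include <linux/module.h>
	#include <linux/notifier.h>

	/* Hypothetical observer for events reported via notify_die(). */
	static int trap_watch(struct notifier_block *nb, unsigned long val, void *data)
	{
		struct die_args *args = data;

		if (val == DIE_NMI)
			pr_info("NMI observed, vector %d\n", args->trapnr);

		return NOTIFY_DONE;	/* don't interfere with normal handling */
	}

	static struct notifier_block trap_watch_nb = {
		.notifier_call = trap_watch,
	};

	static int __init trap_watch_init(void)
	{
		return register_die_notifier(&trap_watch_nb);
	}

	static void __exit trap_watch_exit(void)
	{
		unregister_die_notifier(&trap_watch_nb);
	}

	module_init(trap_watch_init);
	module_exit(trap_watch_exit);
	MODULE_LICENSE("GPL");

Note that the NMI handler above only treats NOTIFY_OK and NOTIFY_STOP as "handled"; a callback returning NOTIFY_DONE, as here, still lets the "Got NMI, but nobody cared" message print.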
+ * + * SuperH version: Copyright (C) 1999 Niibe Yutaka + * Copyright (C) 2000 Philipp Rumpf + * Copyright (C) 2000 David Howells + * Copyright (C) 2002 - 2010 Paul Mundt + */ +#include <linux/kernel.h> +#include <linux/ptrace.h> +#include <linux/hardirq.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/kallsyms.h> +#include <linux/io.h> +#include <linux/bug.h> +#include <linux/debug_locks.h> +#include <linux/kdebug.h> +#include <linux/limits.h> +#include <linux/sysfs.h> +#include <linux/uaccess.h> +#include <linux/perf_event.h> +#include <linux/sched/task_stack.h> + +#include <asm/alignment.h> +#include <asm/fpu.h> +#include <asm/kprobes.h> +#include <asm/traps.h> +#include <asm/bl_bit.h> + +#ifdef CONFIG_CPU_SH2 +# define TRAP_RESERVED_INST 4 +# define TRAP_ILLEGAL_SLOT_INST 6 +# define TRAP_ADDRESS_ERROR 9 +# ifdef CONFIG_CPU_SH2A +# define TRAP_UBC 12 +# define TRAP_FPU_ERROR 13 +# define TRAP_DIVZERO_ERROR 17 +# define TRAP_DIVOVF_ERROR 18 +# endif +#else +#define TRAP_RESERVED_INST 12 +#define TRAP_ILLEGAL_SLOT_INST 13 +#endif + +static inline void sign_extend(unsigned int count, unsigned char *dst) +{ +#ifdef __LITTLE_ENDIAN__ + if ((count == 1) && dst[0] & 0x80) { + dst[1] = 0xff; + dst[2] = 0xff; + dst[3] = 0xff; + } + if ((count == 2) && dst[1] & 0x80) { + dst[2] = 0xff; + dst[3] = 0xff; + } +#else + if ((count == 1) && dst[3] & 0x80) { + dst[2] = 0xff; + dst[1] = 0xff; + dst[0] = 0xff; + } + if ((count == 2) && dst[2] & 0x80) { + dst[1] = 0xff; + dst[0] = 0xff; + } +#endif +} + +static struct mem_access user_mem_access = { + copy_from_user, + copy_to_user, +}; + +static unsigned long copy_from_kernel_wrapper(void *dst, const void __user *src, + unsigned long cnt) +{ + return copy_from_kernel_nofault(dst, (const void __force *)src, cnt); +} + +static unsigned long copy_to_kernel_wrapper(void __user *dst, const void *src, + unsigned long cnt) +{ + return copy_to_kernel_nofault((void __force *)dst, src, cnt); +} + +static struct mem_access kernel_mem_access = { + copy_from_kernel_wrapper, + copy_to_kernel_wrapper, +}; + +/* + * handle an instruction that does an unaligned memory access by emulating the + * desired behaviour + * - note that PC _may not_ point to the faulting instruction + * (if that instruction is in a branch delay slot) + * - return 0 if emulation okay, -EFAULT on existential error + */ +static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs, + struct mem_access *ma) +{ + int ret, index, count; + unsigned long *rm, *rn; + unsigned char *src, *dst; + unsigned char __user *srcu, *dstu; + + index = (instruction>>8)&15; /* 0x0F00 */ + rn = &regs->regs[index]; + + index = (instruction>>4)&15; /* 0x00F0 */ + rm = &regs->regs[index]; + + count = 1<<(instruction&3); + + switch (count) { + case 1: inc_unaligned_byte_access(); break; + case 2: inc_unaligned_word_access(); break; + case 4: inc_unaligned_dword_access(); break; + case 8: inc_unaligned_multi_access(); break; + } + + ret = -EFAULT; + switch (instruction>>12) { + case 0: /* mov.[bwl] to/from memory via r0+rn */ + if (instruction & 8) { + /* from memory */ + srcu = (unsigned char __user *)*rm; + srcu += regs->regs[0]; + dst = (unsigned char *)rn; + *(unsigned long *)dst = 0; + +#if !defined(__LITTLE_ENDIAN__) + dst += 4-count; +#endif + if (ma->from(dst, srcu, count)) + goto fetch_fault; + + sign_extend(count, dst); + } else { + /* to memory */ + src = (unsigned char *)rm; +#if !defined(__LITTLE_ENDIAN__) + src += 4-count; +#endif + dstu = (unsigned char __user *)*rn; + dstu
+= regs->regs[0]; + + if (ma->to(dstu, src, count)) + goto fetch_fault; + } + ret = 0; + break; + + case 1: /* mov.l Rm,@(disp,Rn) */ + src = (unsigned char*) rm; + dstu = (unsigned char __user *)*rn; + dstu += (instruction&0x000F)<<2; + + if (ma->to(dstu, src, 4)) + goto fetch_fault; + ret = 0; + break; + + case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ + if (instruction & 4) + *rn -= count; + src = (unsigned char*) rm; + dstu = (unsigned char __user *)*rn; +#if !defined(__LITTLE_ENDIAN__) + src += 4-count; +#endif + if (ma->to(dstu, src, count)) + goto fetch_fault; + ret = 0; + break; + + case 5: /* mov.l @(disp,Rm),Rn */ + srcu = (unsigned char __user *)*rm; + srcu += (instruction & 0x000F) << 2; + dst = (unsigned char *)rn; + *(unsigned long *)dst = 0; + + if (ma->from(dst, srcu, 4)) + goto fetch_fault; + ret = 0; + break; + + case 6: /* mov.[bwl] from memory, possibly with post-increment */ + srcu = (unsigned char __user *)*rm; + if (instruction & 4) + *rm += count; + dst = (unsigned char*) rn; + *(unsigned long*)dst = 0; + +#if !defined(__LITTLE_ENDIAN__) + dst += 4-count; +#endif + if (ma->from(dst, srcu, count)) + goto fetch_fault; + sign_extend(count, dst); + ret = 0; + break; + + case 8: + switch ((instruction&0xFF00)>>8) { + case 0x81: /* mov.w R0,@(disp,Rn) */ + src = (unsigned char *) &regs->regs[0]; +#if !defined(__LITTLE_ENDIAN__) + src += 2; +#endif + dstu = (unsigned char __user *)*rm; /* called Rn in the spec */ + dstu += (instruction & 0x000F) << 1; + + if (ma->to(dstu, src, 2)) + goto fetch_fault; + ret = 0; + break; + + case 0x85: /* mov.w @(disp,Rm),R0 */ + srcu = (unsigned char __user *)*rm; + srcu += (instruction & 0x000F) << 1; + dst = (unsigned char *) &regs->regs[0]; + *(unsigned long *)dst = 0; + +#if !defined(__LITTLE_ENDIAN__) + dst += 2; +#endif + if (ma->from(dst, srcu, 2)) + goto fetch_fault; + sign_extend(2, dst); + ret = 0; + break; + } + break; + + case 9: /* mov.w @(disp,PC),Rn */ + srcu = (unsigned char __user *)regs->pc; + srcu += 4; + srcu += (instruction & 0x00FF) << 1; + dst = (unsigned char *)rn; + *(unsigned long *)dst = 0; + +#if !defined(__LITTLE_ENDIAN__) + dst += 2; +#endif + + if (ma->from(dst, srcu, 2)) + goto fetch_fault; + sign_extend(2, dst); + ret = 0; + break; + + case 0xd: /* mov.l @(disp,PC),Rn */ + srcu = (unsigned char __user *)(regs->pc & ~0x3); + srcu += 4; + srcu += (instruction & 0x00FF) << 2; + dst = (unsigned char *)rn; + *(unsigned long *)dst = 0; + + if (ma->from(dst, srcu, 4)) + goto fetch_fault; + ret = 0; + break; + } + return ret; + + fetch_fault: + /* Argh. Address not only misaligned but also non-existent.
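handle_unaligned_ins() above pulls everything it needs out of the 16-bit opcode itself: bits 11:8 select Rn, bits 7:4 select Rm, and the low two bits encode the transfer size. A standalone sketch of that decoding, using an illustrative mov.l @Rm+,Rn encoding:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t insn = 0x6526;			/* e.g. mov.l @r2+, r5 */
		unsigned rn    = (insn >> 8) & 15;	/* 0x0F00 field */
		unsigned rm    = (insn >> 4) & 15;	/* 0x00F0 field */
		unsigned count = 1u << (insn & 3);	/* 1, 2 or 4 byte access */

		printf("opcode 0x%04x: rn=r%u rm=r%u size=%u\n", insn, rn, rm, count);
		return 0;	/* prints: opcode 0x6526: rn=r5 rm=r2 size=4 */
	}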
+ * Raise an EFAULT and see if it's trapped + */ + die_if_no_fixup("Fault in unaligned fixup", regs, 0); + return -EFAULT; +} + +/* + * emulate the instruction in the delay slot + * - fetches the instruction from PC+2 + */ +static inline int handle_delayslot(struct pt_regs *regs, + insn_size_t old_instruction, + struct mem_access *ma) +{ + insn_size_t instruction; + void __user *addr = (void __user *)(regs->pc + + instruction_size(old_instruction)); + + if (copy_from_user(&instruction, addr, sizeof(instruction))) { + /* the instruction-fetch faulted */ + if (user_mode(regs)) + return -EFAULT; + + /* kernel */ + die("delay-slot-insn faulting in handle_unaligned_delayslot", + regs, 0); + } + + return handle_unaligned_ins(instruction, regs, ma); +} + +/* + * handle an instruction that does an unaligned memory access + * - have to be careful of branch delay-slot instructions that fault + * SH3: + * - if the branch would be taken PC points to the branch + * - if the branch would not be taken, PC points to delay-slot + * SH4: + * - PC always points to delayed branch + * - return 0 if handled, -EFAULT if failed (may not return if in kernel) + */ + +/* Macros to determine offset from current PC for branch instructions */ +/* Explicit type coercion is used to force sign extension where needed */ +#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4) +#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4) + +int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs, + struct mem_access *ma, int expected, + unsigned long address) +{ + u_int rm; + int ret, index; + + /* + * XXX: We can't handle mixed 16/32-bit instructions yet + */ + if (instruction_size(instruction) != 2) + return -EINVAL; + + index = (instruction>>8)&15; /* 0x0F00 */ + rm = regs->regs[index]; + + /* + * Log the unexpected fixups, and then pass them on to perf. + * + * We intentionally don't report the expected cases to perf as + * otherwise the trapped I/O case will skew the results too much + * to be useful. 
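The SH_PC_8BIT_OFFSET/SH_PC_12BIT_OFFSET macros above turn a branch's displacement field into a PC-relative byte offset by letting C sign-extension do the work: the 12-bit field is shifted into the top of a signed 16-bit value and arithmetically shifted back down, giving displacement*2, plus the architectural +4. A quick standalone check (the sample encodings are illustrative, and the macros rely on arithmetic right shift of negative values, just as the kernel does):

	#include <stdio.h>

	#define SH_PC_8BIT_OFFSET(instr)  ((((signed char)(instr))*2) + 4)
	#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

	int main(void)
	{
		/* bt with displacement 0xf0 (-16): offset = -16*2 + 4 = -28 */
		printf("bt  0x89f0 -> %d\n", SH_PC_8BIT_OFFSET(0x89f0));
		/* bra with displacement 0xfff (-1): offset = -1*2 + 4 = 2 */
		printf("bra 0xafff -> %d\n", SH_PC_12BIT_OFFSET(0xafff));
		return 0;
	}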
+ */ + if (!expected) { + unaligned_fixups_notify(current, instruction, regs); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, + regs, address); + } + + ret = -EFAULT; + switch (instruction&0xF000) { + case 0x0000: + if (instruction==0x000B) { + /* rts */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) + regs->pc = regs->pr; + } + else if ((instruction&0x00FF)==0x0023) { + /* braf @Rm */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) + regs->pc += rm + 4; + } + else if ((instruction&0x00FF)==0x0003) { + /* bsrf @Rm */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) { + regs->pr = regs->pc + 4; + regs->pc += rm + 4; + } + } + else { + /* mov.[bwl] to/from memory via r0+rn */ + goto simple; + } + break; + + case 0x1000: /* mov.l Rm,@(disp,Rn) */ + goto simple; + + case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */ + goto simple; + + case 0x4000: + if ((instruction&0x00FF)==0x002B) { + /* jmp @Rm */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) + regs->pc = rm; + } + else if ((instruction&0x00FF)==0x000B) { + /* jsr @Rm */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) { + regs->pr = regs->pc + 4; + regs->pc = rm; + } + } + else { + /* mov.[bwl] to/from memory via r0+rn */ + goto simple; + } + break; + + case 0x5000: /* mov.l @(disp,Rm),Rn */ + goto simple; + + case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */ + goto simple; + + case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */ + switch (instruction&0x0F00) { + case 0x0100: /* mov.w R0,@(disp,Rm) */ + goto simple; + case 0x0500: /* mov.w @(disp,Rm),R0 */ + goto simple; + case 0x0B00: /* bf lab - no delayslot*/ + ret = 0; + break; + case 0x0F00: /* bf/s lab */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) { +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) + if ((regs->sr & 0x00000001) != 0) + regs->pc += 4; /* next after slot */ + else +#endif + regs->pc += SH_PC_8BIT_OFFSET(instruction); + } + break; + case 0x0900: /* bt lab - no delayslot */ + ret = 0; + break; + case 0x0D00: /* bt/s lab */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) { +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) + if ((regs->sr & 0x00000001) == 0) + regs->pc += 4; /* next after slot */ + else +#endif + regs->pc += SH_PC_8BIT_OFFSET(instruction); + } + break; + } + break; + + case 0x9000: /* mov.w @(disp,Rm),Rn */ + goto simple; + + case 0xA000: /* bra label */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) + regs->pc += SH_PC_12BIT_OFFSET(instruction); + break; + + case 0xB000: /* bsr label */ + ret = handle_delayslot(regs, instruction, ma); + if (ret==0) { + regs->pr = regs->pc + 4; + regs->pc += SH_PC_12BIT_OFFSET(instruction); + } + break; + + case 0xD000: /* mov.l @(disp,Rm),Rn */ + goto simple; + } + return ret; + + /* handle non-delay-slot instruction */ + simple: + ret = handle_unaligned_ins(instruction, regs, ma); + if (ret==0) + regs->pc += instruction_size(instruction); + return ret; +} + +/* + * Handle various address error exceptions: + * - instruction address error: + * misaligned PC + * PC >= 0x80000000 in user mode + * - data address error (read and write) + * misaligned data access + * access to >= 0x80000000 is user mode + * Unfortuntaly we can't distinguish between instruction address error + * and data address errors caused by read accesses. 
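The handler above is the path ultimately taken when an SH userspace task performs a misaligned access: depending on the configured alignment policy (UM_FIXUP vs UM_SIGNAL in do_address_error() below), the access is either emulated as shown here or the task receives SIGBUS. A minimal userspace snippet that provokes such an access (on CPUs with hardware support for unaligned loads it simply succeeds):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		static const uint32_t words[2] = { 0x44332211, 0x88776655 };
		const unsigned char *p = (const unsigned char *)words;
		uint32_t v;

		/* Misaligned 32-bit load; on SH this enters do_address_error(). */
		v = *(const volatile uint32_t *)(p + 1);

		printf("read 0x%08x from an unaligned address\n", (unsigned)v);
		return 0;
	}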
+ */ +asmlinkage void do_address_error(struct pt_regs *regs, + unsigned long writeaccess, + unsigned long address) +{ + unsigned long error_code = 0; + insn_size_t instruction; + int tmp; + + /* Intentional ifdef */ +#ifdef CONFIG_CPU_HAS_SR_RB + error_code = lookup_exception_vector(); +#endif + + if (user_mode(regs)) { + int si_code = BUS_ADRERR; + unsigned int user_action; + + local_irq_enable(); + inc_unaligned_user_access(); + + if (copy_from_user(&instruction, (insn_size_t __user *)(regs->pc & ~1), + sizeof(instruction))) { + goto uspace_segv; + } + + /* shout about userspace fixups */ + unaligned_fixups_notify(current, instruction, regs); + + user_action = unaligned_user_action(); + if (user_action & UM_FIXUP) + goto fixup; + if (user_action & UM_SIGNAL) + goto uspace_segv; + else { + /* ignore */ + regs->pc += instruction_size(instruction); + return; + } + +fixup: + /* bad PC is not something we can fix */ + if (regs->pc & 1) { + si_code = BUS_ADRALN; + goto uspace_segv; + } + + tmp = handle_unaligned_access(instruction, regs, + &user_mem_access, 0, + address); + + if (tmp == 0) + return; /* sorted */ +uspace_segv: + printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned " + "access (PC %lx PR %lx)\n", current->comm, regs->pc, + regs->pr); + + force_sig_fault(SIGBUS, si_code, (void __user *)address); + } else { + inc_unaligned_kernel_access(); + + if (regs->pc & 1) + die("unaligned program counter", regs, error_code); + + if (copy_from_kernel_nofault(&instruction, (void *)(regs->pc), + sizeof(instruction))) { + /* Argh. Fault on the instruction itself. + This should never happen non-SMP + */ + die("insn faulting in do_address_error", regs, 0); + } + + unaligned_fixups_notify(current, instruction, regs); + + handle_unaligned_access(instruction, regs, &kernel_mem_access, + 0, address); + } +} + +#ifdef CONFIG_SH_DSP +/* + * SH-DSP support gerg@snapgear.com. + */ +int is_dsp_inst(struct pt_regs *regs) +{ + unsigned short inst = 0; + + /* + * Safe guard if DSP mode is already enabled or we're lacking + * the DSP altogether. + */ + if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP)) + return 0; + + get_user(inst, ((unsigned short *) regs->pc)); + + inst &= 0xf000; + + /* Check for any type of DSP or support instruction */ + if ((inst == 0xf000) || (inst == 0x4000)) + return 1; + + return 0; +} +#else +#define is_dsp_inst(regs) (0) +#endif /* CONFIG_SH_DSP */ + +#ifdef CONFIG_CPU_SH2A +asmlinkage void do_divide_error(unsigned long r4) +{ + int code; + + switch (r4) { + case TRAP_DIVZERO_ERROR: + code = FPE_INTDIV; + break; + case TRAP_DIVOVF_ERROR: + code = FPE_INTOVF; + break; + default: + /* Let gcc know unhandled cases don't make it past here */ + return; + } + force_sig_fault(SIGFPE, code, NULL); +} +#endif + +asmlinkage void do_reserved_inst(void) +{ + struct pt_regs *regs = current_pt_regs(); + unsigned long error_code; + +#ifdef CONFIG_SH_FPU_EMU + unsigned short inst = 0; + int err; + + get_user(inst, (unsigned short __user *)regs->pc); + + err = do_fpu_inst(inst, regs); + if (!err) { + regs->pc += instruction_size(inst); + return; + } + /* not a FPU inst. */ +#endif + +#ifdef CONFIG_SH_DSP + /* Check if it's a DSP instruction */ + if (is_dsp_inst(regs)) { + /* Enable DSP mode, and restart instruction. 
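On SH-2A, the divide-error vectors above end up in do_divide_error(), which delivers SIGFPE with si_code FPE_INTDIV or FPE_INTOVF. From userspace that is observable with an SA_SIGINFO handler; a small sketch (handler and variable names are illustrative):

	#include <signal.h>
	#include <string.h>
	#include <unistd.h>

	static void fpe_handler(int sig, siginfo_t *info, void *ctx)
	{
		/* do_divide_error() reports FPE_INTDIV (or FPE_INTOVF on overflow). */
		const char *msg = (info->si_code == FPE_INTDIV) ?
			"caught SIGFPE: FPE_INTDIV\n" : "caught SIGFPE\n";

		write(STDERR_FILENO, msg, strlen(msg));
		_exit(1);
	}

	int main(void)
	{
		struct sigaction sa;
		volatile int zero = 0;

		memset(&sa, 0, sizeof(sa));
		sa.sa_sigaction = fpe_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGFPE, &sa, NULL);

		return 10 / zero;	/* integer division by zero */
	}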
*/ + regs->sr |= SR_DSP; + /* Save DSP mode */ + current->thread.dsp_status.status |= SR_DSP; + return; + } +#endif + + error_code = lookup_exception_vector(); + + local_irq_enable(); + force_sig(SIGILL); + die_if_no_fixup("reserved instruction", regs, error_code); +} + +#ifdef CONFIG_SH_FPU_EMU +static int emulate_branch(unsigned short inst, struct pt_regs *regs) +{ + /* + * bfs: 8fxx: PC+=d*2+4; + * bts: 8dxx: PC+=d*2+4; + * bra: axxx: PC+=D*2+4; + * bsr: bxxx: PC+=D*2+4 after PR=PC+4; + * braf:0x23: PC+=Rn*2+4; + * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4; + * jmp: 4x2b: PC=Rn; + * jsr: 4x0b: PC=Rn after PR=PC+4; + * rts: 000b: PC=PR; + */ + if (((inst & 0xf000) == 0xb000) || /* bsr */ + ((inst & 0xf0ff) == 0x0003) || /* bsrf */ + ((inst & 0xf0ff) == 0x400b)) /* jsr */ + regs->pr = regs->pc + 4; + + if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */ + regs->pc += SH_PC_8BIT_OFFSET(inst); + return 0; + } + + if ((inst & 0xe000) == 0xa000) { /* bra, bsr */ + regs->pc += SH_PC_12BIT_OFFSET(inst); + return 0; + } + + if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */ + regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4; + return 0; + } + + if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */ + regs->pc = regs->regs[(inst & 0x0f00) >> 8]; + return 0; + } + + if ((inst & 0xffff) == 0x000b) { /* rts */ + regs->pc = regs->pr; + return 0; + } + + return 1; +} +#endif + +asmlinkage void do_illegal_slot_inst(void) +{ + struct pt_regs *regs = current_pt_regs(); + unsigned long inst; + + if (kprobe_handle_illslot(regs->pc) == 0) + return; + +#ifdef CONFIG_SH_FPU_EMU + get_user(inst, (unsigned short __user *)regs->pc + 1); + if (!do_fpu_inst(inst, regs)) { + get_user(inst, (unsigned short __user *)regs->pc); + if (!emulate_branch(inst, regs)) + return; + /* fault in branch.*/ + } + /* not a FPU inst. */ +#endif + + inst = lookup_exception_vector(); + + local_irq_enable(); + force_sig(SIGILL); + die_if_no_fixup("illegal slot instruction", regs, inst); +} + +asmlinkage void do_exception_error(void) +{ + long ex; + + ex = lookup_exception_vector(); + die_if_kernel("exception", current_pt_regs(), ex); +} + +void per_cpu_trap_init(void) +{ + extern void *vbr_base; + + /* NOTE: The VBR value should be at P1 + (or P2, virtural "fixed" address space). + It's definitely should not in physical address. */ + + asm volatile("ldc %0, vbr" + : /* no output */ + : "r" (&vbr_base) + : "memory"); + + /* disable exception blocking now when the vbr has been setup */ + clear_bl_bit(); +} + +void *set_exception_table_vec(unsigned int vec, void *handler) +{ + extern void *exception_handling_table[]; + void *old_handler; + + old_handler = exception_handling_table[vec]; + exception_handling_table[vec] = handler; + return old_handler; +} + +void __init trap_init(void) +{ + set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst); + set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst); + +#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \ + defined(CONFIG_SH_FPU_EMU) + /* + * For SH-4 lacking an FPU, treat floating point instructions as + * reserved. They'll be handled in the math-emu case, or faulted on + * otherwise. 
+ */ + set_exception_table_evt(0x800, do_reserved_inst); + set_exception_table_evt(0x820, do_illegal_slot_inst); +#elif defined(CONFIG_SH_FPU) + set_exception_table_evt(0x800, fpu_state_restore_trap_handler); + set_exception_table_evt(0x820, fpu_state_restore_trap_handler); +#endif + +#ifdef CONFIG_CPU_SH2 + set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler); +#endif +#ifdef CONFIG_CPU_SH2A + set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error); + set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error); +#ifdef CONFIG_SH_FPU + set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler); +#endif +#endif + +#ifdef TRAP_UBC + set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler); +#endif +} diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c new file mode 100644 index 000000000..7a54b72dd --- /dev/null +++ b/arch/sh/kernel/unwinder.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2009 Matt Fleming + * + * Based, in part, on kernel/time/clocksource.c. + * + * This file provides arbitration code for stack unwinders. + * + * Multiple stack unwinders can be available on a system, usually with + * the most accurate unwinder being the currently active one. + */ +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/module.h> +#include <asm/unwinder.h> +#include <linux/atomic.h> + +/* + * This is the most basic stack unwinder an architecture can + * provide. For architectures without reliable frame pointers, e.g. + * RISC CPUs, it can be implemented by looking through the stack for + * addresses that lie within the kernel text section. + * + * Other CPUs, e.g. x86, can use their frame pointer register to + * construct more accurate stack traces. + */ +static struct list_head unwinder_list; +static struct unwinder stack_reader = { + .name = "stack-reader", + .dump = stack_reader_dump, + .rating = 50, + .list = { + .next = &unwinder_list, + .prev = &unwinder_list, + }, +}; + +/* + * "curr_unwinder" points to the stack unwinder currently in use. This + * is the unwinder with the highest rating. + * + * "unwinder_list" is a linked-list of all available unwinders, sorted + * by rating. + * + * All modifications of "curr_unwinder" and "unwinder_list" must be + * performed whilst holding "unwinder_lock". + */ +static struct unwinder *curr_unwinder = &stack_reader; + +static struct list_head unwinder_list = { + .next = &stack_reader.list, + .prev = &stack_reader.list, +}; + +static DEFINE_SPINLOCK(unwinder_lock); + +/** + * select_unwinder - Select the best registered stack unwinder. + * + * Private function. Must hold unwinder_lock when called. + * + * Select the stack unwinder with the best rating. This is useful for + * setting up curr_unwinder. + */ +static struct unwinder *select_unwinder(void) +{ + struct unwinder *best; + + if (list_empty(&unwinder_list)) + return NULL; + + best = list_entry(unwinder_list.next, struct unwinder, list); + if (best == curr_unwinder) + return NULL; + + return best; +} + +/* + * Enqueue the stack unwinder sorted by rating. 
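The arbitration above keeps unwinder_list ordered by ->rating and always dumps through the highest-rated entry, so a better unwinder only has to describe itself and call unwinder_register() (defined a little further down in this file). A hedged sketch of such a registration; the example unwinder, its rating and its empty dump callback are placeholders, not part of this patch:

	#include <linux/init.h>
	#include <linux/ptrace.h>
	#include <linux/sched.h>
	#include <asm/unwinder.h>

	/* Placeholder dump callback; a real unwinder reports frames via 'ops'. */
	static void example_dump(struct task_struct *task, struct pt_regs *regs,
				 unsigned long *sp, const struct stacktrace_ops *ops,
				 void *data)
	{
	}

	static struct unwinder example_unwinder = {
		.name	= "example-unwinder",
		.dump	= example_dump,
		.rating	= 100,		/* anything above the stack reader's 50 wins */
	};

	static int __init example_unwinder_init(void)
	{
		return unwinder_register(&example_unwinder);
	}
	early_initcall(example_unwinder_init);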
+ */ +static int unwinder_enqueue(struct unwinder *ops) +{ + struct list_head *tmp, *entry = &unwinder_list; + + list_for_each(tmp, &unwinder_list) { + struct unwinder *o; + + o = list_entry(tmp, struct unwinder, list); + if (o == ops) + return -EBUSY; + /* Keep track of the place, where to insert */ + if (o->rating >= ops->rating) + entry = tmp; + } + list_add(&ops->list, entry); + + return 0; +} + +/** + * unwinder_register - Used to install new stack unwinder + * @u: unwinder to be registered + * + * Install the new stack unwinder on the unwinder list, which is sorted + * by rating. + * + * Returns -EBUSY if registration fails, zero otherwise. + */ +int unwinder_register(struct unwinder *u) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&unwinder_lock, flags); + ret = unwinder_enqueue(u); + if (!ret) + curr_unwinder = select_unwinder(); + spin_unlock_irqrestore(&unwinder_lock, flags); + + return ret; +} + +int unwinder_faulted = 0; + +/* + * Unwind the call stack and pass information to the stacktrace_ops + * functions. Also handle the case where we need to switch to a new + * stack dumper because the current one faulted unexpectedly. + */ +void unwind_stack(struct task_struct *task, struct pt_regs *regs, + unsigned long *sp, const struct stacktrace_ops *ops, + void *data) +{ + unsigned long flags; + + /* + * The problem with unwinders with high ratings is that they are + * inherently more complicated than the simple ones with lower + * ratings. We are therefore more likely to fault in the + * complicated ones, e.g. hitting BUG()s. If we fault in the + * code for the current stack unwinder we try to downgrade to + * one with a lower rating. + * + * Hopefully this will give us a semi-reliable stacktrace so we + * can diagnose why curr_unwinder->dump() faulted. + */ + if (unwinder_faulted) { + spin_lock_irqsave(&unwinder_lock, flags); + + /* Make sure no one beat us to changing the unwinder */ + if (unwinder_faulted && !list_is_singular(&unwinder_list)) { + list_del(&curr_unwinder->list); + curr_unwinder = select_unwinder(); + + unwinder_faulted = 0; + } + + spin_unlock_irqrestore(&unwinder_lock, flags); + } + + curr_unwinder->dump(task, regs, sp, ops, data); +} +EXPORT_SYMBOL_GPL(unwind_stack); diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S new file mode 100644 index 000000000..b6276a352 --- /dev/null +++ b/arch/sh/kernel/vmlinux.lds.S @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ld script to make SuperH Linux kernel + * Written by Niibe Yutaka and Paul Mundt + */ +OUTPUT_ARCH(sh) +#define RUNTIME_DISCARD_EXIT +#include <asm/thread_info.h> +#include <asm/cache.h> +#include <asm/vmlinux.lds.h> + +#ifdef CONFIG_PMB + #define MEMORY_OFFSET 0 +#else + #define MEMORY_OFFSET __MEMORY_START +#endif + +ENTRY(_start) +SECTIONS +{ + . = PAGE_OFFSET + MEMORY_OFFSET + PHYSICAL_OFFSET + CONFIG_ZERO_PAGE_OFFSET; + + _text = .; /* Text and read-only data */ + + .empty_zero_page : AT(ADDR(.empty_zero_page)) { + *(.empty_zero_page) + } = 0 + + .text : AT(ADDR(.text)) { + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT + CPUIDLE_TEXT + LOCK_TEXT + KPROBES_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) + _etext = .; /* End of text section */ + } = 0x0009 + + EXCEPTION_TABLE(16) + + _sdata = .; + RO_DATA(PAGE_SIZE) + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + _edata = .; + + DWARF_EH_FRAME + + . = ALIGN(PAGE_SIZE); /* Init code and data */ + __init_begin = .; + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + + . 
= ALIGN(4); + .machvec.init : AT(ADDR(.machvec.init)) { + __machvec_start = .; + *(.machvec.init) + __machvec_end = .; + } + + PERCPU_SECTION(L1_CACHE_BYTES) + + /* + * .exit.text is discarded at runtime, not link time, to deal with + * references from __bug_table + */ + .exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT } + .exit.data : AT(ADDR(.exit.data)) { EXIT_DATA } + + . = ALIGN(PAGE_SIZE); + __init_end = .; + BSS_SECTION(0, PAGE_SIZE, 4) + _end = . ; + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/sh/kernel/vsyscall/.gitignore b/arch/sh/kernel/vsyscall/.gitignore new file mode 100644 index 000000000..530a3031a --- /dev/null +++ b/arch/sh/kernel/vsyscall/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +vsyscall.lds diff --git a/arch/sh/kernel/vsyscall/Makefile b/arch/sh/kernel/vsyscall/Makefile new file mode 100644 index 000000000..6e8664448 --- /dev/null +++ b/arch/sh/kernel/vsyscall/Makefile @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += vsyscall.o vsyscall-syscall.o vsyscall-syms.o + +$(obj)/vsyscall-syscall.o: \ + $(foreach F,trapa,$(obj)/vsyscall-$F.so) + +# Teach kbuild about targets +targets += $(foreach F,trapa,vsyscall-$F.o vsyscall-$F.so) +targets += vsyscall-note.o vsyscall.lds vsyscall-dummy.o + +# The DSO images are built using a special linker script +quiet_cmd_syscall = SYSCALL $@ + cmd_syscall = $(CC) -nostdlib $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@ + +export CPPFLAGS_vsyscall.lds += -P -C -Ush + +vsyscall-flags = -shared -s -Wl,-soname=linux-gate.so.1 -Wl,--hash-style=sysv + +SYSCFLAGS_vsyscall-trapa.so = $(vsyscall-flags) + +$(obj)/vsyscall-trapa.so: \ +$(obj)/vsyscall-%.so: $(src)/vsyscall.lds $(obj)/vsyscall-%.o FORCE + $(call if_changed,syscall) + +# We also create a special relocatable object that should mirror the symbol +# table and layout of the linked DSO. With ld -R we can then refer to +# these symbols in the kernel code rather than hand-coded addresses. +SYSCFLAGS_vsyscall-dummy.o = -r +$(obj)/vsyscall-dummy.o: $(src)/vsyscall.lds \ + $(obj)/vsyscall-trapa.o $(obj)/vsyscall-note.o FORCE + $(call if_changed,syscall) + +LDFLAGS_vsyscall-syms.o := -r -R +$(obj)/vsyscall-syms.o: $(obj)/vsyscall-dummy.o FORCE + $(call if_changed,ld) diff --git a/arch/sh/kernel/vsyscall/vsyscall-note.S b/arch/sh/kernel/vsyscall/vsyscall-note.S new file mode 100644 index 000000000..bb350918b --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall-note.S @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. + * Here we can supply some information useful to userland. 
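The macros that follow lay out a standard ELF note: three 32-bit words (name size, descriptor size, type), then the NUL-terminated vendor name padded to 4 bytes, then the descriptor, here LINUX_VERSION_CODE. Viewed from C this is just <elf.h>'s Elf32_Nhdr; a small standalone sketch that builds and re-parses an equivalent note (the sample version value is made up):

	#include <elf.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned char note[24] = { 0 };
		Elf32_Nhdr nhdr = { .n_namesz = 6, .n_descsz = 4, .n_type = 0 };
		uint32_t version = 0x060106;	/* made-up LINUX_VERSION_CODE */
		uint32_t parsed;

		/* Build: header, "Linux\0" padded to 8 bytes, 4-byte descriptor. */
		memcpy(note, &nhdr, sizeof(nhdr));
		memcpy(note + sizeof(nhdr), "Linux", 6);
		memcpy(note + sizeof(nhdr) + 8, &version, sizeof(version));

		/* Parse it back the way a vDSO consumer would. */
		memcpy(&nhdr, note, sizeof(nhdr));
		memcpy(&parsed, note + sizeof(nhdr) + ((nhdr.n_namesz + 3) & ~3u), 4);
		printf("note '%s' type %u, desc %#x\n",
		       (const char *)(note + sizeof(nhdr)), nhdr.n_type, parsed);
		return 0;
	}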
+ */ + +#include <linux/uts.h> +#include <linux/version.h> + +#define ASM_ELF_NOTE_BEGIN(name, flags, vendor, type) \ + .section name, flags; \ + .balign 4; \ + .long 1f - 0f; /* name length */ \ + .long 3f - 2f; /* data length */ \ + .long type; /* note type */ \ +0: .asciz vendor; /* vendor name */ \ +1: .balign 4; \ +2: + +#define ASM_ELF_NOTE_END \ +3: .balign 4; /* pad out section */ \ + .previous + + ASM_ELF_NOTE_BEGIN(".note.kernel-version", "a", UTS_SYSNAME, 0) + .long LINUX_VERSION_CODE + ASM_ELF_NOTE_END diff --git a/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S new file mode 100644 index 000000000..bece5fa73 --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall-sigreturn.S @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <asm/unistd.h> + + .text + .balign 32 + .globl __kernel_sigreturn + .type __kernel_sigreturn,@function +__kernel_sigreturn: +.LSTART_sigreturn: + mov.w 1f, r3 + trapa #0x10 + or r0, r0 + or r0, r0 + or r0, r0 + or r0, r0 + or r0, r0 + +1: .short __NR_sigreturn +.LEND_sigreturn: + .size __kernel_sigreturn,.-.LSTART_sigreturn + + .balign 32 + .globl __kernel_rt_sigreturn + .type __kernel_rt_sigreturn,@function +__kernel_rt_sigreturn: +.LSTART_rt_sigreturn: + mov.w 1f, r3 + trapa #0x10 + or r0, r0 + or r0, r0 + or r0, r0 + or r0, r0 + or r0, r0 + +1: .short __NR_rt_sigreturn +.LEND_rt_sigreturn: + .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn + .previous + + .section .eh_frame,"a",@progbits +.LCIE1: + .ualong .LCIE1_end - .LCIE1_start +.LCIE1_start: + .ualong 0 /* CIE ID */ + .byte 0x1 /* Version number */ + .string "zRS" /* NUL-terminated augmentation string */ + .uleb128 0x1 /* Code alignment factor */ + .sleb128 -4 /* Data alignment factor */ + .byte 0x11 /* Return address register column */ + .uleb128 0x1 /* Augmentation length and data */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0xc, 0xf, 0x0 /* DW_CFA_def_cfa: r15 ofs 0 */ + + .align 2 +.LCIE1_end: + + .ualong .LFDE0_end-.LFDE0_start /* Length FDE0 */ +.LFDE0_start: + .ualong .LFDE0_start-.LCIE1 /* CIE pointer */ + .ualong .LSTART_sigreturn-. /* PC-relative start address */ + .ualong .LEND_sigreturn-.LSTART_sigreturn + .uleb128 0 /* Augmentation */ + .align 2 +.LFDE0_end: + + .ualong .LFDE1_end-.LFDE1_start /* Length FDE1 */ +.LFDE1_start: + .ualong .LFDE1_start-.LCIE1 /* CIE pointer */ + .ualong .LSTART_rt_sigreturn-. 
/* PC-relative start address */ + .ualong .LEND_rt_sigreturn-.LSTART_rt_sigreturn + .uleb128 0 /* Augmentation */ + .align 2 +.LFDE1_end: + + .previous diff --git a/arch/sh/kernel/vsyscall/vsyscall-syscall.S b/arch/sh/kernel/vsyscall/vsyscall-syscall.S new file mode 100644 index 000000000..2aeaa2dde --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall-syscall.S @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/init.h> + +__INITDATA + + .globl vsyscall_trapa_start, vsyscall_trapa_end +vsyscall_trapa_start: + .incbin "arch/sh/kernel/vsyscall/vsyscall-trapa.so" +vsyscall_trapa_end: + +__FINIT diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S new file mode 100644 index 000000000..854ea3235 --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + .text + .globl __kernel_vsyscall + .type __kernel_vsyscall,@function +__kernel_vsyscall: +.LSTART_vsyscall: + trapa #0x10 + nop +.LEND_vsyscall: + .size __kernel_vsyscall,.-.LSTART_vsyscall + .previous + + .section .eh_frame,"a",@progbits +.LCIE: + .ualong .LCIE_end - .LCIE_start +.LCIE_start: + .ualong 0 /* CIE ID */ + .byte 0x1 /* Version number */ + .string "zR" /* NUL-terminated augmentation string */ + .uleb128 0x1 /* Code alignment factor */ + .sleb128 -4 /* Data alignment factor */ + .byte 0x11 /* Return address register column */ + .uleb128 0x1 /* Augmentation length and data */ + .byte 0x1b /* DW_EH_PE_pcrel | DW_EH_PE_sdata4. */ + .byte 0xc,0xf,0x0 /* DW_CFA_def_cfa: r15 ofs 0 */ + .align 2 +.LCIE_end: + + .ualong .LFDE_end-.LFDE_start /* Length FDE */ +.LFDE_start: + .ualong .LFDE_start-.LCIE /* CIE pointer */ + .ualong .LSTART_vsyscall-. /* PC-relative start address */ + .ualong .LEND_vsyscall-.LSTART_vsyscall + .uleb128 0 /* Augmentation */ + .align 2 +.LFDE_end: + .previous + +/* Get the common code for the sigreturn entry points */ +#include "vsyscall-sigreturn.S" diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c new file mode 100644 index 000000000..1bd85a694 --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sh/kernel/vsyscall/vsyscall.c + * + * Copyright (C) 2006 Paul Mundt + * + * vDSO randomization + * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar + */ +#include <linux/mm.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/elf.h> +#include <linux/sched.h> +#include <linux/err.h> + +/* + * Should the kernel map a VDSO page into processes and pass its + * address down to glibc upon exec()? + */ +unsigned int __read_mostly vdso_enabled = 1; +EXPORT_SYMBOL_GPL(vdso_enabled); + +static int __init vdso_setup(char *s) +{ + vdso_enabled = simple_strtoul(s, NULL, 0); + return 1; +} +__setup("vdso=", vdso_setup); + +/* + * These symbols are defined by vsyscall.o to mark the bounds + * of the ELF DSO images included therein. 
+ */ +extern const char vsyscall_trapa_start, vsyscall_trapa_end; +static struct page *syscall_pages[1]; + +int __init vsyscall_init(void) +{ + void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC); + syscall_pages[0] = virt_to_page(syscall_page); + + /* + * XXX: Map this page to a fixmap entry if we get around + * to adding the page to ELF core dumps + */ + + memcpy(syscall_page, + &vsyscall_trapa_start, + &vsyscall_trapa_end - &vsyscall_trapa_start); + + return 0; +} + +/* Setup a VMA at program startup for the vsyscall page */ +int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long addr; + int ret; + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + ret = install_special_mapping(mm, addr, PAGE_SIZE, + VM_READ | VM_EXEC | + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, + syscall_pages); + if (unlikely(ret)) + goto up_fail; + + current->mm->context.vdso = (void *)addr; + +up_fail: + mmap_write_unlock(mm); + return ret; +} + +const char *arch_vma_name(struct vm_area_struct *vma) +{ + if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) + return "[vdso]"; + + return NULL; +} diff --git a/arch/sh/kernel/vsyscall/vsyscall.lds.S b/arch/sh/kernel/vsyscall/vsyscall.lds.S new file mode 100644 index 000000000..e3582e03c --- /dev/null +++ b/arch/sh/kernel/vsyscall/vsyscall.lds.S @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linker script for vsyscall DSO. The vsyscall page is an ELF shared + * object prelinked to its virtual address, and with only one read-only + * segment (that fits in one page). This script controls its layout. + */ +#include <asm/asm-offsets.h> + +#ifdef CONFIG_CPU_LITTLE_ENDIAN +OUTPUT_FORMAT("elf32-sh-linux", "elf32-sh-linux", "elf32-sh-linux") +#else +OUTPUT_FORMAT("elf32-shbig-linux", "elf32-shbig-linux", "elf32-shbig-linux") +#endif +OUTPUT_ARCH(sh) + +/* The ELF entry point can be used to set the AT_SYSINFO value. */ +ENTRY(__kernel_vsyscall); + +SECTIONS +{ + . = SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + /* + * This linker script is used both with -r and with -shared. + * For the layouts to match, we need to skip more than enough + * space for the dynamic symbol table et al. If this amount + * is insufficient, ld -shared will barf. Just increase it here. + */ + . = 0x400; + + .text : { *(.text) } :text =0x90909090 + .note : { *(.note.*) } :text :note + .eh_frame_hdr : { *(.eh_frame_hdr ) } :text :eh_frame_hdr + .eh_frame : { + KEEP (*(.eh_frame)) + LONG (0) + } :text + .dynamic : { *(.dynamic) } :text :dynamic + .useless : { + *(.got.plt) *(.got) + *(.data .data.* .gnu.linkonce.d.*) + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + } :text +} + +/* + * Very old versions of ld do not recognize this name token; use the constant. + */ +#define PT_GNU_EH_FRAME 0x6474e550 + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. 
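arch_setup_additional_pages() above maps the single vsyscall page into each new process and arch_vma_name() makes it show up as "[vdso]" in /proc/<pid>/maps; the dynamic linker is expected to find it through the auxiliary vector. A small userspace check, assuming the kernel advertises the page via AT_SYSINFO_EHDR on this configuration:

	#include <elf.h>
	#include <stdio.h>
	#include <sys/auxv.h>

	int main(void)
	{
		unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

		if (vdso)
			printf("[vdso] ELF image at %#lx (compare /proc/self/maps)\n", vdso);
		else
			printf("no vDSO advertised in the auxiliary vector\n");

		return 0;
	}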
+ */ +PHDRS +{ + text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_2.6 { + global: + __kernel_vsyscall; + __kernel_sigreturn; + __kernel_rt_sigreturn; + + local: *; + }; +}