path: root/arch/mips/lantiq
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/mips/lantiq
parent    Initial commit. (diff)
Adding upstream version 5.10.209. (tags: upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/lantiq')
-rw-r--r--  arch/mips/lantiq/Kconfig            |  55
-rw-r--r--  arch/mips/lantiq/Makefile           |  10
-rw-r--r--  arch/mips/lantiq/Platform           |   8
-rw-r--r--  arch/mips/lantiq/clk.c              | 201
-rw-r--r--  arch/mips/lantiq/clk.h              |  94
-rw-r--r--  arch/mips/lantiq/early_printk.c     |  31
-rw-r--r--  arch/mips/lantiq/falcon/Makefile    |   2
-rw-r--r--  arch/mips/lantiq/falcon/prom.c      |  90
-rw-r--r--  arch/mips/lantiq/falcon/reset.c     |  75
-rw-r--r--  arch/mips/lantiq/falcon/sysctrl.c   | 265
-rw-r--r--  arch/mips/lantiq/irq.c              | 433
-rw-r--r--  arch/mips/lantiq/prom.c             | 107
-rw-r--r--  arch/mips/lantiq/prom.h             |  27
-rw-r--r--  arch/mips/lantiq/xway/Makefile      |   4
-rw-r--r--  arch/mips/lantiq/xway/clk.c         | 351
-rw-r--r--  arch/mips/lantiq/xway/dcdc.c        |  60
-rw-r--r--  arch/mips/lantiq/xway/dma.c         | 271
-rw-r--r--  arch/mips/lantiq/xway/gptu.c        | 208
-rw-r--r--  arch/mips/lantiq/xway/prom.c        | 144
-rw-r--r--  arch/mips/lantiq/xway/sysctrl.c     | 588
-rw-r--r--  arch/mips/lantiq/xway/vmmc.c        |  64
21 files changed, 3088 insertions, 0 deletions
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
new file mode 100644
index 000000000..6c6802e48
--- /dev/null
+++ b/arch/mips/lantiq/Kconfig
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+if LANTIQ
+
+config SOC_TYPE_XWAY
+ bool
+ select PINCTRL_XWAY
+ default n
+
+choice
+ prompt "SoC Type"
+ default SOC_XWAY
+
+config SOC_AMAZON_SE
+ bool "Amazon SE"
+ select SOC_TYPE_XWAY
+ select MFD_SYSCON
+ select MFD_CORE
+
+config SOC_XWAY
+ bool "XWAY"
+ select SOC_TYPE_XWAY
+ select HAVE_PCI
+ select MFD_SYSCON
+ select MFD_CORE
+
+config SOC_FALCON
+ bool "FALCON"
+ select PINCTRL_FALCON
+
+endchoice
+
+choice
+ prompt "Built-in device tree"
+ help
+ Legacy bootloaders do not pass a DTB pointer to the kernel, so
+ if a "wrapper" is not being used, the kernel will need to include
+ a device tree that matches the target board.
+
+ The builtin DTB will only be used if the firmware does not supply
+ a valid DTB.
+
+config LANTIQ_DT_NONE
+ bool "None"
+
+config DT_EASY50712
+ bool "Easy50712"
+ depends on SOC_XWAY
+ select BUILTIN_DTB
+endchoice
+
+config PCI_LANTIQ
+ bool "PCI Support"
+ depends on SOC_XWAY && PCI
+
+endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
new file mode 100644
index 000000000..e7234ca09
--- /dev/null
+++ b/arch/mips/lantiq/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2010 John Crispin <john@phrozen.org>
+#
+
+obj-y := irq.o clk.o prom.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
+obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
new file mode 100644
index 000000000..0bc9c0fbd
--- /dev/null
+++ b/arch/mips/lantiq/Platform
@@ -0,0 +1,8 @@
+#
+# Lantiq
+#
+
+cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
+load-$(CONFIG_LANTIQ) = 0xffffffff80002000
+cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
+cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
new file mode 100644
index 000000000..2d5a0bcb0
--- /dev/null
+++ b/arch/mips/lantiq/clk.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/list.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "clk.h"
+#include "prom.h"
+
+/* lantiq socs have 4 static clocks (cpu, fpi, io and ppe) */
+static struct clk cpu_clk_generic[4];
+
+void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe)
+{
+ cpu_clk_generic[0].rate = cpu;
+ cpu_clk_generic[1].rate = fpi;
+ cpu_clk_generic[2].rate = io;
+ cpu_clk_generic[3].rate = ppe;
+}
+
+struct clk *clk_get_cpu(void)
+{
+ return &cpu_clk_generic[0];
+}
+
+struct clk *clk_get_fpi(void)
+{
+ return &cpu_clk_generic[1];
+}
+EXPORT_SYMBOL_GPL(clk_get_fpi);
+
+struct clk *clk_get_io(void)
+{
+ return &cpu_clk_generic[2];
+}
+EXPORT_SYMBOL_GPL(clk_get_io);
+
+struct clk *clk_get_ppe(void)
+{
+ return &cpu_clk_generic[3];
+}
+EXPORT_SYMBOL_GPL(clk_get_ppe);
+
+static inline int clk_good(struct clk *clk)
+{
+ return clk && !IS_ERR(clk);
+}
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
+
+ if (clk->rate != 0)
+ return clk->rate;
+
+ if (clk->get_rate != NULL)
+ return clk->get_rate();
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ pr_err("clk %s.%s: trying to set invalid rate %ld\n",
+ clk->cl.dev_id, clk->cl.con_id, rate);
+ return -1;
+ }
+ }
+ clk->rate = rate;
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ if (unlikely(!clk_good(clk)))
+ return 0;
+ if (clk->rates && *clk->rates) {
+ unsigned long *r = clk->rates;
+
+ while (*r && (*r != rate))
+ r++;
+ if (!*r) {
+ return clk->rate;
+ }
+ }
+ return rate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_enable(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->enable)
+ return clk->enable(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->disable)
+ clk->disable(clk);
+}
+EXPORT_SYMBOL(clk_disable);
+
+int clk_activate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return -1;
+
+ if (clk->activate)
+ return clk->activate(clk);
+
+ return -1;
+}
+EXPORT_SYMBOL(clk_activate);
+
+void clk_deactivate(struct clk *clk)
+{
+ if (unlikely(!clk_good(clk)))
+ return;
+
+ if (clk->deactivate)
+ clk->deactivate(clk);
+}
+EXPORT_SYMBOL(clk_deactivate);
+
+struct clk *clk_get_parent(struct clk *clk)
+{
+ return NULL;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+static inline u32 get_counter_resolution(void)
+{
+ u32 res;
+
+ __asm__ __volatile__(
+ ".set push\n"
+ ".set mips32r2\n"
+ "rdhwr %0, $3\n"
+ ".set pop\n"
+ : "=&r" (res)
+ : /* no input */
+ : "memory");
+
+ return res;
+}
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ ltq_soc_init();
+
+ clk = clk_get_cpu();
+ mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
+ write_c0_compare(read_c0_count());
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+ clk_put(clk);
+}
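
For illustration only, not part of this patch: a minimal sketch of how a consumer could read one of the static rates registered above via clkdev_add_static(). It relies on clk_get_fpi(), which is exported by this file; the declaration is assumed to live in lantiq_soc.h, and error handling is omitted.

    #include <linux/clk.h>
    #include <lantiq_soc.h>        /* assumed to declare clk_get_fpi() */

    static unsigned long example_fpi_rate(void)
    {
        /* returns &cpu_clk_generic[1], whose rate was set by clkdev_add_static() */
        struct clk *fpi = clk_get_fpi();

        return clk_get_rate(fpi);
    }
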
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
new file mode 100644
index 000000000..f135e3035
--- /dev/null
+++ b/arch/mips/lantiq/clk.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_CLK_H__
+#define _LTQ_CLK_H__
+
+#include <linux/clkdev.h>
+
+/* clock speeds */
+#define CLOCK_33M 33333333
+#define CLOCK_60M 60000000
+#define CLOCK_62_5M 62500000
+#define CLOCK_83M 83333333
+#define CLOCK_83_5M 83500000
+#define CLOCK_98_304M 98304000
+#define CLOCK_100M 100000000
+#define CLOCK_111M 111111111
+#define CLOCK_125M 125000000
+#define CLOCK_133M 133333333
+#define CLOCK_150M 150000000
+#define CLOCK_166M 166666666
+#define CLOCK_167M 166666667
+#define CLOCK_196_608M 196608000
+#define CLOCK_200M 200000000
+#define CLOCK_222M 222000000
+#define CLOCK_240M 240000000
+#define CLOCK_250M 250000000
+#define CLOCK_266M 266666666
+#define CLOCK_288M 288888888
+#define CLOCK_300M 300000000
+#define CLOCK_333M 333333333
+#define CLOCK_360M 360000000
+#define CLOCK_393M 393215332
+#define CLOCK_400M 400000000
+#define CLOCK_432M 432000000
+#define CLOCK_450M 450000000
+#define CLOCK_500M 500000000
+#define CLOCK_600M 600000000
+#define CLOCK_666M 666666666
+#define CLOCK_720M 720000000
+
+/* clock out speeds */
+#define CLOCK_32_768K 32768
+#define CLOCK_1_536M 1536000
+#define CLOCK_2_5M 2500000
+#define CLOCK_12M 12000000
+#define CLOCK_24M 24000000
+#define CLOCK_25M 25000000
+#define CLOCK_30M 30000000
+#define CLOCK_40M 40000000
+#define CLOCK_48M 48000000
+#define CLOCK_50M 50000000
+#define CLOCK_60M 60000000
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+ unsigned long *rates;
+ unsigned int module;
+ unsigned int bits;
+ unsigned long (*get_rate) (void);
+ int (*enable) (struct clk *clk);
+ void (*disable) (struct clk *clk);
+ int (*activate) (struct clk *clk);
+ void (*deactivate) (struct clk *clk);
+ void (*reboot) (struct clk *clk);
+};
+
+extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe);
+
+extern unsigned long ltq_danube_cpu_hz(void);
+extern unsigned long ltq_danube_fpi_hz(void);
+extern unsigned long ltq_danube_pp32_hz(void);
+
+extern unsigned long ltq_ar9_cpu_hz(void);
+extern unsigned long ltq_ar9_fpi_hz(void);
+
+extern unsigned long ltq_vr9_cpu_hz(void);
+extern unsigned long ltq_vr9_fpi_hz(void);
+extern unsigned long ltq_vr9_pp32_hz(void);
+
+extern unsigned long ltq_ar10_cpu_hz(void);
+extern unsigned long ltq_ar10_fpi_hz(void);
+extern unsigned long ltq_ar10_pp32_hz(void);
+
+extern unsigned long ltq_grx390_cpu_hz(void);
+extern unsigned long ltq_grx390_fpi_hz(void);
+extern unsigned long ltq_grx390_pp32_hz(void);
+
+#endif
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
new file mode 100644
index 000000000..4e4a28be1
--- /dev/null
+++ b/arch/mips/lantiq/early_printk.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/cpu.h>
+#include <lantiq_soc.h>
+#include <asm/setup.h>
+
+#define ASC_BUF 1024
+#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
+#ifdef __BIG_ENDIAN
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
+#else
+#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
+#endif
+#define TXMASK 0x3F00
+#define TXOFFSET 8
+
+void prom_putchar(char c)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
+ if (c == '\n')
+ ltq_w8('\r', LTQ_ASC_TBUF);
+ ltq_w8(c, LTQ_ASC_TBUF);
+ local_irq_restore(flags);
+}
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 000000000..98da1e031
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 000000000..7b98def10
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/kernel.h>
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+#include <asm/io.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_FALCON "Falcon"
+#define SOC_FALCON_D "Falcon-D"
+#define SOC_FALCON_V "Falcon-V"
+#define SOC_FALCON_M "Falcon-M"
+
+#define COMP_FALCON "lantiq,falcon"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFF000
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+#define SREV_SHIFT 22
+#define SREV_MASK 0x03C00000
+#define TYPE_SHIFT 26
+#define TYPE_MASK 0x3C000000
+
+/* reset, nmi and ejtag exception vectors */
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
+#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
+#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
+
+void __init ltq_soc_nmi_setup(void)
+{
+ extern void (*nmi_handler)(void);
+
+ ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
+}
+
+void __init ltq_soc_ejtag_setup(void)
+{
+ extern void (*ejtag_debug_handler)(void);
+
+ ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
+}
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ u32 type;
+ i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
+ i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
+ i->compatible = COMP_FALCON;
+ i->type = SOC_TYPE_FALCON;
+ sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
+ i->rev & 0x7, (i->srev & 0x3) + 1);
+
+ switch (i->partnum) {
+ case SOC_ID_FALCON:
+ type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
+ switch (type) {
+ case 0:
+ i->name = SOC_FALCON_D;
+ break;
+ case 1:
+ i->name = SOC_FALCON_V;
+ break;
+ case 2:
+ i->name = SOC_FALCON_M;
+ break;
+ default:
+ i->name = SOC_FALCON;
+ break;
+ }
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+
+ board_nmi_handler_setup = ltq_soc_nmi_setup;
+ board_ejtag_handler_setup = ltq_soc_ejtag_setup;
+}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 000000000..261996c23
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <linux/export.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * Dummy implementation. Used to allow platform code to find out what
+ * source was booted from
+ */
+unsigned char ltq_boot_select(void)
+{
+ return BS_SPI;
+}
+
+#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
+#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
+#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
+#define BOOT_PW1 0x4C545100
+#define BOOT_PW2 0x0051544C
+
+#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
+#define WDT_PW1 0x00BE0000
+#define WDT_PW2 0x00DC0000
+
+static void machine_restart(char *command)
+{
+ local_irq_disable();
+
+ /* reboot magic */
+ ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
+ ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
+ ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
+
+ /* watchdog magic */
+ ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
+ ltq_w32(WDT_PW2 |
+ (0x3 << 26) | /* PWL */
+ (0x2 << 24) | /* CLKDIV */
+ (0x1 << 31) | /* enable */
+ (1), /* reload */
+ (void *)WDT_REG_BASE);
+ unreachable();
+}
+
+static void machine_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static void machine_power_off(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = machine_restart;
+ _machine_halt = machine_halt;
+ pm_power_off = machine_power_off;
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 000000000..446a25369
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/of_address.h>
+#include <asm/delay.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+/* infrastructure control register */
+#define SYS1_INFRAC 0x00bc
+/* Configuration fuses for drivers and pll */
+#define STATUS_CONFIG 0x0040
+
+/* GPE frequency selection */
+#define GPPC_OFFSET 24
+#define GPEFREQ_MASK 0x0000C00
+#define GPEFREQ_OFFSET 10
+/* Clock status register */
+#define SYSCTL_CLKS 0x0000
+/* Clock enable register */
+#define SYSCTL_CLKEN 0x0004
+/* Clock clear register */
+#define SYSCTL_CLKCLR 0x0008
+/* Activation Status Register */
+#define SYSCTL_ACTS 0x0020
+/* Activation Register */
+#define SYSCTL_ACT 0x0024
+/* Deactivation Register */
+#define SYSCTL_DEACT 0x0028
+/* reboot Register */
+#define SYSCTL_RBT 0x002c
+/* CPU0 Clock Control Register */
+#define SYS1_CPU0CC 0x0040
+/* HRST_OUT_N Control Register */
+#define SYS1_HRSTOUTC 0x00c0
+/* clock divider bit */
+#define CPU0CC_CPUDIV 0x0001
+
+/* Activation Status Register */
+#define ACTS_ASC0_ACT 0x00001000
+#define ACTS_SSC0 0x00002000
+#define ACTS_ASC1_ACT 0x00000800
+#define ACTS_I2C_ACT 0x00004000
+#define ACTS_P0 0x00010000
+#define ACTS_P1 0x00010000
+#define ACTS_P2 0x00020000
+#define ACTS_P3 0x00020000
+#define ACTS_P4 0x00040000
+#define ACTS_PADCTRL0 0x00100000
+#define ACTS_PADCTRL1 0x00100000
+#define ACTS_PADCTRL2 0x00200000
+#define ACTS_PADCTRL3 0x00200000
+#define ACTS_PADCTRL4 0x00400000
+
+#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
+#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
+#define sysctl_w32_mask(m, clear, set, reg) \
+ sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
+
+#define status_w32(x, y) ltq_w32((x), status_membase + (y))
+#define status_r32(x) ltq_r32(status_membase + (x))
+
+static void __iomem *sysctl_membase[3], *status_membase;
+void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
+
+void falcon_trigger_hrst(int level)
+{
+ sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
+}
+
+static inline void sysctl_wait(struct clk *clk,
+ unsigned int test, unsigned int reg)
+{
+ int err = 1000000;
+
+ do {} while (--err && ((sysctl_r32(clk->module, reg)
+ & clk->bits) != test));
+ if (!err)
+ pr_err("module de/activation failed %d %08X %08X %08X\n",
+ clk->module, clk->bits, test,
+ sysctl_r32(clk->module, reg) & clk->bits);
+}
+
+static int sysctl_activate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+ return 0;
+}
+
+static void sysctl_deactivate(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
+ sysctl_wait(clk, 0, SYSCTL_ACTS);
+}
+
+static int sysctl_clken(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
+ sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
+ return 0;
+}
+
+static void sysctl_clkdis(struct clk *clk)
+{
+ sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
+ sysctl_wait(clk, 0, SYSCTL_CLKS);
+}
+
+static void sysctl_reboot(struct clk *clk)
+{
+ unsigned int act;
+ unsigned int bits;
+
+ act = sysctl_r32(clk->module, SYSCTL_ACT);
+ bits = ~act & clk->bits;
+ if (bits != 0) {
+ sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
+ sysctl_w32(clk->module, bits, SYSCTL_ACT);
+ sysctl_wait(clk, bits, SYSCTL_ACTS);
+ }
+ sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
+ sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
+}
+
+/* enable the ONU core */
+static void falcon_gpe_enable(void)
+{
+ unsigned int freq;
+ unsigned int status;
+
+ /* check whether the clock is already enabled */
+ status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
+ if (status & (1 << (GPPC_OFFSET + 1)))
+ return;
+
+ freq = (status_r32(STATUS_CONFIG) &
+ GPEFREQ_MASK) >>
+ GPEFREQ_OFFSET;
+ if (freq == 0)
+ freq = 1; /* use 625MHz on unfused chip */
+
+ /* apply new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
+ freq << (GPPC_OFFSET + 2), SYS1_INFRAC);
+ udelay(1);
+
+ /* enable new frequency */
+ sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
+ udelay(1);
+}
+
+static inline void clkdev_add_sys(const char *dev, unsigned int module,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->module = module;
+ clk->bits = bits;
+ clk->activate = sysctl_activate;
+ clk->deactivate = sysctl_deactivate;
+ clk->enable = sysctl_clken;
+ clk->disable = sysctl_clkdis;
+ clk->reboot = sysctl_reboot;
+ clkdev_add(&clk->cl);
+}
+
+void __init ltq_soc_init(void)
+{
+ struct device_node *np_status =
+ of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
+ struct device_node *np_sys1 =
+ of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
+ struct device_node *np_syseth =
+ of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
+ struct device_node *np_sysgpe =
+ of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
+ struct resource res_status, res_ebu, res_sys[3];
+ int i;
+
+ /* check if all the core register ranges are available */
+ if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_status, 0, &res_status) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu) ||
+ of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
+ of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
+ of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
+ panic("Failed to get core resources");
+
+ if ((request_mem_region(res_status.start, resource_size(&res_status),
+ res_status.name) < 0) ||
+ (request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name) < 0) ||
+ (request_mem_region(res_sys[0].start,
+ resource_size(&res_sys[0]),
+ res_sys[0].name) < 0) ||
+ (request_mem_region(res_sys[1].start,
+ resource_size(&res_sys[1]),
+ res_sys[1].name) < 0) ||
+ (request_mem_region(res_sys[2].start,
+ resource_size(&res_sys[2]),
+ res_sys[2].name) < 0))
+ pr_err("Failed to request core resources");
+
+ status_membase = ioremap(res_status.start,
+ resource_size(&res_status));
+ ltq_ebu_membase = ioremap(res_ebu.start,
+ resource_size(&res_ebu));
+
+ if (!status_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ for (i = 0; i < 3; i++) {
+ sysctl_membase[i] = ioremap(res_sys[i].start,
+ resource_size(&res_sys[i]));
+ if (!sysctl_membase[i])
+ panic("Failed to remap sysctrl resources");
+ }
+ ltq_sys1_membase = sysctl_membase[0];
+
+ falcon_gpe_enable();
+
+ /* get our 3 static rates for cpu, fpi and io clocks */
+ if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
+ else
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
+
+ /* add our clock domains */
+ clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
+ clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
+ clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
+ clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
+ clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
+ clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
+ clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
+ clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
+ clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
+ clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
+ clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
+ clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT);
+ clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0);
+ clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
+}
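
For illustration only, not part of this patch: a hedged sketch of the consumer side of the clock domains registered by clkdev_add_sys() above. A driver bound to the "1e100c00.serial" node would gate its domain through the Lantiq-specific clk_activate() hook implemented in clk.c earlier in this diff; the probe shape below is illustrative.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <lantiq_soc.h>        /* assumed to declare clk_activate() */

    static int example_asc_probe(struct platform_device *pdev)
    {
        /* matches the "1e100c00.serial" lookup added in ltq_soc_init() */
        struct clk *clk = clk_get(&pdev->dev, NULL);

        if (IS_ERR(clk))
            return PTR_ERR(clk);

        clk_activate(clk);    /* ungates ACTS_ASC0_ACT via sysctl_activate() */
        return 0;
    }
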
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
new file mode 100644
index 000000000..43c2f271e
--- /dev/null
+++ b/arch/mips/lantiq/irq.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/bootinfo.h>
+#include <asm/irq_cpu.h>
+
+#include <lantiq_soc.h>
+#include <irq.h>
+
+/* register definitions - internal irqs */
+#define LTQ_ICU_ISR 0x0000
+#define LTQ_ICU_IER 0x0008
+#define LTQ_ICU_IOSR 0x0010
+#define LTQ_ICU_IRSR 0x0018
+#define LTQ_ICU_IMR 0x0020
+
+#define LTQ_ICU_IM_SIZE 0x28
+
+/* register definitions - external irqs */
+#define LTQ_EIU_EXIN_C 0x0000
+#define LTQ_EIU_EXIN_INIC 0x0004
+#define LTQ_EIU_EXIN_INC 0x0008
+#define LTQ_EIU_EXIN_INEN 0x000C
+
+/* number of external interrupts */
+#define MAX_EIU 6
+
+/* the performance counter */
+#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
+
+/*
+ * irqs generated by devices attached to the EBU need to be acked in
+ * a special manner
+ */
+#define LTQ_ICU_EBU_IRQ 22
+
+#define ltq_icu_w32(vpe, m, x, y) \
+ ltq_w32((x), ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (y))
+
+#define ltq_icu_r32(vpe, m, x) \
+ ltq_r32(ltq_icu_membase[vpe] + m*LTQ_ICU_IM_SIZE + (x))
+
+#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
+#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
+
+/* we have a cascade of 8 irqs */
+#define MIPS_CPU_IRQ_CASCADE 8
+
+static int exin_avail;
+static u32 ltq_eiu_irq[MAX_EIU];
+static void __iomem *ltq_icu_membase[NR_CPUS];
+static void __iomem *ltq_eiu_membase;
+static struct irq_domain *ltq_domain;
+static DEFINE_SPINLOCK(ltq_eiu_lock);
+static DEFINE_RAW_SPINLOCK(ltq_icu_lock);
+static int ltq_perfcount_irq;
+
+int ltq_eiu_get_irq(int exin)
+{
+ if (exin < exin_avail)
+ return ltq_eiu_irq[exin];
+ return -1;
+}
+
+void ltq_disable_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im,
+ ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+ LTQ_ICU_IER);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+void ltq_mask_and_ack_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im,
+ ltq_icu_r32(vpe, im, LTQ_ICU_IER) & ~BIT(offset),
+ LTQ_ICU_IER);
+ ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+static void ltq_ack_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+ for_each_present_cpu(vpe) {
+ ltq_icu_w32(vpe, im, BIT(offset), LTQ_ICU_ISR);
+ }
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+void ltq_enable_irq(struct irq_data *d)
+{
+ unsigned long offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
+ unsigned long im = offset / INT_NUM_IM_OFFSET;
+ unsigned long flags;
+ int vpe;
+
+ offset %= INT_NUM_IM_OFFSET;
+
+ vpe = cpumask_first(irq_data_get_effective_affinity_mask(d));
+
+ /* This shouldn't even be possible, except perhaps during CPU hotplug */
+ if (unlikely(vpe >= nr_cpu_ids))
+ vpe = smp_processor_id();
+
+ raw_spin_lock_irqsave(&ltq_icu_lock, flags);
+
+ ltq_icu_w32(vpe, im, ltq_icu_r32(vpe, im, LTQ_ICU_IER) | BIT(offset),
+ LTQ_ICU_IER);
+
+ raw_spin_unlock_irqrestore(&ltq_icu_lock, flags);
+}
+
+static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+{
+ int i;
+ unsigned long flags;
+
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ int val = 0;
+ int edge = 0;
+
+ switch (type) {
+ case IRQF_TRIGGER_NONE:
+ break;
+ case IRQF_TRIGGER_RISING:
+ val = 1;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ val = 2;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+ val = 3;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ val = 5;
+ break;
+ case IRQF_TRIGGER_LOW:
+ val = 6;
+ break;
+ default:
+ pr_err("invalid type %d for irq %ld\n",
+ type, d->hwirq);
+ return -EINVAL;
+ }
+
+ if (edge)
+ irq_set_handler(d->hwirq, handle_edge_irq);
+
+ spin_lock_irqsave(&ltq_eiu_lock, flags);
+ ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+ (~(7 << (i * 4)))) | (val << (i * 4)),
+ LTQ_EIU_EXIN_C);
+ spin_unlock_irqrestore(&ltq_eiu_lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
+{
+ int i;
+
+ ltq_enable_irq(d);
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ /* by default we are low level triggered */
+ ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
+ /* clear all pending */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
+ LTQ_EIU_EXIN_INC);
+ /* enable */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
+ LTQ_EIU_EXIN_INEN);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void ltq_shutdown_eiu_irq(struct irq_data *d)
+{
+ int i;
+
+ ltq_disable_irq(d);
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
+ /* disable */
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
+ LTQ_EIU_EXIN_INEN);
+ break;
+ }
+ }
+}
+
+#if defined(CONFIG_SMP)
+static int ltq_icu_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask, bool force)
+{
+ struct cpumask tmask;
+
+ if (!cpumask_and(&tmask, cpumask, cpu_online_mask))
+ return -EINVAL;
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip ltq_irq_type = {
+ .name = "icu",
+ .irq_enable = ltq_enable_irq,
+ .irq_disable = ltq_disable_irq,
+ .irq_unmask = ltq_enable_irq,
+ .irq_ack = ltq_ack_irq,
+ .irq_mask = ltq_disable_irq,
+ .irq_mask_ack = ltq_mask_and_ack_irq,
+#if defined(CONFIG_SMP)
+ .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
+};
+
+static struct irq_chip ltq_eiu_type = {
+ .name = "eiu",
+ .irq_startup = ltq_startup_eiu_irq,
+ .irq_shutdown = ltq_shutdown_eiu_irq,
+ .irq_enable = ltq_enable_irq,
+ .irq_disable = ltq_disable_irq,
+ .irq_unmask = ltq_enable_irq,
+ .irq_ack = ltq_ack_irq,
+ .irq_mask = ltq_disable_irq,
+ .irq_mask_ack = ltq_mask_and_ack_irq,
+ .irq_set_type = ltq_eiu_settype,
+#if defined(CONFIG_SMP)
+ .irq_set_affinity = ltq_icu_irq_set_affinity,
+#endif
+};
+
+static void ltq_hw_irq_handler(struct irq_desc *desc)
+{
+ unsigned int module = irq_desc_get_irq(desc) - 2;
+ u32 irq;
+ irq_hw_number_t hwirq;
+ int vpe = smp_processor_id();
+
+ irq = ltq_icu_r32(vpe, module, LTQ_ICU_IOSR);
+ if (irq == 0)
+ return;
+
+ /*
+ * a silicon bug means that only the most significant bit that is
+ * set to 1 is valid; all other bits might be bogus
+ */
+ irq = __fls(irq);
+ hwirq = irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module);
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+ /* if this is an EBU irq, we need to ack it or get a deadlock */
+ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
+ LTQ_EBU_PCC_ISTAT);
+}
+
+static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ struct irq_chip *chip = &ltq_irq_type;
+ struct irq_data *data;
+ int i;
+
+ if (hw < MIPS_CPU_IRQ_CASCADE)
+ return 0;
+
+ for (i = 0; i < exin_avail; i++)
+ if (hw == ltq_eiu_irq[i])
+ chip = &ltq_eiu_type;
+
+ data = irq_get_irq_data(irq);
+
+ irq_data_update_effective_affinity(data, cpumask_of(0));
+
+ irq_set_chip_and_handler(irq, chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+ .map = icu_map,
+};
+
+int __init icu_of_init(struct device_node *node, struct device_node *parent)
+{
+ struct device_node *eiu_node;
+ struct resource res;
+ int i, ret, vpe;
+
+ /* load register regions of available ICUs */
+ for_each_possible_cpu(vpe) {
+ if (of_address_to_resource(node, vpe, &res))
+ panic("Failed to get icu%i memory range", vpe);
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request icu%i memory\n", vpe);
+
+ ltq_icu_membase[vpe] = ioremap(res.start,
+ resource_size(&res));
+
+ if (!ltq_icu_membase[vpe])
+ panic("Failed to remap icu%i memory", vpe);
+ }
+
+ /* turn off all irqs by default */
+ for_each_possible_cpu(vpe) {
+ for (i = 0; i < MAX_IM; i++) {
+ /* make sure all irqs are turned off by default */
+ ltq_icu_w32(vpe, i, 0, LTQ_ICU_IER);
+
+ /* clear all possibly pending interrupts */
+ ltq_icu_w32(vpe, i, ~0, LTQ_ICU_ISR);
+ ltq_icu_w32(vpe, i, ~0, LTQ_ICU_IMR);
+
+ /* clear resend */
+ ltq_icu_w32(vpe, i, 0, LTQ_ICU_IRSR);
+ }
+ }
+
+ mips_cpu_irq_init();
+
+ for (i = 0; i < MAX_IM; i++)
+ irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+
+ ltq_domain = irq_domain_add_linear(node,
+ (MAX_IM * INT_NUM_IM_OFFSET) + MIPS_CPU_IRQ_CASCADE,
+ &irq_domain_ops, 0);
+
+ /* tell oprofile which irq to use */
+ ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
+
+ /* the external interrupts are optional and xway only */
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
+ if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
+ /* find out how many external irq sources we have */
+ exin_avail = of_property_count_u32_elems(eiu_node,
+ "lantiq,eiu-irqs");
+
+ if (exin_avail > MAX_EIU)
+ exin_avail = MAX_EIU;
+
+ ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
+ ltq_eiu_irq, exin_avail);
+ if (ret)
+ panic("failed to load external irq resources");
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request eiu memory");
+
+ ltq_eiu_membase = ioremap(res.start,
+ resource_size(&res));
+ if (!ltq_eiu_membase)
+ panic("Failed to remap eiu memory");
+ }
+
+ return 0;
+}
+
+int get_c0_perfcount_int(void)
+{
+ return ltq_perfcount_irq;
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+unsigned int get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static const struct of_device_id of_irq_ids[] __initconst = {
+ { .compatible = "lantiq,icu", .data = icu_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
new file mode 100644
index 000000000..2729a4b63
--- /dev/null
+++ b/arch/mips/lantiq/prom.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+#include <asm/prom.h>
+
+#include <lantiq.h>
+
+#include "prom.h"
+#include "clk.h"
+
+/* access to the ebu needs to be locked between different drivers */
+DEFINE_SPINLOCK(ebu_lock);
+EXPORT_SYMBOL_GPL(ebu_lock);
+
+/*
+ * this struct is filled by the soc specific detection code and holds
+ * information about the specific soc type, revision and name
+ */
+static struct ltq_soc_info soc_info;
+
+const char *get_system_type(void)
+{
+ return soc_info.sys_type;
+}
+
+int ltq_soc_type(void)
+{
+ return soc_info.type;
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+static void __init prom_init_cmdline(void)
+{
+ int argc = fw_arg0;
+ char **argv = (char **) KSEG1ADDR(fw_arg1);
+ int i;
+
+ arcs_cmdline[0] = '\0';
+
+ for (i = 0; i < argc; i++) {
+ char *p = (char *) KSEG1ADDR(argv[i]);
+
+ if (CPHYSADDR(p) && *p) {
+ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+ }
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+ void *dtb;
+
+ ioport_resource.start = IOPORT_RESOURCE_START;
+ ioport_resource.end = IOPORT_RESOURCE_END;
+ iomem_resource.start = IOMEM_RESOURCE_START;
+ iomem_resource.end = IOMEM_RESOURCE_END;
+
+ set_io_port_base((unsigned long) KSEG1);
+
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
+ else if (&__dtb_start != &__dtb_end)
+ dtb = (void *)__dtb_start;
+ else
+ panic("no dtb found");
+
+ /*
+ * Load the devicetree. This causes the chosen node to be
+ * parsed resulting in our memory appearing
+ */
+ __dt_setup_arch(dtb);
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
+
+void __init prom_init(void)
+{
+ /* call the soc specific detection code and get it to fill soc_info */
+ ltq_soc_detect(&soc_info);
+ snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
+ soc_info.name, soc_info.rev_type);
+ soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
+ pr_info("SoC: %s\n", soc_info.sys_type);
+ prom_init_cmdline();
+
+#if defined(CONFIG_MIPS_MT_SMP)
+ if (register_vsmp_smp_ops())
+ panic("failed to register_vsmp_smp_ops()");
+#endif
+}
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
new file mode 100644
index 000000000..5cd29c6b3
--- /dev/null
+++ b/arch/mips/lantiq/prom.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ */
+
+#ifndef _LTQ_PROM_H__
+#define _LTQ_PROM_H__
+
+#define LTQ_SYS_TYPE_LEN 0x100
+#define LTQ_SYS_REV_LEN 0x10
+
+struct ltq_soc_info {
+ unsigned char *name;
+ unsigned int rev;
+ unsigned char rev_type[LTQ_SYS_REV_LEN];
+ unsigned int srev;
+ unsigned int partnum;
+ unsigned int type;
+ unsigned char sys_type[LTQ_SYS_TYPE_LEN];
+ unsigned char *compatible;
+};
+
+extern void ltq_soc_detect(struct ltq_soc_info *i);
+extern void ltq_soc_init(void);
+
+#endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
new file mode 100644
index 000000000..c0f02dab7
--- /dev/null
+++ b/arch/mips/lantiq/xway/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := prom.o sysctrl.o clk.o dma.o gptu.o dcdc.o
+
+obj-y += vmmc.o
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 000000000..47ad21430
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS 0x10
+
+/* vr9, ar10/grx390 clock */
+#define CGU_SYS_XRX 0x0c
+#define CGU_IF_CLK_AR10 0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_danube_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_240M;
+ break;
+ case 2:
+ clk = CLOCK_222M;
+ break;
+ case 3:
+ clk = CLOCK_133M;
+ break;
+ default:
+ clk = CLOCK_266M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+ return sys / 3;
+ else
+ return sys / 2;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(CGU_SYS_XRX) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 0:
+ clk = CLOCK_500M;
+ break;
+ case 1:
+ clk = CLOCK_432M;
+ break;
+ case 2:
+ clk = CLOCK_288M;
+ break;
+ default:
+ clk = CLOCK_500M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar10_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = (ltq_cgu_r32(CGU_SYS_XRX) >> 8) & 0x1;
+ int freq_div = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7;
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_500M;
+ break;
+ case 1:
+ clksys = CLOCK_600M;
+ break;
+ default:
+ clksys = CLOCK_500M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_ar10_fpi_hz(void)
+{
+ int freq_fpi = (ltq_cgu_r32(CGU_IF_CLK_AR10) >> 25) & 0xf;
+
+ switch (freq_fpi) {
+ case 1:
+ return CLOCK_300M;
+ case 5:
+ return CLOCK_250M;
+ case 2:
+ return CLOCK_150M;
+ case 6:
+ return CLOCK_125M;
+
+ default:
+ return CLOCK_125M;
+ }
+}
+
+unsigned long ltq_ar10_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_grx390_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7);
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_grx390_fpi_hz(void)
+{
+ /* fpi clock is derived from ddr_clk */
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX)) & 0x7);
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys >> 1;
+ }
+}
+
+unsigned long ltq_grx390_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 2:
+ clk = CLOCK_432M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+ return clk;
+}
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
new file mode 100644
index 000000000..4960bee0a
--- /dev/null
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
+ */
+
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG0 0xa
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG1 0xb
+
+#define dcdc_w8(x, y) ltq_w8((x), dcdc_membase + (y))
+#define dcdc_r8(x) ltq_r8(dcdc_membase + (x))
+
+static void __iomem *dcdc_membase;
+
+static int dcdc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dcdc_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dcdc_membase))
+ return PTR_ERR(dcdc_membase);
+
+ dev_info(&pdev->dev, "Core Voltage : %d mV\n",
+ dcdc_r8(DCDC_BIAS_VREG1) * 8);
+
+ return 0;
+}
+
+static const struct of_device_id dcdc_match[] = {
+ { .compatible = "lantiq,dcdc-xrx200" },
+ {},
+};
+
+static struct platform_driver dcdc_driver = {
+ .probe = dcdc_probe,
+ .driver = {
+ .name = "dcdc-xrx200",
+ .of_match_table = dcdc_match,
+ },
+};
+
+int __init dcdc_init(void)
+{
+ int ret = platform_driver_register(&dcdc_driver);
+
+ if (ret)
+ pr_info("dcdc: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(dcdc_init);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
new file mode 100644
index 000000000..ab13e2571
--- /dev/null
+++ b/arch/mips/lantiq/xway/dma.c
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+
+#define LTQ_DMA_ID 0x08
+#define LTQ_DMA_CTRL 0x10
+#define LTQ_DMA_CPOLL 0x14
+#define LTQ_DMA_CS 0x18
+#define LTQ_DMA_CCTRL 0x1C
+#define LTQ_DMA_CDBA 0x20
+#define LTQ_DMA_CDLEN 0x24
+#define LTQ_DMA_CIS 0x28
+#define LTQ_DMA_CIE 0x2C
+#define LTQ_DMA_PS 0x40
+#define LTQ_DMA_PCTRL 0x44
+#define LTQ_DMA_IRNEN 0xf4
+
+#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */
+#define DMA_DESCPT BIT(3) /* descriptor complete irq */
+#define DMA_TX BIT(8) /* TX channel direction */
+#define DMA_CHAN_ON BIT(0) /* channel on / off bit */
+#define DMA_PDEN BIT(6) /* enable packet drop */
+#define DMA_CHAN_RST BIT(1) /* channel reset bit */
+#define DMA_RESET BIT(0) /* global software reset */
+#define DMA_IRQ_ACK 0x7e /* IRQ status bits to acknowledge */
+#define DMA_POLL BIT(31) /* turn on channel polling */
+#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
+#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */
+#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */
+#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */
+#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */
+#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */
+#define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */
+#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */
+
+#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
+#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y))
+#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
+ ltq_dma_membase + (z))
+
+static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
+
+void
+ltq_dma_enable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
+
+void
+ltq_dma_disable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
+
+void
+ltq_dma_ack_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
+
+void
+ltq_dma_open(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_open);
+
+void
+ltq_dma_close(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_close);
+
+static void
+ltq_dma_alloc(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ch->desc = 0;
+ ch->desc_base = dma_alloc_coherent(ch->dev,
+ LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ &ch->phys, GFP_ATOMIC);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
+ ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ wmb();
+ ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
+ ;
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+
+void
+ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
+
+void
+ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
+
+void
+ltq_dma_free(struct ltq_dma_channel *ch)
+{
+ if (!ch->desc_base)
+ return;
+ ltq_dma_close(ch);
+ dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ ch->desc_base, ch->phys);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_free);
+
+void
+ltq_dma_init_port(int p)
+{
+ ltq_dma_w32(p, LTQ_DMA_PS);
+ switch (p) {
+ case DMA_PORT_ETOP:
+ /*
+ * Tell the DMA engine to swap the endianness of data frames and
+ * drop packets if the channel arbitration fails.
+ */
+ ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN,
+ LTQ_DMA_PCTRL);
+ break;
+
+ case DMA_PORT_DEU:
+ ltq_dma_w32((DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT) |
+ (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ltq_dma_init_port);
+
+static int
+ltq_dma_init(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ unsigned int id, nchannels;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ltq_dma_membase))
+ panic("Failed to remap dma resource");
+
+ /* power up and reset the dma engine */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ panic("Failed to get dma clock");
+
+ clk_enable(clk);
+ ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
+
+ usleep_range(1, 10);
+
+ /* disable all interrupts */
+ ltq_dma_w32(0, LTQ_DMA_IRNEN);
+
+ /* reset/configure each channel */
+ id = ltq_dma_r32(LTQ_DMA_ID);
+ nchannels = ((id & DMA_ID_CHNR) >> 20);
+ for (i = 0; i < nchannels; i++) {
+ ltq_dma_w32(i, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ }
+
+ dev_info(&pdev->dev,
+ "Init done - hw rev: %X, ports: %d, channels: %d\n",
+ id & 0x1f, (id >> 16) & 0xf, nchannels);
+
+ return 0;
+}
+
+static const struct of_device_id dma_match[] = {
+ { .compatible = "lantiq,dma-xway" },
+ {},
+};
+
+static struct platform_driver dma_driver = {
+ .probe = ltq_dma_init,
+ .driver = {
+ .name = "dma-xway",
+ .of_match_table = dma_match,
+ },
+};
+
+int __init
+dma_init(void)
+{
+ return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
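
For illustration only, not part of this patch: a hedged sketch of how a client driver would bring up a TX channel with the helpers exported above. The channel number and device pointer are placeholders, and a real user also has to manage the descriptor ring behind ch->desc_base, which is omitted here.

    #include <linux/device.h>
    #include <lantiq_soc.h>
    #include <xway_dma.h>

    static struct ltq_dma_channel example_tx_ch;

    static void example_dma_setup(struct device *dev)
    {
        ltq_dma_init_port(DMA_PORT_ETOP);    /* endianness swap + packet drop */

        example_tx_ch.nr = 7;        /* placeholder channel number */
        example_tx_ch.dev = dev;     /* used for dma_alloc_coherent() */

        ltq_dma_alloc_tx(&example_tx_ch);    /* ring, TX direction, irq enable */
        ltq_dma_open(&example_tx_ch);        /* sets DMA_CHAN_ON */
        ltq_dma_enable_irq(&example_tx_ch);
    }
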
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
new file mode 100644
index 000000000..200fe9ff6
--- /dev/null
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2012 Lantiq GmbH
+ */
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include <lantiq_soc.h>
+#include "../clk.h"
+
+/* the magic ID byte of the core */
+#define GPTU_MAGIC 0x59
+/* clock control register */
+#define GPTU_CLC 0x00
+/* id register */
+#define GPTU_ID 0x08
+/* interrupt node enable */
+#define GPTU_IRNEN 0xf4
+/* interrupt control register */
+#define GPTU_IRCR 0xf8
+/* interrupt capture register */
+#define GPTU_IRNCR 0xfc
+/* there are 3 identical blocks of 2 timers. calculate register offsets */
+#define GPTU_SHIFT(x) (x % 2 ? 4 : 0)
+#define GPTU_BASE(x) (((x >> 1) * 0x20) + 0x10)
+/* timer control register */
+#define GPTU_CON(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x00)
+/* timer auto reload register */
+#define GPTU_RUN(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x08)
+/* timer manual reload register */
+#define GPTU_RLD(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x10)
+/* timer count register */
+#define GPTU_CNT(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x18)
+
+/* GPTU_CON(x) */
+#define CON_CNT BIT(2)
+#define CON_EDGE_ANY (BIT(7) | BIT(6))
+#define CON_SYNC BIT(8)
+#define CON_CLK_INT BIT(10)
+
+/* GPTU_RUN(x) */
+#define RUN_SEN BIT(0)
+#define RUN_RL BIT(2)
+
+/* set clock to runmode */
+#define CLC_RMC BIT(8)
+/* bring core out of suspend */
+#define CLC_SUSPEND BIT(4)
+/* the disable bit */
+#define CLC_DISABLE BIT(0)
+
+#define gptu_w32(x, y) ltq_w32((x), gptu_membase + (y))
+#define gptu_r32(x) ltq_r32(gptu_membase + (x))
+
+enum gptu_timer {
+ TIMER1A = 0,
+ TIMER1B,
+ TIMER2A,
+ TIMER2B,
+ TIMER3A,
+ TIMER3B
+};
+
+static void __iomem *gptu_membase;
+static struct resource irqres[6];
+
+static irqreturn_t timer_irq_handler(int irq, void *priv)
+{
+ int timer = irq - irqres[0].start;
+ gptu_w32(1 << timer, GPTU_IRNCR);
+ return IRQ_HANDLED;
+}
+
+static void gptu_hwinit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_RMC | CLC_SUSPEND, GPTU_CLC);
+}
+
+static void gptu_hwexit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_DISABLE, GPTU_CLC);
+}
+
+static int gptu_enable(struct clk *clk)
+{
+ int ret = request_irq(irqres[clk->bits].start, timer_irq_handler,
+ IRQF_TIMER, "gtpu", NULL);
+ if (ret) {
+ pr_err("gptu: failed to request irq\n");
+ return ret;
+ }
+
+ gptu_w32(CON_CNT | CON_EDGE_ANY | CON_SYNC | CON_CLK_INT,
+ GPTU_CON(clk->bits));
+ gptu_w32(1, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) | BIT(clk->bits), GPTU_IRNEN);
+ gptu_w32(RUN_SEN | RUN_RL, GPTU_RUN(clk->bits));
+ return 0;
+}
+
+static void gptu_disable(struct clk *clk)
+{
+ gptu_w32(0, GPTU_RUN(clk->bits));
+ gptu_w32(0, GPTU_CON(clk->bits));
+ gptu_w32(0, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) & ~BIT(clk->bits), GPTU_IRNEN);
+ free_irq(irqres[clk->bits].start, NULL);
+}
+
+static inline void clkdev_add_gptu(struct device *dev, const char *con,
+ unsigned int timer)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev_name(dev);
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = gptu_enable;
+ clk->disable = gptu_disable;
+ clk->bits = timer;
+ clkdev_add(&clk->cl);
+}
+
+static int gptu_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+
+ if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 6) != 6) {
+ dev_err(&pdev->dev, "Failed to get IRQ list\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* remap gptu register range */
+ gptu_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gptu_membase))
+ return PTR_ERR(gptu_membase);
+
+ /* enable our clock */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
+ clk_enable(clk);
+
+ /* power up the core */
+ gptu_hwinit();
+
+ /* the gptu has an ID register */
+ if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
+ dev_err(&pdev->dev, "Failed to find magic\n");
+ gptu_hwexit();
+ clk_disable(clk);
+ clk_put(clk);
+ return -ENAVAIL;
+ }
+
+ /* register the clocks */
+ clkdev_add_gptu(&pdev->dev, "timer1a", TIMER1A);
+ clkdev_add_gptu(&pdev->dev, "timer1b", TIMER1B);
+ clkdev_add_gptu(&pdev->dev, "timer2a", TIMER2A);
+ clkdev_add_gptu(&pdev->dev, "timer2b", TIMER2B);
+ clkdev_add_gptu(&pdev->dev, "timer3a", TIMER3A);
+ clkdev_add_gptu(&pdev->dev, "timer3b", TIMER3B);
+
+ dev_info(&pdev->dev, "gptu: 6 timers loaded\n");
+
+ return 0;
+}
+
+static const struct of_device_id gptu_match[] = {
+ { .compatible = "lantiq,gptu-xway" },
+ {},
+};
+
+static struct platform_driver gptu_driver = {
+ .probe = gptu_probe,
+ .driver = {
+ .name = "gptu-xway",
+ .of_match_table = gptu_match,
+ },
+};
+
+int __init gptu_init(void)
+{
+ int ret = platform_driver_register(&gptu_driver);
+
+ if (ret)
+ pr_info("gptu: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(gptu_init);
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 000000000..544619754
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GRX200"
+#define SOC_VR9 "xRX200"
+#define SOC_VRX220 "xRX220"
+#define SOC_AR10 "xRX300"
+#define SOC_GRX390 "xRX330"
+
+#define COMP_DANUBE "lantiq,danube"
+#define COMP_TWINPASS "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9 "lantiq,ar9"
+#define COMP_GR9 "lantiq,gr9"
+#define COMP_VR9 "lantiq,vr9"
+#define COMP_AR10 "lantiq,ar10"
+#define COMP_GRX390 "lantiq,grx390"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
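+/*
+ * For illustration, a (hypothetical) MPS_CHIPID value of 0x2016b083 would
+ * decode to partnum 0x16b and rev 2, reported as revision "1.2".
+ */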
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_TWINPASS;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+ i->compatible = COMP_AMAZON_SE;
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_VRX220:
+ i->name = SOC_VRX220;
+ i->type = SOC_TYPE_VRX220;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_ARX362:
+ case SOC_ID_ARX368:
+ case SOC_ID_ARX382:
+ case SOC_ID_ARX388:
+ case SOC_ID_URX388:
+ i->name = SOC_AR10;
+ i->type = SOC_TYPE_AR10;
+ i->compatible = COMP_AR10;
+ break;
+
+ case SOC_ID_GRX383:
+ case SOC_ID_GRX369:
+ case SOC_ID_GRX387:
+ case SOC_ID_GRX389:
+ i->name = SOC_GRX390;
+ i->type = SOC_TYPE_GRX390;
+ i->compatible = COMP_GRX390;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 000000000..084f6caba
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011-2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* interface clock control register (legacy and vr9 offsets) */
+#define CGU_IFCCR 0x0018
+#define CGU_IFCCR_VR9 0x0024
+/* system clock register (legacy SoCs) */
+#define CGU_SYS 0x0010
+/* pci control register (legacy and vr9 offsets) */
+#define CGU_PCICR 0x0034
+#define CGU_PCICR_VR9 0x0038
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+
+/* Legacy PMU register for ar9, ase, danube */
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
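+/*
+ * For illustration: on the legacy SoCs clk->module simply selects the
+ * register pair, 0 -> PMU_PWDCR/PMU_PWDSR, anything else (1 in practice)
+ * -> PMU_PWDCR1/PMU_PWDSR1.
+ */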
+
+
+/* PMU register for ar10 and grx390 */
+
+/* First register set */
+#define PMU_CLK_SR 0x20 /* status */
+#define PMU_CLK_CR_A 0x24 /* Enable */
+#define PMU_CLK_CR_B 0x28 /* Disable */
+/* Second register set */
+#define PMU_CLK_SR1 0x30 /* status */
+#define PMU_CLK_CR1_A 0x34 /* Enable */
+#define PMU_CLK_CR1_B 0x38 /* Disable */
+/* Third register set */
+#define PMU_ANA_SR 0x40 /* status */
+#define PMU_ANA_CR_A 0x44 /* Enable */
+#define PMU_ANA_CR_B 0x48 /* Disable */
+
+/* Status */
+static u32 pmu_clk_sr[] = {
+ PMU_CLK_SR,
+ PMU_CLK_SR1,
+ PMU_ANA_SR,
+};
+
+/* Enable */
+static u32 pmu_clk_cr_a[] = {
+ PMU_CLK_CR_A,
+ PMU_CLK_CR1_A,
+ PMU_ANA_CR_A,
+};
+
+/* Disable */
+static u32 pmu_clk_cr_b[] = {
+ PMU_CLK_CR_B,
+ PMU_CLK_CR1_B,
+ PMU_ANA_CR_B,
+};
+
+#define PWDCR_EN_XRX(x) (pmu_clk_cr_a[(x)])
+#define PWDCR_DIS_XRX(x) (pmu_clk_cr_b[(x)])
+#define PWDSR_XRX(x) (pmu_clk_sr[(x)])
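+/*
+ * For illustration: on ar10/grx390 clk->module indexes the tables above,
+ * so module 0 uses PMU_CLK_CR_A/_B with PMU_CLK_SR, module 1 the *1 set
+ * and module 2 the analog PMU_ANA_* set.
+ */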
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P BIT(0)
+#define PMU_ASE_SDIO BIT(2) /* ASE special */
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_ASC0 BIT(7)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_USIF BIT(7) /* from vr9 until grx390 */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_SDIO BIT(16) /* danube, ar9, vr9 */
+#define PMU_ASC1 BIT(17)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_DEU BIT(20)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DP BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_GPHY3 BIT(26) /* grx390 */
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY0 BIT(29) /* ar10, xrx390 */
+#define PMU_GPHY BIT(30)
+#define PMU_GPHY1 BIT(30) /* ar10, xrx390 */
+#define PMU_PCIE_CLK BIT(31)
+#define PMU_GPHY2 BIT(31) /* ar10, xrx390 */
+
+#define PMU1_PCIE_PHY BIT(0) /* vr9-specific, moved in ar10/grx390 */
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+#define PMU1_CKE BIT(6)
+#define PMU1_PCIE1_CTL BIT(17)
+#define PMU1_PCIE1_PDI BIT(20)
+#define PMU1_PCIE1_MSI BIT(21)
+#define PMU1_PCIE2_CTL BIT(25)
+#define PMU1_PCIE2_PDI BIT(26)
+#define PMU1_PCIE2_MSI BIT(27)
+
+#define PMU_ANALOG_USB0_P BIT(0)
+#define PMU_ANALOG_USB1_P BIT(1)
+#define PMU_ANALOG_PCIE0_P BIT(8)
+#define PMU_ANALOG_PCIE1_P BIT(9)
+#define PMU_ANALOG_PCIE2_P BIT(10)
+#define PMU_ANALOG_DSL_AFE BIT(16)
+#define PMU_ANALOG_DCDC_2V5 BIT(17)
+#define PMU_ANALOG_DCDC_1VX BIT(18)
+#define PMU_ANALOG_DCDC_1V0 BIT(19)
+
+#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x) ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
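+/*
+ * The register offsets default to the legacy layout; ltq_soc_init()
+ * switches them to the vr9 offsets on xRX200-style SoCs.
+ */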
+static u32 ifccr = CGU_IFCCR;
+static u32 pcicr = CGU_PCICR;
+
+static DEFINE_SPINLOCK(g_pmu_lock);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+ do {} while (--retry && (pmu_r32(PMU_PWDSR) & module));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+ do {} while (--retry && (!(pmu_r32(PMU_PWDSR) & module)));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr);
+ return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_EN_XRX(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR_XRX(clk->module)) & clk->bits)));
+
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR(clk->module)) & clk->bits));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ panic("activating PMU module failed!");
+
+ return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_DIS_XRX(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR_XRX(clk->module)) & clk->bits));
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR(clk->module)) & clk->bits)));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+ unsigned int val = ltq_cgu_r32(ifccr);
+ /* set bus clock speed */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ val &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0xe00000;
+ else
+ val |= 0x700000; /* 62.5M */
+ } else {
+ val &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0x800000;
+ else
+ val |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(val, ifccr);
+ pmu_enable(clk);
+ return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr);
+ ltq_cgu_w32((1 << 30), pcicr);
+ return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr);
+ ltq_cgu_w32((1 << 31) | (1 << 30), pcicr);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+ int i;
+
+ /* get the correct rate */
+ for (i = 0; i < 4; i++) {
+ if (clk->rates[i] == clk->rate) {
+ int shift = 14 - (2 * clk->module);
+ int enable = 7 - clk->module;
+ unsigned int val = ltq_cgu_r32(ifccr);
+
+ val &= ~(3 << shift);
+ val |= i << shift;
+ val |= enable;
+ ltq_cgu_w32(val, ifccr);
+ return 0;
+ }
+ }
+ return -1;
+}
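+
+/*
+ * For illustration, the rate index above lands in IFCCR bits 15:14 for
+ * clkout0, 13:12 for clkout1, 11:10 for clkout2 and 9:8 for clkout3.
+ */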
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = pmu_enable;
+ clk->disable = pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ if (deactivate) {
+ /*
+ * Disable it during the initialization. Module should enable
+ * when used
+ */
+ pmu_disable(clk);
+ }
+ clkdev_add(&clk->cl);
+}
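+
+/*
+ * Sketch of how a driver consumes one of these gates through the generic
+ * clk API (this is what gptu_probe() in xway/gptu.c does):
+ *
+ *	clk = clk_get(&pdev->dev, NULL);
+ *	if (!IS_ERR(clk))
+ *		clk_enable(clk);
+ */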
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = cgu_enable;
+ clk->disable = cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ if (clk) {
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+ }
+
+ /* use internal/external bus clock */
+ if (clk_ext) {
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+ }
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+ {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+ {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+ {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+ {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+static void clkdev_add_clkout(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct clk *clk;
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ if (!name)
+ continue;
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(name);
+ continue;
+ }
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+ clk->rate = 0;
+ clk->rates = valid_clkout_rates[i];
+ clk->enable = clkout_enable;
+ clk->module = i;
+ clkdev_add(&clk->cl);
+ }
+}
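+
+/*
+ * Sketch of claiming a clkout pin, assuming the generic lantiq
+ * clk_set_rate()/clk_enable() helpers in ../clk.c:
+ *
+ *	struct clk *clk = clk_get_sys("1f103000.cgu", "clkout2");
+ *
+ *	if (!IS_ERR(clk)) {
+ *		clk_set_rate(clk, CLOCK_25M);
+ *		clk_enable(clk);
+ *	}
+ */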
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+ struct resource res_pmu, res_cgu, res_ebu;
+ struct device_node *np_pmu =
+ of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+ struct device_node *np_cgu =
+ of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+ /* check if all the core register ranges are available */
+ if (!np_pmu || !np_cgu || !np_ebu)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+ of_address_to_resource(np_cgu, 0, &res_cgu) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu))
+ panic("Failed to get core resources");
+
+ if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
+ res_pmu.name) ||
+ !request_mem_region(res_cgu.start, resource_size(&res_cgu),
+ res_cgu.name) ||
+ !request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name))
+ pr_err("Failed to request core resources");
+
+ pmu_membase = ioremap(res_pmu.start, resource_size(&res_pmu));
+ ltq_cgu_membase = ioremap(res_cgu.start,
+ resource_size(&res_cgu));
+ ltq_ebu_membase = ioremap(res_ebu.start,
+ resource_size(&res_ebu));
+ if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our generic xway clocks */
+ clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
+ clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
+ clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
+ clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
+ clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
+ clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
+ clkdev_add_clkout();
+
+ /* add the soc dependent clocks */
+ if (of_machine_is_compatible("lantiq,vr9")) {
+ ifccr = CGU_IFCCR_VR9;
+ pcicr = CGU_PCICR_VR9;
+ } else {
+ clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
+ }
+
+ if (!of_machine_is_compatible("lantiq,ase"))
+ clkdev_add_pci();
+
+ if (of_machine_is_compatible("lantiq,grx390") ||
+ of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY0);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY1);
+ clkdev_add_pmu("1e108000.switch", "gphy2", 0, 0, PMU_GPHY2);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P);
+ /* rc 0 */
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 2, PMU_ANALOG_PCIE0_P);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ /* rc 1 */
+ clkdev_add_pmu("1f700400.phy", "phy", 1, 2, PMU_ANALOG_PCIE1_P);
+ clkdev_add_pmu("19000000.pcie", "msi", 1, 1, PMU1_PCIE1_MSI);
+ clkdev_add_pmu("1f700400.phy", "pdi", 1, 1, PMU1_PCIE1_PDI);
+ clkdev_add_pmu("19000000.pcie", "ctl", 1, 1, PMU1_PCIE1_CTL);
+ }
+
+ if (of_machine_is_compatible("lantiq,ase")) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M,
+ CLOCK_133M, CLOCK_266M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M,
+ CLOCK_133M, CLOCK_133M);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
+ clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+ clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,grx390")) {
+ clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(),
+ ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz());
+ clkdev_add_pmu("1e108000.switch", "gphy3", 0, 0, PMU_GPHY3);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ /* rc 2 */
+ clkdev_add_pmu("1f106a00.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P);
+ clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
+ clkdev_add_pmu("1f106a00.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
+ clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ } else if (of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(),
+ ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
+ PMU_PPE_DP | PMU_PPE_TC);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,vr9")) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
+
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
+ PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_PPE_TOP);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,ar9")) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz(), CLOCK_250M);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ }
+}
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
new file mode 100644
index 000000000..7a14da8d9
--- /dev/null
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/export.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/dma-mapping.h>
+
+#include <lantiq_soc.h>
+
+static unsigned int *cp1_base;
+
+unsigned int *ltq_get_cp1_base(void)
+{
+ if (!cp1_base)
+ panic("no cp1 base was set\n");
+
+ return cp1_base;
+}
+EXPORT_SYMBOL(ltq_get_cp1_base);
+
+static int vmmc_probe(struct platform_device *pdev)
+{
+#define CP1_SIZE (1 << 20)
+ int gpio_count;
+ dma_addr_t dma;
+
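+ /*
+ * Reserve 1 MiB of coherent memory for the voice coprocessor (CP1);
+ * CPHYSADDR converts the returned KSEG virtual address to the physical
+ * address that ltq_get_cp1_base() hands out.
+ */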
+ cp1_base =
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
+
+ gpio_count = of_gpio_count(pdev->dev.of_node);
+ while (gpio_count > 0) {
+ enum of_gpio_flags flags;
+ int gpio = of_get_gpio_flags(pdev->dev.of_node,
+ --gpio_count, &flags);
+ if (gpio_request(gpio, "vmmc-relay"))
+ continue;
+ dev_info(&pdev->dev, "requested GPIO %d\n", gpio);
+ gpio_direction_output(gpio,
+ (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
+ }
+
+ dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
+
+ return 0;
+}
+
+static const struct of_device_id vmmc_match[] = {
+ { .compatible = "lantiq,vmmc-xway" },
+ {},
+};
+
+static struct platform_driver vmmc_driver = {
+ .probe = vmmc_probe,
+ .driver = {
+ .name = "lantiq,vmmc",
+ .of_match_table = vmmc_match,
+ },
+};
+builtin_platform_driver(vmmc_driver);