Diffstat (limited to 'arch/mips/lantiq/xway')
-rw-r--r--   arch/mips/lantiq/xway/Makefile      4
-rw-r--r--   arch/mips/lantiq/xway/clk.c       351
-rw-r--r--   arch/mips/lantiq/xway/dcdc.c       60
-rw-r--r--   arch/mips/lantiq/xway/dma.c       300
-rw-r--r--   arch/mips/lantiq/xway/gptu.c      208
-rw-r--r--   arch/mips/lantiq/xway/prom.c      144
-rw-r--r--   arch/mips/lantiq/xway/sysctrl.c   592
-rw-r--r--   arch/mips/lantiq/xway/vmmc.c       70
8 files changed, 1729 insertions(+), 0 deletions(-)
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
new file mode 100644
index 000000000..c0f02dab7
--- /dev/null
+++ b/arch/mips/lantiq/xway/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := prom.o sysctrl.o clk.o dma.o gptu.o dcdc.o
+
+obj-y += vmmc.o
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 000000000..47ad21430
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/io.h>
+#include <linux/export.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/div64.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+
+static unsigned int ram_clocks[] = {
+ CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
+#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
+
+/* legacy xway clock */
+#define CGU_SYS 0x10
+
+/* vr9, ar10/grx390 clock */
+#define CGU_SYS_XRX 0x0c
+#define CGU_IF_CLK_AR10 0x24
+
+unsigned long ltq_danube_fpi_hz(void)
+{
+ unsigned long ddr_clock = DDR_HZ;
+
+ if (ltq_cgu_r32(CGU_SYS) & 0x40)
+ return ddr_clock >> 1;
+ return ddr_clock;
+}
+
+unsigned long ltq_danube_cpu_hz(void)
+{
+ switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
+ case 0:
+ return CLOCK_333M;
+ case 4:
+ return DDR_HZ;
+ case 8:
+ return DDR_HZ << 1;
+ default:
+ return DDR_HZ >> 1;
+ }
+}
+
+unsigned long ltq_danube_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_240M;
+ break;
+ case 2:
+ clk = CLOCK_222M;
+ break;
+ case 3:
+ clk = CLOCK_133M;
+ break;
+ default:
+ clk = CLOCK_266M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar9_sys_hz(void)
+{
+ if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
+ return CLOCK_393M;
+ return CLOCK_333M;
+}
+
+unsigned long ltq_ar9_fpi_hz(void)
+{
+ unsigned long sys = ltq_ar9_sys_hz();
+
+ if (ltq_cgu_r32(CGU_SYS) & BIT(0))
+ return sys / 3;
+ else
+ return sys / 2;
+}
+
+unsigned long ltq_ar9_cpu_hz(void)
+{
+ if (ltq_cgu_r32(CGU_SYS) & BIT(2))
+ return ltq_ar9_fpi_hz();
+ else
+ return ltq_ar9_sys_hz();
+}
+
+unsigned long ltq_vr9_cpu_hz(void)
+{
+ unsigned int cpu_sel;
+ unsigned long clk;
+
+ cpu_sel = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0xf;
+
+ switch (cpu_sel) {
+ case 0:
+ clk = CLOCK_600M;
+ break;
+ case 1:
+ clk = CLOCK_500M;
+ break;
+ case 2:
+ clk = CLOCK_393M;
+ break;
+ case 3:
+ clk = CLOCK_333M;
+ break;
+ case 5:
+ case 6:
+ clk = CLOCK_196_608M;
+ break;
+ case 7:
+ clk = CLOCK_167M;
+ break;
+ case 4:
+ case 8:
+ case 9:
+ clk = CLOCK_125M;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_fpi_hz(void)
+{
+ unsigned int ocp_sel, cpu_clk;
+ unsigned long clk;
+
+ cpu_clk = ltq_vr9_cpu_hz();
+ ocp_sel = ltq_cgu_r32(CGU_SYS_XRX) & 0x3;
+
+ switch (ocp_sel) {
+ case 0:
+ /* OCP ratio 1 */
+ clk = cpu_clk;
+ break;
+ case 2:
+ /* OCP ratio 2 */
+ clk = cpu_clk / 2;
+ break;
+ case 3:
+ /* OCP ratio 2.5 */
+ clk = (cpu_clk * 2) / 5;
+ break;
+ case 4:
+ /* OCP ratio 3 */
+ clk = cpu_clk / 3;
+ break;
+ default:
+ clk = 0;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 0:
+ clk = CLOCK_500M;
+ break;
+ case 1:
+ clk = CLOCK_432M;
+ break;
+ case 2:
+ clk = CLOCK_288M;
+ break;
+ default:
+ clk = CLOCK_500M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_ar10_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = (ltq_cgu_r32(CGU_SYS_XRX) >> 8) & 0x1;
+ int freq_div = (ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7;
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_500M;
+ break;
+ case 1:
+ clksys = CLOCK_600M;
+ break;
+ default:
+ clksys = CLOCK_500M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_ar10_fpi_hz(void)
+{
+ int freq_fpi = (ltq_cgu_r32(CGU_IF_CLK_AR10) >> 25) & 0xf;
+
+ switch (freq_fpi) {
+ case 1:
+ return CLOCK_300M;
+ case 5:
+ return CLOCK_250M;
+ case 2:
+ return CLOCK_150M;
+ case 6:
+ return CLOCK_125M;
+
+ default:
+ return CLOCK_125M;
+ }
+}
+
+unsigned long ltq_ar10_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+
+ return clk;
+}
+
+unsigned long ltq_grx390_cpu_hz(void)
+{
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX) >> 4) & 0x7);
+
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 0:
+ return clksys;
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys;
+ }
+}
+
+unsigned long ltq_grx390_fpi_hz(void)
+{
+ /* fpi clock is derived from ddr_clk */
+ unsigned int clksys;
+ int cpu_fs = ((ltq_cgu_r32(CGU_SYS_XRX) >> 9) & 0x3);
+ int freq_div = ((ltq_cgu_r32(CGU_SYS_XRX)) & 0x7);
+ switch (cpu_fs) {
+ case 0:
+ clksys = CLOCK_600M;
+ break;
+ case 1:
+ clksys = CLOCK_666M;
+ break;
+ case 2:
+ clksys = CLOCK_720M;
+ break;
+ default:
+ clksys = CLOCK_600M;
+ break;
+ }
+
+ switch (freq_div) {
+ case 1:
+ return clksys >> 1;
+ case 2:
+ return clksys >> 2;
+ default:
+ return clksys >> 1;
+ }
+}
+
+unsigned long ltq_grx390_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 0x7;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_250M;
+ break;
+ case 2:
+ clk = CLOCK_432M;
+ break;
+ case 4:
+ clk = CLOCK_400M;
+ break;
+ default:
+ clk = CLOCK_250M;
+ break;
+ }
+ return clk;
+}
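
The Danube clock decode above boils down to three mask-and-shift operations on CGU_SYS. Below is a standalone sketch of that decode, compiled outside the kernel; the register value 0x45 is invented, and the Hz figures only approximate the CLOCK_* constants from <lantiq_soc.h>.

#include <stdio.h>

/* approximate stand-ins for the CLOCK_* constants in <lantiq_soc.h> */
#define CLOCK_83M   83333333UL
#define CLOCK_111M 111111111UL
#define CLOCK_133M 133333333UL
#define CLOCK_167M 166666667UL
#define CLOCK_333M 333333333UL

static const unsigned long ram_clocks[] = {
	CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };

int main(void)
{
	unsigned long cgu_sys = 0x45;				/* invented register value */
	unsigned long ddr = ram_clocks[cgu_sys & 0x3];		/* DDR_HZ */
	unsigned long fpi = (cgu_sys & 0x40) ? ddr >> 1 : ddr;	/* ltq_danube_fpi_hz() */
	unsigned long cpu;

	switch (cgu_sys & 0xc) {				/* ltq_danube_cpu_hz() */
	case 0:
		cpu = CLOCK_333M;
		break;
	case 4:
		cpu = ddr;
		break;
	case 8:
		cpu = ddr << 1;
		break;
	default:
		cpu = ddr >> 1;
		break;
	}

	printf("ddr=%lu fpi=%lu cpu=%lu\n", ddr, fpi, cpu);
	return 0;
}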
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
new file mode 100644
index 000000000..4960bee0a
--- /dev/null
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
+ */
+
+#include <linux/ioport.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG0 0xa
+/* Bias and regulator Setup Register */
+#define DCDC_BIAS_VREG1 0xb
+
+#define dcdc_w8(x, y) ltq_w8((x), dcdc_membase + (y))
+#define dcdc_r8(x) ltq_r8(dcdc_membase + (x))
+
+static void __iomem *dcdc_membase;
+
+static int dcdc_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dcdc_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dcdc_membase))
+ return PTR_ERR(dcdc_membase);
+
+ dev_info(&pdev->dev, "Core Voltage : %d mV\n",
+ dcdc_r8(DCDC_BIAS_VREG1) * 8);
+
+ return 0;
+}
+
+static const struct of_device_id dcdc_match[] = {
+ { .compatible = "lantiq,dcdc-xrx200" },
+ {},
+};
+
+static struct platform_driver dcdc_driver = {
+ .probe = dcdc_probe,
+ .driver = {
+ .name = "dcdc-xrx200",
+ .of_match_table = dcdc_match,
+ },
+};
+
+int __init dcdc_init(void)
+{
+ int ret = platform_driver_register(&dcdc_driver);
+
+ if (ret)
+ pr_info("dcdc: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(dcdc_init);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
new file mode 100644
index 000000000..f8eedeb15
--- /dev/null
+++ b/arch/mips/lantiq/xway/dma.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+
+#include <lantiq_soc.h>
+#include <xway_dma.h>
+
+#define LTQ_DMA_ID 0x08
+#define LTQ_DMA_CTRL 0x10
+#define LTQ_DMA_CPOLL 0x14
+#define LTQ_DMA_CS 0x18
+#define LTQ_DMA_CCTRL 0x1C
+#define LTQ_DMA_CDBA 0x20
+#define LTQ_DMA_CDLEN 0x24
+#define LTQ_DMA_CIS 0x28
+#define LTQ_DMA_CIE 0x2C
+#define LTQ_DMA_PS 0x40
+#define LTQ_DMA_PCTRL 0x44
+#define LTQ_DMA_IRNEN 0xf4
+
+#define DMA_ID_CHNR GENMASK(26, 20) /* channel number */
+#define DMA_DESCPT BIT(3) /* descriptor complete irq */
+#define DMA_TX BIT(8) /* TX channel direction */
+#define DMA_CHAN_ON BIT(0) /* channel on / off bit */
+#define DMA_PDEN BIT(6) /* enable packet drop */
+#define DMA_CHAN_RST BIT(1) /* channel reset bit */
+#define DMA_RESET BIT(0) /* reset the dma engine */
+#define DMA_IRQ_ACK 0x7e /* acknowledge all channel irqs */
+#define DMA_POLL BIT(31) /* turn on channel polling */
+#define DMA_CLK_DIV4 BIT(6) /* polling clock divider */
+#define DMA_PCTRL_2W_BURST 0x1 /* 2 word burst length */
+#define DMA_PCTRL_4W_BURST 0x2 /* 4 word burst length */
+#define DMA_PCTRL_8W_BURST 0x3 /* 8 word burst length */
+#define DMA_TX_BURST_SHIFT 4 /* tx burst shift */
+#define DMA_RX_BURST_SHIFT 2 /* rx burst shift */
+#define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */
+#define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel weight */
+
+#define ltq_dma_r32(x) ltq_r32(ltq_dma_membase + (x))
+#define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y))
+#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
+ ltq_dma_membase + (z))
+
+static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
+
+void
+ltq_dma_enable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
+
+void
+ltq_dma_disable_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
+
+void
+ltq_dma_ack_irq(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
+
+void
+ltq_dma_open(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_open);
+
+void
+ltq_dma_close(struct ltq_dma_channel *ch)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&ltq_dma_lock, flag);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+ spin_unlock_irqrestore(&ltq_dma_lock, flag);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_close);
+
+static void
+ltq_dma_alloc(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ch->desc = 0;
+ ch->desc_base = dma_alloc_coherent(ch->dev,
+ LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ &ch->phys, GFP_ATOMIC);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(ch->nr, LTQ_DMA_CS);
+ ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
+ ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ wmb();
+ ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
+ ;
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+
+void
+ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
+
+void
+ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
+{
+ unsigned long flags;
+
+ ltq_dma_alloc(ch);
+
+ spin_lock_irqsave(&ltq_dma_lock, flags);
+ ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
+ ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+ ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
+ spin_unlock_irqrestore(&ltq_dma_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
+
+void
+ltq_dma_free(struct ltq_dma_channel *ch)
+{
+ if (!ch->desc_base)
+ return;
+ ltq_dma_close(ch);
+ dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ ch->desc_base, ch->phys);
+}
+EXPORT_SYMBOL_GPL(ltq_dma_free);
+
+void
+ltq_dma_init_port(int p, int tx_burst, int rx_burst)
+{
+ ltq_dma_w32(p, LTQ_DMA_PS);
+ switch (p) {
+ case DMA_PORT_ETOP:
+ /*
+ * Tell the DMA engine to swap the endianness of data frames and
+ * drop packets if the channel arbitration fails.
+ */
+ ltq_dma_w32_mask(0, (DMA_ETOP_ENDIANNESS | DMA_PDEN),
+ LTQ_DMA_PCTRL);
+ break;
+
+ default:
+ break;
+ }
+
+ switch (rx_burst) {
+ case 8:
+ ltq_dma_w32_mask(0x0c, (DMA_PCTRL_8W_BURST << DMA_RX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ case 4:
+ ltq_dma_w32_mask(0x0c, (DMA_PCTRL_4W_BURST << DMA_RX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ case 2:
+ ltq_dma_w32_mask(0x0c, (DMA_PCTRL_2W_BURST << DMA_RX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ default:
+ break;
+ }
+
+ switch (tx_burst) {
+ case 8:
+ ltq_dma_w32_mask(0x30, (DMA_PCTRL_8W_BURST << DMA_TX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ case 4:
+ ltq_dma_w32_mask(0x30, (DMA_PCTRL_4W_BURST << DMA_TX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ case 2:
+ ltq_dma_w32_mask(0x30, (DMA_PCTRL_2W_BURST << DMA_TX_BURST_SHIFT),
+ LTQ_DMA_PCTRL);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ltq_dma_init_port);
+
+static int
+ltq_dma_init(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ unsigned int id, nchannels;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ltq_dma_membase))
+ panic("Failed to remap dma resource");
+
+ /* power up and reset the dma engine */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ panic("Failed to get dma clock");
+
+ clk_enable(clk);
+ ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
+
+ usleep_range(1, 10);
+
+ /* disable all interrupts */
+ ltq_dma_w32(0, LTQ_DMA_IRNEN);
+
+ /* reset/configure each channel */
+ id = ltq_dma_r32(LTQ_DMA_ID);
+ nchannels = ((id & DMA_ID_CHNR) >> 20);
+ for (i = 0; i < nchannels; i++) {
+ ltq_dma_w32(i, LTQ_DMA_CS);
+ ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL);
+ ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
+ ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
+ }
+
+ dev_info(&pdev->dev,
+ "Init done - hw rev: %X, ports: %d, channels: %d\n",
+ id & 0x1f, (id >> 16) & 0xf, nchannels);
+
+ return 0;
+}
+
+static const struct of_device_id dma_match[] = {
+ { .compatible = "lantiq,dma-xway" },
+ {},
+};
+
+static struct platform_driver dma_driver = {
+ .probe = ltq_dma_init,
+ .driver = {
+ .name = "dma-xway",
+ .of_match_table = dma_match,
+ },
+};
+
+int __init
+dma_init(void)
+{
+ return platform_driver_register(&dma_driver);
+}
+
+postcore_initcall(dma_init);
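
The exported channel helpers are intended to be driven by client drivers such as the etop ethernet driver. A minimal sketch of the expected call order for one RX channel follows (hypothetical client code: the channel number, the irq number and the handler are assumptions, and descriptor servicing is omitted).

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <xway_dma.h>

static struct ltq_dma_channel example_rx_ch;

static irqreturn_t example_rx_irq(int irq, void *priv)
{
	/* clear the channel's pending bits, then service descriptors */
	ltq_dma_ack_irq(&example_rx_ch);
	return IRQ_HANDLED;
}

static int example_rx_setup(struct platform_device *pdev, int chan, int irq)
{
	int err;

	example_rx_ch.nr = chan;	/* channel number, taken from SoC docs/DT */
	example_rx_ch.dev = &pdev->dev;

	/* allocate the descriptor ring and unmask the channel irq */
	ltq_dma_alloc_rx(&example_rx_ch);

	err = request_irq(irq, example_rx_irq, 0, "example-rx", NULL);
	if (err)
		return err;

	ltq_dma_open(&example_rx_ch);		/* sets DMA_CHAN_ON */
	ltq_dma_enable_irq(&example_rx_ch);
	return 0;
}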
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
new file mode 100644
index 000000000..200fe9ff6
--- /dev/null
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2012 Lantiq GmbH
+ */
+
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+
+#include <lantiq_soc.h>
+#include "../clk.h"
+
+/* the magic ID byte of the core */
+#define GPTU_MAGIC 0x59
+/* clock control register */
+#define GPTU_CLC 0x00
+/* id register */
+#define GPTU_ID 0x08
+/* interrupt node enable */
+#define GPTU_IRNEN 0xf4
+/* interrupt control register */
+#define GPTU_IRCR 0xf8
+/* interrupt capture register */
+#define GPTU_IRNCR 0xfc
+/* there are 3 identical blocks of 2 timers. calculate register offsets */
+#define GPTU_SHIFT(x) ((x) % 2 ? 4 : 0)
+#define GPTU_BASE(x) ((((x) >> 1) * 0x20) + 0x10)
+/* timer control register */
+#define GPTU_CON(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x00)
+/* timer auto reload register */
+#define GPTU_RUN(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x08)
+/* timer manual reload register */
+#define GPTU_RLD(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x10)
+/* timer count register */
+#define GPTU_CNT(x) (GPTU_BASE(x) + GPTU_SHIFT(x) + 0x18)
+
+/* GPTU_CON(x) */
+#define CON_CNT BIT(2)
+#define CON_EDGE_ANY (BIT(7) | BIT(6))
+#define CON_SYNC BIT(8)
+#define CON_CLK_INT BIT(10)
+
+/* GPTU_RUN(x) */
+#define RUN_SEN BIT(0)
+#define RUN_RL BIT(2)
+
+/* set clock to runmode */
+#define CLC_RMC BIT(8)
+/* bring core out of suspend */
+#define CLC_SUSPEND BIT(4)
+/* the disable bit */
+#define CLC_DISABLE BIT(0)
+
+#define gptu_w32(x, y) ltq_w32((x), gptu_membase + (y))
+#define gptu_r32(x) ltq_r32(gptu_membase + (x))
+
+enum gptu_timer {
+ TIMER1A = 0,
+ TIMER1B,
+ TIMER2A,
+ TIMER2B,
+ TIMER3A,
+ TIMER3B
+};
+
+static void __iomem *gptu_membase;
+static struct resource irqres[6];
+
+static irqreturn_t timer_irq_handler(int irq, void *priv)
+{
+ int timer = irq - irqres[0].start;
+ gptu_w32(1 << timer, GPTU_IRNCR);
+ return IRQ_HANDLED;
+}
+
+static void gptu_hwinit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_RMC | CLC_SUSPEND, GPTU_CLC);
+}
+
+static void gptu_hwexit(void)
+{
+ gptu_w32(0x00, GPTU_IRNEN);
+ gptu_w32(0xff, GPTU_IRNCR);
+ gptu_w32(CLC_DISABLE, GPTU_CLC);
+}
+
+static int gptu_enable(struct clk *clk)
+{
+ int ret = request_irq(irqres[clk->bits].start, timer_irq_handler,
+ IRQF_TIMER, "gptu", NULL);
+ if (ret) {
+ pr_err("gptu: failed to request irq\n");
+ return ret;
+ }
+
+ gptu_w32(CON_CNT | CON_EDGE_ANY | CON_SYNC | CON_CLK_INT,
+ GPTU_CON(clk->bits));
+ gptu_w32(1, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) | BIT(clk->bits), GPTU_IRNEN);
+ gptu_w32(RUN_SEN | RUN_RL, GPTU_RUN(clk->bits));
+ return 0;
+}
+
+static void gptu_disable(struct clk *clk)
+{
+ gptu_w32(0, GPTU_RUN(clk->bits));
+ gptu_w32(0, GPTU_CON(clk->bits));
+ gptu_w32(0, GPTU_RLD(clk->bits));
+ gptu_w32(gptu_r32(GPTU_IRNEN) & ~BIT(clk->bits), GPTU_IRNEN);
+ free_irq(irqres[clk->bits].start, NULL);
+}
+
+static inline void clkdev_add_gptu(struct device *dev, const char *con,
+ unsigned int timer)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev_name(dev);
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = gptu_enable;
+ clk->disable = gptu_disable;
+ clk->bits = timer;
+ clkdev_add(&clk->cl);
+}
+
+static int gptu_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+
+ if (of_irq_to_resource_table(pdev->dev.of_node, irqres, 6) != 6) {
+ dev_err(&pdev->dev, "Failed to get IRQ list\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* remap gptu register range */
+ gptu_membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gptu_membase))
+ return PTR_ERR(gptu_membase);
+
+ /* enable our clock */
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
+ clk_enable(clk);
+
+ /* power up the core */
+ gptu_hwinit();
+
+ /* the gptu has an ID register */
+ if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
+ dev_err(&pdev->dev, "Failed to find magic\n");
+ gptu_hwexit();
+ clk_disable(clk);
+ clk_put(clk);
+ return -ENAVAIL;
+ }
+
+ /* register the clocks */
+ clkdev_add_gptu(&pdev->dev, "timer1a", TIMER1A);
+ clkdev_add_gptu(&pdev->dev, "timer1b", TIMER1B);
+ clkdev_add_gptu(&pdev->dev, "timer2a", TIMER2A);
+ clkdev_add_gptu(&pdev->dev, "timer2b", TIMER2B);
+ clkdev_add_gptu(&pdev->dev, "timer3a", TIMER3A);
+ clkdev_add_gptu(&pdev->dev, "timer3b", TIMER3B);
+
+ dev_info(&pdev->dev, "gptu: 6 timers loaded\n");
+
+ return 0;
+}
+
+static const struct of_device_id gptu_match[] = {
+ { .compatible = "lantiq,gptu-xway" },
+ {},
+};
+
+static struct platform_driver gptu_driver = {
+ .probe = gptu_probe,
+ .driver = {
+ .name = "gptu-xway",
+ .of_match_table = gptu_match,
+ },
+};
+
+int __init gptu_init(void)
+{
+ int ret = platform_driver_register(&gptu_driver);
+
+ if (ret)
+ pr_info("gptu: Error registering platform driver\n");
+ return ret;
+}
+
+arch_initcall(gptu_init);
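
gptu_probe() publishes the six timers as clkdev entries keyed on the GPTU device name with con_ids "timer1a" through "timer3b", so another driver starts a timer with a plain clk lookup and enable. A rough consumer sketch (hypothetical; it assumes the caller already holds a pointer to the GPTU device):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_start_timer1a(struct device *gptu_dev)
{
	struct clk *t = clk_get(gptu_dev, "timer1a");

	if (IS_ERR(t))
		return PTR_ERR(t);

	/* runs gptu_enable(): requests the timer irq, programs GPTU_CON
	 * and kicks the counter via GPTU_RUN */
	return clk_enable(t);
}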
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 000000000..544619754
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/export.h>
+#include <linux/clk.h>
+#include <asm/bootinfo.h>
+#include <asm/time.h>
+
+#include <lantiq_soc.h>
+
+#include "../prom.h"
+
+#define SOC_DANUBE "Danube"
+#define SOC_TWINPASS "Twinpass"
+#define SOC_AMAZON_SE "Amazon_SE"
+#define SOC_AR9 "AR9"
+#define SOC_GR9 "GRX200"
+#define SOC_VR9 "xRX200"
+#define SOC_VRX220 "xRX220"
+#define SOC_AR10 "xRX300"
+#define SOC_GRX390 "xRX330"
+
+#define COMP_DANUBE "lantiq,danube"
+#define COMP_TWINPASS "lantiq,twinpass"
+#define COMP_AMAZON_SE "lantiq,ase"
+#define COMP_AR9 "lantiq,ar9"
+#define COMP_GR9 "lantiq,gr9"
+#define COMP_VR9 "lantiq,vr9"
+#define COMP_AR10 "lantiq,ar10"
+#define COMP_GRX390 "lantiq,grx390"
+
+#define PART_SHIFT 12
+#define PART_MASK 0x0FFFFFFF
+#define REV_SHIFT 28
+#define REV_MASK 0xF0000000
+
+void __init ltq_soc_detect(struct ltq_soc_info *i)
+{
+ i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
+ i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
+ sprintf(i->rev_type, "1.%d", i->rev);
+ switch (i->partnum) {
+ case SOC_ID_DANUBE1:
+ case SOC_ID_DANUBE2:
+ i->name = SOC_DANUBE;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_DANUBE;
+ break;
+
+ case SOC_ID_TWINPASS:
+ i->name = SOC_TWINPASS;
+ i->type = SOC_TYPE_DANUBE;
+ i->compatible = COMP_TWINPASS;
+ break;
+
+ case SOC_ID_ARX188:
+ case SOC_ID_ARX168_1:
+ case SOC_ID_ARX168_2:
+ case SOC_ID_ARX182:
+ i->name = SOC_AR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_AR9;
+ break;
+
+ case SOC_ID_GRX188:
+ case SOC_ID_GRX168:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_AR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_AMAZON_SE_1:
+ case SOC_ID_AMAZON_SE_2:
+#ifdef CONFIG_PCI
+ panic("ase is only supported for non pci kernels");
+#endif
+ i->name = SOC_AMAZON_SE;
+ i->type = SOC_TYPE_AMAZON_SE;
+ i->compatible = COMP_AMAZON_SE;
+ break;
+
+ case SOC_ID_VRX282:
+ case SOC_ID_VRX268:
+ case SOC_ID_VRX288:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX268:
+ case SOC_ID_GRX288:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_VRX268_2:
+ case SOC_ID_VRX288_2:
+ i->name = SOC_VR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_VRX220:
+ i->name = SOC_VRX220;
+ i->type = SOC_TYPE_VRX220;
+ i->compatible = COMP_VR9;
+ break;
+
+ case SOC_ID_GRX282_2:
+ case SOC_ID_GRX288_2:
+ i->name = SOC_GR9;
+ i->type = SOC_TYPE_VR9_2;
+ i->compatible = COMP_GR9;
+ break;
+
+ case SOC_ID_ARX362:
+ case SOC_ID_ARX368:
+ case SOC_ID_ARX382:
+ case SOC_ID_ARX388:
+ case SOC_ID_URX388:
+ i->name = SOC_AR10;
+ i->type = SOC_TYPE_AR10;
+ i->compatible = COMP_AR10;
+ break;
+
+ case SOC_ID_GRX383:
+ case SOC_ID_GRX369:
+ case SOC_ID_GRX387:
+ case SOC_ID_GRX389:
+ i->name = SOC_GRX390;
+ i->type = SOC_TYPE_GRX390;
+ i->compatible = COMP_GRX390;
+ break;
+
+ default:
+ unreachable();
+ break;
+ }
+}
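
ltq_soc_detect() only splits LTQ_MPS_CHIPID into a part number (bits 12..27) and a revision nibble (bits 28..31). A standalone illustration of that split, using an invented register value:

#include <stdio.h>

#define PART_SHIFT 12
#define PART_MASK  0x0FFFFFFF
#define REV_SHIFT  28
#define REV_MASK   0xF0000000

int main(void)
{
	unsigned int chipid = 0x101EF083;	/* invented register value */
	unsigned int partnum = (chipid & PART_MASK) >> PART_SHIFT;
	unsigned int rev = (chipid & REV_MASK) >> REV_SHIFT;

	printf("partnum=0x%x rev=1.%u\n", partnum, rev);
	return 0;
}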
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 000000000..d444a1b98
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2011-2012 John Crispin <john@phrozen.org>
+ * Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
+ */
+
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/clkdev.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <lantiq_soc.h>
+
+#include "../clk.h"
+#include "../prom.h"
+
+/* clock control register for legacy */
+#define CGU_IFCCR 0x0018
+#define CGU_IFCCR_VR9 0x0024
+/* system clock register for legacy */
+#define CGU_SYS 0x0010
+/* pci control register */
+#define CGU_PCICR 0x0034
+#define CGU_PCICR_VR9 0x0038
+/* ephy configuration register */
+#define CGU_EPHY 0x10
+
+/* Legacy PMU register for ar9, ase, danube */
+/* power control register */
+#define PMU_PWDCR 0x1C
+/* power status register */
+#define PMU_PWDSR 0x20
+/* power control register */
+#define PMU_PWDCR1 0x24
+/* power status register */
+#define PMU_PWDSR1 0x28
+/* power control register */
+#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
+/* power status register */
+#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
+
+
+/* PMU register for ar10 and grx390 */
+
+/* First register set */
+#define PMU_CLK_SR 0x20 /* status */
+#define PMU_CLK_CR_A 0x24 /* Enable */
+#define PMU_CLK_CR_B 0x28 /* Disable */
+/* Second register set */
+#define PMU_CLK_SR1 0x30 /* status */
+#define PMU_CLK_CR1_A 0x34 /* Enable */
+#define PMU_CLK_CR1_B 0x38 /* Disable */
+/* Third register set */
+#define PMU_ANA_SR 0x40 /* status */
+#define PMU_ANA_CR_A 0x44 /* Enable */
+#define PMU_ANA_CR_B 0x48 /* Disable */
+
+/* Status */
+static u32 pmu_clk_sr[] = {
+ PMU_CLK_SR,
+ PMU_CLK_SR1,
+ PMU_ANA_SR,
+};
+
+/* Enable */
+static u32 pmu_clk_cr_a[] = {
+ PMU_CLK_CR_A,
+ PMU_CLK_CR1_A,
+ PMU_ANA_CR_A,
+};
+
+/* Disable */
+static u32 pmu_clk_cr_b[] = {
+ PMU_CLK_CR_B,
+ PMU_CLK_CR1_B,
+ PMU_ANA_CR_B,
+};
+
+#define PWDCR_EN_XRX(x) (pmu_clk_cr_a[(x)])
+#define PWDCR_DIS_XRX(x) (pmu_clk_cr_b[(x)])
+#define PWDSR_XRX(x) (pmu_clk_sr[(x)])
+
+/* clock gates that we can en/disable */
+#define PMU_USB0_P BIT(0)
+#define PMU_ASE_SDIO BIT(2) /* ASE special */
+#define PMU_PCI BIT(4)
+#define PMU_DMA BIT(5)
+#define PMU_USB0 BIT(6)
+#define PMU_ASC0 BIT(7)
+#define PMU_EPHY BIT(7) /* ase */
+#define PMU_USIF BIT(7) /* from vr9 until grx390 */
+#define PMU_SPI BIT(8)
+#define PMU_DFE BIT(9)
+#define PMU_EBU BIT(10)
+#define PMU_STP BIT(11)
+#define PMU_GPT BIT(12)
+#define PMU_AHBS BIT(13) /* vr9 */
+#define PMU_FPI BIT(14)
+#define PMU_AHBM BIT(15)
+#define PMU_SDIO BIT(16) /* danube, ar9, vr9 */
+#define PMU_ASC1 BIT(17)
+#define PMU_PPE_QSB BIT(18)
+#define PMU_PPE_SLL01 BIT(19)
+#define PMU_DEU BIT(20)
+#define PMU_PPE_TC BIT(21)
+#define PMU_PPE_EMA BIT(22)
+#define PMU_PPE_DPLUM BIT(23)
+#define PMU_PPE_DP BIT(23)
+#define PMU_PPE_DPLUS BIT(24)
+#define PMU_USB1_P BIT(26)
+#define PMU_GPHY3 BIT(26) /* grx390 */
+#define PMU_USB1 BIT(27)
+#define PMU_SWITCH BIT(28)
+#define PMU_PPE_TOP BIT(29)
+#define PMU_GPHY0 BIT(29) /* ar10, xrx390 */
+#define PMU_GPHY BIT(30)
+#define PMU_GPHY1 BIT(30) /* ar10, xrx390 */
+#define PMU_PCIE_CLK BIT(31)
+#define PMU_GPHY2 BIT(31) /* ar10, xrx390 */
+
+#define PMU1_PCIE_PHY BIT(0) /* vr9-specific, moved in ar10/grx390 */
+#define PMU1_PCIE_CTL BIT(1)
+#define PMU1_PCIE_PDI BIT(4)
+#define PMU1_PCIE_MSI BIT(5)
+#define PMU1_CKE BIT(6)
+#define PMU1_PCIE1_CTL BIT(17)
+#define PMU1_PCIE1_PDI BIT(20)
+#define PMU1_PCIE1_MSI BIT(21)
+#define PMU1_PCIE2_CTL BIT(25)
+#define PMU1_PCIE2_PDI BIT(26)
+#define PMU1_PCIE2_MSI BIT(27)
+
+#define PMU_ANALOG_USB0_P BIT(0)
+#define PMU_ANALOG_USB1_P BIT(1)
+#define PMU_ANALOG_PCIE0_P BIT(8)
+#define PMU_ANALOG_PCIE1_P BIT(9)
+#define PMU_ANALOG_PCIE2_P BIT(10)
+#define PMU_ANALOG_DSL_AFE BIT(16)
+#define PMU_ANALOG_DCDC_2V5 BIT(17)
+#define PMU_ANALOG_DCDC_1VX BIT(18)
+#define PMU_ANALOG_DCDC_1V0 BIT(19)
+
+#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
+#define pmu_r32(x) ltq_r32(pmu_membase + (x))
+
+static void __iomem *pmu_membase;
+void __iomem *ltq_cgu_membase;
+void __iomem *ltq_ebu_membase;
+
+static u32 ifccr = CGU_IFCCR;
+static u32 pcicr = CGU_PCICR;
+
+static DEFINE_SPINLOCK(g_pmu_lock);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_enable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
+ do {} while (--retry && (pmu_r32(PMU_PWDSR) & module));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ panic("activating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_enable);
+
+/* legacy function kept alive to ease clkdev transition */
+void ltq_pmu_disable(unsigned int module)
+{
+ int retry = 1000000;
+
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
+ do {} while (--retry && (!(pmu_r32(PMU_PWDSR) & module)));
+ spin_unlock(&g_pmu_lock);
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+EXPORT_SYMBOL(ltq_pmu_disable);
+
+/* enable a hw clock */
+static int cgu_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | clk->bits, ifccr);
+ return 0;
+}
+
+/* disable a hw clock */
+static void cgu_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~clk->bits, ifccr);
+}
+
+/* enable a clock gate */
+static int pmu_enable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_EN_XRX(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR_XRX(clk->module)) & clk->bits)));
+
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR(clk->module)) & clk->bits));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ panic("activating PMU module failed!");
+
+ return 0;
+}
+
+/* disable a clock gate */
+static void pmu_disable(struct clk *clk)
+{
+ int retry = 1000000;
+
+ if (of_machine_is_compatible("lantiq,ar10")
+ || of_machine_is_compatible("lantiq,grx390")) {
+ pmu_w32(clk->bits, PWDCR_DIS_XRX(clk->module));
+ do {} while (--retry &&
+ (pmu_r32(PWDSR_XRX(clk->module)) & clk->bits));
+ } else {
+ spin_lock(&g_pmu_lock);
+ pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
+ PWDCR(clk->module));
+ do {} while (--retry &&
+ (!(pmu_r32(PWDSR(clk->module)) & clk->bits)));
+ spin_unlock(&g_pmu_lock);
+ }
+
+ if (!retry)
+ pr_warn("deactivating PMU module failed!");
+}
+
+/* the pci enable helper */
+static int pci_enable(struct clk *clk)
+{
+ unsigned int val = ltq_cgu_r32(ifccr);
+ /* set bus clock speed */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ val &= ~0x1f00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0xe00000;
+ else
+ val |= 0x700000; /* 62.5M */
+ } else {
+ val &= ~0xf00000;
+ if (clk->rate == CLOCK_33M)
+ val |= 0x800000;
+ else
+ val |= 0x400000; /* 62.5M */
+ }
+ ltq_cgu_w32(val, ifccr);
+ pmu_enable(clk);
+ return 0;
+}
+
+/* enable the external clock as a source */
+static int pci_ext_enable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) & ~(1 << 16), ifccr);
+ ltq_cgu_w32((1 << 30), pcicr);
+ return 0;
+}
+
+/* disable the external clock as a source */
+static void pci_ext_disable(struct clk *clk)
+{
+ ltq_cgu_w32(ltq_cgu_r32(ifccr) | (1 << 16), ifccr);
+ ltq_cgu_w32((1 << 31) | (1 << 30), pcicr);
+}
+
+/* enable a clockout source */
+static int clkout_enable(struct clk *clk)
+{
+ int i;
+
+ /* get the correct rate */
+ for (i = 0; i < 4; i++) {
+ if (clk->rates[i] == clk->rate) {
+ int shift = 14 - (2 * clk->module);
+ int enable = 7 - clk->module;
+ unsigned int val = ltq_cgu_r32(ifccr);
+
+ val &= ~(3 << shift);
+ val |= i << shift;
+ val |= enable;
+ ltq_cgu_w32(val, ifccr);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* manage the clock gates via PMU */
+static void clkdev_add_pmu(const char *dev, const char *con, bool deactivate,
+ unsigned int module, unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = pmu_enable;
+ clk->disable = pmu_disable;
+ clk->module = module;
+ clk->bits = bits;
+ if (deactivate) {
+ /*
+ * Disable it during initialization; the module's driver is
+ * expected to enable it when used.
+ */
+ pmu_disable(clk);
+ }
+ clkdev_add(&clk->cl);
+}
+
+/* manage the clock generator */
+static void clkdev_add_cgu(const char *dev, const char *con,
+ unsigned int bits)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ return;
+ clk->cl.dev_id = dev;
+ clk->cl.con_id = con;
+ clk->cl.clk = clk;
+ clk->enable = cgu_enable;
+ clk->disable = cgu_disable;
+ clk->bits = bits;
+ clkdev_add(&clk->cl);
+}
+
+/* pci needs its own enable function as the setup is a bit more complex */
+static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
+
+static void clkdev_add_pci(void)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ /* main pci clock */
+ if (clk) {
+ clk->cl.dev_id = "17000000.pci";
+ clk->cl.con_id = NULL;
+ clk->cl.clk = clk;
+ clk->rate = CLOCK_33M;
+ clk->rates = valid_pci_rates;
+ clk->enable = pci_enable;
+ clk->disable = pmu_disable;
+ clk->module = 0;
+ clk->bits = PMU_PCI;
+ clkdev_add(&clk->cl);
+ }
+
+ /* use internal/external bus clock */
+ if (clk_ext) {
+ clk_ext->cl.dev_id = "17000000.pci";
+ clk_ext->cl.con_id = "external";
+ clk_ext->cl.clk = clk_ext;
+ clk_ext->enable = pci_ext_enable;
+ clk_ext->disable = pci_ext_disable;
+ clkdev_add(&clk_ext->cl);
+ }
+}
+
+/* xway socs can generate clocks on gpio pins */
+static unsigned long valid_clkout_rates[4][5] = {
+ {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
+ {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
+ {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
+ {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
+};
+
+static void clkdev_add_clkout(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct clk *clk;
+ char *name;
+
+ name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
+ if (!name)
+ continue;
+ sprintf(name, "clkout%d", i);
+
+ clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ if (!clk) {
+ kfree(name);
+ continue;
+ }
+ clk->cl.dev_id = "1f103000.cgu";
+ clk->cl.con_id = name;
+ clk->cl.clk = clk;
+ clk->rate = 0;
+ clk->rates = valid_clkout_rates[i];
+ clk->enable = clkout_enable;
+ clk->module = i;
+ clkdev_add(&clk->cl);
+ }
+}
+
+/* bring up all register ranges that we need for basic system control */
+void __init ltq_soc_init(void)
+{
+ struct resource res_pmu, res_cgu, res_ebu;
+ struct device_node *np_pmu =
+ of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
+ struct device_node *np_cgu =
+ of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
+ struct device_node *np_ebu =
+ of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
+
+ /* check if all the core register ranges are available */
+ if (!np_pmu || !np_cgu || !np_ebu)
+ panic("Failed to load core nodes from devicetree");
+
+ if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
+ of_address_to_resource(np_cgu, 0, &res_cgu) ||
+ of_address_to_resource(np_ebu, 0, &res_ebu))
+ panic("Failed to get core resources");
+
+ of_node_put(np_pmu);
+ of_node_put(np_cgu);
+ of_node_put(np_ebu);
+
+ if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
+ res_pmu.name) ||
+ !request_mem_region(res_cgu.start, resource_size(&res_cgu),
+ res_cgu.name) ||
+ !request_mem_region(res_ebu.start, resource_size(&res_ebu),
+ res_ebu.name))
+ pr_err("Failed to request core resources");
+
+ pmu_membase = ioremap(res_pmu.start, resource_size(&res_pmu));
+ ltq_cgu_membase = ioremap(res_cgu.start,
+ resource_size(&res_cgu));
+ ltq_ebu_membase = ioremap(res_ebu.start,
+ resource_size(&res_ebu));
+ if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
+ panic("Failed to remap core resources");
+
+ /* make sure to unprotect the memory region where flash is located */
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
+
+ /* add our generic xway clocks */
+ clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
+ clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
+ clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
+ clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
+ clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
+ clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
+ clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
+ clkdev_add_clkout();
+
+ /* add the soc dependent clocks */
+ if (of_machine_is_compatible("lantiq,vr9")) {
+ ifccr = CGU_IFCCR_VR9;
+ pcicr = CGU_PCICR_VR9;
+ } else {
+ clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
+ }
+
+ if (!of_machine_is_compatible("lantiq,ase"))
+ clkdev_add_pci();
+
+ if (of_machine_is_compatible("lantiq,grx390") ||
+ of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY0);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY1);
+ clkdev_add_pmu("1e108000.switch", "gphy2", 0, 0, PMU_GPHY2);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB0_P);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 2, PMU_ANALOG_USB1_P);
+ /* rc 0 */
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 2, PMU_ANALOG_PCIE0_P);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ /* rc 1 */
+ clkdev_add_pmu("1f700400.phy", "phy", 1, 2, PMU_ANALOG_PCIE1_P);
+ clkdev_add_pmu("19000000.pcie", "msi", 1, 1, PMU1_PCIE1_MSI);
+ clkdev_add_pmu("1f700400.phy", "pdi", 1, 1, PMU1_PCIE1_PDI);
+ clkdev_add_pmu("19000000.pcie", "ctl", 1, 1, PMU1_PCIE1_CTL);
+ }
+
+ if (of_machine_is_compatible("lantiq,ase")) {
+ if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
+ clkdev_add_static(CLOCK_266M, CLOCK_133M,
+ CLOCK_133M, CLOCK_266M);
+ else
+ clkdev_add_static(CLOCK_133M, CLOCK_133M,
+ CLOCK_133M, CLOCK_133M);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
+ clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
+ clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,grx390")) {
+ clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(),
+ ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz());
+ clkdev_add_pmu("1e108000.switch", "gphy3", 0, 0, PMU_GPHY3);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ /* rc 2 */
+ clkdev_add_pmu("1f106a00.pcie", "phy", 1, 2, PMU_ANALOG_PCIE2_P);
+ clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
+ clkdev_add_pmu("1f106a00.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
+ clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH | PMU_PPE_DP);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ } else if (of_machine_is_compatible("lantiq,ar10")) {
+ clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(),
+ ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
+ PMU_PPE_DP | PMU_PPE_TC);
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,vr9")) {
+ clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
+ ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1f106800.phy", "phy", 1, 1, PMU1_PCIE_PHY);
+ clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK);
+ clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
+ clkdev_add_pmu("1f106800.phy", "pdi", 1, 1, PMU1_PCIE_PDI);
+ clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
+ clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
+
+ clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
+ clkdev_add_pmu("1e10b308.eth", NULL, 0, 0,
+ PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
+ PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
+ PMU_PPE_QSB | PMU_PPE_TOP);
+ clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ } else if (of_machine_is_compatible("lantiq,ar9")) {
+ clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
+ ltq_ar9_fpi_hz(), CLOCK_250M);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
+ clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
+ clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ } else {
+ clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
+ ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
+ clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
+ clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
+ clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
+ clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
+ clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
+ clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
+ }
+}
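
Each clkdev_add_pmu() call above publishes one gate, matched by device name and con_id, for exactly one consumer to pick up. For example, the xRX200 switch driver would power up its first internal GPHY roughly like this (hypothetical consumer code, error handling trimmed):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_power_up_gphy0(struct device *switch_dev)
{
	/* matches clkdev_add_pmu("1e108000.switch", "gphy0", ...) */
	struct clk *gphy0 = clk_get(switch_dev, "gphy0");

	if (IS_ERR(gphy0))
		return PTR_ERR(gphy0);

	/* runs pmu_enable(): ungates the module in the PMU power-down
	 * register and polls the status register until it reports power-up */
	return clk_enable(gphy0);
}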
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
new file mode 100644
index 000000000..2796e87df
--- /dev/null
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
+
+#include <lantiq_soc.h>
+
+static unsigned int *cp1_base;
+
+unsigned int *ltq_get_cp1_base(void)
+{
+ if (!cp1_base)
+ panic("no cp1 base was set\n");
+
+ return cp1_base;
+}
+EXPORT_SYMBOL(ltq_get_cp1_base);
+
+static int vmmc_probe(struct platform_device *pdev)
+{
+#define CP1_SIZE (1 << 20)
+ struct gpio_desc *gpio;
+ int gpio_count;
+ dma_addr_t dma;
+ int error;
+
+ cp1_base =
+ (void *) CPHYSADDR(dma_alloc_coherent(&pdev->dev, CP1_SIZE,
+ &dma, GFP_KERNEL));
+
+ gpio_count = gpiod_count(&pdev->dev, NULL);
+ while (gpio_count > 0) {
+ gpio = devm_gpiod_get_index(&pdev->dev,
+ NULL, --gpio_count, GPIOD_OUT_HIGH);
+ error = PTR_ERR_OR_ZERO(gpio);
+ if (error) {
+ dev_err(&pdev->dev,
+ "failed to request GPIO idx %d: %d\n",
+ gpio_count, error);
+ continue;
+ }
+
+ gpiod_set_consumer_name(gpio, "vmmc-relay");
+ }
+
+ dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
+
+ return 0;
+}
+
+static const struct of_device_id vmmc_match[] = {
+ { .compatible = "lantiq,vmmc-xway" },
+ {},
+};
+
+static struct platform_driver vmmc_driver = {
+ .probe = vmmc_probe,
+ .driver = {
+ .name = "lantiq,vmmc",
+ .of_match_table = vmmc_match,
+ },
+};
+builtin_platform_driver(vmmc_driver);